1 /* 2 * CDDL HEADER START 3 * 4 * The contents of this file are subject to the terms of the 5 * Common Development and Distribution License (the "License"). 6 * You may not use this file except in compliance with the License. 7 * 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE 9 * or http://www.opensolaris.org/os/licensing. 10 * See the License for the specific language governing permissions 11 * and limitations under the License. 12 * 13 * When distributing Covered Code, include this CDDL HEADER in each 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE. 15 * If applicable, add the following below this CDDL HEADER, with the 16 * fields enclosed by brackets "[]" replaced with your own identifying 17 * information: Portions Copyright [yyyy] [name of copyright owner] 18 * 19 * CDDL HEADER END 20 */ 21 22 /* 23 * Copyright 2008 Sun Microsystems, Inc. All rights reserved. 24 * Use is subject to license terms. 25 */ 26 27 /* 28 * SCSI disk target driver. 29 */ 30 #include <sys/scsi/scsi.h> 31 #include <sys/dkbad.h> 32 #include <sys/dklabel.h> 33 #include <sys/dkio.h> 34 #include <sys/fdio.h> 35 #include <sys/cdio.h> 36 #include <sys/mhd.h> 37 #include <sys/vtoc.h> 38 #include <sys/dktp/fdisk.h> 39 #include <sys/kstat.h> 40 #include <sys/vtrace.h> 41 #include <sys/note.h> 42 #include <sys/thread.h> 43 #include <sys/proc.h> 44 #include <sys/efi_partition.h> 45 #include <sys/var.h> 46 #include <sys/aio_req.h> 47 48 #ifdef __lock_lint 49 #define _LP64 50 #define __amd64 51 #endif 52 53 #if (defined(__fibre)) 54 /* Note: is there a leadville version of the following? 
*/ 55 #include <sys/fc4/fcal_linkapp.h> 56 #endif 57 #include <sys/taskq.h> 58 #include <sys/uuid.h> 59 #include <sys/byteorder.h> 60 #include <sys/sdt.h> 61 62 #include "sd_xbuf.h" 63 64 #include <sys/scsi/targets/sddef.h> 65 #include <sys/cmlb.h> 66 #include <sys/sysevent/eventdefs.h> 67 #include <sys/sysevent/dev.h> 68 69 #include <sys/fm/protocol.h> 70 71 /* 72 * Loadable module info. 73 */ 74 #if (defined(__fibre)) 75 #define SD_MODULE_NAME "SCSI SSA/FCAL Disk Driver" 76 char _depends_on[] = "misc/scsi misc/cmlb drv/fcp"; 77 #else 78 #define SD_MODULE_NAME "SCSI Disk Driver" 79 char _depends_on[] = "misc/scsi misc/cmlb"; 80 #endif 81 82 /* 83 * Define the interconnect type, to allow the driver to distinguish 84 * between parallel SCSI (sd) and fibre channel (ssd) behaviors. 85 * 86 * This is really for backward compatibility. In the future, the driver 87 * should actually check the "interconnect-type" property as reported by 88 * the HBA; however at present this property is not defined by all HBAs, 89 * so we will use this #define (1) to permit the driver to run in 90 * backward-compatibility mode; and (2) to print a notification message 91 * if an FC HBA does not support the "interconnect-type" property. The 92 * behavior of the driver will be to assume parallel SCSI behaviors unless 93 * the "interconnect-type" property is defined by the HBA **AND** has a 94 * value of either INTERCONNECT_FIBRE, INTERCONNECT_SSA, or 95 * INTERCONNECT_FABRIC, in which case the driver will assume Fibre 96 * Channel behaviors (as per the old ssd). (Note that the 97 * INTERCONNECT_1394 and INTERCONNECT_USB types are not supported and 98 * will result in the driver assuming parallel SCSI behaviors.) 99 * 100 * (see common/sys/scsi/impl/services.h) 101 * 102 * Note: For ssd semantics, don't use INTERCONNECT_FABRIC as the default 103 * since some FC HBAs may already support that, and there is some code in 104 * the driver that already looks for it. 
Using INTERCONNECT_FABRIC as the 105 * default would confuse that code, and besides things should work fine 106 * anyways if the FC HBA already reports INTERCONNECT_FABRIC for the 107 * "interconnect_type" property. 108 * 109 */ 110 #if (defined(__fibre)) 111 #define SD_DEFAULT_INTERCONNECT_TYPE SD_INTERCONNECT_FIBRE 112 #else 113 #define SD_DEFAULT_INTERCONNECT_TYPE SD_INTERCONNECT_PARALLEL 114 #endif 115 116 /* 117 * The name of the driver, established from the module name in _init. 118 */ 119 static char *sd_label = NULL; 120 121 /* 122 * Driver name is unfortunately prefixed on some driver.conf properties. 123 */ 124 #if (defined(__fibre)) 125 #define sd_max_xfer_size ssd_max_xfer_size 126 #define sd_config_list ssd_config_list 127 static char *sd_max_xfer_size = "ssd_max_xfer_size"; 128 static char *sd_config_list = "ssd-config-list"; 129 #else 130 static char *sd_max_xfer_size = "sd_max_xfer_size"; 131 static char *sd_config_list = "sd-config-list"; 132 #endif 133 134 /* 135 * Driver global variables 136 */ 137 138 #if (defined(__fibre)) 139 /* 140 * These #defines are to avoid namespace collisions that occur because this 141 * code is currently used to compile two separate driver modules: sd and ssd. 142 * All global variables need to be treated this way (even if declared static) 143 * in order to allow the debugger to resolve the names properly. 144 * It is anticipated that in the near future the ssd module will be obsoleted, 145 * at which time this namespace issue should go away. 
146 */ 147 #define sd_state ssd_state 148 #define sd_io_time ssd_io_time 149 #define sd_failfast_enable ssd_failfast_enable 150 #define sd_ua_retry_count ssd_ua_retry_count 151 #define sd_report_pfa ssd_report_pfa 152 #define sd_max_throttle ssd_max_throttle 153 #define sd_min_throttle ssd_min_throttle 154 #define sd_rot_delay ssd_rot_delay 155 156 #define sd_retry_on_reservation_conflict \ 157 ssd_retry_on_reservation_conflict 158 #define sd_reinstate_resv_delay ssd_reinstate_resv_delay 159 #define sd_resv_conflict_name ssd_resv_conflict_name 160 161 #define sd_component_mask ssd_component_mask 162 #define sd_level_mask ssd_level_mask 163 #define sd_debug_un ssd_debug_un 164 #define sd_error_level ssd_error_level 165 166 #define sd_xbuf_active_limit ssd_xbuf_active_limit 167 #define sd_xbuf_reserve_limit ssd_xbuf_reserve_limit 168 169 #define sd_tr ssd_tr 170 #define sd_reset_throttle_timeout ssd_reset_throttle_timeout 171 #define sd_qfull_throttle_timeout ssd_qfull_throttle_timeout 172 #define sd_qfull_throttle_enable ssd_qfull_throttle_enable 173 #define sd_check_media_time ssd_check_media_time 174 #define sd_wait_cmds_complete ssd_wait_cmds_complete 175 #define sd_label_mutex ssd_label_mutex 176 #define sd_detach_mutex ssd_detach_mutex 177 #define sd_log_buf ssd_log_buf 178 #define sd_log_mutex ssd_log_mutex 179 180 #define sd_disk_table ssd_disk_table 181 #define sd_disk_table_size ssd_disk_table_size 182 #define sd_sense_mutex ssd_sense_mutex 183 #define sd_cdbtab ssd_cdbtab 184 185 #define sd_cb_ops ssd_cb_ops 186 #define sd_ops ssd_ops 187 #define sd_additional_codes ssd_additional_codes 188 #define sd_tgops ssd_tgops 189 190 #define sd_minor_data ssd_minor_data 191 #define sd_minor_data_efi ssd_minor_data_efi 192 193 #define sd_tq ssd_tq 194 #define sd_wmr_tq ssd_wmr_tq 195 #define sd_taskq_name ssd_taskq_name 196 #define sd_wmr_taskq_name ssd_wmr_taskq_name 197 #define sd_taskq_minalloc ssd_taskq_minalloc 198 #define sd_taskq_maxalloc ssd_taskq_maxalloc 
199 200 #define sd_dump_format_string ssd_dump_format_string 201 202 #define sd_iostart_chain ssd_iostart_chain 203 #define sd_iodone_chain ssd_iodone_chain 204 205 #define sd_pm_idletime ssd_pm_idletime 206 207 #define sd_force_pm_supported ssd_force_pm_supported 208 209 #define sd_dtype_optical_bind ssd_dtype_optical_bind 210 211 #define sd_ssc_init ssd_ssc_init 212 #define sd_ssc_send ssd_ssc_send 213 #define sd_ssc_fini ssd_ssc_fini 214 #define sd_ssc_assessment ssd_ssc_assessment 215 #define sd_ssc_post ssd_ssc_post 216 #define sd_ssc_print ssd_ssc_print 217 #define sd_ssc_ereport_post ssd_ssc_ereport_post 218 #define sd_ssc_set_info ssd_ssc_set_info 219 #define sd_ssc_extract_info ssd_ssc_extract_info 220 221 #endif 222 223 #ifdef SDDEBUG 224 int sd_force_pm_supported = 0; 225 #endif /* SDDEBUG */ 226 227 void *sd_state = NULL; 228 int sd_io_time = SD_IO_TIME; 229 int sd_failfast_enable = 1; 230 int sd_ua_retry_count = SD_UA_RETRY_COUNT; 231 int sd_report_pfa = 1; 232 int sd_max_throttle = SD_MAX_THROTTLE; 233 int sd_min_throttle = SD_MIN_THROTTLE; 234 int sd_rot_delay = 4; /* Default 4ms Rotation delay */ 235 int sd_qfull_throttle_enable = TRUE; 236 237 int sd_retry_on_reservation_conflict = 1; 238 int sd_reinstate_resv_delay = SD_REINSTATE_RESV_DELAY; 239 _NOTE(SCHEME_PROTECTS_DATA("safe sharing", sd_reinstate_resv_delay)) 240 241 static int sd_dtype_optical_bind = -1; 242 243 /* Note: the following is not a bug, it really is "sd_" and not "ssd_" */ 244 static char *sd_resv_conflict_name = "sd_retry_on_reservation_conflict"; 245 246 /* 247 * Global data for debug logging. To enable debug printing, sd_component_mask 248 * and sd_level_mask should be set to the desired bit patterns as outlined in 249 * sddef.h. 250 */ 251 uint_t sd_component_mask = 0x0; 252 uint_t sd_level_mask = 0x0; 253 struct sd_lun *sd_debug_un = NULL; 254 uint_t sd_error_level = SCSI_ERR_RETRYABLE; 255 256 /* Note: these may go away in the future... 
*/ 257 static uint32_t sd_xbuf_active_limit = 512; 258 static uint32_t sd_xbuf_reserve_limit = 16; 259 260 static struct sd_resv_reclaim_request sd_tr = { NULL, NULL, NULL, 0, 0, 0 }; 261 262 /* 263 * Timer value used to reset the throttle after it has been reduced 264 * (typically in response to TRAN_BUSY or STATUS_QFULL) 265 */ 266 static int sd_reset_throttle_timeout = SD_RESET_THROTTLE_TIMEOUT; 267 static int sd_qfull_throttle_timeout = SD_QFULL_THROTTLE_TIMEOUT; 268 269 /* 270 * Interval value associated with the media change scsi watch. 271 */ 272 static int sd_check_media_time = 3000000; 273 274 /* 275 * Wait value used for in progress operations during a DDI_SUSPEND 276 */ 277 static int sd_wait_cmds_complete = SD_WAIT_CMDS_COMPLETE; 278 279 /* 280 * sd_label_mutex protects a static buffer used in the disk label 281 * component of the driver 282 */ 283 static kmutex_t sd_label_mutex; 284 285 /* 286 * sd_detach_mutex protects un_layer_count, un_detach_count, and 287 * un_opens_in_progress in the sd_lun structure. 288 */ 289 static kmutex_t sd_detach_mutex; 290 291 _NOTE(MUTEX_PROTECTS_DATA(sd_detach_mutex, 292 sd_lun::{un_layer_count un_detach_count un_opens_in_progress})) 293 294 /* 295 * Global buffer and mutex for debug logging 296 */ 297 static char sd_log_buf[1024]; 298 static kmutex_t sd_log_mutex; 299 300 /* 301 * Structs and globals for recording attached lun information. 302 * This maintains a chain. Each node in the chain represents a SCSI controller. 303 * The structure records the number of luns attached to each target connected 304 * with the controller. 305 * For parallel scsi device only. 
306 */ 307 struct sd_scsi_hba_tgt_lun { 308 struct sd_scsi_hba_tgt_lun *next; 309 dev_info_t *pdip; 310 int nlun[NTARGETS_WIDE]; 311 }; 312 313 /* 314 * Flag to indicate the lun is attached or detached 315 */ 316 #define SD_SCSI_LUN_ATTACH 0 317 #define SD_SCSI_LUN_DETACH 1 318 319 static kmutex_t sd_scsi_target_lun_mutex; 320 static struct sd_scsi_hba_tgt_lun *sd_scsi_target_lun_head = NULL; 321 322 _NOTE(MUTEX_PROTECTS_DATA(sd_scsi_target_lun_mutex, 323 sd_scsi_hba_tgt_lun::next sd_scsi_hba_tgt_lun::pdip)) 324 325 _NOTE(MUTEX_PROTECTS_DATA(sd_scsi_target_lun_mutex, 326 sd_scsi_target_lun_head)) 327 328 /* 329 * "Smart" Probe Caching structs, globals, #defines, etc. 330 * For parallel scsi and non-self-identify device only. 331 */ 332 333 /* 334 * The following resources and routines are implemented to support 335 * "smart" probing, which caches the scsi_probe() results in an array, 336 * in order to help avoid long probe times. 337 */ 338 struct sd_scsi_probe_cache { 339 struct sd_scsi_probe_cache *next; 340 dev_info_t *pdip; 341 int cache[NTARGETS_WIDE]; 342 }; 343 344 static kmutex_t sd_scsi_probe_cache_mutex; 345 static struct sd_scsi_probe_cache *sd_scsi_probe_cache_head = NULL; 346 347 /* 348 * Really we only need protection on the head of the linked list, but 349 * better safe than sorry. 
350 */ 351 _NOTE(MUTEX_PROTECTS_DATA(sd_scsi_probe_cache_mutex, 352 sd_scsi_probe_cache::next sd_scsi_probe_cache::pdip)) 353 354 _NOTE(MUTEX_PROTECTS_DATA(sd_scsi_probe_cache_mutex, 355 sd_scsi_probe_cache_head)) 356 357 358 /* 359 * Vendor specific data name property declarations 360 */ 361 362 #if defined(__fibre) || defined(__i386) ||defined(__amd64) 363 364 static sd_tunables seagate_properties = { 365 SEAGATE_THROTTLE_VALUE, 366 0, 367 0, 368 0, 369 0, 370 0, 371 0, 372 0, 373 0 374 }; 375 376 377 static sd_tunables fujitsu_properties = { 378 FUJITSU_THROTTLE_VALUE, 379 0, 380 0, 381 0, 382 0, 383 0, 384 0, 385 0, 386 0 387 }; 388 389 static sd_tunables ibm_properties = { 390 IBM_THROTTLE_VALUE, 391 0, 392 0, 393 0, 394 0, 395 0, 396 0, 397 0, 398 0 399 }; 400 401 static sd_tunables purple_properties = { 402 PURPLE_THROTTLE_VALUE, 403 0, 404 0, 405 PURPLE_BUSY_RETRIES, 406 PURPLE_RESET_RETRY_COUNT, 407 PURPLE_RESERVE_RELEASE_TIME, 408 0, 409 0, 410 0 411 }; 412 413 static sd_tunables sve_properties = { 414 SVE_THROTTLE_VALUE, 415 0, 416 0, 417 SVE_BUSY_RETRIES, 418 SVE_RESET_RETRY_COUNT, 419 SVE_RESERVE_RELEASE_TIME, 420 SVE_MIN_THROTTLE_VALUE, 421 SVE_DISKSORT_DISABLED_FLAG, 422 0 423 }; 424 425 static sd_tunables maserati_properties = { 426 0, 427 0, 428 0, 429 0, 430 0, 431 0, 432 0, 433 MASERATI_DISKSORT_DISABLED_FLAG, 434 MASERATI_LUN_RESET_ENABLED_FLAG 435 }; 436 437 static sd_tunables pirus_properties = { 438 PIRUS_THROTTLE_VALUE, 439 0, 440 PIRUS_NRR_COUNT, 441 PIRUS_BUSY_RETRIES, 442 PIRUS_RESET_RETRY_COUNT, 443 0, 444 PIRUS_MIN_THROTTLE_VALUE, 445 PIRUS_DISKSORT_DISABLED_FLAG, 446 PIRUS_LUN_RESET_ENABLED_FLAG 447 }; 448 449 #endif 450 451 #if (defined(__sparc) && !defined(__fibre)) || \ 452 (defined(__i386) || defined(__amd64)) 453 454 455 static sd_tunables elite_properties = { 456 ELITE_THROTTLE_VALUE, 457 0, 458 0, 459 0, 460 0, 461 0, 462 0, 463 0, 464 0 465 }; 466 467 static sd_tunables st31200n_properties = { 468 ST31200N_THROTTLE_VALUE, 469 
0, 470 0, 471 0, 472 0, 473 0, 474 0, 475 0, 476 0 477 }; 478 479 #endif /* Fibre or not */ 480 481 static sd_tunables lsi_properties_scsi = { 482 LSI_THROTTLE_VALUE, 483 0, 484 LSI_NOTREADY_RETRIES, 485 0, 486 0, 487 0, 488 0, 489 0, 490 0 491 }; 492 493 static sd_tunables symbios_properties = { 494 SYMBIOS_THROTTLE_VALUE, 495 0, 496 SYMBIOS_NOTREADY_RETRIES, 497 0, 498 0, 499 0, 500 0, 501 0, 502 0 503 }; 504 505 static sd_tunables lsi_properties = { 506 0, 507 0, 508 LSI_NOTREADY_RETRIES, 509 0, 510 0, 511 0, 512 0, 513 0, 514 0 515 }; 516 517 static sd_tunables lsi_oem_properties = { 518 0, 519 0, 520 LSI_OEM_NOTREADY_RETRIES, 521 0, 522 0, 523 0, 524 0, 525 0, 526 0, 527 1 528 }; 529 530 531 532 #if (defined(SD_PROP_TST)) 533 534 #define SD_TST_CTYPE_VAL CTYPE_CDROM 535 #define SD_TST_THROTTLE_VAL 16 536 #define SD_TST_NOTREADY_VAL 12 537 #define SD_TST_BUSY_VAL 60 538 #define SD_TST_RST_RETRY_VAL 36 539 #define SD_TST_RSV_REL_TIME 60 540 541 static sd_tunables tst_properties = { 542 SD_TST_THROTTLE_VAL, 543 SD_TST_CTYPE_VAL, 544 SD_TST_NOTREADY_VAL, 545 SD_TST_BUSY_VAL, 546 SD_TST_RST_RETRY_VAL, 547 SD_TST_RSV_REL_TIME, 548 0, 549 0, 550 0 551 }; 552 #endif 553 554 /* This is similar to the ANSI toupper implementation */ 555 #define SD_TOUPPER(C) (((C) >= 'a' && (C) <= 'z') ? (C) - 'a' + 'A' : (C)) 556 557 /* 558 * Static Driver Configuration Table 559 * 560 * This is the table of disks which need throttle adjustment (or, perhaps 561 * something else as defined by the flags at a future time.) device_id 562 * is a string consisting of concatenated vid (vendor), pid (product/model) 563 * and revision strings as defined in the scsi_inquiry structure. Offsets of 564 * the parts of the string are as defined by the sizes in the scsi_inquiry 565 * structure. Device type is searched as far as the device_id string is 566 * defined. Flags defines which values are to be set in the driver from the 567 * properties list. 
568 * 569 * Entries below which begin and end with a "*" are a special case. 570 * These do not have a specific vendor, and the string which follows 571 * can appear anywhere in the 16 byte PID portion of the inquiry data. 572 * 573 * Entries below which begin and end with a " " (blank) are a special 574 * case. The comparison function will treat multiple consecutive blanks 575 * as equivalent to a single blank. For example, this causes a 576 * sd_disk_table entry of " NEC CDROM " to match a device's id string 577 * of "NEC CDROM". 578 * 579 * Note: The MD21 controller type has been obsoleted. 580 * ST318202F is a Legacy device 581 * MAM3182FC, MAM3364FC, MAM3738FC do not appear to have ever been 582 * made with an FC connection. The entries here are a legacy. 583 */ 584 static sd_disk_config_t sd_disk_table[] = { 585 #if defined(__fibre) || defined(__i386) || defined(__amd64) 586 { "SEAGATE ST34371FC", SD_CONF_BSET_THROTTLE, &seagate_properties }, 587 { "SEAGATE ST19171FC", SD_CONF_BSET_THROTTLE, &seagate_properties }, 588 { "SEAGATE ST39102FC", SD_CONF_BSET_THROTTLE, &seagate_properties }, 589 { "SEAGATE ST39103FC", SD_CONF_BSET_THROTTLE, &seagate_properties }, 590 { "SEAGATE ST118273F", SD_CONF_BSET_THROTTLE, &seagate_properties }, 591 { "SEAGATE ST318202F", SD_CONF_BSET_THROTTLE, &seagate_properties }, 592 { "SEAGATE ST318203F", SD_CONF_BSET_THROTTLE, &seagate_properties }, 593 { "SEAGATE ST136403F", SD_CONF_BSET_THROTTLE, &seagate_properties }, 594 { "SEAGATE ST318304F", SD_CONF_BSET_THROTTLE, &seagate_properties }, 595 { "SEAGATE ST336704F", SD_CONF_BSET_THROTTLE, &seagate_properties }, 596 { "SEAGATE ST373405F", SD_CONF_BSET_THROTTLE, &seagate_properties }, 597 { "SEAGATE ST336605F", SD_CONF_BSET_THROTTLE, &seagate_properties }, 598 { "SEAGATE ST336752F", SD_CONF_BSET_THROTTLE, &seagate_properties }, 599 { "SEAGATE ST318452F", SD_CONF_BSET_THROTTLE, &seagate_properties }, 600 { "FUJITSU MAG3091F", SD_CONF_BSET_THROTTLE, &fujitsu_properties }, 601 { "FUJITSU 
MAG3182F", SD_CONF_BSET_THROTTLE, &fujitsu_properties }, 602 { "FUJITSU MAA3182F", SD_CONF_BSET_THROTTLE, &fujitsu_properties }, 603 { "FUJITSU MAF3364F", SD_CONF_BSET_THROTTLE, &fujitsu_properties }, 604 { "FUJITSU MAL3364F", SD_CONF_BSET_THROTTLE, &fujitsu_properties }, 605 { "FUJITSU MAL3738F", SD_CONF_BSET_THROTTLE, &fujitsu_properties }, 606 { "FUJITSU MAM3182FC", SD_CONF_BSET_THROTTLE, &fujitsu_properties }, 607 { "FUJITSU MAM3364FC", SD_CONF_BSET_THROTTLE, &fujitsu_properties }, 608 { "FUJITSU MAM3738FC", SD_CONF_BSET_THROTTLE, &fujitsu_properties }, 609 { "IBM DDYFT1835", SD_CONF_BSET_THROTTLE, &ibm_properties }, 610 { "IBM DDYFT3695", SD_CONF_BSET_THROTTLE, &ibm_properties }, 611 { "IBM IC35LF2D2", SD_CONF_BSET_THROTTLE, &ibm_properties }, 612 { "IBM IC35LF2PR", SD_CONF_BSET_THROTTLE, &ibm_properties }, 613 { "IBM 1724-100", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties }, 614 { "IBM 1726-2xx", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties }, 615 { "IBM 1726-22x", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties }, 616 { "IBM 1726-4xx", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties }, 617 { "IBM 1726-42x", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties }, 618 { "IBM 1726-3xx", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties }, 619 { "IBM 3526", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties }, 620 { "IBM 3542", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties }, 621 { "IBM 3552", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties }, 622 { "IBM 1722", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties }, 623 { "IBM 1742", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties }, 624 { "IBM 1815", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties }, 625 { "IBM FAStT", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties }, 626 { "IBM 1814", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties }, 627 { "IBM 1814-200", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties }, 628 { "IBM 1818", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties }, 629 { "DELL MD3000", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties }, 630 { "DELL MD3000i", 
SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties }, 631 { "LSI INF", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties }, 632 { "ENGENIO INF", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties }, 633 { "SGI TP", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties }, 634 { "SGI IS", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties }, 635 { "*CSM100_*", SD_CONF_BSET_NRR_COUNT | 636 SD_CONF_BSET_CACHE_IS_NV, &lsi_oem_properties }, 637 { "*CSM200_*", SD_CONF_BSET_NRR_COUNT | 638 SD_CONF_BSET_CACHE_IS_NV, &lsi_oem_properties }, 639 { "Fujitsu SX300", SD_CONF_BSET_THROTTLE, &lsi_oem_properties }, 640 { "LSI", SD_CONF_BSET_NRR_COUNT, &lsi_properties }, 641 { "SUN T3", SD_CONF_BSET_THROTTLE | 642 SD_CONF_BSET_BSY_RETRY_COUNT| 643 SD_CONF_BSET_RST_RETRIES| 644 SD_CONF_BSET_RSV_REL_TIME, 645 &purple_properties }, 646 { "SUN SESS01", SD_CONF_BSET_THROTTLE | 647 SD_CONF_BSET_BSY_RETRY_COUNT| 648 SD_CONF_BSET_RST_RETRIES| 649 SD_CONF_BSET_RSV_REL_TIME| 650 SD_CONF_BSET_MIN_THROTTLE| 651 SD_CONF_BSET_DISKSORT_DISABLED, 652 &sve_properties }, 653 { "SUN T4", SD_CONF_BSET_THROTTLE | 654 SD_CONF_BSET_BSY_RETRY_COUNT| 655 SD_CONF_BSET_RST_RETRIES| 656 SD_CONF_BSET_RSV_REL_TIME, 657 &purple_properties }, 658 { "SUN SVE01", SD_CONF_BSET_DISKSORT_DISABLED | 659 SD_CONF_BSET_LUN_RESET_ENABLED, 660 &maserati_properties }, 661 { "SUN SE6920", SD_CONF_BSET_THROTTLE | 662 SD_CONF_BSET_NRR_COUNT| 663 SD_CONF_BSET_BSY_RETRY_COUNT| 664 SD_CONF_BSET_RST_RETRIES| 665 SD_CONF_BSET_MIN_THROTTLE| 666 SD_CONF_BSET_DISKSORT_DISABLED| 667 SD_CONF_BSET_LUN_RESET_ENABLED, 668 &pirus_properties }, 669 { "SUN SE6940", SD_CONF_BSET_THROTTLE | 670 SD_CONF_BSET_NRR_COUNT| 671 SD_CONF_BSET_BSY_RETRY_COUNT| 672 SD_CONF_BSET_RST_RETRIES| 673 SD_CONF_BSET_MIN_THROTTLE| 674 SD_CONF_BSET_DISKSORT_DISABLED| 675 SD_CONF_BSET_LUN_RESET_ENABLED, 676 &pirus_properties }, 677 { "SUN StorageTek 6920", SD_CONF_BSET_THROTTLE | 678 SD_CONF_BSET_NRR_COUNT| 679 SD_CONF_BSET_BSY_RETRY_COUNT| 680 SD_CONF_BSET_RST_RETRIES| 681 SD_CONF_BSET_MIN_THROTTLE| 682 
SD_CONF_BSET_DISKSORT_DISABLED| 683 SD_CONF_BSET_LUN_RESET_ENABLED, 684 &pirus_properties }, 685 { "SUN StorageTek 6940", SD_CONF_BSET_THROTTLE | 686 SD_CONF_BSET_NRR_COUNT| 687 SD_CONF_BSET_BSY_RETRY_COUNT| 688 SD_CONF_BSET_RST_RETRIES| 689 SD_CONF_BSET_MIN_THROTTLE| 690 SD_CONF_BSET_DISKSORT_DISABLED| 691 SD_CONF_BSET_LUN_RESET_ENABLED, 692 &pirus_properties }, 693 { "SUN PSX1000", SD_CONF_BSET_THROTTLE | 694 SD_CONF_BSET_NRR_COUNT| 695 SD_CONF_BSET_BSY_RETRY_COUNT| 696 SD_CONF_BSET_RST_RETRIES| 697 SD_CONF_BSET_MIN_THROTTLE| 698 SD_CONF_BSET_DISKSORT_DISABLED| 699 SD_CONF_BSET_LUN_RESET_ENABLED, 700 &pirus_properties }, 701 { "SUN SE6330", SD_CONF_BSET_THROTTLE | 702 SD_CONF_BSET_NRR_COUNT| 703 SD_CONF_BSET_BSY_RETRY_COUNT| 704 SD_CONF_BSET_RST_RETRIES| 705 SD_CONF_BSET_MIN_THROTTLE| 706 SD_CONF_BSET_DISKSORT_DISABLED| 707 SD_CONF_BSET_LUN_RESET_ENABLED, 708 &pirus_properties }, 709 { "SUN STK6580_6780", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties }, 710 { "STK OPENstorage", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties }, 711 { "STK OpenStorage", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties }, 712 { "STK BladeCtlr", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties }, 713 { "STK FLEXLINE", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties }, 714 { "SYMBIOS", SD_CONF_BSET_NRR_COUNT, &symbios_properties }, 715 #endif /* fibre or NON-sparc platforms */ 716 #if ((defined(__sparc) && !defined(__fibre)) ||\ 717 (defined(__i386) || defined(__amd64))) 718 { "SEAGATE ST42400N", SD_CONF_BSET_THROTTLE, &elite_properties }, 719 { "SEAGATE ST31200N", SD_CONF_BSET_THROTTLE, &st31200n_properties }, 720 { "SEAGATE ST41600N", SD_CONF_BSET_TUR_CHECK, NULL }, 721 { "CONNER CP30540", SD_CONF_BSET_NOCACHE, NULL }, 722 { "*SUN0104*", SD_CONF_BSET_FAB_DEVID, NULL }, 723 { "*SUN0207*", SD_CONF_BSET_FAB_DEVID, NULL }, 724 { "*SUN0327*", SD_CONF_BSET_FAB_DEVID, NULL }, 725 { "*SUN0340*", SD_CONF_BSET_FAB_DEVID, NULL }, 726 { "*SUN0424*", SD_CONF_BSET_FAB_DEVID, NULL }, 727 { "*SUN0669*", 
SD_CONF_BSET_FAB_DEVID, NULL }, 728 { "*SUN1.0G*", SD_CONF_BSET_FAB_DEVID, NULL }, 729 { "SYMBIOS INF-01-00 ", SD_CONF_BSET_FAB_DEVID, NULL }, 730 { "SYMBIOS", SD_CONF_BSET_THROTTLE|SD_CONF_BSET_NRR_COUNT, 731 &symbios_properties }, 732 { "LSI", SD_CONF_BSET_THROTTLE | SD_CONF_BSET_NRR_COUNT, 733 &lsi_properties_scsi }, 734 #if defined(__i386) || defined(__amd64) 735 { " NEC CD-ROM DRIVE:260 ", (SD_CONF_BSET_PLAYMSF_BCD 736 | SD_CONF_BSET_READSUB_BCD 737 | SD_CONF_BSET_READ_TOC_ADDR_BCD 738 | SD_CONF_BSET_NO_READ_HEADER 739 | SD_CONF_BSET_READ_CD_XD4), NULL }, 740 741 { " NEC CD-ROM DRIVE:270 ", (SD_CONF_BSET_PLAYMSF_BCD 742 | SD_CONF_BSET_READSUB_BCD 743 | SD_CONF_BSET_READ_TOC_ADDR_BCD 744 | SD_CONF_BSET_NO_READ_HEADER 745 | SD_CONF_BSET_READ_CD_XD4), NULL }, 746 #endif /* __i386 || __amd64 */ 747 #endif /* sparc NON-fibre or NON-sparc platforms */ 748 749 #if (defined(SD_PROP_TST)) 750 { "VENDOR PRODUCT ", (SD_CONF_BSET_THROTTLE 751 | SD_CONF_BSET_CTYPE 752 | SD_CONF_BSET_NRR_COUNT 753 | SD_CONF_BSET_FAB_DEVID 754 | SD_CONF_BSET_NOCACHE 755 | SD_CONF_BSET_BSY_RETRY_COUNT 756 | SD_CONF_BSET_PLAYMSF_BCD 757 | SD_CONF_BSET_READSUB_BCD 758 | SD_CONF_BSET_READ_TOC_TRK_BCD 759 | SD_CONF_BSET_READ_TOC_ADDR_BCD 760 | SD_CONF_BSET_NO_READ_HEADER 761 | SD_CONF_BSET_READ_CD_XD4 762 | SD_CONF_BSET_RST_RETRIES 763 | SD_CONF_BSET_RSV_REL_TIME 764 | SD_CONF_BSET_TUR_CHECK), &tst_properties}, 765 #endif 766 }; 767 768 static const int sd_disk_table_size = 769 sizeof (sd_disk_table)/ sizeof (sd_disk_config_t); 770 771 772 773 #define SD_INTERCONNECT_PARALLEL 0 774 #define SD_INTERCONNECT_FABRIC 1 775 #define SD_INTERCONNECT_FIBRE 2 776 #define SD_INTERCONNECT_SSA 3 777 #define SD_INTERCONNECT_SATA 4 778 #define SD_IS_PARALLEL_SCSI(un) \ 779 ((un)->un_interconnect_type == SD_INTERCONNECT_PARALLEL) 780 #define SD_IS_SERIAL(un) \ 781 ((un)->un_interconnect_type == SD_INTERCONNECT_SATA) 782 783 /* 784 * Definitions used by device id registration routines 785 */ 786 #define 
VPD_HEAD_OFFSET 3 /* size of head for vpd page */
787 #define VPD_PAGE_LENGTH 3 /* offset for page length data */
788 #define VPD_MODE_PAGE 1 /* offset into vpd pg for "page code" */
789
/*
 * NOTE(review): no comment in the original; presumably this mutex
 * serializes use of a shared sense-data resource -- confirm against
 * the code paths that actually acquire it.
 */
790 static kmutex_t sd_sense_mutex = {0};
791
792 /*
793 * Macros for updates of the driver state
794 */
/* Record the current state in un_last_state, then enter state (s). */
795 #define New_state(un, s) \
796 (un)->un_last_state = (un)->un_state, (un)->un_state = (s)
/*
 * Return to the previously saved state. Via New_state() this also
 * records the state being left in un_last_state, so two consecutive
 * Restore_state() calls toggle between the same two states.
 */
797 #define Restore_state(un) \
798 { uchar_t tmp = (un)->un_last_state; New_state((un), tmp); }
799
/*
 * Per-CDB-group limits, ordered by increasing CDB size. The columns
 * appear to be: CDB length, SCSI group code, maximum addressable
 * block, and maximum transfer length -- inferred from the values;
 * TODO(review): confirm against the sd_cdbinfo definition in sddef.h.
 */
800 static struct sd_cdbinfo sd_cdbtab[] = {
801 { CDB_GROUP0, 0x00, 0x1FFFFF, 0xFF, },
802 { CDB_GROUP1, SCMD_GROUP1, 0xFFFFFFFF, 0xFFFF, },
803 { CDB_GROUP5, SCMD_GROUP5, 0xFFFFFFFF, 0xFFFFFFFF, },
804 { CDB_GROUP4, SCMD_GROUP4, 0xFFFFFFFFFFFFFFFF, 0xFFFFFFFF, },
805 };
806
807 /*
808 * Specifies the number of seconds that must have elapsed since the last
809 * cmd. has completed for a device to be declared idle to the PM framework.
810 */
811 static int sd_pm_idletime = 1;
812
813 /*
814 * Internal function prototypes
815 */
816
817 #if (defined(__fibre))
818 /*
819 * These #defines are to avoid namespace collisions that occur because this
820 * code is currently used to compile two separate driver modules: sd and ssd.
821 * All function names need to be treated this way (even if declared static)
822 * in order to allow the debugger to resolve the names properly.
823 * It is anticipated that in the near future the ssd module will be obsoleted,
824 * at which time this ugliness should go away.
825 */ 826 #define sd_log_trace ssd_log_trace 827 #define sd_log_info ssd_log_info 828 #define sd_log_err ssd_log_err 829 #define sdprobe ssdprobe 830 #define sdinfo ssdinfo 831 #define sd_prop_op ssd_prop_op 832 #define sd_scsi_probe_cache_init ssd_scsi_probe_cache_init 833 #define sd_scsi_probe_cache_fini ssd_scsi_probe_cache_fini 834 #define sd_scsi_clear_probe_cache ssd_scsi_clear_probe_cache 835 #define sd_scsi_probe_with_cache ssd_scsi_probe_with_cache 836 #define sd_scsi_target_lun_init ssd_scsi_target_lun_init 837 #define sd_scsi_target_lun_fini ssd_scsi_target_lun_fini 838 #define sd_scsi_get_target_lun_count ssd_scsi_get_target_lun_count 839 #define sd_scsi_update_lun_on_target ssd_scsi_update_lun_on_target 840 #define sd_spin_up_unit ssd_spin_up_unit 841 #define sd_enable_descr_sense ssd_enable_descr_sense 842 #define sd_reenable_dsense_task ssd_reenable_dsense_task 843 #define sd_set_mmc_caps ssd_set_mmc_caps 844 #define sd_read_unit_properties ssd_read_unit_properties 845 #define sd_process_sdconf_file ssd_process_sdconf_file 846 #define sd_process_sdconf_table ssd_process_sdconf_table 847 #define sd_sdconf_id_match ssd_sdconf_id_match 848 #define sd_blank_cmp ssd_blank_cmp 849 #define sd_chk_vers1_data ssd_chk_vers1_data 850 #define sd_set_vers1_properties ssd_set_vers1_properties 851 852 #define sd_get_physical_geometry ssd_get_physical_geometry 853 #define sd_get_virtual_geometry ssd_get_virtual_geometry 854 #define sd_update_block_info ssd_update_block_info 855 #define sd_register_devid ssd_register_devid 856 #define sd_get_devid ssd_get_devid 857 #define sd_create_devid ssd_create_devid 858 #define sd_write_deviceid ssd_write_deviceid 859 #define sd_check_vpd_page_support ssd_check_vpd_page_support 860 #define sd_setup_pm ssd_setup_pm 861 #define sd_create_pm_components ssd_create_pm_components 862 #define sd_ddi_suspend ssd_ddi_suspend 863 #define sd_ddi_pm_suspend ssd_ddi_pm_suspend 864 #define sd_ddi_resume ssd_ddi_resume 865 #define 
sd_ddi_pm_resume ssd_ddi_pm_resume 866 #define sdpower ssdpower 867 #define sdattach ssdattach 868 #define sddetach ssddetach 869 #define sd_unit_attach ssd_unit_attach 870 #define sd_unit_detach ssd_unit_detach 871 #define sd_set_unit_attributes ssd_set_unit_attributes 872 #define sd_create_errstats ssd_create_errstats 873 #define sd_set_errstats ssd_set_errstats 874 #define sd_set_pstats ssd_set_pstats 875 #define sddump ssddump 876 #define sd_scsi_poll ssd_scsi_poll 877 #define sd_send_polled_RQS ssd_send_polled_RQS 878 #define sd_ddi_scsi_poll ssd_ddi_scsi_poll 879 #define sd_init_event_callbacks ssd_init_event_callbacks 880 #define sd_event_callback ssd_event_callback 881 #define sd_cache_control ssd_cache_control 882 #define sd_get_write_cache_enabled ssd_get_write_cache_enabled 883 #define sd_get_nv_sup ssd_get_nv_sup 884 #define sd_make_device ssd_make_device 885 #define sdopen ssdopen 886 #define sdclose ssdclose 887 #define sd_ready_and_valid ssd_ready_and_valid 888 #define sdmin ssdmin 889 #define sdread ssdread 890 #define sdwrite ssdwrite 891 #define sdaread ssdaread 892 #define sdawrite ssdawrite 893 #define sdstrategy ssdstrategy 894 #define sdioctl ssdioctl 895 #define sd_mapblockaddr_iostart ssd_mapblockaddr_iostart 896 #define sd_mapblocksize_iostart ssd_mapblocksize_iostart 897 #define sd_checksum_iostart ssd_checksum_iostart 898 #define sd_checksum_uscsi_iostart ssd_checksum_uscsi_iostart 899 #define sd_pm_iostart ssd_pm_iostart 900 #define sd_core_iostart ssd_core_iostart 901 #define sd_mapblockaddr_iodone ssd_mapblockaddr_iodone 902 #define sd_mapblocksize_iodone ssd_mapblocksize_iodone 903 #define sd_checksum_iodone ssd_checksum_iodone 904 #define sd_checksum_uscsi_iodone ssd_checksum_uscsi_iodone 905 #define sd_pm_iodone ssd_pm_iodone 906 #define sd_initpkt_for_buf ssd_initpkt_for_buf 907 #define sd_destroypkt_for_buf ssd_destroypkt_for_buf 908 #define sd_setup_rw_pkt ssd_setup_rw_pkt 909 #define sd_setup_next_rw_pkt ssd_setup_next_rw_pkt 
910 #define sd_buf_iodone ssd_buf_iodone 911 #define sd_uscsi_strategy ssd_uscsi_strategy 912 #define sd_initpkt_for_uscsi ssd_initpkt_for_uscsi 913 #define sd_destroypkt_for_uscsi ssd_destroypkt_for_uscsi 914 #define sd_uscsi_iodone ssd_uscsi_iodone 915 #define sd_xbuf_strategy ssd_xbuf_strategy 916 #define sd_xbuf_init ssd_xbuf_init 917 #define sd_pm_entry ssd_pm_entry 918 #define sd_pm_exit ssd_pm_exit 919 920 #define sd_pm_idletimeout_handler ssd_pm_idletimeout_handler 921 #define sd_pm_timeout_handler ssd_pm_timeout_handler 922 923 #define sd_add_buf_to_waitq ssd_add_buf_to_waitq 924 #define sdintr ssdintr 925 #define sd_start_cmds ssd_start_cmds 926 #define sd_send_scsi_cmd ssd_send_scsi_cmd 927 #define sd_bioclone_alloc ssd_bioclone_alloc 928 #define sd_bioclone_free ssd_bioclone_free 929 #define sd_shadow_buf_alloc ssd_shadow_buf_alloc 930 #define sd_shadow_buf_free ssd_shadow_buf_free 931 #define sd_print_transport_rejected_message \ 932 ssd_print_transport_rejected_message 933 #define sd_retry_command ssd_retry_command 934 #define sd_set_retry_bp ssd_set_retry_bp 935 #define sd_send_request_sense_command ssd_send_request_sense_command 936 #define sd_start_retry_command ssd_start_retry_command 937 #define sd_start_direct_priority_command \ 938 ssd_start_direct_priority_command 939 #define sd_return_failed_command ssd_return_failed_command 940 #define sd_return_failed_command_no_restart \ 941 ssd_return_failed_command_no_restart 942 #define sd_return_command ssd_return_command 943 #define sd_sync_with_callback ssd_sync_with_callback 944 #define sdrunout ssdrunout 945 #define sd_mark_rqs_busy ssd_mark_rqs_busy 946 #define sd_mark_rqs_idle ssd_mark_rqs_idle 947 #define sd_reduce_throttle ssd_reduce_throttle 948 #define sd_restore_throttle ssd_restore_throttle 949 #define sd_print_incomplete_msg ssd_print_incomplete_msg 950 #define sd_init_cdb_limits ssd_init_cdb_limits 951 #define sd_pkt_status_good ssd_pkt_status_good 952 #define 
sd_pkt_status_check_condition ssd_pkt_status_check_condition 953 #define sd_pkt_status_busy ssd_pkt_status_busy 954 #define sd_pkt_status_reservation_conflict \ 955 ssd_pkt_status_reservation_conflict 956 #define sd_pkt_status_qfull ssd_pkt_status_qfull 957 #define sd_handle_request_sense ssd_handle_request_sense 958 #define sd_handle_auto_request_sense ssd_handle_auto_request_sense 959 #define sd_print_sense_failed_msg ssd_print_sense_failed_msg 960 #define sd_validate_sense_data ssd_validate_sense_data 961 #define sd_decode_sense ssd_decode_sense 962 #define sd_print_sense_msg ssd_print_sense_msg 963 #define sd_sense_key_no_sense ssd_sense_key_no_sense 964 #define sd_sense_key_recoverable_error ssd_sense_key_recoverable_error 965 #define sd_sense_key_not_ready ssd_sense_key_not_ready 966 #define sd_sense_key_medium_or_hardware_error \ 967 ssd_sense_key_medium_or_hardware_error 968 #define sd_sense_key_illegal_request ssd_sense_key_illegal_request 969 #define sd_sense_key_unit_attention ssd_sense_key_unit_attention 970 #define sd_sense_key_fail_command ssd_sense_key_fail_command 971 #define sd_sense_key_blank_check ssd_sense_key_blank_check 972 #define sd_sense_key_aborted_command ssd_sense_key_aborted_command 973 #define sd_sense_key_default ssd_sense_key_default 974 #define sd_print_retry_msg ssd_print_retry_msg 975 #define sd_print_cmd_incomplete_msg ssd_print_cmd_incomplete_msg 976 #define sd_pkt_reason_cmd_incomplete ssd_pkt_reason_cmd_incomplete 977 #define sd_pkt_reason_cmd_tran_err ssd_pkt_reason_cmd_tran_err 978 #define sd_pkt_reason_cmd_reset ssd_pkt_reason_cmd_reset 979 #define sd_pkt_reason_cmd_aborted ssd_pkt_reason_cmd_aborted 980 #define sd_pkt_reason_cmd_timeout ssd_pkt_reason_cmd_timeout 981 #define sd_pkt_reason_cmd_unx_bus_free ssd_pkt_reason_cmd_unx_bus_free 982 #define sd_pkt_reason_cmd_tag_reject ssd_pkt_reason_cmd_tag_reject 983 #define sd_pkt_reason_default ssd_pkt_reason_default 984 #define sd_reset_target ssd_reset_target 985 #define 
sd_start_stop_unit_callback ssd_start_stop_unit_callback 986 #define sd_start_stop_unit_task ssd_start_stop_unit_task 987 #define sd_taskq_create ssd_taskq_create 988 #define sd_taskq_delete ssd_taskq_delete 989 #define sd_target_change_task ssd_target_change_task 990 #define sd_log_lun_expansion_event ssd_log_lun_expansion_event 991 #define sd_media_change_task ssd_media_change_task 992 #define sd_handle_mchange ssd_handle_mchange 993 #define sd_send_scsi_DOORLOCK ssd_send_scsi_DOORLOCK 994 #define sd_send_scsi_READ_CAPACITY ssd_send_scsi_READ_CAPACITY 995 #define sd_send_scsi_READ_CAPACITY_16 ssd_send_scsi_READ_CAPACITY_16 996 #define sd_send_scsi_GET_CONFIGURATION ssd_send_scsi_GET_CONFIGURATION 997 #define sd_send_scsi_feature_GET_CONFIGURATION \ 998 ssd_send_scsi_feature_GET_CONFIGURATION /* was self-referential (missing ssd_ prefix): a self-referential object-like macro is never re-expanded, so the __fibre build silently skipped renaming this symbol */ 999 #define sd_send_scsi_START_STOP_UNIT ssd_send_scsi_START_STOP_UNIT 1000 #define sd_send_scsi_INQUIRY ssd_send_scsi_INQUIRY 1001 #define sd_send_scsi_TEST_UNIT_READY ssd_send_scsi_TEST_UNIT_READY 1002 #define sd_send_scsi_PERSISTENT_RESERVE_IN \ 1003 ssd_send_scsi_PERSISTENT_RESERVE_IN 1004 #define sd_send_scsi_PERSISTENT_RESERVE_OUT \ 1005 ssd_send_scsi_PERSISTENT_RESERVE_OUT 1006 #define sd_send_scsi_SYNCHRONIZE_CACHE ssd_send_scsi_SYNCHRONIZE_CACHE 1007 #define sd_send_scsi_SYNCHRONIZE_CACHE_biodone \ 1008 ssd_send_scsi_SYNCHRONIZE_CACHE_biodone 1009 #define sd_send_scsi_MODE_SENSE ssd_send_scsi_MODE_SENSE 1010 #define sd_send_scsi_MODE_SELECT ssd_send_scsi_MODE_SELECT 1011 #define sd_send_scsi_RDWR ssd_send_scsi_RDWR 1012 #define sd_send_scsi_LOG_SENSE ssd_send_scsi_LOG_SENSE 1013 #define sd_alloc_rqs ssd_alloc_rqs 1014 #define sd_free_rqs ssd_free_rqs 1015 #define sd_dump_memory ssd_dump_memory 1016 #define sd_get_media_info ssd_get_media_info 1017 #define sd_dkio_ctrl_info ssd_dkio_ctrl_info 1018 #define sd_nvpair_str_decode ssd_nvpair_str_decode 1019 #define sd_strtok_r ssd_strtok_r 1020 #define sd_set_properties ssd_set_properties 1021 #define
sd_get_tunables_from_conf ssd_get_tunables_from_conf 1022 #define sd_setup_next_xfer ssd_setup_next_xfer 1023 #define sd_dkio_get_temp ssd_dkio_get_temp 1024 #define sd_check_mhd ssd_check_mhd 1025 #define sd_mhd_watch_cb ssd_mhd_watch_cb 1026 #define sd_mhd_watch_incomplete ssd_mhd_watch_incomplete 1027 #define sd_sname ssd_sname 1028 #define sd_mhd_resvd_recover ssd_mhd_resvd_recover 1029 #define sd_resv_reclaim_thread ssd_resv_reclaim_thread 1030 #define sd_take_ownership ssd_take_ownership 1031 #define sd_reserve_release ssd_reserve_release 1032 #define sd_rmv_resv_reclaim_req ssd_rmv_resv_reclaim_req 1033 #define sd_mhd_reset_notify_cb ssd_mhd_reset_notify_cb 1034 #define sd_persistent_reservation_in_read_keys \ 1035 ssd_persistent_reservation_in_read_keys 1036 #define sd_persistent_reservation_in_read_resv \ 1037 ssd_persistent_reservation_in_read_resv 1038 #define sd_mhdioc_takeown ssd_mhdioc_takeown 1039 #define sd_mhdioc_failfast ssd_mhdioc_failfast 1040 #define sd_mhdioc_release ssd_mhdioc_release 1041 #define sd_mhdioc_register_devid ssd_mhdioc_register_devid 1042 #define sd_mhdioc_inkeys ssd_mhdioc_inkeys 1043 #define sd_mhdioc_inresv ssd_mhdioc_inresv 1044 #define sr_change_blkmode ssr_change_blkmode 1045 #define sr_change_speed ssr_change_speed 1046 #define sr_atapi_change_speed ssr_atapi_change_speed 1047 #define sr_pause_resume ssr_pause_resume 1048 #define sr_play_msf ssr_play_msf 1049 #define sr_play_trkind ssr_play_trkind 1050 #define sr_read_all_subcodes ssr_read_all_subcodes 1051 #define sr_read_subchannel ssr_read_subchannel 1052 #define sr_read_tocentry ssr_read_tocentry 1053 #define sr_read_tochdr ssr_read_tochdr 1054 #define sr_read_cdda ssr_read_cdda 1055 #define sr_read_cdxa ssr_read_cdxa 1056 #define sr_read_mode1 ssr_read_mode1 1057 #define sr_read_mode2 ssr_read_mode2 1058 #define sr_read_cd_mode2 ssr_read_cd_mode2 1059 #define sr_sector_mode ssr_sector_mode 1060 #define sr_eject ssr_eject 1061 #define sr_ejected ssr_ejected 1062 
#define sr_check_wp ssr_check_wp 1063 #define sd_check_media ssd_check_media 1064 #define sd_media_watch_cb ssd_media_watch_cb 1065 #define sd_delayed_cv_broadcast ssd_delayed_cv_broadcast 1066 #define sr_volume_ctrl ssr_volume_ctrl 1067 #define sr_read_sony_session_offset ssr_read_sony_session_offset 1068 #define sd_log_page_supported ssd_log_page_supported 1069 #define sd_check_for_writable_cd ssd_check_for_writable_cd 1070 #define sd_wm_cache_constructor ssd_wm_cache_constructor 1071 #define sd_wm_cache_destructor ssd_wm_cache_destructor 1072 #define sd_range_lock ssd_range_lock 1073 #define sd_get_range ssd_get_range 1074 #define sd_free_inlist_wmap ssd_free_inlist_wmap 1075 #define sd_range_unlock ssd_range_unlock 1076 #define sd_read_modify_write_task ssd_read_modify_write_task 1077 #define sddump_do_read_of_rmw ssddump_do_read_of_rmw 1078 1079 #define sd_iostart_chain ssd_iostart_chain 1080 #define sd_iodone_chain ssd_iodone_chain 1081 #define sd_initpkt_map ssd_initpkt_map 1082 #define sd_destroypkt_map ssd_destroypkt_map 1083 #define sd_chain_type_map ssd_chain_type_map 1084 #define sd_chain_index_map ssd_chain_index_map 1085 1086 #define sd_failfast_flushctl ssd_failfast_flushctl 1087 #define sd_failfast_flushq ssd_failfast_flushq 1088 #define sd_failfast_flushq_callback ssd_failfast_flushq_callback 1089 1090 #define sd_is_lsi ssd_is_lsi 1091 #define sd_tg_rdwr ssd_tg_rdwr 1092 #define sd_tg_getinfo ssd_tg_getinfo 1093 1094 #endif /* #if (defined(__fibre)) */ 1095 1096 1097 int _init(void); 1098 int _fini(void); 1099 int _info(struct modinfo *modinfop); 1100 1101 /*PRINTFLIKE3*/ 1102 static void sd_log_trace(uint_t comp, struct sd_lun *un, const char *fmt, ...); 1103 /*PRINTFLIKE3*/ 1104 static void sd_log_info(uint_t comp, struct sd_lun *un, const char *fmt, ...); 1105 /*PRINTFLIKE3*/ 1106 static void sd_log_err(uint_t comp, struct sd_lun *un, const char *fmt, ...); 1107 1108 static int sdprobe(dev_info_t *devi); 1109 static int sdinfo(dev_info_t *dip, 
ddi_info_cmd_t infocmd, void *arg, 1110 void **result); 1111 static int sd_prop_op(dev_t dev, dev_info_t *dip, ddi_prop_op_t prop_op, 1112 int mod_flags, char *name, caddr_t valuep, int *lengthp); 1113 1114 /* 1115 * Smart probe for parallel scsi 1116 */ 1117 static void sd_scsi_probe_cache_init(void); 1118 static void sd_scsi_probe_cache_fini(void); 1119 static void sd_scsi_clear_probe_cache(void); 1120 static int sd_scsi_probe_with_cache(struct scsi_device *devp, int (*fn)()); 1121 1122 /* 1123 * Attached luns on target for parallel scsi 1124 */ 1125 static void sd_scsi_target_lun_init(void); 1126 static void sd_scsi_target_lun_fini(void); 1127 static int sd_scsi_get_target_lun_count(dev_info_t *dip, int target); 1128 static void sd_scsi_update_lun_on_target(dev_info_t *dip, int target, int flag); 1129 1130 static int sd_spin_up_unit(sd_ssc_t *ssc); 1131 1132 /* 1133 * Using sd_ssc_init to establish sd_ssc_t struct 1134 * Using sd_ssc_send to send uscsi internal command 1135 * Using sd_ssc_fini to free sd_ssc_t struct 1136 */ 1137 static sd_ssc_t *sd_ssc_init(struct sd_lun *un); 1138 static int sd_ssc_send(sd_ssc_t *ssc, struct uscsi_cmd *incmd, 1139 int flag, enum uio_seg dataspace, int path_flag); 1140 static void sd_ssc_fini(sd_ssc_t *ssc); 1141 1142 /* 1143 * Using sd_ssc_assessment to set correct type-of-assessment 1144 * Using sd_ssc_post to post ereport & system log 1145 * sd_ssc_post will call sd_ssc_print to print system log 1146 * sd_ssc_post will call sd_ssd_ereport_post to post ereport 1147 */ 1148 static void sd_ssc_assessment(sd_ssc_t *ssc, 1149 enum sd_type_assessment tp_assess); 1150 1151 static void sd_ssc_post(sd_ssc_t *ssc, enum sd_driver_assessment sd_assess); 1152 static void sd_ssc_print(sd_ssc_t *ssc, int sd_severity); 1153 static void sd_ssc_ereport_post(sd_ssc_t *ssc, 1154 enum sd_driver_assessment drv_assess); 1155 1156 /* 1157 * Using sd_ssc_set_info to mark an un-decodable-data error. 
1158 * Using sd_ssc_extract_info to transfer information from internal 1159 * data structures to sd_ssc_t. 1160 */ 1161 static void sd_ssc_set_info(sd_ssc_t *ssc, int ssc_flags, 1162 const char *fmt, ...); 1163 static void sd_ssc_extract_info(sd_ssc_t *ssc, struct sd_lun *un, 1164 struct scsi_pkt *pktp, struct buf *bp, struct sd_xbuf *xp); 1165 1166 static int sd_send_scsi_cmd(dev_t dev, struct uscsi_cmd *incmd, int flag, 1167 enum uio_seg dataspace, int path_flag); 1168 1169 #ifdef _LP64 1170 static void sd_enable_descr_sense(sd_ssc_t *ssc); 1171 static void sd_reenable_dsense_task(void *arg); 1172 #endif /* _LP64 */ 1173 1174 static void sd_set_mmc_caps(sd_ssc_t *ssc); 1175 1176 static void sd_read_unit_properties(struct sd_lun *un); 1177 static int sd_process_sdconf_file(struct sd_lun *un); 1178 static void sd_nvpair_str_decode(struct sd_lun *un, char *nvpair_str); 1179 static char *sd_strtok_r(char *string, const char *sepset, char **lasts); 1180 static void sd_set_properties(struct sd_lun *un, char *name, char *value); 1181 static void sd_get_tunables_from_conf(struct sd_lun *un, int flags, 1182 int *data_list, sd_tunables *values); 1183 static void sd_process_sdconf_table(struct sd_lun *un); 1184 static int sd_sdconf_id_match(struct sd_lun *un, char *id, int idlen); 1185 static int sd_blank_cmp(struct sd_lun *un, char *id, int idlen); 1186 static int sd_chk_vers1_data(struct sd_lun *un, int flags, int *prop_list, 1187 int list_len, char *dataname_ptr); 1188 static void sd_set_vers1_properties(struct sd_lun *un, int flags, 1189 sd_tunables *prop_list); 1190 1191 static void sd_register_devid(sd_ssc_t *ssc, dev_info_t *devi, 1192 int reservation_flag); 1193 static int sd_get_devid(sd_ssc_t *ssc); 1194 static ddi_devid_t sd_create_devid(sd_ssc_t *ssc); 1195 static int sd_write_deviceid(sd_ssc_t *ssc); 1196 static int sd_get_devid_page(struct sd_lun *un, uchar_t *wwn, int *len); 1197 static int sd_check_vpd_page_support(sd_ssc_t *ssc); 1198 1199 static void 
sd_setup_pm(sd_ssc_t *ssc, dev_info_t *devi); 1200 static void sd_create_pm_components(dev_info_t *devi, struct sd_lun *un); 1201 1202 static int sd_ddi_suspend(dev_info_t *devi); 1203 static int sd_ddi_pm_suspend(struct sd_lun *un); 1204 static int sd_ddi_resume(dev_info_t *devi); 1205 static int sd_ddi_pm_resume(struct sd_lun *un); 1206 static int sdpower(dev_info_t *devi, int component, int level); 1207 1208 static int sdattach(dev_info_t *devi, ddi_attach_cmd_t cmd); 1209 static int sddetach(dev_info_t *devi, ddi_detach_cmd_t cmd); 1210 static int sd_unit_attach(dev_info_t *devi); 1211 static int sd_unit_detach(dev_info_t *devi); 1212 1213 static void sd_set_unit_attributes(struct sd_lun *un, dev_info_t *devi); 1214 static void sd_create_errstats(struct sd_lun *un, int instance); 1215 static void sd_set_errstats(struct sd_lun *un); 1216 static void sd_set_pstats(struct sd_lun *un); 1217 1218 static int sddump(dev_t dev, caddr_t addr, daddr_t blkno, int nblk); 1219 static int sd_scsi_poll(struct sd_lun *un, struct scsi_pkt *pkt); 1220 static int sd_send_polled_RQS(struct sd_lun *un); 1221 static int sd_ddi_scsi_poll(struct scsi_pkt *pkt); 1222 1223 #if (defined(__fibre)) 1224 /* 1225 * Event callbacks (photon) 1226 */ 1227 static void sd_init_event_callbacks(struct sd_lun *un); 1228 static void sd_event_callback(dev_info_t *, ddi_eventcookie_t, void *, void *); 1229 #endif 1230 1231 /* 1232 * Defines for sd_cache_control 1233 */ 1234 1235 #define SD_CACHE_ENABLE 1 1236 #define SD_CACHE_DISABLE 0 1237 #define SD_CACHE_NOCHANGE -1 1238 1239 static int sd_cache_control(sd_ssc_t *ssc, int rcd_flag, int wce_flag); 1240 static int sd_get_write_cache_enabled(sd_ssc_t *ssc, int *is_enabled); 1241 static void sd_get_nv_sup(sd_ssc_t *ssc); 1242 static dev_t sd_make_device(dev_info_t *devi); 1243 1244 static void sd_update_block_info(struct sd_lun *un, uint32_t lbasize, 1245 uint64_t capacity); 1246 1247 /* 1248 * Driver entry point functions. 
1249 */ 1250 static int sdopen(dev_t *dev_p, int flag, int otyp, cred_t *cred_p); 1251 static int sdclose(dev_t dev, int flag, int otyp, cred_t *cred_p); 1252 static int sd_ready_and_valid(sd_ssc_t *ssc, int part); 1253 1254 static void sdmin(struct buf *bp); 1255 static int sdread(dev_t dev, struct uio *uio, cred_t *cred_p); 1256 static int sdwrite(dev_t dev, struct uio *uio, cred_t *cred_p); 1257 static int sdaread(dev_t dev, struct aio_req *aio, cred_t *cred_p); 1258 static int sdawrite(dev_t dev, struct aio_req *aio, cred_t *cred_p); 1259 1260 static int sdstrategy(struct buf *bp); 1261 static int sdioctl(dev_t, int, intptr_t, int, cred_t *, int *); 1262 1263 /* 1264 * Function prototypes for layering functions in the iostart chain. 1265 */ 1266 static void sd_mapblockaddr_iostart(int index, struct sd_lun *un, 1267 struct buf *bp); 1268 static void sd_mapblocksize_iostart(int index, struct sd_lun *un, 1269 struct buf *bp); 1270 static void sd_checksum_iostart(int index, struct sd_lun *un, struct buf *bp); 1271 static void sd_checksum_uscsi_iostart(int index, struct sd_lun *un, 1272 struct buf *bp); 1273 static void sd_pm_iostart(int index, struct sd_lun *un, struct buf *bp); 1274 static void sd_core_iostart(int index, struct sd_lun *un, struct buf *bp); 1275 1276 /* 1277 * Function prototypes for layering functions in the iodone chain. 
1278 */ 1279 static void sd_buf_iodone(int index, struct sd_lun *un, struct buf *bp); 1280 static void sd_uscsi_iodone(int index, struct sd_lun *un, struct buf *bp); 1281 static void sd_mapblockaddr_iodone(int index, struct sd_lun *un, 1282 struct buf *bp); 1283 static void sd_mapblocksize_iodone(int index, struct sd_lun *un, 1284 struct buf *bp); 1285 static void sd_checksum_iodone(int index, struct sd_lun *un, struct buf *bp); 1286 static void sd_checksum_uscsi_iodone(int index, struct sd_lun *un, 1287 struct buf *bp); 1288 static void sd_pm_iodone(int index, struct sd_lun *un, struct buf *bp); 1289 1290 /* 1291 * Prototypes for functions to support buf(9S) based IO. 1292 */ 1293 static void sd_xbuf_strategy(struct buf *bp, ddi_xbuf_t xp, void *arg); 1294 static int sd_initpkt_for_buf(struct buf *, struct scsi_pkt **); 1295 static void sd_destroypkt_for_buf(struct buf *); 1296 static int sd_setup_rw_pkt(struct sd_lun *un, struct scsi_pkt **pktpp, 1297 struct buf *bp, int flags, 1298 int (*callback)(caddr_t), caddr_t callback_arg, 1299 diskaddr_t lba, uint32_t blockcount); 1300 static int sd_setup_next_rw_pkt(struct sd_lun *un, struct scsi_pkt *pktp, 1301 struct buf *bp, diskaddr_t lba, uint32_t blockcount); 1302 1303 /* 1304 * Prototypes for functions to support USCSI IO. 1305 */ 1306 static int sd_uscsi_strategy(struct buf *bp); 1307 static int sd_initpkt_for_uscsi(struct buf *, struct scsi_pkt **); 1308 static void sd_destroypkt_for_uscsi(struct buf *); 1309 1310 static void sd_xbuf_init(struct sd_lun *un, struct buf *bp, struct sd_xbuf *xp, 1311 uchar_t chain_type, void *pktinfop); 1312 1313 static int sd_pm_entry(struct sd_lun *un); 1314 static void sd_pm_exit(struct sd_lun *un); 1315 1316 static void sd_pm_idletimeout_handler(void *arg); 1317 1318 /* 1319 * sd_core internal functions (used at the sd_core_io layer). 
1320 */ 1321 static void sd_add_buf_to_waitq(struct sd_lun *un, struct buf *bp); 1322 static void sdintr(struct scsi_pkt *pktp); 1323 static void sd_start_cmds(struct sd_lun *un, struct buf *immed_bp); 1324 1325 static int sd_send_scsi_cmd(dev_t dev, struct uscsi_cmd *incmd, int flag, 1326 enum uio_seg dataspace, int path_flag); 1327 1328 static struct buf *sd_bioclone_alloc(struct buf *bp, size_t datalen, 1329 daddr_t blkno, int (*func)(struct buf *)); 1330 static struct buf *sd_shadow_buf_alloc(struct buf *bp, size_t datalen, 1331 uint_t bflags, daddr_t blkno, int (*func)(struct buf *)); 1332 static void sd_bioclone_free(struct buf *bp); 1333 static void sd_shadow_buf_free(struct buf *bp); 1334 1335 static void sd_print_transport_rejected_message(struct sd_lun *un, 1336 struct sd_xbuf *xp, int code); 1337 static void sd_print_incomplete_msg(struct sd_lun *un, struct buf *bp, 1338 void *arg, int code); 1339 static void sd_print_sense_failed_msg(struct sd_lun *un, struct buf *bp, 1340 void *arg, int code); 1341 static void sd_print_cmd_incomplete_msg(struct sd_lun *un, struct buf *bp, 1342 void *arg, int code); 1343 1344 static void sd_retry_command(struct sd_lun *un, struct buf *bp, 1345 int retry_check_flag, 1346 void (*user_funcp)(struct sd_lun *un, struct buf *bp, void *argp, 1347 int c), 1348 void *user_arg, int failure_code, clock_t retry_delay, 1349 void (*statp)(kstat_io_t *)); 1350 1351 static void sd_set_retry_bp(struct sd_lun *un, struct buf *bp, 1352 clock_t retry_delay, void (*statp)(kstat_io_t *)); 1353 1354 static void sd_send_request_sense_command(struct sd_lun *un, struct buf *bp, 1355 struct scsi_pkt *pktp); 1356 static void sd_start_retry_command(void *arg); 1357 static void sd_start_direct_priority_command(void *arg); 1358 static void sd_return_failed_command(struct sd_lun *un, struct buf *bp, 1359 int errcode); 1360 static void sd_return_failed_command_no_restart(struct sd_lun *un, 1361 struct buf *bp, int errcode); 1362 static void 
sd_return_command(struct sd_lun *un, struct buf *bp); 1363 static void sd_sync_with_callback(struct sd_lun *un); 1364 static int sdrunout(caddr_t arg); 1365 1366 static void sd_mark_rqs_busy(struct sd_lun *un, struct buf *bp); 1367 static struct buf *sd_mark_rqs_idle(struct sd_lun *un, struct sd_xbuf *xp); 1368 1369 static void sd_reduce_throttle(struct sd_lun *un, int throttle_type); 1370 static void sd_restore_throttle(void *arg); 1371 1372 static void sd_init_cdb_limits(struct sd_lun *un); 1373 1374 static void sd_pkt_status_good(struct sd_lun *un, struct buf *bp, 1375 struct sd_xbuf *xp, struct scsi_pkt *pktp); 1376 1377 /* 1378 * Error handling functions 1379 */ 1380 static void sd_pkt_status_check_condition(struct sd_lun *un, struct buf *bp, 1381 struct sd_xbuf *xp, struct scsi_pkt *pktp); 1382 static void sd_pkt_status_busy(struct sd_lun *un, struct buf *bp, 1383 struct sd_xbuf *xp, struct scsi_pkt *pktp); 1384 static void sd_pkt_status_reservation_conflict(struct sd_lun *un, 1385 struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp); 1386 static void sd_pkt_status_qfull(struct sd_lun *un, struct buf *bp, 1387 struct sd_xbuf *xp, struct scsi_pkt *pktp); 1388 1389 static void sd_handle_request_sense(struct sd_lun *un, struct buf *bp, 1390 struct sd_xbuf *xp, struct scsi_pkt *pktp); 1391 static void sd_handle_auto_request_sense(struct sd_lun *un, struct buf *bp, 1392 struct sd_xbuf *xp, struct scsi_pkt *pktp); 1393 static int sd_validate_sense_data(struct sd_lun *un, struct buf *bp, 1394 struct sd_xbuf *xp, size_t actual_len); 1395 static void sd_decode_sense(struct sd_lun *un, struct buf *bp, 1396 struct sd_xbuf *xp, struct scsi_pkt *pktp); 1397 1398 static void sd_print_sense_msg(struct sd_lun *un, struct buf *bp, 1399 void *arg, int code); 1400 1401 static void sd_sense_key_no_sense(struct sd_lun *un, struct buf *bp, 1402 struct sd_xbuf *xp, struct scsi_pkt *pktp); 1403 static void sd_sense_key_recoverable_error(struct sd_lun *un, 1404 uint8_t 
*sense_datap, 1405 struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp); 1406 static void sd_sense_key_not_ready(struct sd_lun *un, 1407 uint8_t *sense_datap, 1408 struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp); 1409 static void sd_sense_key_medium_or_hardware_error(struct sd_lun *un, 1410 uint8_t *sense_datap, 1411 struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp); 1412 static void sd_sense_key_illegal_request(struct sd_lun *un, struct buf *bp, 1413 struct sd_xbuf *xp, struct scsi_pkt *pktp); 1414 static void sd_sense_key_unit_attention(struct sd_lun *un, 1415 uint8_t *sense_datap, 1416 struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp); 1417 static void sd_sense_key_fail_command(struct sd_lun *un, struct buf *bp, 1418 struct sd_xbuf *xp, struct scsi_pkt *pktp); 1419 static void sd_sense_key_blank_check(struct sd_lun *un, struct buf *bp, 1420 struct sd_xbuf *xp, struct scsi_pkt *pktp); 1421 static void sd_sense_key_aborted_command(struct sd_lun *un, struct buf *bp, 1422 struct sd_xbuf *xp, struct scsi_pkt *pktp); 1423 static void sd_sense_key_default(struct sd_lun *un, 1424 uint8_t *sense_datap, 1425 struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp); 1426 1427 static void sd_print_retry_msg(struct sd_lun *un, struct buf *bp, 1428 void *arg, int flag); 1429 1430 static void sd_pkt_reason_cmd_incomplete(struct sd_lun *un, struct buf *bp, 1431 struct sd_xbuf *xp, struct scsi_pkt *pktp); 1432 static void sd_pkt_reason_cmd_tran_err(struct sd_lun *un, struct buf *bp, 1433 struct sd_xbuf *xp, struct scsi_pkt *pktp); 1434 static void sd_pkt_reason_cmd_reset(struct sd_lun *un, struct buf *bp, 1435 struct sd_xbuf *xp, struct scsi_pkt *pktp); 1436 static void sd_pkt_reason_cmd_aborted(struct sd_lun *un, struct buf *bp, 1437 struct sd_xbuf *xp, struct scsi_pkt *pktp); 1438 static void sd_pkt_reason_cmd_timeout(struct sd_lun *un, struct buf *bp, 1439 struct sd_xbuf *xp, struct scsi_pkt *pktp); 1440 static void 
sd_pkt_reason_cmd_unx_bus_free(struct sd_lun *un, struct buf *bp, 1441 struct sd_xbuf *xp, struct scsi_pkt *pktp); 1442 static void sd_pkt_reason_cmd_tag_reject(struct sd_lun *un, struct buf *bp, 1443 struct sd_xbuf *xp, struct scsi_pkt *pktp); 1444 static void sd_pkt_reason_default(struct sd_lun *un, struct buf *bp, 1445 struct sd_xbuf *xp, struct scsi_pkt *pktp); 1446 1447 static void sd_reset_target(struct sd_lun *un, struct scsi_pkt *pktp); 1448 1449 static void sd_start_stop_unit_callback(void *arg); 1450 static void sd_start_stop_unit_task(void *arg); 1451 1452 static void sd_taskq_create(void); 1453 static void sd_taskq_delete(void); 1454 static void sd_target_change_task(void *arg); 1455 static void sd_log_lun_expansion_event(struct sd_lun *un, int km_flag); 1456 static void sd_media_change_task(void *arg); 1457 1458 static int sd_handle_mchange(struct sd_lun *un); 1459 static int sd_send_scsi_DOORLOCK(sd_ssc_t *ssc, int flag, int path_flag); 1460 static int sd_send_scsi_READ_CAPACITY(sd_ssc_t *ssc, uint64_t *capp, 1461 uint32_t *lbap, int path_flag); 1462 static int sd_send_scsi_READ_CAPACITY_16(sd_ssc_t *ssc, uint64_t *capp, 1463 uint32_t *lbap, int path_flag); 1464 static int sd_send_scsi_START_STOP_UNIT(sd_ssc_t *ssc, int flag, 1465 int path_flag); 1466 static int sd_send_scsi_INQUIRY(sd_ssc_t *ssc, uchar_t *bufaddr, 1467 size_t buflen, uchar_t evpd, uchar_t page_code, size_t *residp); 1468 static int sd_send_scsi_TEST_UNIT_READY(sd_ssc_t *ssc, int flag); 1469 static int sd_send_scsi_PERSISTENT_RESERVE_IN(sd_ssc_t *ssc, 1470 uchar_t usr_cmd, uint16_t data_len, uchar_t *data_bufp); 1471 static int sd_send_scsi_PERSISTENT_RESERVE_OUT(sd_ssc_t *ssc, 1472 uchar_t usr_cmd, uchar_t *usr_bufp); 1473 static int sd_send_scsi_SYNCHRONIZE_CACHE(struct sd_lun *un, 1474 struct dk_callback *dkc); 1475 static int sd_send_scsi_SYNCHRONIZE_CACHE_biodone(struct buf *bp); 1476 static int sd_send_scsi_GET_CONFIGURATION(sd_ssc_t *ssc, 1477 struct uscsi_cmd *ucmdbuf, uchar_t 
    *rqbuf, uint_t rqbuflen,
    uchar_t *bufaddr, uint_t buflen, int path_flag);
static int sd_send_scsi_feature_GET_CONFIGURATION(sd_ssc_t *ssc,
    struct uscsi_cmd *ucmdbuf, uchar_t *rqbuf, uint_t rqbuflen,
    uchar_t *bufaddr, uint_t buflen, char feature, int path_flag);
static int sd_send_scsi_MODE_SENSE(sd_ssc_t *ssc, int cdbsize,
    uchar_t *bufaddr, size_t buflen, uchar_t page_code, int path_flag);
static int sd_send_scsi_MODE_SELECT(sd_ssc_t *ssc, int cdbsize,
    uchar_t *bufaddr, size_t buflen, uchar_t save_page, int path_flag);
static int sd_send_scsi_RDWR(sd_ssc_t *ssc, uchar_t cmd, void *bufaddr,
    size_t buflen, daddr_t start_block, int path_flag);
/* READ/WRITE are thin wrappers over sd_send_scsi_RDWR with a fixed opcode */
#define	sd_send_scsi_READ(ssc, bufaddr, buflen, start_block, path_flag) \
	sd_send_scsi_RDWR(ssc, SCMD_READ, bufaddr, buflen, start_block, \
	path_flag)
#define	sd_send_scsi_WRITE(ssc, bufaddr, buflen, start_block, path_flag)\
	sd_send_scsi_RDWR(ssc, SCMD_WRITE, bufaddr, buflen, start_block,\
	path_flag)

static int sd_send_scsi_LOG_SENSE(sd_ssc_t *ssc, uchar_t *bufaddr,
    uint16_t buflen, uchar_t page_code, uchar_t page_control,
    uint16_t param_ptr, int path_flag);

static int sd_alloc_rqs(struct scsi_device *devp, struct sd_lun *un);
static void sd_free_rqs(struct sd_lun *un);

static void sd_dump_memory(struct sd_lun *un, uint_t comp, char *title,
    uchar_t *data, int len, int fmt);
static void sd_panic_for_res_conflict(struct sd_lun *un);

/*
 * Disk Ioctl Function Prototypes
 */
static int sd_get_media_info(dev_t dev, caddr_t arg, int flag);
static int sd_dkio_ctrl_info(dev_t dev, caddr_t arg, int flag);
static int sd_dkio_get_temp(dev_t dev, caddr_t arg, int flag);

/*
 * Multi-host Ioctl Prototypes
 */
static int sd_check_mhd(dev_t dev, int interval);
static int sd_mhd_watch_cb(caddr_t arg, struct scsi_watch_result *resultp);
static void
sd_mhd_watch_incomplete(struct sd_lun *un, struct scsi_pkt *pkt);
static char *sd_sname(uchar_t status);
static void sd_mhd_resvd_recover(void *arg);
/*
 * NOTE(review): empty parens declare an unprototyped (K&R) function;
 * "(void)" would be the modern form -- confirm against the definition
 * before changing.
 */
static void sd_resv_reclaim_thread();
static int sd_take_ownership(dev_t dev, struct mhioctkown *p);
static int sd_reserve_release(dev_t dev, int cmd);
static void sd_rmv_resv_reclaim_req(dev_t dev);
static void sd_mhd_reset_notify_cb(caddr_t arg);
static int sd_persistent_reservation_in_read_keys(struct sd_lun *un,
    mhioc_inkeys_t *usrp, int flag);
static int sd_persistent_reservation_in_read_resv(struct sd_lun *un,
    mhioc_inresvs_t *usrp, int flag);
static int sd_mhdioc_takeown(dev_t dev, caddr_t arg, int flag);
static int sd_mhdioc_failfast(dev_t dev, caddr_t arg, int flag);
static int sd_mhdioc_release(dev_t dev);
static int sd_mhdioc_register_devid(dev_t dev);
static int sd_mhdioc_inkeys(dev_t dev, caddr_t arg, int flag);
static int sd_mhdioc_inresv(dev_t dev, caddr_t arg, int flag);

/*
 * SCSI removable prototypes
 */
static int sr_change_blkmode(dev_t dev, int cmd, intptr_t data, int flag);
static int sr_change_speed(dev_t dev, int cmd, intptr_t data, int flag);
static int sr_atapi_change_speed(dev_t dev, int cmd, intptr_t data, int flag);
static int sr_pause_resume(dev_t dev, int mode);
static int sr_play_msf(dev_t dev, caddr_t data, int flag);
static int sr_play_trkind(dev_t dev, caddr_t data, int flag);
static int sr_read_all_subcodes(dev_t dev, caddr_t data, int flag);
static int sr_read_subchannel(dev_t dev, caddr_t data, int flag);
static int sr_read_tocentry(dev_t dev, caddr_t data, int flag);
static int sr_read_tochdr(dev_t dev, caddr_t data, int flag);
static int sr_read_cdda(dev_t dev, caddr_t data, int flag);
static int sr_read_cdxa(dev_t dev, caddr_t data, int flag);
static int sr_read_mode1(dev_t dev, caddr_t data, int flag);
static int sr_read_mode2(dev_t dev, caddr_t data, int flag);
static int sr_read_cd_mode2(dev_t dev, caddr_t data, int flag);
static int sr_sector_mode(dev_t dev, uint32_t blksize);
static int sr_eject(dev_t dev);
static void sr_ejected(register struct sd_lun *un);
static int sr_check_wp(dev_t dev);
static int sd_check_media(dev_t dev, enum dkio_state state);
static int sd_media_watch_cb(caddr_t arg, struct scsi_watch_result *resultp);
static void sd_delayed_cv_broadcast(void *arg);
static int sr_volume_ctrl(dev_t dev, caddr_t data, int flag);
static int sr_read_sony_session_offset(dev_t dev, caddr_t data, int flag);

static int sd_log_page_supported(sd_ssc_t *ssc, int log_page);

/*
 * Function Prototype for the non-512 support (DVDRAM, MO etc.) functions.
 */
static void sd_check_for_writable_cd(sd_ssc_t *ssc, int path_flag);
static int sd_wm_cache_constructor(void *wm, void *un, int flags);
static void sd_wm_cache_destructor(void *wm, void *un);
static struct sd_w_map *sd_range_lock(struct sd_lun *un, daddr_t startb,
    daddr_t endb, ushort_t typ);
static struct sd_w_map *sd_get_range(struct sd_lun *un, daddr_t startb,
    daddr_t endb);
static void sd_free_inlist_wmap(struct sd_lun *un, struct sd_w_map *wmp);
static void sd_range_unlock(struct sd_lun *un, struct sd_w_map *wm);
static void sd_read_modify_write_task(void * arg);
static int
sddump_do_read_of_rmw(struct sd_lun *un, uint64_t blkno, uint64_t nblk,
    struct buf **bpp);


/*
 * Function prototypes for failfast support.
 */
static void sd_failfast_flushq(struct sd_lun *un);
static int sd_failfast_flushq_callback(struct buf *bp);

/*
 * Function prototypes to check for lsi devices
 */
static void sd_is_lsi(struct sd_lun *un);

/*
 * Function prototypes for partial DMA support
 */
static int sd_setup_next_xfer(struct sd_lun *un, struct buf *bp,
    struct scsi_pkt *pkt, struct sd_xbuf *xp);


/* Function prototypes for cmlb */
static int sd_tg_rdwr(dev_info_t *devi, uchar_t cmd, void *bufaddr,
    diskaddr_t start_block, size_t reqlength, void *tg_cookie);

static int sd_tg_getinfo(dev_info_t *devi, int cmd, void *arg, void *tg_cookie);

/*
 * Constants for failfast support:
 *
 * SD_FAILFAST_INACTIVE: Instance is currently in a normal state, with NO
 * failfast processing being performed.
 *
 * SD_FAILFAST_ACTIVE: Instance is in the failfast state and is performing
 * failfast processing on all bufs with B_FAILFAST set.
 */

#define	SD_FAILFAST_INACTIVE		0
#define	SD_FAILFAST_ACTIVE		1

/*
 * Bitmask to control behavior of buf(9S) flushes when a transition to
 * the failfast state occurs. Optional bits include:
 *
 * SD_FAILFAST_FLUSH_ALL_BUFS: When set, flush ALL bufs including those that
 * do NOT have B_FAILFAST set. When clear, only bufs with B_FAILFAST will
 * be flushed.
 *
 * SD_FAILFAST_FLUSH_ALL_QUEUES: When set, flush any/all other queues in the
 * driver, in addition to the regular wait queue. This includes the xbuf
 * queues. When clear, only the driver's wait queue will be flushed.
 */
#define	SD_FAILFAST_FLUSH_ALL_BUFS	0x01
#define	SD_FAILFAST_FLUSH_ALL_QUEUES	0x02

/*
 * The default behavior is to only flush bufs that have B_FAILFAST set, but
 * to flush all queues within the driver.
 */
static int sd_failfast_flushctl = SD_FAILFAST_FLUSH_ALL_QUEUES;


/*
 * SD Testing Fault Injection
 */
#ifdef SD_FAULT_INJECTION
static void sd_faultinjection_ioctl(int cmd, intptr_t arg, struct sd_lun *un);
static void sd_faultinjection(struct scsi_pkt *pktp);
static void sd_injection_log(char *buf, struct sd_lun *un);
#endif

/*
 * Device driver ops vector
 */
static struct cb_ops sd_cb_ops = {
	sdopen,			/* open */
	sdclose,		/* close */
	sdstrategy,		/* strategy */
	nodev,			/* print */
	sddump,			/* dump */
	sdread,			/* read */
	sdwrite,		/* write */
	sdioctl,		/* ioctl */
	nodev,			/* devmap */
	nodev,			/* mmap */
	nodev,			/* segmap */
	nochpoll,		/* poll */
	sd_prop_op,		/* cb_prop_op */
	0,			/* streamtab */
	D_64BIT | D_MP | D_NEW | D_HOTPLUG, /* Driver compatibility flags */
	CB_REV,			/* cb_rev */
	sdaread,		/* async I/O read entry point */
	sdawrite		/* async I/O write entry point */
};

static struct dev_ops sd_ops = {
	DEVO_REV,		/* devo_rev */
	0,			/* refcnt */
	sdinfo,			/* info */
	nulldev,		/* identify */
	sdprobe,		/* probe */
	sdattach,		/* attach */
	sddetach,		/* detach */
	nodev,			/* reset */
	&sd_cb_ops,		/* driver operations */
	NULL,			/* bus operations */
	sdpower			/* power */
};


/*
 * This is the loadable module wrapper.
 */
#include <sys/modctl.h>

static struct modldrv modldrv = {
	&mod_driverops,		/* Type of module. This one is a driver */
	SD_MODULE_NAME,		/* Module name */
	&sd_ops			/* driver ops */
};


static struct modlinkage modlinkage = {
	MODREV_1,
	&modldrv,
	NULL
};

/* Target-geometry ops vector registered with cmlb (disk label support) */
static cmlb_tg_ops_t sd_tgops = {
	TG_DK_OPS_VERSION_1,
	sd_tg_rdwr,
	sd_tg_getinfo
};

/*
 * Driver-private additions to the standard ASC/ASCQ sense-key string
 * table; terminated by the 0xffff sentinel entry.
 */
static struct scsi_asq_key_strings sd_additional_codes[] = {
	0x81, 0, "Logical Unit is Reserved",
	0x85, 0, "Audio Address Not Valid",
	0xb6, 0, "Media Load Mechanism Failed",
	0xB9, 0, "Audio Play Operation Aborted",
	0xbf, 0, "Buffer Overflow for Read All Subcodes Command",
	0x53, 2, "Medium removal prevented",
	0x6f, 0, "Authentication failed during key exchange",
	0x6f, 1, "Key not present",
	0x6f, 2, "Key not established",
	0x6f, 3, "Read without proper authentication",
	0x6f, 4, "Mismatched region to this logical unit",
	0x6f, 5, "Region reset count error",
	0xffff, 0x0, NULL
};


/*
 * Struct for passing printing information for sense data messages
 */
struct sd_sense_info {
	int	ssi_severity;
	int	ssi_pfa_flag;
};

/*
 * Table of function pointers for iostart-side routines. Separate "chains"
 * of layered function calls are formed by placing the function pointers
 * sequentially in the desired order. Functions are called according to an
 * incrementing table index ordering. The last function in each chain must
 * be sd_core_iostart(). The corresponding iodone-side routines are expected
 * in the sd_iodone_chain[] array.
 *
 * Note: It may seem more natural to organize both the iostart and iodone
 * functions together, into an array of structures (or some similar
 * organization) with a common index, rather than two separate arrays which
 * must be maintained in synchronization. The purpose of this division is
 * to achieve improved performance: individual arrays allow for more
 * effective cache line utilization on certain platforms.
 */

typedef void (*sd_chain_t)(int index, struct sd_lun *un, struct buf *bp);


static sd_chain_t sd_iostart_chain[] = {

	/* Chain for buf IO for disk drive targets (PM enabled) */
	sd_mapblockaddr_iostart,	/* Index: 0 */
	sd_pm_iostart,			/* Index: 1 */
	sd_core_iostart,		/* Index: 2 */

	/* Chain for buf IO for disk drive targets (PM disabled) */
	sd_mapblockaddr_iostart,	/* Index: 3 */
	sd_core_iostart,		/* Index: 4 */

	/* Chain for buf IO for removable-media targets (PM enabled) */
	sd_mapblockaddr_iostart,	/* Index: 5 */
	sd_mapblocksize_iostart,	/* Index: 6 */
	sd_pm_iostart,			/* Index: 7 */
	sd_core_iostart,		/* Index: 8 */

	/* Chain for buf IO for removable-media targets (PM disabled) */
	sd_mapblockaddr_iostart,	/* Index: 9 */
	sd_mapblocksize_iostart,	/* Index: 10 */
	sd_core_iostart,		/* Index: 11 */

	/* Chain for buf IO for disk drives with checksumming (PM enabled) */
	sd_mapblockaddr_iostart,	/* Index: 12 */
	sd_checksum_iostart,		/* Index: 13 */
	sd_pm_iostart,			/* Index: 14 */
	sd_core_iostart,		/* Index: 15 */

	/* Chain for buf IO for disk drives with checksumming (PM disabled) */
	sd_mapblockaddr_iostart,	/* Index: 16 */
	sd_checksum_iostart,		/* Index: 17 */
	sd_core_iostart,		/* Index: 18 */

	/* Chain for USCSI commands (all targets) */
	sd_pm_iostart,			/* Index: 19 */
	sd_core_iostart,		/* Index: 20 */

	/* Chain for checksumming USCSI commands (all targets) */
	sd_checksum_uscsi_iostart,	/* Index: 21 */
	sd_pm_iostart,			/* Index: 22 */
	sd_core_iostart,		/* Index: 23 */

	/* Chain for "direct" USCSI commands (all targets) */
	sd_core_iostart,		/* Index: 24 */

	/* Chain for "direct priority" USCSI commands (all targets) */
	sd_core_iostart,		/* Index: 25 */
};

/*
 * Macros to locate the first function of each iostart chain in the
 * sd_iostart_chain[] array. These are located by the index in the array.
 */
#define	SD_CHAIN_DISK_IOSTART			0
#define	SD_CHAIN_DISK_IOSTART_NO_PM		3
#define	SD_CHAIN_RMMEDIA_IOSTART		5
#define	SD_CHAIN_RMMEDIA_IOSTART_NO_PM		9
#define	SD_CHAIN_CHKSUM_IOSTART			12
#define	SD_CHAIN_CHKSUM_IOSTART_NO_PM		16
#define	SD_CHAIN_USCSI_CMD_IOSTART		19
#define	SD_CHAIN_USCSI_CHKSUM_IOSTART		21
#define	SD_CHAIN_DIRECT_CMD_IOSTART		24
#define	SD_CHAIN_PRIORITY_CMD_IOSTART		25


/*
 * Table of function pointers for the iodone-side routines for the driver-
 * internal layering mechanism. The calling sequence for iodone routines
 * uses a decrementing table index, so the last routine called in a chain
 * must be at the lowest array index location for that chain. The last
 * routine for each chain must be either sd_buf_iodone() (for buf(9S) IOs)
 * or sd_uscsi_iodone() (for uscsi IOs). Other than this, the ordering
 * of the functions in an iodone side chain must correspond to the ordering
 * of the iostart routines for that chain. Note that there is no iodone
 * side routine that corresponds to sd_core_iostart(), so there is no
 * entry in the table for this.
 */

static sd_chain_t sd_iodone_chain[] = {

	/* Chain for buf IO for disk drive targets (PM enabled) */
	sd_buf_iodone,			/* Index: 0 */
	sd_mapblockaddr_iodone,		/* Index: 1 */
	sd_pm_iodone,			/* Index: 2 */

	/* Chain for buf IO for disk drive targets (PM disabled) */
	sd_buf_iodone,			/* Index: 3 */
	sd_mapblockaddr_iodone,		/* Index: 4 */

	/* Chain for buf IO for removable-media targets (PM enabled) */
	sd_buf_iodone,			/* Index: 5 */
	sd_mapblockaddr_iodone,		/* Index: 6 */
	sd_mapblocksize_iodone,		/* Index: 7 */
	sd_pm_iodone,			/* Index: 8 */

	/* Chain for buf IO for removable-media targets (PM disabled) */
	sd_buf_iodone,			/* Index: 9 */
	sd_mapblockaddr_iodone,		/* Index: 10 */
	sd_mapblocksize_iodone,		/* Index: 11 */

	/* Chain for buf IO for disk drives with checksumming (PM enabled) */
	sd_buf_iodone,			/* Index: 12 */
	sd_mapblockaddr_iodone,		/* Index: 13 */
	sd_checksum_iodone,		/* Index: 14 */
	sd_pm_iodone,			/* Index: 15 */

	/* Chain for buf IO for disk drives with checksumming (PM disabled) */
	sd_buf_iodone,			/* Index: 16 */
	sd_mapblockaddr_iodone,		/* Index: 17 */
	sd_checksum_iodone,		/* Index: 18 */

	/* Chain for USCSI commands (non-checksum targets) */
	sd_uscsi_iodone,		/* Index: 19 */
	sd_pm_iodone,			/* Index: 20 */

	/* Chain for USCSI commands (checksum targets) */
	sd_uscsi_iodone,		/* Index: 21 */
	sd_checksum_uscsi_iodone,	/* Index: 22 */
	sd_pm_iodone,			/* Index: 23 */

	/* Chain for "direct" USCSI commands (all targets) */
	sd_uscsi_iodone,		/* Index: 24 */

	/* Chain for "direct priority" USCSI commands (all targets) */
	sd_uscsi_iodone,		/* Index: 25 */
};


/*
 * Macros to locate the "first" function in the sd_iodone_chain[] array for
 * each iodone-side chain.
 These are located by the array index, but as the
 * iodone side functions are called in a decrementing-index order, the
 * highest index number in each chain must be specified (as these correspond
 * to the first function in the iodone chain that will be called by the core
 * at IO completion time).
 */

#define	SD_CHAIN_DISK_IODONE			2
#define	SD_CHAIN_DISK_IODONE_NO_PM		4
#define	SD_CHAIN_RMMEDIA_IODONE			8
#define	SD_CHAIN_RMMEDIA_IODONE_NO_PM		11
#define	SD_CHAIN_CHKSUM_IODONE			15
#define	SD_CHAIN_CHKSUM_IODONE_NO_PM		18
#define	SD_CHAIN_USCSI_CMD_IODONE		20
/*
 * NOTE(review): the checksum-USCSI iodone chain occupies indexes 21..23 of
 * sd_iodone_chain[] and its iostart counterpart starts at index 21 (which
 * includes sd_pm_iostart); starting the iodone walk at 22 appears to skip
 * sd_pm_iodone at index 23 -- verify whether this value should be 23.
 */
#define	SD_CHAIN_USCSI_CHKSUM_IODONE		22
#define	SD_CHAIN_DIRECT_CMD_IODONE		24
#define	SD_CHAIN_PRIORITY_CMD_IODONE		25




/*
 * Array to map a layering chain index to the appropriate initpkt routine.
 * The redundant entries are present so that the index used for accessing
 * the above sd_iostart_chain and sd_iodone_chain tables can be used directly
 * with this table as well.
 */
typedef int (*sd_initpkt_t)(struct buf *, struct scsi_pkt **);

static sd_initpkt_t	sd_initpkt_map[] = {

	/* Chain for buf IO for disk drive targets (PM enabled) */
	sd_initpkt_for_buf,		/* Index: 0 */
	sd_initpkt_for_buf,		/* Index: 1 */
	sd_initpkt_for_buf,		/* Index: 2 */

	/* Chain for buf IO for disk drive targets (PM disabled) */
	sd_initpkt_for_buf,		/* Index: 3 */
	sd_initpkt_for_buf,		/* Index: 4 */

	/* Chain for buf IO for removable-media targets (PM enabled) */
	sd_initpkt_for_buf,		/* Index: 5 */
	sd_initpkt_for_buf,		/* Index: 6 */
	sd_initpkt_for_buf,		/* Index: 7 */
	sd_initpkt_for_buf,		/* Index: 8 */

	/* Chain for buf IO for removable-media targets (PM disabled) */
	sd_initpkt_for_buf,		/* Index: 9 */
	sd_initpkt_for_buf,		/* Index: 10 */
	sd_initpkt_for_buf,		/* Index: 11 */

	/* Chain for buf IO for disk drives with checksumming (PM enabled) */
	sd_initpkt_for_buf,		/* Index: 12 */
	sd_initpkt_for_buf,		/* Index: 13 */
	sd_initpkt_for_buf,		/* Index: 14 */
	sd_initpkt_for_buf,		/* Index: 15 */

	/* Chain for buf IO for disk drives with checksumming (PM disabled) */
	sd_initpkt_for_buf,		/* Index: 16 */
	sd_initpkt_for_buf,		/* Index: 17 */
	sd_initpkt_for_buf,		/* Index: 18 */

	/* Chain for USCSI commands (non-checksum targets) */
	sd_initpkt_for_uscsi,		/* Index: 19 */
	sd_initpkt_for_uscsi,		/* Index: 20 */

	/* Chain for USCSI commands (checksum targets) */
	sd_initpkt_for_uscsi,		/* Index: 21 */
	sd_initpkt_for_uscsi,		/* Index: 22 */
	sd_initpkt_for_uscsi,		/* Index: 23 */

	/* Chain for "direct" USCSI commands (all targets) */
	sd_initpkt_for_uscsi,		/* Index: 24 */

	/* Chain for "direct priority" USCSI commands (all targets) */
	sd_initpkt_for_uscsi,		/* Index: 25 */

};


/*
 * Array to map a layering chain index to the appropriate
 destroypkt routine.
 * The redundant entries are present so that the index used for accessing
 * the above sd_iostart_chain and sd_iodone_chain tables can be used directly
 * with this table as well.
 */
typedef void (*sd_destroypkt_t)(struct buf *);

static sd_destroypkt_t	sd_destroypkt_map[] = {

	/* Chain for buf IO for disk drive targets (PM enabled) */
	sd_destroypkt_for_buf,		/* Index: 0 */
	sd_destroypkt_for_buf,		/* Index: 1 */
	sd_destroypkt_for_buf,		/* Index: 2 */

	/* Chain for buf IO for disk drive targets (PM disabled) */
	sd_destroypkt_for_buf,		/* Index: 3 */
	sd_destroypkt_for_buf,		/* Index: 4 */

	/* Chain for buf IO for removable-media targets (PM enabled) */
	sd_destroypkt_for_buf,		/* Index: 5 */
	sd_destroypkt_for_buf,		/* Index: 6 */
	sd_destroypkt_for_buf,		/* Index: 7 */
	sd_destroypkt_for_buf,		/* Index: 8 */

	/* Chain for buf IO for removable-media targets (PM disabled) */
	sd_destroypkt_for_buf,		/* Index: 9 */
	sd_destroypkt_for_buf,		/* Index: 10 */
	sd_destroypkt_for_buf,		/* Index: 11 */

	/* Chain for buf IO for disk drives with checksumming (PM enabled) */
	sd_destroypkt_for_buf,		/* Index: 12 */
	sd_destroypkt_for_buf,		/* Index: 13 */
	sd_destroypkt_for_buf,		/* Index: 14 */
	sd_destroypkt_for_buf,		/* Index: 15 */

	/* Chain for buf IO for disk drives with checksumming (PM disabled) */
	sd_destroypkt_for_buf,		/* Index: 16 */
	sd_destroypkt_for_buf,		/* Index: 17 */
	sd_destroypkt_for_buf,		/* Index: 18 */

	/* Chain for USCSI commands (non-checksum targets) */
	sd_destroypkt_for_uscsi,	/* Index: 19 */
	sd_destroypkt_for_uscsi,	/* Index: 20 */

	/* Chain for USCSI commands (checksum targets) */
	sd_destroypkt_for_uscsi,	/* Index: 21 */
	sd_destroypkt_for_uscsi,	/* Index: 22 */
	sd_destroypkt_for_uscsi,	/* Index: 23 */

	/* Chain for "direct" USCSI
 commands (all targets) */
	sd_destroypkt_for_uscsi,	/* Index: 24 */

	/* Chain for "direct priority" USCSI commands (all targets) */
	sd_destroypkt_for_uscsi,	/* Index: 25 */

};



/*
 * Array to map a layering chain index to the appropriate chain "type".
 * The chain type indicates a specific property/usage of the chain.
 * The redundant entries are present so that the index used for accessing
 * the above sd_iostart_chain and sd_iodone_chain tables can be used directly
 * with this table as well.
 */

#define	SD_CHAIN_NULL			0	/* for the special RQS cmd */
#define	SD_CHAIN_BUFIO			1	/* regular buf IO */
#define	SD_CHAIN_USCSI			2	/* regular USCSI commands */
#define	SD_CHAIN_DIRECT			3	/* uscsi, w/ bypass power mgt */
#define	SD_CHAIN_DIRECT_PRIORITY	4	/* uscsi, w/ bypass power mgt */
						/* (for error recovery) */

static int sd_chain_type_map[] = {

	/* Chain for buf IO for disk drive targets (PM enabled) */
	SD_CHAIN_BUFIO,			/* Index: 0 */
	SD_CHAIN_BUFIO,			/* Index: 1 */
	SD_CHAIN_BUFIO,			/* Index: 2 */

	/* Chain for buf IO for disk drive targets (PM disabled) */
	SD_CHAIN_BUFIO,			/* Index: 3 */
	SD_CHAIN_BUFIO,			/* Index: 4 */

	/* Chain for buf IO for removable-media targets (PM enabled) */
	SD_CHAIN_BUFIO,			/* Index: 5 */
	SD_CHAIN_BUFIO,			/* Index: 6 */
	SD_CHAIN_BUFIO,			/* Index: 7 */
	SD_CHAIN_BUFIO,			/* Index: 8 */

	/* Chain for buf IO for removable-media targets (PM disabled) */
	SD_CHAIN_BUFIO,			/* Index: 9 */
	SD_CHAIN_BUFIO,			/* Index: 10 */
	SD_CHAIN_BUFIO,			/* Index: 11 */

	/* Chain for buf IO for disk drives with checksumming (PM enabled) */
	SD_CHAIN_BUFIO,			/* Index: 12 */
	SD_CHAIN_BUFIO,			/* Index: 13 */
	SD_CHAIN_BUFIO,			/* Index: 14 */
	SD_CHAIN_BUFIO,			/* Index: 15 */

	/* Chain for buf IO for disk drives with checksumming (PM disabled) */
	SD_CHAIN_BUFIO,			/* Index: 16 */
	SD_CHAIN_BUFIO,			/* Index: 17 */
	SD_CHAIN_BUFIO,			/* Index: 18 */

	/* Chain for USCSI commands (non-checksum targets) */
	SD_CHAIN_USCSI,			/* Index: 19 */
	SD_CHAIN_USCSI,			/* Index: 20 */

	/* Chain for USCSI commands (checksum targets) */
	SD_CHAIN_USCSI,			/* Index: 21 */
	SD_CHAIN_USCSI,			/* Index: 22 */
	SD_CHAIN_USCSI,			/* Index: 23 */

	/* Chain for "direct" USCSI commands (all targets) */
	SD_CHAIN_DIRECT,		/* Index: 24 */

	/* Chain for "direct priority" USCSI commands (all targets) */
	SD_CHAIN_DIRECT_PRIORITY,	/* Index: 25 */
};


/* Macro to return TRUE if the IO has come from the sd_buf_iostart() chain. */
#define	SD_IS_BUFIO(xp)			\
	(sd_chain_type_map[(xp)->xb_chain_iostart] == SD_CHAIN_BUFIO)

/* Macro to return TRUE if the IO has come from the "direct priority" chain. */
#define	SD_IS_DIRECT_PRIORITY(xp)	\
	(sd_chain_type_map[(xp)->xb_chain_iostart] == SD_CHAIN_DIRECT_PRIORITY)



/*
 * Struct, array, and macros to map a specific chain to the appropriate
 * layering indexes in the sd_iostart_chain[] and sd_iodone_chain[] arrays.
 *
 * The sd_chain_index_map[] array is used at attach time to set the various
 * un_xxx_chain type members of the sd_lun softstate to the specific layering
 * chain to be used with the instance. This allows different instances to use
 * different chain for buf IO, uscsi IO, etc.. Also, since the xb_chain_iostart
 * and xb_chain_iodone index values in the sd_xbuf are initialized to these
 * values at sd_xbuf init time, this allows (1) layering chains may be changed
 * dynamically & without the use of locking; and (2) a layer may update the
 * xb_chain_io[start|done] member in a given xbuf with its current index value,
 * to allow for deferred processing of an IO within the same chain from a
 * different execution context.
 */

struct sd_chain_index {
	int	sci_iostart_index;
	int	sci_iodone_index;
};

static struct sd_chain_index	sd_chain_index_map[] = {
	{ SD_CHAIN_DISK_IOSTART,		SD_CHAIN_DISK_IODONE },
	{ SD_CHAIN_DISK_IOSTART_NO_PM,		SD_CHAIN_DISK_IODONE_NO_PM },
	{ SD_CHAIN_RMMEDIA_IOSTART,		SD_CHAIN_RMMEDIA_IODONE },
	{ SD_CHAIN_RMMEDIA_IOSTART_NO_PM,	SD_CHAIN_RMMEDIA_IODONE_NO_PM },
	{ SD_CHAIN_CHKSUM_IOSTART,		SD_CHAIN_CHKSUM_IODONE },
	{ SD_CHAIN_CHKSUM_IOSTART_NO_PM,	SD_CHAIN_CHKSUM_IODONE_NO_PM },
	{ SD_CHAIN_USCSI_CMD_IOSTART,		SD_CHAIN_USCSI_CMD_IODONE },
	{ SD_CHAIN_USCSI_CHKSUM_IOSTART,	SD_CHAIN_USCSI_CHKSUM_IODONE },
	{ SD_CHAIN_DIRECT_CMD_IOSTART,		SD_CHAIN_DIRECT_CMD_IODONE },
	{ SD_CHAIN_PRIORITY_CMD_IOSTART,	SD_CHAIN_PRIORITY_CMD_IODONE },
};


/*
 * The following are indexes into the sd_chain_index_map[] array.
 */

/* un->un_buf_chain_type must be set to one of these */
#define	SD_CHAIN_INFO_DISK		0
#define	SD_CHAIN_INFO_DISK_NO_PM	1
#define	SD_CHAIN_INFO_RMMEDIA		2
#define	SD_CHAIN_INFO_RMMEDIA_NO_PM	3
#define	SD_CHAIN_INFO_CHKSUM		4
#define	SD_CHAIN_INFO_CHKSUM_NO_PM	5

/* un->un_uscsi_chain_type must be set to one of these */
#define	SD_CHAIN_INFO_USCSI_CMD		6
/*
 * USCSI with PM disabled is the same as DIRECT: value 8 deliberately
 * aliases SD_CHAIN_INFO_DIRECT_CMD below (both bypass power management).
 */
#define	SD_CHAIN_INFO_USCSI_CMD_NO_PM	8
#define	SD_CHAIN_INFO_USCSI_CHKSUM	7

/* un->un_direct_chain_type must be set to one of these */
#define	SD_CHAIN_INFO_DIRECT_CMD	8

/* un->un_priority_chain_type must be set to one of these */
#define	SD_CHAIN_INFO_PRIORITY_CMD	9

/* size for devid inquiries */
#define	MAX_INQUIRY_SIZE		0xF0

/*
 * Macros used by functions to pass a given buf(9S) struct along to the
 * next function in the layering chain for further processing.
 *
 * In the following macros, passing more than three arguments to the called
 * routines causes the optimizer for the SPARC compiler to stop doing tail
 * call elimination which results in significant performance degradation.
 */
#define	SD_BEGIN_IOSTART(index, un, bp)	\
	((*(sd_iostart_chain[index]))(index, un, bp))

#define	SD_BEGIN_IODONE(index, un, bp)	\
	((*(sd_iodone_chain[index]))(index, un, bp))

/* Advance to the next iostart routine (incrementing index order) */
#define	SD_NEXT_IOSTART(index, un, bp)	\
	((*(sd_iostart_chain[(index) + 1]))((index) + 1, un, bp))

/* Advance to the next iodone routine (decrementing index order) */
#define	SD_NEXT_IODONE(index, un, bp)	\
	((*(sd_iodone_chain[(index) - 1]))((index) - 1, un, bp))

/*
 * Function: _init
 *
 * Description: This is the driver _init(9E) entry point.
 *
 * Return Code: Returns the value from mod_install(9F) or
 *		ddi_soft_state_init(9F) as appropriate.
 *
 * Context: Called when driver module loaded.
 */

int
_init(void)
{
	int err;

	/* establish driver name from module name */
	sd_label = (char *)mod_modname(&modlinkage);

	err = ddi_soft_state_init(&sd_state, sizeof (struct sd_lun),
	    SD_MAXUNIT);

	if (err != 0) {
		return (err);
	}

	/* Global locks protecting detach, logging, and label operations */
	mutex_init(&sd_detach_mutex, NULL, MUTEX_DRIVER, NULL);
	mutex_init(&sd_log_mutex, NULL, MUTEX_DRIVER, NULL);
	mutex_init(&sd_label_mutex, NULL, MUTEX_DRIVER, NULL);

	/* Reservation-reclaim thread synchronization */
	mutex_init(&sd_tr.srq_resv_reclaim_mutex, NULL, MUTEX_DRIVER, NULL);
	cv_init(&sd_tr.srq_resv_reclaim_cv, NULL, CV_DRIVER, NULL);
	cv_init(&sd_tr.srq_inprocess_cv, NULL, CV_DRIVER, NULL);

	/*
	 * it's ok to init here even for fibre device
	 */
	sd_scsi_probe_cache_init();

	sd_scsi_target_lun_init();

	/*
	 * Creating taskq before mod_install ensures that all callers (threads)
	 * that enter the module after a successful mod_install encounter
	 * a valid taskq.
	 */
	sd_taskq_create();

	err = mod_install(&modlinkage);
	if (err != 0) {
		/*
		 * mod_install failed: tear down everything set up above,
		 * in the mirror order of initialization.
		 */
		/* delete taskq if install fails */
		sd_taskq_delete();

		mutex_destroy(&sd_detach_mutex);
		mutex_destroy(&sd_log_mutex);
		mutex_destroy(&sd_label_mutex);

		mutex_destroy(&sd_tr.srq_resv_reclaim_mutex);
		cv_destroy(&sd_tr.srq_resv_reclaim_cv);
		cv_destroy(&sd_tr.srq_inprocess_cv);

		sd_scsi_probe_cache_fini();

		sd_scsi_target_lun_fini();

		ddi_soft_state_fini(&sd_state);
		return (err);
	}

	return (err);
}


/*
 * Function: _fini
 *
 * Description: This is the driver _fini(9E) entry point.
 *
 * Return Code: Returns the value from mod_remove(9F)
 *
 * Context: Called when driver module is unloaded.
 */

int
_fini(void)
{
	int err;

	/* Refuse teardown if the module is still in use */
	if ((err = mod_remove(&modlinkage)) != 0) {
		return (err);
	}

	sd_taskq_delete();

	mutex_destroy(&sd_detach_mutex);
	mutex_destroy(&sd_log_mutex);
	mutex_destroy(&sd_label_mutex);
	mutex_destroy(&sd_tr.srq_resv_reclaim_mutex);

	sd_scsi_probe_cache_fini();

	sd_scsi_target_lun_fini();

	cv_destroy(&sd_tr.srq_resv_reclaim_cv);
	cv_destroy(&sd_tr.srq_inprocess_cv);

	ddi_soft_state_fini(&sd_state);

	return (err);
}


/*
 * Function: _info
 *
 * Description: This is the driver _info(9E) entry point.
 *
 * Arguments: modinfop - pointer to the driver modinfo structure
 *
 * Return Code: Returns the value from mod_info(9F).
 *
 * Context: Kernel thread context
 */

int
_info(struct modinfo *modinfop)
{
	return (mod_info(&modlinkage, modinfop));
}


/*
 * The following routines implement the driver message logging facility.
 * They provide component- and level- based debug output filtering.
 * Output may also be restricted to messages for a single instance by
 * specifying a soft state pointer in sd_debug_un. If sd_debug_un is set
 * to NULL, then messages for all instances are printed.
 *
 * These routines have been cloned from each other due to the language
 * constraints of macros and variable argument list processing.
 */


/*
 * Function: sd_log_err
 *
 * Description: This routine is called by the SD_ERROR macro for debug
 *		logging of error conditions.
 *
 * Arguments: comp - driver component being logged
 *	dev  - pointer to driver info structure
 *	fmt  - error string and format to be logged
 */

static void
sd_log_err(uint_t comp, struct sd_lun *un, const char *fmt, ...)
{
	va_list		ap;
	dev_info_t	*dev;

	ASSERT(un != NULL);
	dev = SD_DEVINFO(un);
	ASSERT(dev != NULL);

	/*
	 * Filter messages based on the global component and level masks.
	 * Also print if un matches the value of sd_debug_un, or if
	 * sd_debug_un is set to NULL.
	 */
	if ((sd_component_mask & comp) && (sd_level_mask & SD_LOGMASK_ERROR) &&
	    ((sd_debug_un == NULL) || (sd_debug_un == un))) {
		mutex_enter(&sd_log_mutex);
		va_start(ap, fmt);
		/*
		 * NOTE(review): vsprintf() writes into sd_log_buf with no
		 * length bound; callers must keep formatted messages within
		 * the buffer. Consider vsnprintf() (verify sd_log_buf size).
		 */
		(void) vsprintf(sd_log_buf, fmt, ap);
		va_end(ap);
		scsi_log(dev, sd_label, CE_CONT, "%s", sd_log_buf);
		mutex_exit(&sd_log_mutex);
	}
#ifdef SD_FAULT_INJECTION
	_NOTE(DATA_READABLE_WITHOUT_LOCK(sd_lun::sd_injection_mask));
	if (un->sd_injection_mask & comp) {
		mutex_enter(&sd_log_mutex);
		va_start(ap, fmt);
		(void) vsprintf(sd_log_buf, fmt, ap);
		va_end(ap);
		sd_injection_log(sd_log_buf, un);
		mutex_exit(&sd_log_mutex);
	}
#endif
}


/*
 * Function: sd_log_info
 *
 * Description: This routine is called by the SD_INFO macro for debug
 *		logging of general purpose informational conditions.
 *
 * Arguments: comp - driver component being logged
 *	dev  - pointer to driver info structure
 *	fmt  - info string and format to be logged
 */

static void
sd_log_info(uint_t component, struct sd_lun *un, const char *fmt, ...)
{
	va_list		ap;
	dev_info_t	*dev;

	ASSERT(un != NULL);
	dev = SD_DEVINFO(un);
	ASSERT(dev != NULL);

	/*
	 * Filter messages based on the global component and level masks.
	 * Also print if un matches the value of sd_debug_un, or if
	 * sd_debug_un is set to NULL.
	 */
	if ((sd_component_mask & component) &&
	    (sd_level_mask & SD_LOGMASK_INFO) &&
	    ((sd_debug_un == NULL) || (sd_debug_un == un))) {
		mutex_enter(&sd_log_mutex);
		va_start(ap, fmt);
		/* sd_log_mutex serializes use of the shared sd_log_buf */
		(void) vsprintf(sd_log_buf, fmt, ap);
		va_end(ap);
		scsi_log(dev, sd_label, CE_CONT, "%s", sd_log_buf);
		mutex_exit(&sd_log_mutex);
	}
#ifdef SD_FAULT_INJECTION
	_NOTE(DATA_READABLE_WITHOUT_LOCK(sd_lun::sd_injection_mask));
	if (un->sd_injection_mask & component) {
		mutex_enter(&sd_log_mutex);
		va_start(ap, fmt);
		(void) vsprintf(sd_log_buf, fmt, ap);
		va_end(ap);
		sd_injection_log(sd_log_buf, un);
		mutex_exit(&sd_log_mutex);
	}
#endif
}


/*
 * Function: sd_log_trace
 *
 * Description: This routine is called by the SD_TRACE macro for debug
 *		logging of trace conditions (i.e. function entry/exit).
 *
 * Arguments: comp - driver component being logged
 *	dev  - pointer to driver info structure
 *	fmt  - trace string and format to be logged
 */

static void
sd_log_trace(uint_t component, struct sd_lun *un, const char *fmt, ...)
{
	va_list		ap;
	dev_info_t	*dev;

	ASSERT(un != NULL);
	dev = SD_DEVINFO(un);
	ASSERT(dev != NULL);

	/*
	 * Filter messages based on the global component and level masks.
	 * Also print if un matches the value of sd_debug_un, or if
	 * sd_debug_un is set to NULL.
	 */
	if ((sd_component_mask & component) &&
	    (sd_level_mask & SD_LOGMASK_TRACE) &&
	    ((sd_debug_un == NULL) || (sd_debug_un == un))) {
		mutex_enter(&sd_log_mutex);
		va_start(ap, fmt);
		(void) vsprintf(sd_log_buf, fmt, ap);
		va_end(ap);
		scsi_log(dev, sd_label, CE_CONT, "%s", sd_log_buf);
		mutex_exit(&sd_log_mutex);
	}
#ifdef SD_FAULT_INJECTION
	_NOTE(DATA_READABLE_WITHOUT_LOCK(sd_lun::sd_injection_mask));
	if (un->sd_injection_mask & component) {
		mutex_enter(&sd_log_mutex);
		va_start(ap, fmt);
		(void) vsprintf(sd_log_buf, fmt, ap);
		va_end(ap);
		sd_injection_log(sd_log_buf, un);
		mutex_exit(&sd_log_mutex);
	}
#endif
}


/*
 * Function: sdprobe
 *
 * Description: This is the driver probe(9e) entry point function.
 *
 * Arguments: devi - opaque device info handle
 *
 * Return Code: DDI_PROBE_SUCCESS: If the probe was successful.
 *		DDI_PROBE_FAILURE: If the probe failed.
 *		DDI_PROBE_PARTIAL: If the instance is not present now,
 *		but may be present in the future.
 */

static int
sdprobe(dev_info_t *devi)
{
	struct scsi_device	*devp;
	int			rval;
	int			instance;

	/*
	 * if it wasn't for pln, sdprobe could actually be nulldev
	 * in the "__fibre" case.
	 */
	/* Self-identifying devices do not need an active probe */
	if (ddi_dev_is_sid(devi) == DDI_SUCCESS) {
		return (DDI_PROBE_DONTCARE);
	}

	devp = ddi_get_driver_private(devi);

	if (devp == NULL) {
		/* Oops... nexus driver is mis-configured... */
		return (DDI_PROBE_FAILURE);
	}

	instance = ddi_get_instance(devi);

	/* Soft state already exists: this instance is already attached */
	if (ddi_get_soft_state(sd_state, instance) != NULL) {
		return (DDI_PROBE_PARTIAL);
	}

	/*
	 * Call the SCSA utility probe routine to see if we actually
	 * have a target at this SCSI nexus.
	 */
	switch (sd_scsi_probe_with_cache(devp, NULL_FUNC)) {
	case SCSIPROBE_EXISTS:
		switch (devp->sd_inq->inq_dtype) {
		case DTYPE_DIRECT:
			rval = DDI_PROBE_SUCCESS;
			break;
		case DTYPE_RODIRECT:
			/* CDs etc. Can be removable media */
			rval = DDI_PROBE_SUCCESS;
			break;
		case DTYPE_OPTICAL:
			/*
			 * Rewritable optical driver HP115AA
			 * Can also be removable media
			 */

			/*
			 * Do not attempt to bind to DTYPE_OPTICAL if
			 * pre solaris 9 sparc sd behavior is required
			 *
			 * If first time through and sd_dtype_optical_bind
			 * has not been set in /etc/system check properties
			 */

			if (sd_dtype_optical_bind < 0) {
				sd_dtype_optical_bind = ddi_prop_get_int
				    (DDI_DEV_T_ANY, devi, 0,
				    "optical-device-bind", 1);
			}

			if (sd_dtype_optical_bind == 0) {
				rval = DDI_PROBE_FAILURE;
			} else {
				rval = DDI_PROBE_SUCCESS;
			}
			break;

		case DTYPE_NOTPRESENT:
		default:
			rval = DDI_PROBE_FAILURE;
			break;
		}
		break;
	default:
		rval = DDI_PROBE_PARTIAL;
		break;
	}

	/*
	 * This routine checks for resource allocation prior to freeing,
	 * so it will take care of the "smart probing" case where a
	 * scsi_probe() may or may not have been issued and will *not*
	 * free previously-freed resources.
	 */
	scsi_unprobe(devp);
	return (rval);
}


/*
 * Function: sdinfo
 *
 * Description: This is the driver getinfo(9e) entry point function.
 * 		Given the device number, return the devinfo pointer from
 *		the scsi_device structure or the instance number
 *		associated with the dev_t.
2590 * 2591 * Arguments: dip - pointer to device info structure 2592 * infocmd - command argument (DDI_INFO_DEVT2DEVINFO, 2593 * DDI_INFO_DEVT2INSTANCE) 2594 * arg - driver dev_t 2595 * resultp - user buffer for request response 2596 * 2597 * Return Code: DDI_SUCCESS 2598 * DDI_FAILURE 2599 */ 2600 /* ARGSUSED */ 2601 static int 2602 sdinfo(dev_info_t *dip, ddi_info_cmd_t infocmd, void *arg, void **result) 2603 { 2604 struct sd_lun *un; 2605 dev_t dev; 2606 int instance; 2607 int error; 2608 2609 switch (infocmd) { 2610 case DDI_INFO_DEVT2DEVINFO: 2611 dev = (dev_t)arg; 2612 instance = SDUNIT(dev); 2613 if ((un = ddi_get_soft_state(sd_state, instance)) == NULL) { 2614 return (DDI_FAILURE); 2615 } 2616 *result = (void *) SD_DEVINFO(un); 2617 error = DDI_SUCCESS; 2618 break; 2619 case DDI_INFO_DEVT2INSTANCE: 2620 dev = (dev_t)arg; 2621 instance = SDUNIT(dev); 2622 *result = (void *)(uintptr_t)instance; 2623 error = DDI_SUCCESS; 2624 break; 2625 default: 2626 error = DDI_FAILURE; 2627 } 2628 return (error); 2629 } 2630 2631 /* 2632 * Function: sd_prop_op 2633 * 2634 * Description: This is the driver prop_op(9e) entry point function. 2635 * Return the number of blocks for the partition in question 2636 * or forward the request to the property facilities. 
2637 * 2638 * Arguments: dev - device number 2639 * dip - pointer to device info structure 2640 * prop_op - property operator 2641 * mod_flags - DDI_PROP_DONTPASS, don't pass to parent 2642 * name - pointer to property name 2643 * valuep - pointer or address of the user buffer 2644 * lengthp - property length 2645 * 2646 * Return Code: DDI_PROP_SUCCESS 2647 * DDI_PROP_NOT_FOUND 2648 * DDI_PROP_UNDEFINED 2649 * DDI_PROP_NO_MEMORY 2650 * DDI_PROP_BUF_TOO_SMALL 2651 */ 2652 2653 static int 2654 sd_prop_op(dev_t dev, dev_info_t *dip, ddi_prop_op_t prop_op, int mod_flags, 2655 char *name, caddr_t valuep, int *lengthp) 2656 { 2657 struct sd_lun *un; 2658 2659 if ((un = ddi_get_soft_state(sd_state, ddi_get_instance(dip))) == NULL) 2660 return (ddi_prop_op(dev, dip, prop_op, mod_flags, 2661 name, valuep, lengthp)); 2662 2663 return (cmlb_prop_op(un->un_cmlbhandle, 2664 dev, dip, prop_op, mod_flags, name, valuep, lengthp, 2665 SDPART(dev), (void *)SD_PATH_DIRECT)); 2666 } 2667 2668 /* 2669 * The following functions are for smart probing: 2670 * sd_scsi_probe_cache_init() 2671 * sd_scsi_probe_cache_fini() 2672 * sd_scsi_clear_probe_cache() 2673 * sd_scsi_probe_with_cache() 2674 */ 2675 2676 /* 2677 * Function: sd_scsi_probe_cache_init 2678 * 2679 * Description: Initializes the probe response cache mutex and head pointer. 2680 * 2681 * Context: Kernel thread context 2682 */ 2683 2684 static void 2685 sd_scsi_probe_cache_init(void) 2686 { 2687 mutex_init(&sd_scsi_probe_cache_mutex, NULL, MUTEX_DRIVER, NULL); 2688 sd_scsi_probe_cache_head = NULL; 2689 } 2690 2691 2692 /* 2693 * Function: sd_scsi_probe_cache_fini 2694 * 2695 * Description: Frees all resources associated with the probe response cache. 
2696 * 2697 * Context: Kernel thread context 2698 */ 2699 2700 static void 2701 sd_scsi_probe_cache_fini(void) 2702 { 2703 struct sd_scsi_probe_cache *cp; 2704 struct sd_scsi_probe_cache *ncp; 2705 2706 /* Clean up our smart probing linked list */ 2707 for (cp = sd_scsi_probe_cache_head; cp != NULL; cp = ncp) { 2708 ncp = cp->next; 2709 kmem_free(cp, sizeof (struct sd_scsi_probe_cache)); 2710 } 2711 sd_scsi_probe_cache_head = NULL; 2712 mutex_destroy(&sd_scsi_probe_cache_mutex); 2713 } 2714 2715 2716 /* 2717 * Function: sd_scsi_clear_probe_cache 2718 * 2719 * Description: This routine clears the probe response cache. This is 2720 * done when open() returns ENXIO so that when deferred 2721 * attach is attempted (possibly after a device has been 2722 * turned on) we will retry the probe. Since we don't know 2723 * which target we failed to open, we just clear the 2724 * entire cache. 2725 * 2726 * Context: Kernel thread context 2727 */ 2728 2729 static void 2730 sd_scsi_clear_probe_cache(void) 2731 { 2732 struct sd_scsi_probe_cache *cp; 2733 int i; 2734 2735 mutex_enter(&sd_scsi_probe_cache_mutex); 2736 for (cp = sd_scsi_probe_cache_head; cp != NULL; cp = cp->next) { 2737 /* 2738 * Reset all entries to SCSIPROBE_EXISTS. This will 2739 * force probing to be performed the next time 2740 * sd_scsi_probe_with_cache is called. 2741 */ 2742 for (i = 0; i < NTARGETS_WIDE; i++) { 2743 cp->cache[i] = SCSIPROBE_EXISTS; 2744 } 2745 } 2746 mutex_exit(&sd_scsi_probe_cache_mutex); 2747 } 2748 2749 2750 /* 2751 * Function: sd_scsi_probe_with_cache 2752 * 2753 * Description: This routine implements support for a scsi device probe 2754 * with cache. The driver maintains a cache of the target 2755 * responses to scsi probes. If we get no response from a 2756 * target during a probe inquiry, we remember that, and we 2757 * avoid additional calls to scsi_probe on non-zero LUNs 2758 * on the same target until the cache is cleared. 
 *	By doing so we avoid the 1/4 sec selection timeout for
 *	nonzero LUNs.  lun0 of a target is always probed.
 *
 * Arguments: devp - Pointer to a scsi_device(9S) structure
 *	waitfunc - indicates what the allocator routines should
 *	    do when resources are not available. This value
 *	    is passed on to scsi_probe() when that routine
 *	    is called.
 *
 * Return Code: SCSIPROBE_NORESP if a NORESP in probe response cache;
 *	otherwise the value returned by scsi_probe(9F).
 *
 * Context: Kernel thread context
 */

static int
sd_scsi_probe_with_cache(struct scsi_device *devp, int (*waitfn)())
{
    struct sd_scsi_probe_cache *cp;
    dev_info_t *pdip = ddi_get_parent(devp->sd_dev);
    int lun, tgt;

    lun = ddi_prop_get_int(DDI_DEV_T_ANY, devp->sd_dev, DDI_PROP_DONTPASS,
        SCSI_ADDR_PROP_LUN, 0);
    tgt = ddi_prop_get_int(DDI_DEV_T_ANY, devp->sd_dev, DDI_PROP_DONTPASS,
        SCSI_ADDR_PROP_TARGET, -1);

    /* Make sure caching enabled and target in range */
    if ((tgt < 0) || (tgt >= NTARGETS_WIDE)) {
        /* do it the old way (no cache) */
        return (scsi_probe(devp, waitfn));
    }

    mutex_enter(&sd_scsi_probe_cache_mutex);

    /* Find the cache for this scsi bus instance */
    for (cp = sd_scsi_probe_cache_head; cp != NULL; cp = cp->next) {
        if (cp->pdip == pdip) {
            break;
        }
    }

    /* If we can't find a cache for this pdip, create one */
    if (cp == NULL) {
        int i;

        cp = kmem_zalloc(sizeof (struct sd_scsi_probe_cache),
            KM_SLEEP);
        cp->pdip = pdip;
        cp->next = sd_scsi_probe_cache_head;
        sd_scsi_probe_cache_head = cp;
        for (i = 0; i < NTARGETS_WIDE; i++) {
            cp->cache[i] = SCSIPROBE_EXISTS;
        }
    }

    mutex_exit(&sd_scsi_probe_cache_mutex);

    /*
     * NOTE(review): cp->cache[tgt] is read/written below after the
     * mutex is dropped; presumably safe because cache nodes are never
     * freed while the driver is loaded and per-target probes are
     * serialized by the framework — TODO confirm.
     */

    /* Recompute the cache for this target if LUN zero */
    if (lun == 0) {
        cp->cache[tgt] = SCSIPROBE_EXISTS;
    }

    /* Don't probe if cache remembers a NORESP from a previous LUN. */
    if (cp->cache[tgt] != SCSIPROBE_EXISTS) {
        return (SCSIPROBE_NORESP);
    }

    /* Do the actual probe; save & return the result */
    return (cp->cache[tgt] = scsi_probe(devp, waitfn));
}


/*
 * Function: sd_scsi_target_lun_init
 *
 * Description: Initializes the attached lun chain mutex and head pointer.
 *
 * Context: Kernel thread context
 */

static void
sd_scsi_target_lun_init(void)
{
    mutex_init(&sd_scsi_target_lun_mutex, NULL, MUTEX_DRIVER, NULL);
    sd_scsi_target_lun_head = NULL;
}


/*
 * Function: sd_scsi_target_lun_fini
 *
 * Description: Frees all resources associated with the attached lun
 *	chain
 *
 * Context: Kernel thread context
 */

static void
sd_scsi_target_lun_fini(void)
{
    struct sd_scsi_hba_tgt_lun *cp;
    struct sd_scsi_hba_tgt_lun *ncp;

    for (cp = sd_scsi_target_lun_head; cp != NULL; cp = ncp) {
        /* Save the next pointer before freeing the current node. */
        ncp = cp->next;
        kmem_free(cp, sizeof (struct sd_scsi_hba_tgt_lun));
    }
    sd_scsi_target_lun_head = NULL;
    mutex_destroy(&sd_scsi_target_lun_mutex);
}


/*
 * Function: sd_scsi_get_target_lun_count
 *
 * Description: This routine will check in the attached lun chain to see
 *	how many luns are attached on the required SCSI controller
 *	and target. Currently, some capabilities like tagged queue
 *	are supported per target based by HBA. So all luns in a
 *	target have the same capabilities. Based on this assumption,
 *	sd should only set these capabilities once per target. This
 *	function is called when sd needs to decide how many luns
 *	already attached on a target.
 *
 * Arguments: dip - Pointer to the system's dev_info_t for the SCSI
 *	controller device.
 *	target - The target ID on the controller's SCSI bus.
2887 * 2888 * Return Code: The number of luns attached on the required target and 2889 * controller. 2890 * -1 if target ID is not in parallel SCSI scope or the given 2891 * dip is not in the chain. 2892 * 2893 * Context: Kernel thread context 2894 */ 2895 2896 static int 2897 sd_scsi_get_target_lun_count(dev_info_t *dip, int target) 2898 { 2899 struct sd_scsi_hba_tgt_lun *cp; 2900 2901 if ((target < 0) || (target >= NTARGETS_WIDE)) { 2902 return (-1); 2903 } 2904 2905 mutex_enter(&sd_scsi_target_lun_mutex); 2906 2907 for (cp = sd_scsi_target_lun_head; cp != NULL; cp = cp->next) { 2908 if (cp->pdip == dip) { 2909 break; 2910 } 2911 } 2912 2913 mutex_exit(&sd_scsi_target_lun_mutex); 2914 2915 if (cp == NULL) { 2916 return (-1); 2917 } 2918 2919 return (cp->nlun[target]); 2920 } 2921 2922 2923 /* 2924 * Function: sd_scsi_update_lun_on_target 2925 * 2926 * Description: This routine is used to update the attached lun chain when a 2927 * lun is attached or detached on a target. 2928 * 2929 * Arguments: dip - Pointer to the system's dev_info_t for the SCSI 2930 * controller device. 2931 * target - The target ID on the controller's SCSI bus. 2932 * flag - Indicate the lun is attached or detached. 
 *
 * Context: Kernel thread context
 */

static void
sd_scsi_update_lun_on_target(dev_info_t *dip, int target, int flag)
{
    struct sd_scsi_hba_tgt_lun *cp;

    mutex_enter(&sd_scsi_target_lun_mutex);

    /* Look up the chain node for this controller (dip). */
    for (cp = sd_scsi_target_lun_head; cp != NULL; cp = cp->next) {
        if (cp->pdip == dip) {
            break;
        }
    }

    /* First lun attached on this controller: create a new chain node. */
    if ((cp == NULL) && (flag == SD_SCSI_LUN_ATTACH)) {
        cp = kmem_zalloc(sizeof (struct sd_scsi_hba_tgt_lun),
            KM_SLEEP);
        cp->pdip = dip;
        cp->next = sd_scsi_target_lun_head;
        sd_scsi_target_lun_head = cp;
    }

    mutex_exit(&sd_scsi_target_lun_mutex);

    /*
     * NOTE(review): nlun[] is updated after the mutex is dropped;
     * presumably attach/detach of luns on one target is serialized by
     * the DDI framework — TODO confirm.
     */
    if (cp != NULL) {
        if (flag == SD_SCSI_LUN_ATTACH) {
            cp->nlun[target] ++;
        } else {
            cp->nlun[target] --;
        }
    }
}


/*
 * Function: sd_spin_up_unit
 *
 * Description: Issues the following commands to spin-up the device:
 *	START STOP UNIT, and INQUIRY.
 *
 * Arguments: ssc - ssc contains pointer to driver soft state (unit)
 *	structure for this target.
 *
 * Return Code: 0 - success
 *	EIO - failure
 *	EACCES - reservation conflict
 *
 * Context: Kernel thread context
 */

static int
sd_spin_up_unit(sd_ssc_t *ssc)
{
    size_t resid = 0;
    int has_conflict = FALSE;
    uchar_t *bufaddr;
    int status;
    struct sd_lun *un;

    ASSERT(ssc != NULL);
    un = ssc->ssc_un;
    ASSERT(un != NULL);

    /*
     * Send a throwaway START UNIT command.
     *
     * If we fail on this, we don't care presently what precisely
     * is wrong.  EMC's arrays will also fail this with a check
     * condition (0x2/0x4/0x3) if the device is "inactive," but
     * we don't want to fail the attach because it may become
     * "active" later.
     */
    status = sd_send_scsi_START_STOP_UNIT(ssc, SD_TARGET_START,
        SD_PATH_DIRECT);

    if (status != 0) {
        /* Remember a reservation conflict; report it after INQUIRY. */
        if (status == EACCES)
            has_conflict = TRUE;
        sd_ssc_assessment(ssc, SD_FMT_IGNORE);
    }

    /*
     * Send another INQUIRY command to the target. This is necessary for
     * non-removable media direct access devices because their INQUIRY data
     * may not be fully qualified until they are spun up (perhaps via the
     * START command above).  Note: This seems to be needed for some
     * legacy devices only.) The INQUIRY command should succeed even if a
     * Reservation Conflict is present.
     */
    bufaddr = kmem_zalloc(SUN_INQSIZE, KM_SLEEP);

    if (sd_send_scsi_INQUIRY(ssc, bufaddr, SUN_INQSIZE, 0, 0, &resid)
        != 0) {
        kmem_free(bufaddr, SUN_INQSIZE);
        sd_ssc_assessment(ssc, SD_FMT_STATUS_CHECK);
        return (EIO);
    }

    /*
     * If we got enough INQUIRY data, copy it over the old INQUIRY data.
     * Note that this routine does not return a failure here even if the
     * INQUIRY command did not return any data.  This is a legacy behavior.
     */
    if ((SUN_INQSIZE - resid) >= SUN_MIN_INQLEN) {
        bcopy(bufaddr, SD_INQUIRY(un), SUN_INQSIZE);
    }

    kmem_free(bufaddr, SUN_INQSIZE);

    /* If we hit a reservation conflict above, tell the caller. */
    if (has_conflict == TRUE) {
        return (EACCES);
    }

    return (0);
}

#ifdef _LP64
/*
 * Function: sd_enable_descr_sense
 *
 * Description: This routine attempts to select descriptor sense format
 *	using the Control mode page.  Devices that support 64 bit
 *	LBAs (for >2TB luns) should also implement descriptor
 *	sense data so we will call this function whenever we see
 *	a lun larger than 2TB.  If for some reason the device
 *	supports 64 bit LBAs but doesn't support descriptor
 *	sense presumably the mode select will fail.
 *	Everything will continue to work normally except that we
 *	will not get complete sense data for commands that fail
 *	with an LBA larger than 32 bits.
 *
 * Arguments: ssc - ssc contains pointer to driver soft state (unit)
 *	structure for this target.
 *
 * Context: Kernel thread context only
 */

static void
sd_enable_descr_sense(sd_ssc_t *ssc)
{
    uchar_t *header;
    struct mode_control_scsi3 *ctrl_bufp;
    size_t buflen;
    size_t bd_len;
    int status;
    struct sd_lun *un;

    ASSERT(ssc != NULL);
    un = ssc->ssc_un;
    ASSERT(un != NULL);

    /*
     * Read MODE SENSE page 0xA, Control Mode Page
     */
    buflen = MODE_HEADER_LENGTH + MODE_BLK_DESC_LENGTH +
        sizeof (struct mode_control_scsi3);
    header = kmem_zalloc(buflen, KM_SLEEP);

    status = sd_send_scsi_MODE_SENSE(ssc, CDB_GROUP0, header, buflen,
        MODEPAGE_CTRL_MODE, SD_PATH_DIRECT);

    if (status != 0) {
        SD_ERROR(SD_LOG_COMMON, un,
            "sd_enable_descr_sense: mode sense ctrl page failed\n");
        goto eds_exit;
    }

    /*
     * Determine size of Block Descriptors in order to locate
     * the mode page data. ATAPI devices return 0, SCSI devices
     * should return MODE_BLK_DESC_LENGTH.
     */
    bd_len = ((struct mode_header *)header)->bdesc_length;

    /* Clear the mode data length field for MODE SELECT */
    ((struct mode_header *)header)->length = 0;

    ctrl_bufp = (struct mode_control_scsi3 *)
        (header + MODE_HEADER_LENGTH + bd_len);

    /*
     * If the page length is smaller than the expected value,
     * the target device doesn't support D_SENSE. Bail out here.
     */
    if (ctrl_bufp->mode_page.length <
        sizeof (struct mode_control_scsi3) - 2) {
        SD_ERROR(SD_LOG_COMMON, un,
            "sd_enable_descr_sense: enable D_SENSE failed\n");
        goto eds_exit;
    }

    /*
     * Clear PS bit for MODE SELECT
     */
    ctrl_bufp->mode_page.ps = 0;

    /*
     * Set D_SENSE to enable descriptor sense format.
     */
    ctrl_bufp->d_sense = 1;

    sd_ssc_assessment(ssc, SD_FMT_IGNORE);

    /*
     * Use MODE SELECT to commit the change to the D_SENSE bit
     */
    status = sd_send_scsi_MODE_SELECT(ssc, CDB_GROUP0, header,
        buflen, SD_DONTSAVE_PAGE, SD_PATH_DIRECT);

    if (status != 0) {
        SD_INFO(SD_LOG_COMMON, un,
            "sd_enable_descr_sense: mode select ctrl page failed\n");
    } else {
        /* Success: skip the extra assessment done on the error path. */
        kmem_free(header, buflen);
        return;
    }

eds_exit:
    sd_ssc_assessment(ssc, SD_FMT_IGNORE);
    kmem_free(header, buflen);
}

/*
 * Function: sd_reenable_dsense_task
 *
 * Description: Re-enable descriptor sense after device or bus reset
 *
 * Context: Executes in a taskq() thread context
 */
static void
sd_reenable_dsense_task(void *arg)
{
    struct sd_lun *un = arg;
    sd_ssc_t *ssc;

    ASSERT(un != NULL);

    ssc = sd_ssc_init(un);
    sd_enable_descr_sense(ssc);
    sd_ssc_fini(ssc);
}
#endif /* _LP64 */

/*
 * Function: sd_set_mmc_caps
 *
 * Description: This routine determines if the device is MMC compliant and if
 *	the device supports CDDA via a mode sense of the CDVD
 *	capabilities mode page. Also checks if the device is a
 *	dvdram writable device.
 *
 * Arguments: ssc - ssc contains pointer to driver soft state (unit)
 *	structure for this target.
 *
 * Context: Kernel thread context only
 */

static void
sd_set_mmc_caps(sd_ssc_t *ssc)
{
    struct mode_header_grp2 *sense_mhp;
    uchar_t *sense_page;
    caddr_t buf;
    int bd_len;
    int status;
    struct uscsi_cmd com;
    int rtn;
    uchar_t *out_data_rw, *out_data_hd;
    uchar_t *rqbuf_rw, *rqbuf_hd;
    struct sd_lun *un;

    ASSERT(ssc != NULL);
    un = ssc->ssc_un;
    ASSERT(un != NULL);

    /*
     * The flags which will be set in this function are - mmc compliant,
     * dvdram writable device, cdda support. Initialize them to FALSE
     * and if a capability is detected - it will be set to TRUE.
     */
    un->un_f_mmc_cap = FALSE;
    un->un_f_dvdram_writable_device = FALSE;
    un->un_f_cfg_cdda = FALSE;

    buf = kmem_zalloc(BUFLEN_MODE_CDROM_CAP, KM_SLEEP);
    status = sd_send_scsi_MODE_SENSE(ssc, CDB_GROUP1, (uchar_t *)buf,
        BUFLEN_MODE_CDROM_CAP, MODEPAGE_CDROM_CAP, SD_PATH_DIRECT);

    sd_ssc_assessment(ssc, SD_FMT_IGNORE);

    if (status != 0) {
        /* command failed; just return */
        kmem_free(buf, BUFLEN_MODE_CDROM_CAP);
        return;
    }
    /*
     * If the mode sense request for the CDROM CAPABILITIES
     * page (0x2A) succeeds the device is assumed to be MMC.
     */
    un->un_f_mmc_cap = TRUE;

    /* Get to the page data */
    sense_mhp = (struct mode_header_grp2 *)buf;
    bd_len = (sense_mhp->bdesc_length_hi << 8) |
        sense_mhp->bdesc_length_lo;
    if (bd_len > MODE_BLK_DESC_LENGTH) {
        /*
         * We did not get back the expected block descriptor
         * length so we cannot determine if the device supports
         * CDDA. However, we still indicate the device is MMC
         * according to the successful response to the page
         * 0x2A mode sense request.
         */
        scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
            "sd_set_mmc_caps: Mode Sense returned "
            "invalid block descriptor length\n");
        kmem_free(buf, BUFLEN_MODE_CDROM_CAP);
        return;
    }

    /* See if read CDDA is supported */
    sense_page = (uchar_t *)(buf + MODE_HEADER_LENGTH_GRP2 +
        bd_len);
    un->un_f_cfg_cdda = (sense_page[5] & 0x01) ? TRUE : FALSE;

    /* See if writing DVD RAM is supported. */
    un->un_f_dvdram_writable_device = (sense_page[3] & 0x20) ? TRUE : FALSE;
    if (un->un_f_dvdram_writable_device == TRUE) {
        kmem_free(buf, BUFLEN_MODE_CDROM_CAP);
        return;
    }

    /*
     * If the device presents DVD or CD capabilities in the mode
     * page, we can return here since a RRD will not have
     * these capabilities.
     */
    if ((sense_page[2] & 0x3f) || (sense_page[3] & 0x3f)) {
        kmem_free(buf, BUFLEN_MODE_CDROM_CAP);
        return;
    }
    kmem_free(buf, BUFLEN_MODE_CDROM_CAP);

    /*
     * If un->un_f_dvdram_writable_device is still FALSE,
     * check for a Removable Rigid Disk (RRD).  A RRD
     * device is identified by the features RANDOM_WRITABLE and
     * HARDWARE_DEFECT_MANAGEMENT.
     */
    out_data_rw = kmem_zalloc(SD_CURRENT_FEATURE_LEN, KM_SLEEP);
    rqbuf_rw = kmem_zalloc(SENSE_LENGTH, KM_SLEEP);

    rtn = sd_send_scsi_feature_GET_CONFIGURATION(ssc, &com, rqbuf_rw,
        SENSE_LENGTH, out_data_rw, SD_CURRENT_FEATURE_LEN,
        RANDOM_WRITABLE, SD_PATH_STANDARD);

    sd_ssc_assessment(ssc, SD_FMT_IGNORE);

    if (rtn != 0) {
        kmem_free(out_data_rw, SD_CURRENT_FEATURE_LEN);
        kmem_free(rqbuf_rw, SENSE_LENGTH);
        return;
    }

    out_data_hd = kmem_zalloc(SD_CURRENT_FEATURE_LEN, KM_SLEEP);
    rqbuf_hd = kmem_zalloc(SENSE_LENGTH, KM_SLEEP);

    rtn = sd_send_scsi_feature_GET_CONFIGURATION(ssc, &com, rqbuf_hd,
        SENSE_LENGTH, out_data_hd, SD_CURRENT_FEATURE_LEN,
        HARDWARE_DEFECT_MANAGEMENT, SD_PATH_STANDARD);

    sd_ssc_assessment(ssc, SD_FMT_IGNORE);

    if (rtn == 0) {
        /*
         * We have good information, check for random writable
         * and hardware defect features.
         */
        if ((out_data_rw[9] & RANDOM_WRITABLE) &&
            (out_data_hd[9] & HARDWARE_DEFECT_MANAGEMENT)) {
            un->un_f_dvdram_writable_device = TRUE;
        }
    }

    kmem_free(out_data_rw, SD_CURRENT_FEATURE_LEN);
    kmem_free(rqbuf_rw, SENSE_LENGTH);
    kmem_free(out_data_hd, SD_CURRENT_FEATURE_LEN);
    kmem_free(rqbuf_hd, SENSE_LENGTH);
}

/*
 * Function: sd_check_for_writable_cd
 *
 * Description: This routine determines if the media in the device is
 *	writable or not. It uses the get configuration command (0x46)
 *	to determine if the media is writable
 *
 * Arguments: ssc - ssc contains pointer to driver soft state (unit)
 *	structure for this target.
 *	path_flag - SD_PATH_DIRECT to use the USCSI "direct"
 *	    chain and the normal command waitq, or
 *	    SD_PATH_DIRECT_PRIORITY to use the USCSI
 *	    "direct" chain and bypass the normal command
 *	    waitq.
 *
 * Context: Never called at interrupt context.
 */

static void
sd_check_for_writable_cd(sd_ssc_t *ssc, int path_flag)
{
    struct uscsi_cmd com;
    uchar_t *out_data;
    uchar_t *rqbuf;
    int rtn;
    uchar_t *out_data_rw, *out_data_hd;
    uchar_t *rqbuf_rw, *rqbuf_hd;
    struct mode_header_grp2 *sense_mhp;
    uchar_t *sense_page;
    caddr_t buf;
    int bd_len;
    int status;
    struct sd_lun *un;

    ASSERT(ssc != NULL);
    un = ssc->ssc_un;
    ASSERT(un != NULL);
    /* Caller must hold the unit mutex; it is dropped around I/O below. */
    ASSERT(mutex_owned(SD_MUTEX(un)));

    /*
     * Initialize the writable media to false, if configuration info.
     * tells us otherwise then only we will set it.
     */
    un->un_f_mmc_writable_media = FALSE;
    mutex_exit(SD_MUTEX(un));

    out_data = kmem_zalloc(SD_PROFILE_HEADER_LEN, KM_SLEEP);
    rqbuf = kmem_zalloc(SENSE_LENGTH, KM_SLEEP);

    rtn = sd_send_scsi_GET_CONFIGURATION(ssc, &com, rqbuf, SENSE_LENGTH,
        out_data, SD_PROFILE_HEADER_LEN, path_flag);

    if (rtn != 0)
        sd_ssc_assessment(ssc, SD_FMT_IGNORE);

    mutex_enter(SD_MUTEX(un));
    if (rtn == 0) {
        /*
         * We have good information, check for writable DVD.
         */
        if ((out_data[6] == 0) && (out_data[7] == 0x12)) {
            un->un_f_mmc_writable_media = TRUE;
            kmem_free(out_data, SD_PROFILE_HEADER_LEN);
            kmem_free(rqbuf, SENSE_LENGTH);
            return;
        }
    }

    kmem_free(out_data, SD_PROFILE_HEADER_LEN);
    kmem_free(rqbuf, SENSE_LENGTH);

    /*
     * Determine if this is a RRD type device.
     */
    mutex_exit(SD_MUTEX(un));
    buf = kmem_zalloc(BUFLEN_MODE_CDROM_CAP, KM_SLEEP);
    status = sd_send_scsi_MODE_SENSE(ssc, CDB_GROUP1, (uchar_t *)buf,
        BUFLEN_MODE_CDROM_CAP, MODEPAGE_CDROM_CAP, path_flag);

    sd_ssc_assessment(ssc, SD_FMT_IGNORE);

    mutex_enter(SD_MUTEX(un));
    if (status != 0) {
        /* command failed; just return */
        kmem_free(buf, BUFLEN_MODE_CDROM_CAP);
        return;
    }

    /* Get to the page data */
    sense_mhp = (struct mode_header_grp2 *)buf;
    bd_len = (sense_mhp->bdesc_length_hi << 8) | sense_mhp->bdesc_length_lo;
    if (bd_len > MODE_BLK_DESC_LENGTH) {
        /*
         * We did not get back the expected block descriptor length so
         * we cannot check the mode page.
         */
        scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
            "sd_check_for_writable_cd: Mode Sense returned "
            "invalid block descriptor length\n");
        kmem_free(buf, BUFLEN_MODE_CDROM_CAP);
        return;
    }

    /*
     * If the device presents DVD or CD capabilities in the mode
     * page, we can return here since a RRD device will not have
     * these capabilities.
     */
    sense_page = (uchar_t *)(buf + MODE_HEADER_LENGTH_GRP2 + bd_len);
    if ((sense_page[2] & 0x3f) || (sense_page[3] & 0x3f)) {
        kmem_free(buf, BUFLEN_MODE_CDROM_CAP);
        return;
    }
    kmem_free(buf, BUFLEN_MODE_CDROM_CAP);

    /*
     * If un->un_f_mmc_writable_media is still FALSE,
     * check for RRD type media.  A RRD device is identified
     * by the features RANDOM_WRITABLE and HARDWARE_DEFECT_MANAGEMENT.
     */
    mutex_exit(SD_MUTEX(un));
    out_data_rw = kmem_zalloc(SD_CURRENT_FEATURE_LEN, KM_SLEEP);
    rqbuf_rw = kmem_zalloc(SENSE_LENGTH, KM_SLEEP);

    rtn = sd_send_scsi_feature_GET_CONFIGURATION(ssc, &com, rqbuf_rw,
        SENSE_LENGTH, out_data_rw, SD_CURRENT_FEATURE_LEN,
        RANDOM_WRITABLE, path_flag);

    sd_ssc_assessment(ssc, SD_FMT_IGNORE);
    if (rtn != 0) {
        kmem_free(out_data_rw, SD_CURRENT_FEATURE_LEN);
        kmem_free(rqbuf_rw, SENSE_LENGTH);
        mutex_enter(SD_MUTEX(un));
        return;
    }

    out_data_hd = kmem_zalloc(SD_CURRENT_FEATURE_LEN, KM_SLEEP);
    rqbuf_hd = kmem_zalloc(SENSE_LENGTH, KM_SLEEP);

    rtn = sd_send_scsi_feature_GET_CONFIGURATION(ssc, &com, rqbuf_hd,
        SENSE_LENGTH, out_data_hd, SD_CURRENT_FEATURE_LEN,
        HARDWARE_DEFECT_MANAGEMENT, path_flag);

    sd_ssc_assessment(ssc, SD_FMT_IGNORE);
    mutex_enter(SD_MUTEX(un));
    if (rtn == 0) {
        /*
         * We have good information, check for random writable
         * and hardware defect features as current.
         */
        if ((out_data_rw[9] & RANDOM_WRITABLE) &&
            (out_data_rw[10] & 0x1) &&
            (out_data_hd[9] & HARDWARE_DEFECT_MANAGEMENT) &&
            (out_data_hd[10] & 0x1)) {
            un->un_f_mmc_writable_media = TRUE;
        }
    }

    kmem_free(out_data_rw, SD_CURRENT_FEATURE_LEN);
    kmem_free(rqbuf_rw, SENSE_LENGTH);
    kmem_free(out_data_hd, SD_CURRENT_FEATURE_LEN);
    kmem_free(rqbuf_hd, SENSE_LENGTH);
}

/*
 * Function: sd_read_unit_properties
 *
 * Description: The following implements a property lookup mechanism.
 *	Properties for particular disks (keyed on vendor, model
 *	and rev numbers) are sought in the sd.conf file via
 *	sd_process_sdconf_file(), and if not found there, are
 *	looked for in a list hardcoded in this driver via
 *	sd_process_sdconf_table() Once located the properties
 *	are used to update the driver unit structure.
3500 * 3501 * Arguments: un - driver soft state (unit) structure 3502 */ 3503 3504 static void 3505 sd_read_unit_properties(struct sd_lun *un) 3506 { 3507 /* 3508 * sd_process_sdconf_file returns SD_FAILURE if it cannot find 3509 * the "sd-config-list" property (from the sd.conf file) or if 3510 * there was not a match for the inquiry vid/pid. If this event 3511 * occurs the static driver configuration table is searched for 3512 * a match. 3513 */ 3514 ASSERT(un != NULL); 3515 if (sd_process_sdconf_file(un) == SD_FAILURE) { 3516 sd_process_sdconf_table(un); 3517 } 3518 3519 /* check for LSI device */ 3520 sd_is_lsi(un); 3521 3522 3523 } 3524 3525 3526 /* 3527 * Function: sd_process_sdconf_file 3528 * 3529 * Description: Use ddi_prop_lookup(9F) to obtain the properties from the 3530 * driver's config file (ie, sd.conf) and update the driver 3531 * soft state structure accordingly. 3532 * 3533 * Arguments: un - driver soft state (unit) structure 3534 * 3535 * Return Code: SD_SUCCESS - The properties were successfully set according 3536 * to the driver configuration file. 3537 * SD_FAILURE - The driver config list was not obtained or 3538 * there was no vid/pid match. This indicates that 3539 * the static config table should be used. 3540 * 3541 * The config file has a property, "sd-config-list". Currently we support 3542 * two kinds of formats. For both formats, the value of this property 3543 * is a list of duplets: 3544 * 3545 * sd-config-list= 3546 * <duplet>, 3547 * [,<duplet>]*; 3548 * 3549 * For the improved format, where 3550 * 3551 * <duplet>:= "<vid+pid>","<tunable-list>" 3552 * 3553 * and 3554 * 3555 * <tunable-list>:= <tunable> [, <tunable> ]*; 3556 * <tunable> = <name> : <value> 3557 * 3558 * The <vid+pid> is the string that is returned by the target device on a 3559 * SCSI inquiry command, the <tunable-list> contains one or more tunables 3560 * to apply to all target devices with the specified <vid+pid>. 
 *
 * Each <tunable> is a "<name> : <value>" pair.
 *
 * For the old format, the structure of each duplet is as follows:
 *
 *  <duplet>:= "<vid+pid>","<data-property-name_list>"
 *
 * The first entry of the duplet is the device ID string (the concatenated
 * vid & pid; not to be confused with a device_id).  This is defined in
 * the same way as in the sd_disk_table.
 *
 * The second part of the duplet is a string that identifies a
 * data-property-name-list. The data-property-name-list is defined as
 * follows:
 *
 *  <data-property-name-list>:=<data-property-name> [<data-property-name>]
 *
 * The syntax of <data-property-name> depends on the <version> field.
 *
 * If version = SD_CONF_VERSION_1 we have the following syntax:
 *
 *	<data-property-name>:=<version>,<flags>,<prop0>,<prop1>,.....<propN>
 *
 * where the prop0 value will be used to set prop0 if bit0 set in the
 * flags, prop1 if bit1 set, etc. and N = SD_CONF_MAX_ITEMS -1
 *
 */

static int
sd_process_sdconf_file(struct sd_lun *un)
{
	char	**config_list = NULL;
	uint_t	nelements;
	char	*vidptr;
	int	vidlen;
	char	*dnlist_ptr;
	char	*dataname_ptr;
	char	*dataname_lasts;
	int	*data_list = NULL;
	uint_t	data_list_len;
	int	rval = SD_FAILURE;
	int	i;

	ASSERT(un != NULL);

	/* Obtain the configuration list associated with the .conf file */
	if (ddi_prop_lookup_string_array(DDI_DEV_T_ANY, SD_DEVINFO(un),
	    DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, sd_config_list,
	    &config_list, &nelements) != DDI_PROP_SUCCESS) {
		return (SD_FAILURE);
	}

	/*
	 * Compare vids in each duplet to the inquiry vid - if a match is
	 * made, get the data value and update the soft state structure
	 * accordingly.
	 *
	 * Each duplet should show as a pair of strings, return SD_FAILURE
	 * otherwise.
	 */
	if (nelements & 1) {
		/* An odd element count means a malformed duplet list. */
		scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
		    "sd-config-list should show as pairs of strings.\n");
		if (config_list)
			ddi_prop_free(config_list);
		return (SD_FAILURE);
	}

	/*
	 * NOTE(review): 'i' is signed while 'nelements' is uint_t; the
	 * comparison promotes 'i' to unsigned.  Harmless for realistic
	 * property counts, but worth confirming.
	 */
	for (i = 0; i < nelements; i += 2) {
		/*
		 * Note: The assumption here is that each vid entry is on
		 * a unique line from its associated duplet.
		 */
		vidptr = config_list[i];
		vidlen = (int)strlen(vidptr);
		if ((vidlen == 0) ||
		    (sd_sdconf_id_match(un, vidptr, vidlen) != SD_SUCCESS)) {
			continue;
		}

		/*
		 * dnlist contains 1 or more blank separated
		 * data-property-name entries
		 */
		dnlist_ptr = config_list[i + 1];

		if (strchr(dnlist_ptr, ':') != NULL) {
			/*
			 * Decode the improved format sd-config-list.
			 * The presence of a ':' distinguishes the
			 * "<name> : <value>" tunable syntax from the old
			 * data-property-name list.
			 */
			sd_nvpair_str_decode(un, dnlist_ptr);
		} else {
			/*
			 * The old format sd-config-list, loop through all
			 * data-property-name entries in the
			 * data-property-name-list
			 * setting the properties for each.
			 */
			for (dataname_ptr = sd_strtok_r(dnlist_ptr, " \t",
			    &dataname_lasts); dataname_ptr != NULL;
			    dataname_ptr = sd_strtok_r(NULL, " \t",
			    &dataname_lasts)) {
				int version;

				SD_INFO(SD_LOG_ATTACH_DETACH, un,
				    "sd_process_sdconf_file: disk:%s, "
				    "data:%s\n", vidptr, dataname_ptr);

				/* Get the data list */
				if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY,
				    SD_DEVINFO(un), 0, dataname_ptr, &data_list,
				    &data_list_len) != DDI_PROP_SUCCESS) {
					SD_INFO(SD_LOG_ATTACH_DETACH, un,
					    "sd_process_sdconf_file: data "
					    "property (%s) has no value\n",
					    dataname_ptr);
					continue;
				}

				/* data_list[0] is the format version word. */
				version = data_list[0];

				if (version == SD_CONF_VERSION_1) {
					sd_tunables values;

					/* Set the properties */
					if (sd_chk_vers1_data(un, data_list[1],
					    &data_list[2], data_list_len,
					    dataname_ptr) == SD_SUCCESS) {
						sd_get_tunables_from_conf(un,
						    data_list[1], &data_list[2],
						    &values);
						sd_set_vers1_properties(un,
						    data_list[1], &values);
						rval = SD_SUCCESS;
					} else {
						rval = SD_FAILURE;
					}
				} else {
					scsi_log(SD_DEVINFO(un), sd_label,
					    CE_WARN, "data property %s version "
					    "0x%x is invalid.",
					    dataname_ptr, version);
					rval = SD_FAILURE;
				}
				/*
				 * Free the per-entry int array before the
				 * next lookup reuses data_list.
				 */
				if (data_list)
					ddi_prop_free(data_list);
			}
		}
	}

	/* free up the memory allocated by ddi_prop_lookup_string_array(). */
	if (config_list) {
		ddi_prop_free(config_list);
	}

	return (rval);
}

/*
 * Function: sd_nvpair_str_decode()
 *
 * Description: Parse the improved format sd-config-list to get
 *    each entry of tunable, which includes a name-value pair.
 *    Then call sd_set_properties() to set the property.
3725 * 3726 * Arguments: un - driver soft state (unit) structure 3727 * nvpair_str - the tunable list 3728 */ 3729 static void 3730 sd_nvpair_str_decode(struct sd_lun *un, char *nvpair_str) 3731 { 3732 char *nv, *name, *value, *token; 3733 char *nv_lasts, *v_lasts, *x_lasts; 3734 3735 for (nv = sd_strtok_r(nvpair_str, ",", &nv_lasts); nv != NULL; 3736 nv = sd_strtok_r(NULL, ",", &nv_lasts)) { 3737 token = sd_strtok_r(nv, ":", &v_lasts); 3738 name = sd_strtok_r(token, " \t", &x_lasts); 3739 token = sd_strtok_r(NULL, ":", &v_lasts); 3740 value = sd_strtok_r(token, " \t", &x_lasts); 3741 if (name == NULL || value == NULL) { 3742 SD_INFO(SD_LOG_ATTACH_DETACH, un, 3743 "sd_nvpair_str_decode: " 3744 "name or value is not valid!\n"); 3745 } else { 3746 sd_set_properties(un, name, value); 3747 } 3748 } 3749 } 3750 3751 /* 3752 * Function: sd_strtok_r() 3753 * 3754 * Description: This function uses strpbrk and strspn to break 3755 * string into tokens on sequentially subsequent calls. Return 3756 * NULL when no non-separator characters remain. The first 3757 * argument is NULL for subsequent calls. 3758 */ 3759 static char * 3760 sd_strtok_r(char *string, const char *sepset, char **lasts) 3761 { 3762 char *q, *r; 3763 3764 /* First or subsequent call */ 3765 if (string == NULL) 3766 string = *lasts; 3767 3768 if (string == NULL) 3769 return (NULL); 3770 3771 /* Skip leading separators */ 3772 q = string + strspn(string, sepset); 3773 3774 if (*q == '\0') 3775 return (NULL); 3776 3777 if ((r = strpbrk(q, sepset)) == NULL) 3778 *lasts = NULL; 3779 else { 3780 *r = '\0'; 3781 *lasts = r + 1; 3782 } 3783 return (q); 3784 } 3785 3786 /* 3787 * Function: sd_set_properties() 3788 * 3789 * Description: Set device properties based on the improved 3790 * format sd-config-list. 
 *
 * Arguments: un - driver soft state (unit) structure
 *            name - supported tunable name
 *            value - tunable value
 */
static void
sd_set_properties(struct sd_lun *un, char *name, char *value)
{
	char	*endptr = NULL;
	long	val = 0;

	/* Boolean tunable: treat the cache as non-volatile. */
	if (strcasecmp(name, "cache-nonvolatile") == 0) {
		if (strcasecmp(value, "true") == 0) {
			un->un_f_suppress_cache_flush = TRUE;
		} else if (strcasecmp(value, "false") == 0) {
			un->un_f_suppress_cache_flush = FALSE;
		} else {
			goto value_invalid;
		}
		SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_set_properties: "
		    "suppress_cache_flush flag set to %d\n",
		    un->un_f_suppress_cache_flush);
		return;
	}

	if (strcasecmp(name, "controller-type") == 0) {
		if (ddi_strtol(value, &endptr, 0, &val) == 0) {
			un->un_ctype = val;
		} else {
			goto value_invalid;
		}
		SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_set_properties: "
		    "ctype set to %d\n", un->un_ctype);
		return;
	}

	if (strcasecmp(name, "delay-busy") == 0) {
		if (ddi_strtol(value, &endptr, 0, &val) == 0) {
			/*
			 * NOTE(review): val / 1000 is passed to
			 * drv_usectohz(), which takes microseconds --
			 * presumably the tunable is specified in
			 * nanoseconds.  TODO: confirm the unit.
			 */
			un->un_busy_timeout = drv_usectohz(val / 1000);
		} else {
			goto value_invalid;
		}
		SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_set_properties: "
		    "busy_timeout set to %d\n", un->un_busy_timeout);
		return;
	}

	/*
	 * Note the inverted sense: disksort=true means sorting is
	 * enabled, i.e. the "disabled" flag is cleared.
	 */
	if (strcasecmp(name, "disksort") == 0) {
		if (strcasecmp(value, "true") == 0) {
			un->un_f_disksort_disabled = FALSE;
		} else if (strcasecmp(value, "false") == 0) {
			un->un_f_disksort_disabled = TRUE;
		} else {
			goto value_invalid;
		}
		SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_set_properties: "
		    "disksort disabled flag set to %d\n",
		    un->un_f_disksort_disabled);
		return;
	}

	if (strcasecmp(name, "timeout-releasereservation") == 0) {
		if (ddi_strtol(value, &endptr, 0, &val) == 0) {
			un->un_reserve_release_time = val;
		} else {
			goto value_invalid;
		}
		SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_set_properties: "
		    "reservation release timeout set to %d\n",
		    un->un_reserve_release_time);
		return;
	}

	if (strcasecmp(name, "reset-lun") == 0) {
		if (strcasecmp(value, "true") == 0) {
			un->un_f_lun_reset_enabled = TRUE;
		} else if (strcasecmp(value, "false") == 0) {
			un->un_f_lun_reset_enabled = FALSE;
		} else {
			goto value_invalid;
		}
		SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_set_properties: "
		    "lun reset enabled flag set to %d\n",
		    un->un_f_lun_reset_enabled);
		return;
	}

	if (strcasecmp(name, "retries-busy") == 0) {
		if (ddi_strtol(value, &endptr, 0, &val) == 0) {
			un->un_busy_retry_count = val;
		} else {
			goto value_invalid;
		}
		SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_set_properties: "
		    "busy retry count set to %d\n", un->un_busy_retry_count);
		return;
	}

	if (strcasecmp(name, "retries-timeout") == 0) {
		if (ddi_strtol(value, &endptr, 0, &val) == 0) {
			un->un_retry_count = val;
		} else {
			goto value_invalid;
		}
		SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_set_properties: "
		    "timeout retry count set to %d\n", un->un_retry_count);
		return;
	}

	if (strcasecmp(name, "retries-notready") == 0) {
		if (ddi_strtol(value, &endptr, 0, &val) == 0) {
			un->un_notready_retry_count = val;
		} else {
			goto value_invalid;
		}
		SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_set_properties: "
		    "notready retry count set to %d\n",
		    un->un_notready_retry_count);
		return;
	}

	if (strcasecmp(name, "retries-reset") == 0) {
		if (ddi_strtol(value, &endptr, 0, &val) == 0) {
			un->un_reset_retry_count = val;
		} else {
			goto value_invalid;
		}
		SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_set_properties: "
		    "reset retry count set to %d\n",
		    un->un_reset_retry_count);
		return;
	}

	/*
	 * The two throttle tunables deliberately do NOT return here:
	 * they fall through to the joint throttle validation below.
	 */
	if (strcasecmp(name, "throttle-max") == 0) {
		if (ddi_strtol(value, &endptr, 0, &val) == 0) {
			un->un_saved_throttle = un->un_throttle = val;
		} else {
			goto value_invalid;
		}
		SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_set_properties: "
		    "throttle set to %d\n", un->un_throttle);
	}

	if (strcasecmp(name, "throttle-min") == 0) {
		if (ddi_strtol(value, &endptr, 0, &val) == 0) {
			un->un_min_throttle = val;
		} else {
			goto value_invalid;
		}
		SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_set_properties: "
		    "min throttle set to %d\n", un->un_min_throttle);
	}

	/*
	 * Validate the throttle values.
	 * If any of the numbers are invalid, set everything to defaults.
	 * Note: an unrecognized tunable name also reaches this point and
	 * is otherwise silently ignored.
	 */
	if ((un->un_throttle < SD_LOWEST_VALID_THROTTLE) ||
	    (un->un_min_throttle < SD_LOWEST_VALID_THROTTLE) ||
	    (un->un_min_throttle > un->un_throttle)) {
		un->un_saved_throttle = un->un_throttle = sd_max_throttle;
		un->un_min_throttle = sd_min_throttle;
	}
	return;

value_invalid:
	SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_set_properties: "
	    "value of prop %s is invalid\n", name);
}

/*
 * Function: sd_get_tunables_from_conf()
 *
 *
 * This function reads the data list from the sd.conf file and pulls
 * the values that can have numeric values as arguments and places
 * the values in the appropriate sd_tunables member.
 * Since the order of the data list members varies across platforms
 * This function reads them from the data list in a platform specific
 * order and places them into the correct sd_tunable member that is
 * consistent across all platforms.
 */
static void
sd_get_tunables_from_conf(struct sd_lun *un, int flags, int *data_list,
    sd_tunables *values)
{
	int i;
	int mask;

	/* Start from a clean slate; unset flags leave members zeroed. */
	bzero(values, sizeof (sd_tunables));

	/*
	 * Each set bit in 'flags' selects the data_list entry at the same
	 * bit position, i.e. the list is indexed positionally by flag bit.
	 */
	for (i = 0; i < SD_CONF_MAX_ITEMS; i++) {

		mask = 1 << i;
		if (mask > flags) {
			/* No higher bits can be set; stop scanning. */
			break;
		}

		switch (mask & flags) {
		case 0:	/* This mask bit not set in flags */
			continue;
		case SD_CONF_BSET_THROTTLE:
			values->sdt_throttle = data_list[i];
			SD_INFO(SD_LOG_ATTACH_DETACH, un,
			    "sd_get_tunables_from_conf: throttle = %d\n",
			    values->sdt_throttle);
			break;
		case SD_CONF_BSET_CTYPE:
			values->sdt_ctype = data_list[i];
			SD_INFO(SD_LOG_ATTACH_DETACH, un,
			    "sd_get_tunables_from_conf: ctype = %d\n",
			    values->sdt_ctype);
			break;
		case SD_CONF_BSET_NRR_COUNT:
			values->sdt_not_rdy_retries = data_list[i];
			SD_INFO(SD_LOG_ATTACH_DETACH, un,
			    "sd_get_tunables_from_conf: not_rdy_retries = %d\n",
			    values->sdt_not_rdy_retries);
			break;
		case SD_CONF_BSET_BSY_RETRY_COUNT:
			values->sdt_busy_retries = data_list[i];
			SD_INFO(SD_LOG_ATTACH_DETACH, un,
			    "sd_get_tunables_from_conf: busy_retries = %d\n",
			    values->sdt_busy_retries);
			break;
		case SD_CONF_BSET_RST_RETRIES:
			values->sdt_reset_retries = data_list[i];
			SD_INFO(SD_LOG_ATTACH_DETACH, un,
			    "sd_get_tunables_from_conf: reset_retries = %d\n",
			    values->sdt_reset_retries);
			break;
		case SD_CONF_BSET_RSV_REL_TIME:
			values->sdt_reserv_rel_time = data_list[i];
			SD_INFO(SD_LOG_ATTACH_DETACH, un,
			    "sd_get_tunables_from_conf: reserv_rel_time = %d\n",
			    values->sdt_reserv_rel_time);
			break;
		case SD_CONF_BSET_MIN_THROTTLE:
			values->sdt_min_throttle = data_list[i];
			SD_INFO(SD_LOG_ATTACH_DETACH, un,
			    "sd_get_tunables_from_conf: min_throttle = %d\n",
			    values->sdt_min_throttle);
			break;
		case SD_CONF_BSET_DISKSORT_DISABLED:
			values->sdt_disk_sort_dis = data_list[i];
			SD_INFO(SD_LOG_ATTACH_DETACH, un,
			    "sd_get_tunables_from_conf: disk_sort_dis = %d\n",
			    values->sdt_disk_sort_dis);
			break;
		case SD_CONF_BSET_LUN_RESET_ENABLED:
			values->sdt_lun_reset_enable = data_list[i];
			SD_INFO(SD_LOG_ATTACH_DETACH, un,
			    "sd_get_tunables_from_conf: lun_reset_enable = %d"
			    "\n", values->sdt_lun_reset_enable);
			break;
		case SD_CONF_BSET_CACHE_IS_NV:
			values->sdt_suppress_cache_flush = data_list[i];
			SD_INFO(SD_LOG_ATTACH_DETACH, un,
			    "sd_get_tunables_from_conf: \
			    suppress_cache_flush = %d"
			    "\n", values->sdt_suppress_cache_flush);
			break;
		}
	}
}

/*
 * Function: sd_process_sdconf_table
 *
 * Description: Search the static configuration table for a match on the
 *		inquiry vid/pid and update the driver soft state structure
 *		according to the table property values for the device.
 *
 *		The form of a configuration table entry is:
 *		  <vid+pid>,<flags>,<property-data>
 *		  "SEAGATE ST42400N",1,0x40000,
 *		  0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1;
 *
 * Arguments: un - driver soft state (unit) structure
 */

static void
sd_process_sdconf_table(struct sd_lun *un)
{
	char	*id = NULL;
	int	table_index;
	int	idlen;

	ASSERT(un != NULL);
	for (table_index = 0; table_index < sd_disk_table_size;
	    table_index++) {
		id = sd_disk_table[table_index].device_id;
		idlen = strlen(id);
		if (idlen == 0) {
			/* Skip empty table entries. */
			continue;
		}

		/*
		 * The static configuration table currently does not
		 * implement version 10 properties. Additionally,
		 * multiple data-property-name entries are not
		 * implemented in the static configuration table.
		 */
		if (sd_sdconf_id_match(un, id, idlen) == SD_SUCCESS) {
			SD_INFO(SD_LOG_ATTACH_DETACH, un,
			    "sd_process_sdconf_table: disk %s\n", id);
			sd_set_vers1_properties(un,
			    sd_disk_table[table_index].flags,
			    sd_disk_table[table_index].properties);
			/* First matching table entry wins. */
			break;
		}
	}
}


/*
 * Function: sd_sdconf_id_match
 *
 * Description: This local function implements a case sensitive vid/pid
 *		comparison as well as the boundary cases of wild card and
 *		multiple blanks.
 *
 *		Note: An implicit assumption made here is that the scsi
 *		inquiry structure will always keep the vid, pid and
 *		revision strings in consecutive sequence, so they can be
 *		read as a single string. If this assumption is not the
 *		case, a separate string, to be used for the check, needs
 *		to be built with these strings concatenated.
 *
 * Arguments: un - driver soft state (unit) structure
 *	      id - table or config file vid/pid
 *	      idlen - length of the vid/pid (bytes)
 *
 * Return Code: SD_SUCCESS - Indicates a match with the inquiry vid/pid
 *		SD_FAILURE - Indicates no match with the inquiry vid/pid
 */

static int
sd_sdconf_id_match(struct sd_lun *un, char *id, int idlen)
{
	struct scsi_inquiry	*sd_inq;
	int			rval = SD_SUCCESS;

	ASSERT(un != NULL);
	sd_inq = un->un_sd->sd_inq;
	ASSERT(id != NULL);

	/*
	 * We use the inq_vid as a pointer to a buffer containing the
	 * vid and pid and use the entire vid/pid length of the table
	 * entry for the comparison. This works because the inq_pid
	 * data member follows inq_vid in the scsi_inquiry structure.
	 */
	if (strncasecmp(sd_inq->inq_vid, id, idlen) != 0) {
		/*
		 * The user id string is compared to the inquiry vid/pid
		 * using a case insensitive comparison and ignoring
		 * multiple spaces.
		 */
		rval = sd_blank_cmp(un, id, idlen);
		if (rval != SD_SUCCESS) {
			/*
			 * User id strings that start and end with a "*"
			 * are a special case. These do not have a
			 * specific vendor, and the product string can
			 * appear anywhere in the 16 byte PID portion of
			 * the inquiry data. This is a simple strstr()
			 * type search for the user id in the inquiry data.
			 */
			if ((id[0] == '*') && (id[idlen - 1] == '*')) {
				char	*pidptr = &id[1];
				int	i;
				int	j;
				/* PID text between the two '*' markers. */
				int	pidstrlen = idlen - 2;
				j = sizeof (SD_INQUIRY(un)->inq_pid) -
				    pidstrlen;

				if (j < 0) {
					/* Pattern longer than inq_pid. */
					return (SD_FAILURE);
				}
				/*
				 * NOTE(review): the loop bound 'i < j'
				 * skips the final alignment (offset j);
				 * 'i <= j' would also try a pattern
				 * flush against the end of inq_pid --
				 * confirm whether that case matters.
				 */
				for (i = 0; i < j; i++) {
					if (bcmp(&SD_INQUIRY(un)->inq_pid[i],
					    pidptr, pidstrlen) == 0) {
						rval = SD_SUCCESS;
						break;
					}
				}
			}
		}
	}
	return (rval);
}


/*
 * Function: sd_blank_cmp
 *
 * Description: If the id string starts and ends with a space, treat
 *		multiple consecutive spaces as equivalent to a single
 *		space. For example, this causes a sd_disk_table entry
 *		of " NEC CDROM " to match a device's id string of
 *		"NEC CDROM".
 *
 *		Note: The success exit condition for this routine is if
 *		the pointer to the table entry is '\0' and the cnt of
 *		the inquiry length is zero. This will happen if the inquiry
 *		string returned by the device is padded with spaces to be
 *		exactly 24 bytes in length (8 byte vid + 16 byte pid). The
 *		SCSI spec states that the inquiry string is to be padded with
 *		spaces.
 *
 * Arguments: un - driver soft state (unit) structure
 *	      id - table or config file vid/pid
 *	      idlen - length of the vid/pid (bytes)
 *
 * Return Code: SD_SUCCESS - Indicates a match with the inquiry vid/pid
 *		SD_FAILURE - Indicates no match with the inquiry vid/pid
 */

static int
sd_blank_cmp(struct sd_lun *un, char *id, int idlen)
{
	char	*p1;
	char	*p2;
	int	cnt;
	/*
	 * cnt tracks the bytes remaining in the inquiry vid+pid buffer,
	 * which is NOT NUL-terminated (see note below).
	 */
	cnt = sizeof (SD_INQUIRY(un)->inq_vid) +
	    sizeof (SD_INQUIRY(un)->inq_pid);

	ASSERT(un != NULL);
	p2 = un->un_sd->sd_inq->inq_vid;
	ASSERT(id != NULL);
	p1 = id;

	/* Only blank-bracketed table entries use the relaxed compare. */
	if ((id[0] == ' ') && (id[idlen - 1] == ' ')) {
		/*
		 * Note: string p1 is terminated by a NUL but string p2
		 * isn't.  The end of p2 is determined by cnt.
		 */
		for (;;) {
			/* skip over any extra blanks in both strings */
			while ((*p1 != '\0') && (*p1 == ' ')) {
				p1++;
			}
			while ((cnt != 0) && (*p2 == ' ')) {
				p2++;
				cnt--;
			}

			/* compare the two strings */
			if ((cnt == 0) ||
			    (SD_TOUPPER(*p1) != SD_TOUPPER(*p2))) {
				break;
			}
			/* advance past the matching run of characters */
			while ((cnt > 0) &&
			    (SD_TOUPPER(*p1) == SD_TOUPPER(*p2))) {
				p1++;
				p2++;
				cnt--;
			}
		}
	}

	/* return SD_SUCCESS if both strings match */
	return (((*p1 == '\0') && (cnt == 0)) ?
SD_SUCCESS : SD_FAILURE); 4255 } 4256 4257 4258 /* 4259 * Function: sd_chk_vers1_data 4260 * 4261 * Description: Verify the version 1 device properties provided by the 4262 * user via the configuration file 4263 * 4264 * Arguments: un - driver soft state (unit) structure 4265 * flags - integer mask indicating properties to be set 4266 * prop_list - integer list of property values 4267 * list_len - number of the elements 4268 * 4269 * Return Code: SD_SUCCESS - Indicates the user provided data is valid 4270 * SD_FAILURE - Indicates the user provided data is invalid 4271 */ 4272 4273 static int 4274 sd_chk_vers1_data(struct sd_lun *un, int flags, int *prop_list, 4275 int list_len, char *dataname_ptr) 4276 { 4277 int i; 4278 int mask = 1; 4279 int index = 0; 4280 4281 ASSERT(un != NULL); 4282 4283 /* Check for a NULL property name and list */ 4284 if (dataname_ptr == NULL) { 4285 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 4286 "sd_chk_vers1_data: NULL data property name."); 4287 return (SD_FAILURE); 4288 } 4289 if (prop_list == NULL) { 4290 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 4291 "sd_chk_vers1_data: %s NULL data property list.", 4292 dataname_ptr); 4293 return (SD_FAILURE); 4294 } 4295 4296 /* Display a warning if undefined bits are set in the flags */ 4297 if (flags & ~SD_CONF_BIT_MASK) { 4298 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 4299 "sd_chk_vers1_data: invalid bits 0x%x in data list %s. " 4300 "Properties not set.", 4301 (flags & ~SD_CONF_BIT_MASK), dataname_ptr); 4302 return (SD_FAILURE); 4303 } 4304 4305 /* 4306 * Verify the length of the list by identifying the highest bit set 4307 * in the flags and validating that the property list has a length 4308 * up to the index of this bit. 
4309 */ 4310 for (i = 0; i < SD_CONF_MAX_ITEMS; i++) { 4311 if (flags & mask) { 4312 index++; 4313 } 4314 mask = 1 << i; 4315 } 4316 if (list_len < (index + 2)) { 4317 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 4318 "sd_chk_vers1_data: " 4319 "Data property list %s size is incorrect. " 4320 "Properties not set.", dataname_ptr); 4321 scsi_log(SD_DEVINFO(un), sd_label, CE_CONT, "Size expected: " 4322 "version + 1 flagword + %d properties", SD_CONF_MAX_ITEMS); 4323 return (SD_FAILURE); 4324 } 4325 return (SD_SUCCESS); 4326 } 4327 4328 4329 /* 4330 * Function: sd_set_vers1_properties 4331 * 4332 * Description: Set version 1 device properties based on a property list 4333 * retrieved from the driver configuration file or static 4334 * configuration table. Version 1 properties have the format: 4335 * 4336 * <data-property-name>:=<version>,<flags>,<prop0>,<prop1>,.....<propN> 4337 * 4338 * where the prop0 value will be used to set prop0 if bit0 4339 * is set in the flags 4340 * 4341 * Arguments: un - driver soft state (unit) structure 4342 * flags - integer mask indicating properties to be set 4343 * prop_list - integer list of property values 4344 */ 4345 4346 static void 4347 sd_set_vers1_properties(struct sd_lun *un, int flags, sd_tunables *prop_list) 4348 { 4349 ASSERT(un != NULL); 4350 4351 /* 4352 * Set the flag to indicate cache is to be disabled. An attempt 4353 * to disable the cache via sd_cache_control() will be made 4354 * later during attach once the basic initialization is complete. 
	 */
	if (flags & SD_CONF_BSET_NOCACHE) {
		un->un_f_opt_disable_cache = TRUE;
		SD_INFO(SD_LOG_ATTACH_DETACH, un,
		    "sd_set_vers1_properties: caching disabled flag set\n");
	}

	/* CD-specific configuration parameters */
	if (flags & SD_CONF_BSET_PLAYMSF_BCD) {
		un->un_f_cfg_playmsf_bcd = TRUE;
		SD_INFO(SD_LOG_ATTACH_DETACH, un,
		    "sd_set_vers1_properties: playmsf_bcd set\n");
	}
	if (flags & SD_CONF_BSET_READSUB_BCD) {
		un->un_f_cfg_readsub_bcd = TRUE;
		SD_INFO(SD_LOG_ATTACH_DETACH, un,
		    "sd_set_vers1_properties: readsub_bcd set\n");
	}
	if (flags & SD_CONF_BSET_READ_TOC_TRK_BCD) {
		un->un_f_cfg_read_toc_trk_bcd = TRUE;
		SD_INFO(SD_LOG_ATTACH_DETACH, un,
		    "sd_set_vers1_properties: read_toc_trk_bcd set\n");
	}
	if (flags & SD_CONF_BSET_READ_TOC_ADDR_BCD) {
		un->un_f_cfg_read_toc_addr_bcd = TRUE;
		SD_INFO(SD_LOG_ATTACH_DETACH, un,
		    "sd_set_vers1_properties: read_toc_addr_bcd set\n");
	}
	if (flags & SD_CONF_BSET_NO_READ_HEADER) {
		un->un_f_cfg_no_read_header = TRUE;
		SD_INFO(SD_LOG_ATTACH_DETACH, un,
		    "sd_set_vers1_properties: no_read_header set\n");
	}
	if (flags & SD_CONF_BSET_READ_CD_XD4) {
		un->un_f_cfg_read_cd_xd4 = TRUE;
		SD_INFO(SD_LOG_ATTACH_DETACH, un,
		    "sd_set_vers1_properties: read_cd_xd4 set\n");
	}

	/* Support for devices which do not have valid/unique serial numbers */
	if (flags & SD_CONF_BSET_FAB_DEVID) {
		un->un_f_opt_fab_devid = TRUE;
		SD_INFO(SD_LOG_ATTACH_DETACH, un,
		    "sd_set_vers1_properties: fab_devid bit set\n");
	}

	/* Support for user throttle configuration */
	if (flags & SD_CONF_BSET_THROTTLE) {
		ASSERT(prop_list != NULL);
		un->un_saved_throttle = un->un_throttle =
		    prop_list->sdt_throttle;
		SD_INFO(SD_LOG_ATTACH_DETACH, un,
		    "sd_set_vers1_properties: throttle set to %d\n",
		    prop_list->sdt_throttle);
	}

	/* Set the per disk retry count according to the conf file or table. */
	if (flags & SD_CONF_BSET_NRR_COUNT) {
		ASSERT(prop_list != NULL);
		/* A zero value deliberately leaves the default in place. */
		if (prop_list->sdt_not_rdy_retries) {
			un->un_notready_retry_count =
			    prop_list->sdt_not_rdy_retries;
			SD_INFO(SD_LOG_ATTACH_DETACH, un,
			    "sd_set_vers1_properties: not ready retry count"
			    " set to %d\n", un->un_notready_retry_count);
		}
	}

	/* The controller type is reported for generic disk driver ioctls */
	if (flags & SD_CONF_BSET_CTYPE) {
		ASSERT(prop_list != NULL);
		switch (prop_list->sdt_ctype) {
		case CTYPE_CDROM:
			un->un_ctype = prop_list->sdt_ctype;
			SD_INFO(SD_LOG_ATTACH_DETACH, un,
			    "sd_set_vers1_properties: ctype set to "
			    "CTYPE_CDROM\n");
			break;
		case CTYPE_CCS:
			un->un_ctype = prop_list->sdt_ctype;
			SD_INFO(SD_LOG_ATTACH_DETACH, un,
			    "sd_set_vers1_properties: ctype set to "
			    "CTYPE_CCS\n");
			break;
		case CTYPE_ROD:		/* RW optical */
			un->un_ctype = prop_list->sdt_ctype;
			SD_INFO(SD_LOG_ATTACH_DETACH, un,
			    "sd_set_vers1_properties: ctype set to "
			    "CTYPE_ROD\n");
			break;
		default:
			/* Unknown ctype values are rejected, not stored. */
			scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
			    "sd_set_vers1_properties: Could not set "
			    "invalid ctype value (%d)",
			    prop_list->sdt_ctype);
		}
	}

	/* Purple failover timeout */
	if (flags & SD_CONF_BSET_BSY_RETRY_COUNT) {
		ASSERT(prop_list != NULL);
		un->un_busy_retry_count =
		    prop_list->sdt_busy_retries;
		SD_INFO(SD_LOG_ATTACH_DETACH, un,
		    "sd_set_vers1_properties: "
		    "busy retry count set to %d\n",
		    un->un_busy_retry_count);
	}

	/* Purple reset retry count */
	if (flags & SD_CONF_BSET_RST_RETRIES) {
		ASSERT(prop_list != NULL);
		un->un_reset_retry_count =
		    prop_list->sdt_reset_retries;
		SD_INFO(SD_LOG_ATTACH_DETACH, un,
		    "sd_set_vers1_properties: "
		    "reset retry count set to %d\n",
		    un->un_reset_retry_count);
	}

	/* Purple reservation release timeout */
	if (flags & SD_CONF_BSET_RSV_REL_TIME) {
		ASSERT(prop_list != NULL);
		un->un_reserve_release_time =
		    prop_list->sdt_reserv_rel_time;
		SD_INFO(SD_LOG_ATTACH_DETACH, un,
		    "sd_set_vers1_properties: "
		    "reservation release timeout set to %d\n",
		    un->un_reserve_release_time);
	}

	/*
	 * Driver flag telling the driver to verify that no commands are pending
	 * for a device before issuing a Test Unit Ready. This is a workaround
	 * for a firmware bug in some Seagate eliteI drives.
	 */
	if (flags & SD_CONF_BSET_TUR_CHECK) {
		un->un_f_cfg_tur_check = TRUE;
		SD_INFO(SD_LOG_ATTACH_DETACH, un,
		    "sd_set_vers1_properties: tur queue check set\n");
	}

	/*
	 * NOTE(review): unlike the branches above, this one dereferences
	 * prop_list without an ASSERT(prop_list != NULL) — inconsistent
	 * with its siblings; confirm whether the ASSERT was intentionally
	 * omitted.
	 */
	if (flags & SD_CONF_BSET_MIN_THROTTLE) {
		un->un_min_throttle = prop_list->sdt_min_throttle;
		SD_INFO(SD_LOG_ATTACH_DETACH, un,
		    "sd_set_vers1_properties: min throttle set to %d\n",
		    un->un_min_throttle);
	}

	if (flags & SD_CONF_BSET_DISKSORT_DISABLED) {
		un->un_f_disksort_disabled =
		    (prop_list->sdt_disk_sort_dis != 0) ?
		    TRUE : FALSE;
		SD_INFO(SD_LOG_ATTACH_DETACH, un,
		    "sd_set_vers1_properties: disksort disabled "
		    "flag set to %d\n",
		    prop_list->sdt_disk_sort_dis);
	}

	if (flags & SD_CONF_BSET_LUN_RESET_ENABLED) {
		un->un_f_lun_reset_enabled =
		    (prop_list->sdt_lun_reset_enable != 0) ?
		    TRUE : FALSE;
		SD_INFO(SD_LOG_ATTACH_DETACH, un,
		    "sd_set_vers1_properties: lun reset enabled "
		    "flag set to %d\n",
		    prop_list->sdt_lun_reset_enable);
	}

	if (flags & SD_CONF_BSET_CACHE_IS_NV) {
		un->un_f_suppress_cache_flush =
		    (prop_list->sdt_suppress_cache_flush != 0) ?
		    TRUE : FALSE;
		SD_INFO(SD_LOG_ATTACH_DETACH, un,
		    "sd_set_vers1_properties: suppress_cache_flush "
		    "flag set to %d\n",
		    prop_list->sdt_suppress_cache_flush);
	}

	/*
	 * Validate the throttle values.
	 * If any of the numbers are invalid, set everything to defaults.
	 */
	if ((un->un_throttle < SD_LOWEST_VALID_THROTTLE) ||
	    (un->un_min_throttle < SD_LOWEST_VALID_THROTTLE) ||
	    (un->un_min_throttle > un->un_throttle)) {
		un->un_saved_throttle = un->un_throttle = sd_max_throttle;
		un->un_min_throttle = sd_min_throttle;
	}
}

/*
 * Function: sd_is_lsi()
 *
 * Description: Check for lsi devices, step through the static device
 *	table to match vid/pid.
 *
 * Args: un - ptr to sd_lun
 *
 * Notes: When creating new LSI property, need to add the new LSI property
 *		to this function.
 */
static void
sd_is_lsi(struct sd_lun *un)
{
	char	*id = NULL;
	int	table_index;
	int	idlen;
	void	*prop;

	ASSERT(un != NULL);
	for (table_index = 0; table_index < sd_disk_table_size;
	    table_index++) {
		id = sd_disk_table[table_index].device_id;
		idlen = strlen(id);
		if (idlen == 0) {
			continue;
		}

		if (sd_sdconf_id_match(un, id, idlen) == SD_SUCCESS) {
			/*
			 * Identify LSI devices by comparing the table
			 * entry's property pointer against the known
			 * LSI/Symbios property lists.
			 */
			prop = sd_disk_table[table_index].properties;
			if (prop == &lsi_properties ||
			    prop == &lsi_oem_properties ||
			    prop == &lsi_properties_scsi ||
			    prop == &symbios_properties) {
				un->un_f_cfg_is_lsi = TRUE;
			}
			break;
		}
	}
}

/*
 * Function: sd_get_physical_geometry
 *
 * Description: Retrieve the MODE SENSE page 3 (Format Device Page) and
 *		MODE SENSE page 4 (Rigid Disk Drive Geometry Page) from the
 *		target, and use this information to initialize the physical
 *		geometry cache specified by pgeom_p.
 *
 *		MODE SENSE is an optional command, so failure in this case
 *		does not necessarily denote an error. We want to use the
 *		MODE SENSE commands to derive the physical geometry of the
 *		device, but if either command fails, the logical geometry is
 *		used as the fallback for disk label geometry in cmlb.
 *
 *		This requires that un->un_blockcount and un->un_tgt_blocksize
 *		have already been initialized for the current target and
 *		that the current values be passed as args so that we don't
 *		end up ever trying to use -1 as a valid value. This could
 *		happen if either value is reset while we're not holding
 *		the mutex.
 *
 * Arguments: un - driver soft state (unit) structure
 *
 *	path_flag - SD_PATH_DIRECT to use the USCSI "direct" chain and
 *		the normal command waitq, or SD_PATH_DIRECT_PRIORITY
 *		to use the USCSI "direct" chain and bypass the normal
 *		command waitq.
 *
 * Context: Kernel thread only (can sleep).
 */

static int
sd_get_physical_geometry(struct sd_lun *un, cmlb_geom_t *pgeom_p,
    diskaddr_t capacity, int lbasize, int path_flag)
{
	struct mode_format	*page3p;
	struct mode_geometry	*page4p;
	struct mode_header	*headerp;
	int	sector_size;
	int	nsect;
	int	nhead;
	int	ncyl;
	int	intrlv;
	int	spc;
	diskaddr_t	modesense_capacity;
	int	rpm;
	int	bd_len;
	int	mode_header_length;
	uchar_t	*p3bufp;
	uchar_t	*p4bufp;
	int	cdbsize;
	int	ret = EIO;	/* assume failure until both pages parse */
	sd_ssc_t	*ssc;
	int	status;

	ASSERT(un != NULL);

	/* Default the LBA size when the caller did not supply one. */
	if (lbasize == 0) {
		if (ISCD(un)) {
			lbasize = 2048;
		} else {
			lbasize = un->un_sys_blocksize;
		}
	}
	pgeom_p->g_secsize = (unsigned short)lbasize;

	/*
	 * If the unit is a cd/dvd drive MODE SENSE page three
	 * and MODE SENSE page four are reserved (see SBC spec
	 * and MMC spec). To prevent soft errors just return
	 * using the default LBA size.
	 */
	if (ISCD(un))
		return (ret);

	cdbsize = (un->un_f_cfg_is_atapi == TRUE) ? CDB_GROUP2 : CDB_GROUP0;

	/*
	 * Retrieve MODE SENSE page 3 - Format Device Page
	 */
	p3bufp = kmem_zalloc(SD_MODE_SENSE_PAGE3_LENGTH, KM_SLEEP);
	ssc = sd_ssc_init(un);
	status = sd_send_scsi_MODE_SENSE(ssc, cdbsize, p3bufp,
	    SD_MODE_SENSE_PAGE3_LENGTH, SD_MODE_SENSE_PAGE3_CODE, path_flag);
	if (status != 0) {
		SD_ERROR(SD_LOG_COMMON, un,
		    "sd_get_physical_geometry: mode sense page 3 failed\n");
		goto page3_exit;
	}

	/*
	 * Determine size of Block Descriptors in order to locate the mode
	 * page data. ATAPI devices return 0, SCSI devices should return
	 * MODE_BLK_DESC_LENGTH.
	 */
	headerp = (struct mode_header *)p3bufp;
	if (un->un_f_cfg_is_atapi == TRUE) {
		struct mode_header_grp2 *mhp =
		    (struct mode_header_grp2 *)headerp;
		mode_header_length = MODE_HEADER_LENGTH_GRP2;
		bd_len = (mhp->bdesc_length_hi << 8) | mhp->bdesc_length_lo;
	} else {
		mode_header_length = MODE_HEADER_LENGTH;
		bd_len = ((struct mode_header *)headerp)->bdesc_length;
	}

	if (bd_len > MODE_BLK_DESC_LENGTH) {
		SD_ERROR(SD_LOG_COMMON, un, "sd_get_physical_geometry: "
		    "received unexpected bd_len of %d, page3\n", bd_len);
		sd_ssc_set_info(ssc, SSC_FLAGS_INVALID_DATA,
		    "sd_get_physical_geometry: received unexpected "
		    "bd_len of %d, page3", bd_len);
		status = EIO;
		goto page3_exit;
	}

	page3p = (struct mode_format *)
	    ((caddr_t)headerp + mode_header_length + bd_len);

	if (page3p->mode_page.code != SD_MODE_SENSE_PAGE3_CODE) {
		SD_ERROR(SD_LOG_COMMON, un, "sd_get_physical_geometry: "
		    "mode sense pg3 code mismatch %d\n",
		    page3p->mode_page.code);
		sd_ssc_set_info(ssc, SSC_FLAGS_INVALID_DATA,
		    "sd_get_physical_geometry: mode sense pg3 code "
		    "mismatch %d", page3p->mode_page.code);
		status = EIO;
		goto page3_exit;
	}

	/*
	 * Use this physical geometry data only if BOTH MODE SENSE commands
	 * complete successfully; otherwise, revert to the logical geometry.
	 * So, we need to save everything in temporary variables.
	 */
	sector_size = BE_16(page3p->data_bytes_sect);

	/*
	 * 1243403: The NEC D38x7 drives do not support MODE SENSE sector size
	 */
	if (sector_size == 0) {
		sector_size = un->un_sys_blocksize;
	} else {
		sector_size &= ~(un->un_sys_blocksize - 1);
	}

	nsect = BE_16(page3p->sect_track);
	intrlv = BE_16(page3p->interleave);

	SD_INFO(SD_LOG_COMMON, un,
	    "sd_get_physical_geometry: Format Parameters (page 3)\n");
	SD_INFO(SD_LOG_COMMON, un,
	    "   mode page: %d; nsect: %d; sector size: %d;\n",
	    page3p->mode_page.code, nsect, sector_size);
	SD_INFO(SD_LOG_COMMON, un,
	    "   interleave: %d; track skew: %d; cylinder skew: %d;\n", intrlv,
	    BE_16(page3p->track_skew),
	    BE_16(page3p->cylinder_skew));

	sd_ssc_assessment(ssc, SD_FMT_STANDARD);

	/*
	 * Retrieve MODE SENSE page 4 - Rigid Disk Drive Geometry Page
	 */
	p4bufp = kmem_zalloc(SD_MODE_SENSE_PAGE4_LENGTH, KM_SLEEP);
	status = sd_send_scsi_MODE_SENSE(ssc, cdbsize, p4bufp,
	    SD_MODE_SENSE_PAGE4_LENGTH, SD_MODE_SENSE_PAGE4_CODE, path_flag);
	if (status != 0) {
		SD_ERROR(SD_LOG_COMMON, un,
		    "sd_get_physical_geometry: mode sense page 4 failed\n");
		goto page4_exit;
	}

	/*
	 * Determine size of Block Descriptors in order to locate the mode
	 * page data. ATAPI devices return 0, SCSI devices should return
	 * MODE_BLK_DESC_LENGTH.
	 */
	headerp = (struct mode_header *)p4bufp;
	if (un->un_f_cfg_is_atapi == TRUE) {
		struct mode_header_grp2 *mhp =
		    (struct mode_header_grp2 *)headerp;
		bd_len = (mhp->bdesc_length_hi << 8) | mhp->bdesc_length_lo;
	} else {
		bd_len = ((struct mode_header *)headerp)->bdesc_length;
	}

	if (bd_len > MODE_BLK_DESC_LENGTH) {
		SD_ERROR(SD_LOG_COMMON, un, "sd_get_physical_geometry: "
		    "received unexpected bd_len of %d, page4\n", bd_len);
		sd_ssc_set_info(ssc, SSC_FLAGS_INVALID_DATA,
		    "sd_get_physical_geometry: received unexpected "
		    "bd_len of %d, page4", bd_len);
		status = EIO;
		goto page4_exit;
	}

	/*
	 * NOTE(review): mode_header_length computed during the page 3 pass
	 * is reused here; both commands go to the same target so the header
	 * format (ATAPI group 2 vs. group 0) is the same for both pages.
	 */
	page4p = (struct mode_geometry *)
	    ((caddr_t)headerp + mode_header_length + bd_len);

	if (page4p->mode_page.code != SD_MODE_SENSE_PAGE4_CODE) {
		SD_ERROR(SD_LOG_COMMON, un, "sd_get_physical_geometry: "
		    "mode sense pg4 code mismatch %d\n",
		    page4p->mode_page.code);
		sd_ssc_set_info(ssc, SSC_FLAGS_INVALID_DATA,
		    "sd_get_physical_geometry: mode sense pg4 code "
		    "mismatch %d", page4p->mode_page.code);
		status = EIO;
		goto page4_exit;
	}

	/*
	 * Stash the data now, after we know that both commands completed.
	 */


	nhead = (int)page4p->heads;	/* uchar, so no conversion needed */
	spc = nhead * nsect;
	ncyl = (page4p->cyl_ub << 16) + (page4p->cyl_mb << 8) + page4p->cyl_lb;
	rpm = BE_16(page4p->rpm);

	modesense_capacity = spc * ncyl;

	SD_INFO(SD_LOG_COMMON, un,
	    "sd_get_physical_geometry: Geometry Parameters (page 4)\n");
	SD_INFO(SD_LOG_COMMON, un,
	    "   cylinders: %d; heads: %d; rpm: %d;\n", ncyl, nhead, rpm);
	SD_INFO(SD_LOG_COMMON, un,
	    "   computed capacity(h*s*c): %d;\n", modesense_capacity);
	SD_INFO(SD_LOG_COMMON, un, "   pgeom_p: %p; read cap: %d\n",
	    (void *)pgeom_p, capacity);

	/*
	 * Compensate if the drive's geometry is not rectangular, i.e.,
	 * the product of C * H * S returned by MODE SENSE >= that returned
	 * by read capacity. This is an idiosyncrasy of the original x86
	 * disk subsystem.
	 */
	if (modesense_capacity >= capacity) {
		SD_INFO(SD_LOG_COMMON, un,
		    "sd_get_physical_geometry: adjusting acyl; "
		    "old: %d; new: %d\n", pgeom_p->g_acyl,
		    (modesense_capacity - capacity + spc - 1) / spc);
		if (sector_size != 0) {
			/* 1243403: NEC D38x7 drives don't support sec size */
			pgeom_p->g_secsize = (unsigned short)sector_size;
		}
		pgeom_p->g_nsect = (unsigned short)nsect;
		pgeom_p->g_nhead = (unsigned short)nhead;
		pgeom_p->g_capacity = capacity;
		pgeom_p->g_acyl =
		    (modesense_capacity - pgeom_p->g_capacity + spc - 1) / spc;
		pgeom_p->g_ncyl = ncyl - pgeom_p->g_acyl;
	}

	pgeom_p->g_rpm = (unsigned short)rpm;
	pgeom_p->g_intrlv = (unsigned short)intrlv;
	ret = 0;

	SD_INFO(SD_LOG_COMMON, un,
	    "sd_get_physical_geometry: mode sense geometry:\n");
	SD_INFO(SD_LOG_COMMON, un,
	    "   nsect: %d; sector size: %d; interlv: %d\n",
	    nsect, sector_size, intrlv);
	SD_INFO(SD_LOG_COMMON, un,
	    "   nhead: %d; ncyl: %d; rpm: %d; capacity(ms): %d\n",
	    nhead, ncyl, rpm, modesense_capacity);
	SD_INFO(SD_LOG_COMMON, un,
	    "sd_get_physical_geometry: (cached)\n");
	SD_INFO(SD_LOG_COMMON, un,
	    "   ncyl: %ld; acyl: %d; nhead: %d; nsect: %d\n",
	    pgeom_p->g_ncyl, pgeom_p->g_acyl,
	    pgeom_p->g_nhead, pgeom_p->g_nsect);
	SD_INFO(SD_LOG_COMMON, un,
	    "   lbasize: %d; capacity: %ld; intrlv: %d; rpm: %d\n",
	    pgeom_p->g_secsize, pgeom_p->g_capacity,
	    pgeom_p->g_intrlv, pgeom_p->g_rpm);
	sd_ssc_assessment(ssc, SD_FMT_STANDARD);

page4_exit:
	kmem_free(p4bufp, SD_MODE_SENSE_PAGE4_LENGTH);

page3_exit:
	kmem_free(p3bufp, SD_MODE_SENSE_PAGE3_LENGTH);

	if (status != 0) {
		if (status == EIO) {
			/*
			 * Some disks do not support mode sense(6), we
			 * should ignore this kind of error(sense key is
			 * 0x5 - illegal request).
			 */
			uint8_t *sensep;
			int senlen;

			sensep = (uint8_t *)ssc->ssc_uscsi_cmd->uscsi_rqbuf;
			senlen = (int)(ssc->ssc_uscsi_cmd->uscsi_rqlen -
			    ssc->ssc_uscsi_cmd->uscsi_rqresid);

			if (senlen > 0 &&
			    scsi_sense_key(sensep) == KEY_ILLEGAL_REQUEST) {
				sd_ssc_assessment(ssc,
				    SD_FMT_IGNORE_COMPROMISE);
			} else {
				sd_ssc_assessment(ssc, SD_FMT_STATUS_CHECK);
			}
		} else {
			sd_ssc_assessment(ssc, SD_FMT_IGNORE);
		}
	}
	sd_ssc_fini(ssc);
	return (ret);
}

/*
 * Function: sd_get_virtual_geometry
 *
 * Description: Ask the controller to tell us about the target device.
 *
 * Arguments:	un - pointer to softstate
 *		capacity - disk capacity in #blocks
 *		lbasize - disk block size in bytes
 *
 * Context: Kernel thread only
 */

static int
sd_get_virtual_geometry(struct sd_lun *un, cmlb_geom_t *lgeom_p,
    diskaddr_t capacity, int lbasize)
{
	uint_t	geombuf;
	int	spc;

	ASSERT(un != NULL);

	/* Set sector size, and total number of sectors */
	(void) scsi_ifsetcap(SD_ADDRESS(un), "sector-size", lbasize, 1);
	(void) scsi_ifsetcap(SD_ADDRESS(un), "total-sectors", capacity, 1);

	/* Let the HBA tell us its geometry */
	geombuf = (uint_t)scsi_ifgetcap(SD_ADDRESS(un), "geometry", 1);

	/* A value of -1 indicates an undefined "geometry" property */
	if (geombuf == (-1)) {
		return (EINVAL);
	}

	/*
	 * Initialize the logical geometry cache.
	 * The "geometry" capability packs nhead in the upper 16 bits and
	 * nsect in the lower 16 bits.
	 */
	lgeom_p->g_nhead = (geombuf >> 16) & 0xffff;
	lgeom_p->g_nsect = geombuf & 0xffff;
	lgeom_p->g_secsize = un->un_sys_blocksize;

	spc = lgeom_p->g_nhead * lgeom_p->g_nsect;

	/*
	 * Note: The driver originally converted the capacity value from
	 * target blocks to system blocks. However, the capacity value passed
	 * to this routine is already in terms of system blocks (this scaling
	 * is done when the READ CAPACITY command is issued and processed).
	 * This 'error' may have gone undetected because the usage of g_ncyl
	 * (which is based upon g_capacity) is very limited within the driver
	 */
	lgeom_p->g_capacity = capacity;

	/*
	 * Set ncyl to zero if the hba returned a zero nhead or nsect value. The
	 * hba may return zero values if the device has been removed.
	 */
	if (spc == 0) {
		lgeom_p->g_ncyl = 0;
	} else {
		lgeom_p->g_ncyl = lgeom_p->g_capacity / spc;
	}
	lgeom_p->g_acyl = 0;

	SD_INFO(SD_LOG_COMMON, un, "sd_get_virtual_geometry: (cached)\n");
	return (0);

}
/*
 * Function: sd_update_block_info
 *
 * Description: Calculate a byte count to sector count bitshift value
 *		from sector size.
 *
 * Arguments:	un: unit struct.
 *		lbasize: new target sector size
 *		capacity: new target capacity, ie. block count
 *
 * Context: Kernel thread context
 */

static void
sd_update_block_info(struct sd_lun *un, uint32_t lbasize, uint64_t capacity)
{
	/* A zero argument means "leave the cached value unchanged". */
	if (lbasize != 0) {
		un->un_tgt_blocksize = lbasize;
		un->un_f_tgt_blocksize_is_valid = TRUE;
	}

	if (capacity != 0) {
		un->un_blockcount = capacity;
		un->un_f_blockcount_is_valid = TRUE;
	}
}


/*
 * Function: sd_register_devid
 *
 * Description: This routine will obtain the device id information from the
 *		target, obtain the serial number, and register the device
 *		id with the ddi framework.
 *
 * Arguments: devi - the system's dev_info_t for the device.
 *	un - driver soft state (unit) structure
 *	reservation_flag - indicates if a reservation conflict
 *	occurred during attach
 *
 * Context: Kernel Thread
 */
static void
sd_register_devid(sd_ssc_t *ssc, dev_info_t *devi, int reservation_flag)
{
	int		rval = 0;
	uchar_t		*inq80 = NULL;
	size_t		inq80_len = MAX_INQUIRY_SIZE;
	size_t		inq80_resid = 0;
	uchar_t		*inq83 = NULL;
	size_t		inq83_len = MAX_INQUIRY_SIZE;
	size_t		inq83_resid = 0;
	int		dlen, len;
	char		*sn;
	struct sd_lun	*un;

	ASSERT(ssc != NULL);
	un = ssc->ssc_un;
	ASSERT(un != NULL);
	ASSERT(mutex_owned(SD_MUTEX(un)));
	ASSERT((SD_DEVINFO(un)) == devi);

	/*
	 * If transport has already registered a devid for this target
	 * then that takes precedence over the driver's determination
	 * of the devid.
	 */
	if (ddi_devid_get(SD_DEVINFO(un), &un->un_devid) == DDI_SUCCESS) {
		ASSERT(un->un_devid);
		return; /* use devid registered by the transport */
	}

	/*
	 * This is the case of antiquated Sun disk drives that have the
	 * FAB_DEVID property set in the disk_table.  These drives
	 * manage the devid's by storing them in last 2 available sectors
	 * on the drive and have them fabricated by the ddi layer by calling
	 * ddi_devid_init and passing the DEVID_FAB flag.
	 */
	if (un->un_f_opt_fab_devid == TRUE) {
		/*
		 * Depending on EINVAL isn't reliable, since a reserved disk
		 * may result in invalid geometry, so check to make sure a
		 * reservation conflict did not occur during attach.
		 */
		if ((sd_get_devid(ssc) == EINVAL) &&
		    (reservation_flag != SD_TARGET_IS_RESERVED)) {
			/*
			 * The devid is invalid AND there is no reservation
			 * conflict.  Fabricate a new devid.
			 */
			(void) sd_create_devid(ssc);
		}

		/* Register the devid if it exists */
		if (un->un_devid != NULL) {
			(void) ddi_devid_register(SD_DEVINFO(un),
			    un->un_devid);
			SD_INFO(SD_LOG_ATTACH_DETACH, un,
			    "sd_register_devid: Devid Fabricated\n");
		}
		return;
	}

	/*
	 * We check the availability of the World Wide Name (0x83) and Unit
	 * Serial Number (0x80) pages in sd_check_vpd_page_support(), and using
	 * un_vpd_page_mask from them, we decide which way to get the WWN.  If
	 * 0x83 is available, that is the best choice.  Our next choice is
	 * 0x80.  If neither are available, we munge the devid from the device
	 * vid/pid/serial # for Sun qualified disks, or use the ddi framework
	 * to fabricate a devid for non-Sun qualified disks.
	 */
	if (sd_check_vpd_page_support(ssc) == 0) {
		/* collect page 80 data if available */
		if (un->un_vpd_page_mask & SD_VPD_UNIT_SERIAL_PG) {

			/* INQUIRY may block; drop the softstate mutex. */
			mutex_exit(SD_MUTEX(un));
			inq80 = kmem_zalloc(inq80_len, KM_SLEEP);

			rval = sd_send_scsi_INQUIRY(ssc, inq80, inq80_len,
			    0x01, 0x80, &inq80_resid);

			if (rval != 0) {
				sd_ssc_assessment(ssc, SD_FMT_IGNORE);
				kmem_free(inq80, inq80_len);
				inq80 = NULL;
				inq80_len = 0;
			} else if (ddi_prop_exists(
			    DDI_DEV_T_NONE, SD_DEVINFO(un),
			    DDI_PROP_NOTPROM | DDI_PROP_DONTPASS,
			    INQUIRY_SERIAL_NO) == 0) {
				/*
				 * If we don't already have a serial number
				 * property, do quick verify of data returned
				 * and define property.
				 */
				dlen = inq80_len - inq80_resid;
				len = (size_t)inq80[3];
				if ((dlen >= 4) && ((len + 4) <= dlen)) {
					/*
					 * Ensure sn termination, skip leading
					 * blanks, and create property
					 * 'inquiry-serial-no'.
					 */
					sn = (char *)&inq80[4];
					sn[len] = 0;
					while (*sn && (*sn == ' '))
						sn++;
					if (*sn) {
						(void) ddi_prop_update_string(
						    DDI_DEV_T_NONE,
						    SD_DEVINFO(un),
						    INQUIRY_SERIAL_NO, sn);
					}
				}
			}
			mutex_enter(SD_MUTEX(un));
		}

		/* collect page 83 data if available */
		if (un->un_vpd_page_mask & SD_VPD_DEVID_WWN_PG) {
			mutex_exit(SD_MUTEX(un));
			inq83 = kmem_zalloc(inq83_len, KM_SLEEP);

			rval = sd_send_scsi_INQUIRY(ssc, inq83, inq83_len,
			    0x01, 0x83, &inq83_resid);

			if (rval != 0) {
				sd_ssc_assessment(ssc, SD_FMT_IGNORE);
				kmem_free(inq83, inq83_len);
				inq83 = NULL;
				inq83_len = 0;
			}
			mutex_enter(SD_MUTEX(un));
		}
	}

	/* encode best devid possible based on data available */
	if (ddi_devid_scsi_encode(DEVID_SCSI_ENCODE_VERSION_LATEST,
	    (char *)ddi_driver_name(SD_DEVINFO(un)),
	    (uchar_t *)SD_INQUIRY(un), sizeof (*SD_INQUIRY(un)),
	    inq80, inq80_len - inq80_resid, inq83, inq83_len -
	    inq83_resid, &un->un_devid) == DDI_SUCCESS) {

		/* devid successfully encoded, register devid */
		(void) ddi_devid_register(SD_DEVINFO(un), un->un_devid);

	} else {
		/*
		 * Unable to encode a devid based on data available.
		 * This is not a Sun qualified disk.  Older Sun disk
		 * drives that have the SD_FAB_DEVID property
		 * set in the disk_table and non Sun qualified
		 * disks are treated in the same manner.  These
		 * drives manage the devid's by storing them in
		 * last 2 available sectors on the drive and
		 * have them fabricated by the ddi layer by
		 * calling ddi_devid_init and passing the
		 * DEVID_FAB flag.
		 * Create a fabricate devid only if there's no
		 * fabricate devid existed.
		 */
		if (sd_get_devid(ssc) == EINVAL) {
			(void) sd_create_devid(ssc);
		}
		un->un_f_opt_fab_devid = TRUE;

		/* Register the devid if it exists */
		if (un->un_devid != NULL) {
			(void) ddi_devid_register(SD_DEVINFO(un),
			    un->un_devid);
			SD_INFO(SD_LOG_ATTACH_DETACH, un,
			    "sd_register_devid: devid fabricated using "
			    "ddi framework\n");
		}
	}

	/* clean up resources */
	if (inq80 != NULL) {
		kmem_free(inq80, inq80_len);
	}
	if (inq83 != NULL) {
		kmem_free(inq83, inq83_len);
	}
}



/*
 * Function: sd_get_devid
 *
 * Description: This routine will return 0 if a valid device id has been
 *		obtained from the target and stored in the soft state.  If a
 *		valid device id has not been previously read and stored, a
 *		read attempt will be made.
 *
 * Arguments: un - driver soft state (unit) structure
 *
 * Return Code: 0 if we successfully get the device id
 *
 * Context: Kernel Thread
 */

static int
sd_get_devid(sd_ssc_t *ssc)
{
	struct dk_devid		*dkdevid;
	ddi_devid_t		tmpid;
	uint_t			*ip;
	size_t			sz;
	diskaddr_t		blk;
	int			status;
	int			chksum;
	int			i;
	size_t			buffer_size;
	struct sd_lun		*un;

	ASSERT(ssc != NULL);
	un = ssc->ssc_un;
	ASSERT(un != NULL);
	ASSERT(mutex_owned(SD_MUTEX(un)));

	SD_TRACE(SD_LOG_ATTACH_DETACH, un, "sd_get_devid: entry: un: 0x%p\n",
	    un);

	/* Already cached in the soft state; nothing to do. */
	if (un->un_devid != NULL) {
		return (0);
	}

	mutex_exit(SD_MUTEX(un));
	if (cmlb_get_devid_block(un->un_cmlbhandle, &blk,
	    (void *)SD_PATH_DIRECT) != 0) {
		mutex_enter(SD_MUTEX(un));
		return (EINVAL);
	}

	/*
	 * Read and verify device id, stored in the reserved cylinders at the
	 * end of the disk. Backup label is on the odd sectors of the last
	 * track of the last cylinder. Device id will be on track of the next
	 * to last cylinder.
	 */
	mutex_enter(SD_MUTEX(un));
	buffer_size = SD_REQBYTES2TGTBYTES(un, sizeof (struct dk_devid));
	mutex_exit(SD_MUTEX(un));
	dkdevid = kmem_alloc(buffer_size, KM_SLEEP);
	status = sd_send_scsi_READ(ssc, dkdevid, buffer_size, blk,
	    SD_PATH_DIRECT);

	if (status != 0) {
		sd_ssc_assessment(ssc, SD_FMT_IGNORE);
		goto error;
	}

	/* Validate the revision */
	if ((dkdevid->dkd_rev_hi != DK_DEVID_REV_MSB) ||
	    (dkdevid->dkd_rev_lo != DK_DEVID_REV_LSB)) {
		status = EINVAL;
		goto error;
	}

	/*
	 * Calculate the checksum: XOR of every word in the sector except
	 * the final word, which holds the stored checksum itself.
	 */
	chksum = 0;
	ip = (uint_t *)dkdevid;
	for (i = 0; i < ((un->un_sys_blocksize - sizeof (int))/sizeof (int));
	    i++) {
		chksum ^= ip[i];
	}

	/* Compare the checksums */
	if (DKD_GETCHKSUM(dkdevid) != chksum) {
		status = EINVAL;
		goto error;
	}

	/* Validate the device id */
	if (ddi_devid_valid((ddi_devid_t)&dkdevid->dkd_devid) != DDI_SUCCESS) {
		status = EINVAL;
		goto error;
	}

	/*
	 * Store the device id in the driver soft state
	 */
	sz = ddi_devid_sizeof((ddi_devid_t)&dkdevid->dkd_devid);
	tmpid = kmem_alloc(sz, KM_SLEEP);

	mutex_enter(SD_MUTEX(un));

	un->un_devid = tmpid;
	bcopy(&dkdevid->dkd_devid, un->un_devid, sz);

	kmem_free(dkdevid, buffer_size);

	SD_TRACE(SD_LOG_ATTACH_DETACH, un, "sd_get_devid: exit: un:0x%p\n", un);

	return (status);
error:
	mutex_enter(SD_MUTEX(un));
	kmem_free(dkdevid, buffer_size);
	return (status);
}


/*
 * Function: sd_create_devid
 *
 * Description: This routine will fabricate the device id and write it
 *		to the disk.
 *
 * Arguments: un - driver soft state (unit) structure
 *
 * Return Code: value of the fabricated device id
 *
 * Context: Kernel Thread
 */

static ddi_devid_t
sd_create_devid(sd_ssc_t *ssc)
{
	struct sd_lun	*un;

	ASSERT(ssc != NULL);
	un = ssc->ssc_un;
	ASSERT(un != NULL);

	/* Fabricate the devid */
	if (ddi_devid_init(SD_DEVINFO(un), DEVID_FAB, 0, NULL, &un->un_devid)
	    == DDI_FAILURE) {
		return (NULL);
	}

	/* Write the devid to disk; on failure discard the fabricated id. */
	if (sd_write_deviceid(ssc) != 0) {
		ddi_devid_free(un->un_devid);
		un->un_devid = NULL;
	}

	return (un->un_devid);
}


/*
 * Function: sd_write_deviceid
 *
 * Description: This routine will write the device id to the disk
 *		reserved sector.
 *
 * Arguments: un - driver soft state (unit) structure
 *
 * Return Code: EINVAL
 *		value returned by sd_send_scsi_cmd
 *
 * Context: Kernel Thread
 */

static int
sd_write_deviceid(sd_ssc_t *ssc)
{
	struct dk_devid		*dkdevid;
	diskaddr_t		blk;
	uint_t			*ip, chksum;
	int			status;
	int			i;
	struct sd_lun		*un;

	ASSERT(ssc != NULL);
	un = ssc->ssc_un;
	ASSERT(un != NULL);
	ASSERT(mutex_owned(SD_MUTEX(un)));

	mutex_exit(SD_MUTEX(un));
	if (cmlb_get_devid_block(un->un_cmlbhandle, &blk,
	    (void *)SD_PATH_DIRECT) != 0) {
		mutex_enter(SD_MUTEX(un));
		return (-1);
	}


	/* Allocate the buffer */
	dkdevid = kmem_zalloc(un->un_sys_blocksize, KM_SLEEP);

	/* Fill in the revision */
	dkdevid->dkd_rev_hi = DK_DEVID_REV_MSB;
	dkdevid->dkd_rev_lo = DK_DEVID_REV_LSB;

	/* Copy in the device id */
	mutex_enter(SD_MUTEX(un));
	bcopy(un->un_devid, &dkdevid->dkd_devid,
	    ddi_devid_sizeof(un->un_devid));
	mutex_exit(SD_MUTEX(un));

	/*
	 * Calculate the checksum: XOR of every word in the sector except
	 * the final word, which will hold the checksum itself.
	 */
	chksum = 0;
	ip = (uint_t *)dkdevid;
	for (i = 0; i < ((un->un_sys_blocksize - sizeof (int))/sizeof (int));
	    i++) {
		chksum ^= ip[i];
	}

	/* Fill-in checksum */
	DKD_FORMCHKSUM(chksum, dkdevid);

	/* Write the reserved sector */
	status = sd_send_scsi_WRITE(ssc, dkdevid, un->un_sys_blocksize, blk,
	    SD_PATH_DIRECT);
	if (status != 0)
		sd_ssc_assessment(ssc, SD_FMT_IGNORE);

	kmem_free(dkdevid, un->un_sys_blocksize);

	mutex_enter(SD_MUTEX(un));
	return (status);
}


/*
 * Function: sd_check_vpd_page_support
 *
 * Description: This routine sends an inquiry command with the EVPD bit set and
 *		a page code of 0x00 to the device. It is used to determine which
 *		vital product pages are available to find the devid. We are
 *		looking for pages 0x83 or 0x80.  If we return a negative 1, the
 *		device does not support that command.
 *
 * Arguments: un - driver soft state (unit) structure
 *
 * Return Code: 0 - success
 *		1 - check condition
 *
 * Context: This routine can sleep.
 */

static int
sd_check_vpd_page_support(sd_ssc_t *ssc)
{
	uchar_t	*page_list = NULL;
	uchar_t	page_length = 0xff;	/* Use max possible length */
	uchar_t	evpd = 0x01;		/* Set the EVPD bit */
	uchar_t	page_code = 0x00;	/* Supported VPD Pages */
	int	rval = 0;
	int	counter;
	struct sd_lun	*un;

	ASSERT(ssc != NULL);
	un = ssc->ssc_un;
	ASSERT(un != NULL);
	ASSERT(mutex_owned(SD_MUTEX(un)));

	mutex_exit(SD_MUTEX(un));

	/*
	 * We'll set the page length to the maximum to save figuring it out
	 * with an additional call.
	 */
	page_list = kmem_zalloc(page_length, KM_SLEEP);

	rval = sd_send_scsi_INQUIRY(ssc, page_list, page_length, evpd,
	    page_code, NULL);

	if (rval != 0)
		sd_ssc_assessment(ssc, SD_FMT_IGNORE);

	mutex_enter(SD_MUTEX(un));

	/*
	 * Now we must validate that the device accepted the command, as some
	 * drives do not support it.  If the drive does support it, we will
	 * return 0, and the supported pages will be in un_vpd_page_mask.  If
	 * not, we return -1.
	 */
	if ((rval == 0) && (page_list[VPD_MODE_PAGE] == 0x00)) {
		/* Loop to find one of the 2 pages we need */
		counter = 4;  /* Supported pages start at byte 4, with 0x00 */

		/*
		 * Pages are returned in ascending order, and 0x83 is what we
		 * are hoping for.
		 */
		while ((page_list[counter] <= 0x86) &&
		    (counter <= (page_list[VPD_PAGE_LENGTH] +
		    VPD_HEAD_OFFSET))) {
			/*
			 * Add 3 because page_list[3] is the number of
			 * pages minus 3
			 */

			switch (page_list[counter]) {
			case 0x00:
				un->un_vpd_page_mask |= SD_VPD_SUPPORTED_PG;
				break;
			case 0x80:
				un->un_vpd_page_mask |= SD_VPD_UNIT_SERIAL_PG;
				break;
			case 0x81:
				un->un_vpd_page_mask |= SD_VPD_OPERATING_PG;
				break;
			case 0x82:
				un->un_vpd_page_mask |= SD_VPD_ASCII_OP_PG;
				break;
			case 0x83:
				un->un_vpd_page_mask |= SD_VPD_DEVID_WWN_PG;
				break;
			case 0x86:
				un->un_vpd_page_mask |= SD_VPD_EXTENDED_DATA_PG;
				break;
			}
			counter++;
		}

	} else {
		rval = -1;

		SD_INFO(SD_LOG_ATTACH_DETACH, un,
		    "sd_check_vpd_page_support: This drive does not implement "
		    "VPD pages.\n");
	}

	kmem_free(page_list, page_length);

	return (rval);
}


/*
 * Function: sd_setup_pm
 *
 * Description: Initialize Power Management on the device
 *
 * Context: Kernel Thread
 */

static void
sd_setup_pm(sd_ssc_t *ssc, dev_info_t *devi)
{
	uint_t		log_page_size;
	uchar_t		*log_page_data;
	int		rval = 0;
	struct sd_lun	*un;

	ASSERT(ssc != NULL);
	un = ssc->ssc_un;
	ASSERT(un != NULL);

	/*
	 * Since we are called from attach, holding a mutex for
	 * un is unnecessary. Because some of the routines called
	 * from here require SD_MUTEX to not be held, assert this
	 * right up front.
	 */
	ASSERT(!mutex_owned(SD_MUTEX(un)));
	/*
	 * Since the sd device does not have the 'reg' property,
	 * cpr will not call its DDI_SUSPEND/DDI_RESUME entries.
	 * The following code is to tell cpr that this device
	 * DOES need to be suspended and resumed.
	 */
	(void) ddi_prop_update_string(DDI_DEV_T_NONE, devi,
	    "pm-hardware-state", "needs-suspend-resume");

	/*
	 * This complies with the new power management framework
	 * for certain desktop machines. Create the pm_components
	 * property as a string array property.
	 */
	if (un->un_f_pm_supported) {
		/*
		 * not all devices have a motor, try it first.
		 * some devices may return ILLEGAL REQUEST, some
		 * will hang
		 * The following START_STOP_UNIT is used to check if target
		 * device has a motor.
		 */
		un->un_f_start_stop_supported = TRUE;
		rval = sd_send_scsi_START_STOP_UNIT(ssc, SD_TARGET_START,
		    SD_PATH_DIRECT);

		if (rval != 0) {
			sd_ssc_assessment(ssc, SD_FMT_IGNORE);
			un->un_f_start_stop_supported = FALSE;
		}

		/*
		 * create pm properties anyways otherwise the parent can't
		 * go to sleep
		 */
		(void) sd_create_pm_components(devi, un);
		un->un_f_pm_is_enabled = TRUE;
		return;
	}

	if (!un->un_f_log_sense_supported) {
		un->un_power_level = SD_SPINDLE_ON;
		un->un_f_pm_is_enabled = FALSE;
		return;
	}

	rval = sd_log_page_supported(ssc, START_STOP_CYCLE_PAGE);

#ifdef	SDDEBUG
	if (sd_force_pm_supported) {
		/* Force a successful result */
		rval = 1;
	}
#endif

	/*
	 * If the start-stop cycle counter log page is not supported
	 * or if the pm-capable property is SD_PM_CAPABLE_FALSE (0)
	 * then we should not create the pm_components property.
	 */
	if (rval == -1) {
		/*
		 * Error.
		 * Reading log sense failed, most likely this is
		 * an older drive that does not support log sense.
		 * If this fails auto-pm is not supported.
		 */
		un->un_power_level = SD_SPINDLE_ON;
		un->un_f_pm_is_enabled = FALSE;

	} else if (rval == 0) {
		/*
		 * Page not found.
		 * The start stop cycle counter is implemented as page
		 * START_STOP_CYCLE_PAGE_VU_PAGE (0x31) in older disks. For
		 * newer disks it is implemented as START_STOP_CYCLE_PAGE (0xE).
		 */
		if (sd_log_page_supported(ssc, START_STOP_CYCLE_VU_PAGE) == 1) {
			/*
			 * Page found, use this one.
			 */
			un->un_start_stop_cycle_page = START_STOP_CYCLE_VU_PAGE;
			un->un_f_pm_is_enabled = TRUE;
		} else {
			/*
			 * Error or page not found.
			 * auto-pm is not supported for this device.
			 */
			un->un_power_level = SD_SPINDLE_ON;
			un->un_f_pm_is_enabled = FALSE;
		}
	} else {
		/*
		 * Page found, use it.
		 */
		un->un_start_stop_cycle_page = START_STOP_CYCLE_PAGE;
		un->un_f_pm_is_enabled = TRUE;
	}


	if (un->un_f_pm_is_enabled == TRUE) {
		log_page_size = START_STOP_CYCLE_COUNTER_PAGE_SIZE;
		log_page_data = kmem_zalloc(log_page_size, KM_SLEEP);

		rval = sd_send_scsi_LOG_SENSE(ssc, log_page_data,
		    log_page_size, un->un_start_stop_cycle_page,
		    0x01, 0, SD_PATH_DIRECT);

		if (rval != 0) {
			sd_ssc_assessment(ssc, SD_FMT_IGNORE);
		}

#ifdef	SDDEBUG
		if (sd_force_pm_supported) {
			/* Force a successful result */
			rval = 0;
		}
#endif

		/*
		 * If the Log sense for Page( Start/stop cycle counter page)
		 * succeeds, then power management is supported and we can
		 * enable auto-pm.
		 */
		if (rval == 0) {
			(void) sd_create_pm_components(devi, un);
		} else {
			un->un_power_level = SD_SPINDLE_ON;
			un->un_f_pm_is_enabled = FALSE;
		}

		kmem_free(log_page_data, log_page_size);
	}
}


/*
 * Function: sd_create_pm_components
 *
 * Description: Initialize PM property.
 *
 * Context: Kernel thread context
 */

static void
sd_create_pm_components(dev_info_t *devi, struct sd_lun *un)
{
	char *pm_comp[] = { "NAME=spindle-motor", "0=off", "1=on", NULL };

	ASSERT(!mutex_owned(SD_MUTEX(un)));

	if (ddi_prop_update_string_array(DDI_DEV_T_NONE, devi,
	    "pm-components", pm_comp, 3) == DDI_PROP_SUCCESS) {
		/*
		 * When components are initially created they are idle,
		 * power up any non-removables.
		 * Note: the return value of pm_raise_power can't be used
		 * for determining if PM should be enabled for this device.
		 * Even if you check the return values and remove this
		 * property created above, the PM framework will not honor the
		 * change after the first call to pm_raise_power. Hence,
		 * removal of that property does not help if pm_raise_power
		 * fails. In the case of removable media, the start/stop
		 * will fail if the media is not present.
		 */
		if (un->un_f_attach_spinup && (pm_raise_power(SD_DEVINFO(un), 0,
		    SD_SPINDLE_ON) == DDI_SUCCESS)) {
			mutex_enter(SD_MUTEX(un));
			un->un_power_level = SD_SPINDLE_ON;
			mutex_enter(&un->un_pm_mutex);
			/* Set to on and not busy. */
			un->un_pm_count = 0;
		} else {
			mutex_enter(SD_MUTEX(un));
			un->un_power_level = SD_SPINDLE_OFF;
			mutex_enter(&un->un_pm_mutex);
			/* Set to off. */
			un->un_pm_count = -1;
		}
		mutex_exit(&un->un_pm_mutex);
		mutex_exit(SD_MUTEX(un));
	} else {
		un->un_power_level = SD_SPINDLE_ON;
		un->un_f_pm_is_enabled = FALSE;
	}
}


/*
 * Function: sd_ddi_suspend
 *
 * Description: Performs system power-down operations. This includes
 *		setting the drive state to indicate its suspended so
 *		that no new commands will be accepted. Also, wait for
 *		all commands that are in transport or queued to a timer
 *		for retry to complete. All timeout threads are cancelled.
 *
 * Return Code: DDI_FAILURE or DDI_SUCCESS
 *
 * Context: Kernel thread context
 */

static int
sd_ddi_suspend(dev_info_t *devi)
{
	struct sd_lun	*un;
	clock_t		wait_cmds_complete;

	un = ddi_get_soft_state(sd_state, ddi_get_instance(devi));
	if (un == NULL) {
		return (DDI_FAILURE);
	}

	SD_TRACE(SD_LOG_IO_PM, un, "sd_ddi_suspend: entry\n");

	mutex_enter(SD_MUTEX(un));

	/* Return success if the device is already suspended.
 */
	if (un->un_state == SD_STATE_SUSPENDED) {
		mutex_exit(SD_MUTEX(un));
		SD_TRACE(SD_LOG_IO_PM, un, "sd_ddi_suspend: "
		    "device already suspended, exiting\n");
		return (DDI_SUCCESS);
	}

	/* Return failure if the device is being used by HA */
	if (un->un_resvd_status &
	    (SD_RESERVE | SD_WANT_RESERVE | SD_LOST_RESERVE)) {
		mutex_exit(SD_MUTEX(un));
		SD_TRACE(SD_LOG_IO_PM, un, "sd_ddi_suspend: "
		    "device in use by HA, exiting\n");
		return (DDI_FAILURE);
	}

	/*
	 * Return failure if the device is in a resource wait
	 * or power changing state.
	 */
	if ((un->un_state == SD_STATE_RWAIT) ||
	    (un->un_state == SD_STATE_PM_CHANGING)) {
		mutex_exit(SD_MUTEX(un));
		SD_TRACE(SD_LOG_IO_PM, un, "sd_ddi_suspend: "
		    "device in resource wait state, exiting\n");
		return (DDI_FAILURE);
	}


	/* Save the pre-suspend state so sd_ddi_resume() can restore it. */
	un->un_save_state = un->un_last_state;
	New_state(un, SD_STATE_SUSPENDED);

	/*
	 * Wait for all commands that are in transport or queued to a timer
	 * for retry to complete.
	 *
	 * While waiting, no new commands will be accepted or sent because of
	 * the new state we set above.
	 *
	 * Wait till current operation has completed. If we are in the resource
	 * wait state (with an intr outstanding) then we need to wait till the
	 * intr completes and starts the next cmd. We want to wait for
	 * SD_WAIT_CMDS_COMPLETE seconds before failing the DDI_SUSPEND.
	 */
	wait_cmds_complete = ddi_get_lbolt() +
	    (sd_wait_cmds_complete * drv_usectohz(1000000));

	while (un->un_ncmds_in_transport != 0) {
		/*
		 * Fail if commands do not finish in the specified time.
		 * cv_timedwait returns -1 when the absolute deadline passes.
		 */
		if (cv_timedwait(&un->un_disk_busy_cv, SD_MUTEX(un),
		    wait_cmds_complete) == -1) {
			/*
			 * Undo the state changes made above. Everything
			 * must go back to it's original value.
			 */
			Restore_state(un);
			un->un_last_state = un->un_save_state;
			/* Wake up any threads that might be waiting. */
			cv_broadcast(&un->un_suspend_cv);
			mutex_exit(SD_MUTEX(un));
			SD_ERROR(SD_LOG_IO_PM, un,
			    "sd_ddi_suspend: failed due to outstanding cmds\n");
			SD_TRACE(SD_LOG_IO_PM, un, "sd_ddi_suspend: exiting\n");
			return (DDI_FAILURE);
		}
	}

	/*
	 * Cancel SCSI watch thread and timeouts, if any are active.
	 *
	 * NOTE(review): throughout the cancellations below, SD_MUTEX is
	 * dropped around untimeout()/scsi_watch_suspend() — presumably
	 * because the callbacks being cancelled acquire SD_MUTEX themselves
	 * and untimeout(9F) would deadlock otherwise; confirm against the
	 * handlers. The timeout id is snapshotted and cleared under the
	 * mutex first so no one else cancels it twice.
	 */

	if (SD_OK_TO_SUSPEND_SCSI_WATCHER(un)) {
		opaque_t temp_token = un->un_swr_token;
		mutex_exit(SD_MUTEX(un));
		scsi_watch_suspend(temp_token);
		mutex_enter(SD_MUTEX(un));
	}

	if (un->un_reset_throttle_timeid != NULL) {
		timeout_id_t temp_id = un->un_reset_throttle_timeid;
		un->un_reset_throttle_timeid = NULL;
		mutex_exit(SD_MUTEX(un));
		(void) untimeout(temp_id);
		mutex_enter(SD_MUTEX(un));
	}

	if (un->un_dcvb_timeid != NULL) {
		timeout_id_t temp_id = un->un_dcvb_timeid;
		un->un_dcvb_timeid = NULL;
		mutex_exit(SD_MUTEX(un));
		(void) untimeout(temp_id);
		mutex_enter(SD_MUTEX(un));
	}

	/* un_pm_timeid is protected by un_pm_mutex, not SD_MUTEX. */
	mutex_enter(&un->un_pm_mutex);
	if (un->un_pm_timeid != NULL) {
		timeout_id_t temp_id = un->un_pm_timeid;
		un->un_pm_timeid = NULL;
		mutex_exit(&un->un_pm_mutex);
		mutex_exit(SD_MUTEX(un));
		(void) untimeout(temp_id);
		mutex_enter(SD_MUTEX(un));
	} else {
		mutex_exit(&un->un_pm_mutex);
	}

	if (un->un_retry_timeid != NULL) {
		timeout_id_t temp_id = un->un_retry_timeid;
		un->un_retry_timeid = NULL;
		mutex_exit(SD_MUTEX(un));
		(void) untimeout(temp_id);
		mutex_enter(SD_MUTEX(un));

		/*
		 * A retry was pending: put its buf back at the head of the
		 * wait queue so it is re-driven on resume rather than lost.
		 */
		if (un->un_retry_bp != NULL) {
			un->un_retry_bp->av_forw = un->un_waitq_headp;
			un->un_waitq_headp = un->un_retry_bp;
			if (un->un_waitq_tailp == NULL) {
				un->un_waitq_tailp = un->un_retry_bp;
			}
			un->un_retry_bp = NULL;
			un->un_retry_statp = NULL;
		}
	}

	if (un->un_direct_priority_timeid != NULL) {
		timeout_id_t temp_id = un->un_direct_priority_timeid;
		un->un_direct_priority_timeid = NULL;
		mutex_exit(SD_MUTEX(un));
		(void) untimeout(temp_id);
		mutex_enter(SD_MUTEX(un));
	}

	if (un->un_f_is_fibre == TRUE) {
		/*
		 * Remove callbacks for insert and remove events
		 */
		if (un->un_insert_event != NULL) {
			mutex_exit(SD_MUTEX(un));
			(void) ddi_remove_event_handler(un->un_insert_cb_id);
			mutex_enter(SD_MUTEX(un));
			un->un_insert_event = NULL;
		}

		if (un->un_remove_event != NULL) {
			mutex_exit(SD_MUTEX(un));
			(void) ddi_remove_event_handler(un->un_remove_cb_id);
			mutex_enter(SD_MUTEX(un));
			un->un_remove_event = NULL;
		}
	}

	mutex_exit(SD_MUTEX(un));

	SD_TRACE(SD_LOG_IO_PM, un, "sd_ddi_suspend: exit\n");

	return (DDI_SUCCESS);
}


/*
 * Function: sd_ddi_pm_suspend
 *
 * Description: Set the drive state to low power.
 *		Someone else is required to actually change the drive
 *		power level.
 *
 *   Arguments: un - driver soft state (unit) structure
 *
 * Return Code: DDI_FAILURE or DDI_SUCCESS
 *
 *     Context: Kernel thread context
 */

static int
sd_ddi_pm_suspend(struct sd_lun *un)
{
	ASSERT(un != NULL);
	SD_TRACE(SD_LOG_POWER, un, "sd_ddi_pm_suspend: entry\n");

	ASSERT(!mutex_owned(SD_MUTEX(un)));
	mutex_enter(SD_MUTEX(un));

	/*
	 * Exit if power management is not enabled for this device, or if
	 * the device is being used by HA.
 */
	if ((un->un_f_pm_is_enabled == FALSE) || (un->un_resvd_status &
	    (SD_RESERVE | SD_WANT_RESERVE | SD_LOST_RESERVE))) {
		mutex_exit(SD_MUTEX(un));
		SD_TRACE(SD_LOG_POWER, un, "sd_ddi_pm_suspend: exiting\n");
		return (DDI_SUCCESS);
	}

	SD_INFO(SD_LOG_POWER, un, "sd_ddi_pm_suspend: un_ncmds_in_driver=%ld\n",
	    un->un_ncmds_in_driver);

	/*
	 * See if the device is not busy, ie.:
	 *    - we have no commands in the driver for this device
	 *    - not waiting for resources
	 */
	if ((un->un_ncmds_in_driver == 0) &&
	    (un->un_state != SD_STATE_RWAIT)) {
		/*
		 * The device is not busy, so it is OK to go to low power state.
		 * Indicate low power, but rely on someone else to actually
		 * change it. (un_pm_count == -1 marks the device as powered
		 * off to the rest of the driver's PM accounting.)
		 */
		mutex_enter(&un->un_pm_mutex);
		un->un_pm_count = -1;
		mutex_exit(&un->un_pm_mutex);
		un->un_power_level = SD_SPINDLE_OFF;
	}

	mutex_exit(SD_MUTEX(un));

	SD_TRACE(SD_LOG_POWER, un, "sd_ddi_pm_suspend: exit\n");

	return (DDI_SUCCESS);
}


/*
 * Function: sd_ddi_resume
 *
 * Description: Performs system power-up operations (DDI_RESUME): restores
 *		the pre-suspend state, restores the throttle, optionally
 *		spins the device up, and restarts queued I/O.
 *
 * Return Code: DDI_SUCCESS
 *		DDI_FAILURE
 *
 *     Context: Kernel thread context
 */

static int
sd_ddi_resume(dev_info_t *devi)
{
	struct	sd_lun	*un;

	un = ddi_get_soft_state(sd_state, ddi_get_instance(devi));
	if (un == NULL) {
		return (DDI_FAILURE);
	}

	SD_TRACE(SD_LOG_IO_PM, un, "sd_ddi_resume: entry\n");

	mutex_enter(SD_MUTEX(un));
	Restore_state(un);

	/*
	 * Restore the state which was saved to give the
	 * the right state in un_last_state
	 */
	un->un_last_state = un->un_save_state;
	/*
	 * Note: throttle comes back at full.
	 * Also note: this MUST be done before calling pm_raise_power
	 * otherwise the system can get hung in biowait. The scenario where
	 * this'll happen is under cpr suspend. Writing of the system
	 * state goes through sddump, which writes 0 to un_throttle. If
	 * writing the system state then fails, example if the partition is
	 * too small, then cpr attempts a resume. If throttle isn't restored
	 * from the saved value until after calling pm_raise_power then
	 * cmds sent in sdpower are not transported and sd_send_scsi_cmd hangs
	 * in biowait.
	 */
	un->un_throttle = un->un_saved_throttle;

	/*
	 * The chance of failure is very rare as the only command done in power
	 * entry point is START command when you transition from 0->1 or
	 * unknown->1. Put it to SPINDLE ON state irrespective of the state at
	 * which suspend was done. Ignore the return value as the resume should
	 * not be failed. In the case of removable media the media need not be
	 * inserted and hence there is a chance that raise power will fail with
	 * media not present.
	 */
	if (un->un_f_attach_spinup) {
		mutex_exit(SD_MUTEX(un));
		(void) pm_raise_power(SD_DEVINFO(un), 0, SD_SPINDLE_ON);
		mutex_enter(SD_MUTEX(un));
	}

	/*
	 * Don't broadcast to the suspend cv and therefore possibly
	 * start I/O until after power has been restored.
	 */
	cv_broadcast(&un->un_suspend_cv);
	cv_broadcast(&un->un_state_cv);

	/* restart thread */
	if (SD_OK_TO_RESUME_SCSI_WATCHER(un)) {
		scsi_watch_resume(un->un_swr_token);
	}

#if (defined(__fibre))
	if (un->un_f_is_fibre == TRUE) {
		/*
		 * Add callbacks for insert and remove events
		 */
		if (strcmp(un->un_node_type, DDI_NT_BLOCK_CHAN)) {
			sd_init_event_callbacks(un);
		}
	}
#endif

	/*
	 * Transport any pending commands to the target.
	 *
	 * If this is a low-activity device commands in queue will have to wait
	 * until new commands come in, which may take awhile. Also, we
	 * specifically don't check un_ncmds_in_transport because we know that
	 * there really are no commands in progress after the unit was
	 * suspended and we could have reached the throttle level, been
	 * suspended, and have no new commands coming in for awhile. Highly
	 * unlikely, but so is the low-activity disk scenario.
	 */
	ddi_xbuf_dispatch(un->un_xbuf_attr);

	sd_start_cmds(un, NULL);
	mutex_exit(SD_MUTEX(un));

	SD_TRACE(SD_LOG_IO_PM, un, "sd_ddi_resume: exit\n");

	return (DDI_SUCCESS);
}


/*
 * Function: sd_ddi_pm_resume
 *
 * Description: Set the drive state to powered on.
 *		Someone else is required to actually change the drive
 *		power level.
 *
 *   Arguments: un - driver soft state (unit) structure
 *
 * Return Code: DDI_SUCCESS
 *
 *     Context: Kernel thread context
 */

static int
sd_ddi_pm_resume(struct sd_lun *un)
{
	ASSERT(un != NULL);

	ASSERT(!mutex_owned(SD_MUTEX(un)));
	mutex_enter(SD_MUTEX(un));
	un->un_power_level = SD_SPINDLE_ON;

	ASSERT(!mutex_owned(&un->un_pm_mutex));
	mutex_enter(&un->un_pm_mutex);
	if (SD_DEVICE_IS_IN_LOW_POWER(un)) {
		/* Take un_pm_count from -1 (off) back to 0 (on, idle). */
		un->un_pm_count++;
		ASSERT(un->un_pm_count == 0);
		/*
		 * Note: no longer do the cv_broadcast on un_suspend_cv. The
		 * un_suspend_cv is for a system resume, not a power management
		 * device resume. (4297749)
		 *	 cv_broadcast(&un->un_suspend_cv);
		 */
	}
	mutex_exit(&un->un_pm_mutex);
	mutex_exit(SD_MUTEX(un));

	return (DDI_SUCCESS);
}


/*
 * Function: sd_pm_idletimeout_handler
 *
 * Description: A timer routine that's active only while a device is busy.
 *		The purpose is to extend slightly the pm framework's busy
 *		view of the device to prevent busy/idle thrashing for
 *		back-to-back commands.
 *		Do this by comparing the current time
 *		to the time at which the last command completed and when the
 *		difference is greater than sd_pm_idletime, call
 *		pm_idle_component. In addition to indicating idle to the pm
 *		framework, update the chain type to again use the internal pm
 *		layers of the driver.
 *
 *   Arguments: arg - driver soft state (unit) structure
 *
 *     Context: Executes in a timeout(9F) thread context
 */

static void
sd_pm_idletimeout_handler(void *arg)
{
	struct sd_lun *un = arg;

	time_t	now;

	/* Bail out early if the instance is in the middle of detaching. */
	mutex_enter(&sd_detach_mutex);
	if (un->un_detach_count != 0) {
		/* Abort if the instance is detaching */
		mutex_exit(&sd_detach_mutex);
		return;
	}
	mutex_exit(&sd_detach_mutex);

	now = ddi_get_time();
	/*
	 * Grab both mutexes, in the proper order, since we're accessing
	 * both PM and softstate variables.
	 */
	mutex_enter(SD_MUTEX(un));
	mutex_enter(&un->un_pm_mutex);
	if (((now - un->un_pm_idle_time) > sd_pm_idletime) &&
	    (un->un_ncmds_in_driver == 0) && (un->un_pm_count == 0)) {
		/*
		 * Update the chain types.
		 * This takes effect on the next new command received.
		 */
		if (un->un_f_non_devbsize_supported) {
			un->un_buf_chain_type = SD_CHAIN_INFO_RMMEDIA;
		} else {
			un->un_buf_chain_type = SD_CHAIN_INFO_DISK;
		}
		un->un_uscsi_chain_type = SD_CHAIN_INFO_USCSI_CMD;

		SD_TRACE(SD_LOG_IO_PM, un,
		    "sd_pm_idletimeout_handler: idling device\n");
		(void) pm_idle_component(SD_DEVINFO(un), 0);
		un->un_pm_idle_timeid = NULL;
	} else {
		/* Still busy (or not idle long enough): re-arm ourselves. */
		un->un_pm_idle_timeid =
		    timeout(sd_pm_idletimeout_handler, un,
		    (drv_usectohz((clock_t)300000))); /* 300 ms. */
	}
	mutex_exit(&un->un_pm_mutex);
	mutex_exit(SD_MUTEX(un));
}


/*
 * Function: sd_pm_timeout_handler
 *
 * Description: Callback to tell framework we are idle.
 *
 *     Context: timeout(9f) thread context.
 */

static void
sd_pm_timeout_handler(void *arg)
{
	struct sd_lun *un = arg;

	/* Matches the pm_busy_component() issued when this timeout was set. */
	(void) pm_idle_component(SD_DEVINFO(un), 0);
	mutex_enter(&un->un_pm_mutex);
	un->un_pm_timeid = NULL;
	mutex_exit(&un->un_pm_mutex);
}


/*
 * Function: sdpower
 *
 * Description: PM entry point.
 *
 * Return Code: DDI_SUCCESS
 *		DDI_FAILURE
 *
 *     Context: Kernel thread context
 */

static int
sdpower(dev_info_t *devi, int component, int level)
{
	struct sd_lun	*un;
	int		instance;
	int		rval = DDI_SUCCESS;
	uint_t		i, log_page_size, maxcycles, ncycles;
	uchar_t		*log_page_data;
	int		log_sense_page;
	int		medium_present;
	time_t		intvlp;
	dev_t		dev;
	struct pm_trans_data	sd_pm_tran_data;
	uchar_t		save_state;
	int		sval;
	uchar_t		state_before_pm;
	int		got_semaphore_here;
	sd_ssc_t	*ssc;

	instance = ddi_get_instance(devi);

	/* Only component 0 (spindle-motor) exists; validate the level too. */
	if (((un = ddi_get_soft_state(sd_state, instance)) == NULL) ||
	    (SD_SPINDLE_OFF > level) || (level > SD_SPINDLE_ON) ||
	    component != 0) {
		return (DDI_FAILURE);
	}

	dev = sd_make_device(SD_DEVINFO(un));
	ssc = sd_ssc_init(un);

	SD_TRACE(SD_LOG_IO_PM, un, "sdpower: entry, level = %d\n", level);

	/*
	 * Must synchronize power down with close.
	 * Attempt to decrement/acquire the open/close semaphore,
	 * but do NOT wait on it. If it's not greater than zero,
	 * ie. it can't be decremented without waiting, then
	 * someone else, either open or close, already has it
	 * and the try returns 0. Use that knowledge here to determine
	 * if it's OK to change the device power level.
	 * Also, only increment it on exit if it was decremented, ie. gotten,
	 * here.
 */
	got_semaphore_here = sema_tryp(&un->un_semoclose);

	mutex_enter(SD_MUTEX(un));

	SD_INFO(SD_LOG_POWER, un, "sdpower: un_ncmds_in_driver = %ld\n",
	    un->un_ncmds_in_driver);

	/*
	 * If un_ncmds_in_driver is non-zero it indicates commands are
	 * already being processed in the driver, or if the semaphore was
	 * not gotten here it indicates an open or close is being processed.
	 * At the same time somebody is requesting to go low power which
	 * can't happen, therefore we need to return failure.
	 */
	if ((level == SD_SPINDLE_OFF) &&
	    ((un->un_ncmds_in_driver != 0) || (got_semaphore_here == 0))) {
		mutex_exit(SD_MUTEX(un));

		if (got_semaphore_here != 0) {
			sema_v(&un->un_semoclose);
		}
		SD_TRACE(SD_LOG_IO_PM, un,
		    "sdpower: exit, device has queued cmds.\n");

		goto sdpower_failed;
	}

	/*
	 * if it is OFFLINE that means the disk is completely dead
	 * in our case we have to put the disk in on or off by sending commands
	 * Of course that will fail anyway so return back here.
	 *
	 * Power changes to a device that's OFFLINE or SUSPENDED
	 * are not allowed.
	 */
	if ((un->un_state == SD_STATE_OFFLINE) ||
	    (un->un_state == SD_STATE_SUSPENDED)) {
		mutex_exit(SD_MUTEX(un));

		if (got_semaphore_here != 0) {
			sema_v(&un->un_semoclose);
		}
		SD_TRACE(SD_LOG_IO_PM, un,
		    "sdpower: exit, device is off-line.\n");

		goto sdpower_failed;
	}

	/*
	 * Change the device's state to indicate it's power level
	 * is being changed. Do this to prevent a power off in the
	 * middle of commands, which is especially bad on devices
	 * that are really powered off instead of just spun down.
	 * Every exit path below must restore state_before_pm and
	 * broadcast un_suspend_cv.
	 */
	state_before_pm = un->un_state;
	un->un_state = SD_STATE_PM_CHANGING;

	mutex_exit(SD_MUTEX(un));

	/*
	 * If "pm-capable" property is set to TRUE by HBA drivers,
	 * bypass the following checking, otherwise, check the log
	 * sense information for this device
	 */
	if ((level == SD_SPINDLE_OFF) && un->un_f_log_sense_supported) {
		/*
		 * Get the log sense information to understand whether the
		 * the powercycle counts have gone beyond the threshold.
		 */
		log_page_size = START_STOP_CYCLE_COUNTER_PAGE_SIZE;
		log_page_data = kmem_zalloc(log_page_size, KM_SLEEP);

		mutex_enter(SD_MUTEX(un));
		log_sense_page = un->un_start_stop_cycle_page;
		mutex_exit(SD_MUTEX(un));

		rval = sd_send_scsi_LOG_SENSE(ssc, log_page_data,
		    log_page_size, log_sense_page, 0x01, 0, SD_PATH_DIRECT);

		if (rval != 0) {
			if (rval == EIO)
				sd_ssc_assessment(ssc, SD_FMT_STATUS_CHECK);
			else
				sd_ssc_assessment(ssc, SD_FMT_IGNORE);
		}

#ifdef	SDDEBUG
		if (sd_force_pm_supported) {
			/* Force a successful result */
			rval = 0;
		}
#endif
		if (rval != 0) {
			scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
			    "Log Sense Failed\n");

			kmem_free(log_page_data, log_page_size);
			/* Cannot support power management on those drives */

			if (got_semaphore_here != 0) {
				sema_v(&un->un_semoclose);
			}
			/*
			 * On exit put the state back to it's original value
			 * and broadcast to anyone waiting for the power
			 * change completion.
			 */
			mutex_enter(SD_MUTEX(un));
			un->un_state = state_before_pm;
			cv_broadcast(&un->un_suspend_cv);
			mutex_exit(SD_MUTEX(un));
			SD_TRACE(SD_LOG_IO_PM, un,
			    "sdpower: exit, Log Sense Failed.\n");

			goto sdpower_failed;
		}

		/*
		 * From the page data - Convert the essential information to
		 * pm_trans_data (big-endian 32-bit counters at fixed offsets
		 * in the start/stop cycle counter log page).
		 */
		maxcycles =
		    (log_page_data[0x1c] << 24) | (log_page_data[0x1d] << 16) |
		    (log_page_data[0x1E] << 8)  | log_page_data[0x1F];

		sd_pm_tran_data.un.scsi_cycles.lifemax = maxcycles;

		ncycles =
		    (log_page_data[0x24] << 24) | (log_page_data[0x25] << 16) |
		    (log_page_data[0x26] << 8)  | log_page_data[0x27];

		sd_pm_tran_data.un.scsi_cycles.ncycles = ncycles;

		for (i = 0; i < DC_SCSI_MFR_LEN; i++) {
			sd_pm_tran_data.un.scsi_cycles.svc_date[i] =
			    log_page_data[8+i];
		}

		kmem_free(log_page_data, log_page_size);

		/*
		 * Call pm_trans_check routine to get the Ok from
		 * the global policy
		 */

		sd_pm_tran_data.format = DC_SCSI_FORMAT;
		sd_pm_tran_data.un.scsi_cycles.flag = 0;

		rval = pm_trans_check(&sd_pm_tran_data, &intvlp);
#ifdef	SDDEBUG
		if (sd_force_pm_supported) {
			/* Force a successful result */
			rval = 1;
		}
#endif
		switch (rval) {
		case 0:
			/*
			 * Not Ok to Power cycle or error in parameters passed
			 * Would have given the advised time to consider power
			 * cycle. Based on the new intvlp parameter we are
			 * supposed to pretend we are busy so that pm framework
			 * will never call our power entry point. Because of
			 * that install a timeout handler and wait for the
			 * recommended time to elapse so that power management
			 * can be effective again.
			 *
			 * To effect this behavior, call pm_busy_component to
			 * indicate to the framework this device is busy.
			 * By not adjusting un_pm_count the rest of PM in
			 * the driver will function normally, and independent
			 * of this but because the framework is told the device
			 * is busy it won't attempt powering down until it gets
			 * a matching idle. The timeout handler sends this.
			 * Note: sd_pm_entry can't be called here to do this
			 * because sdpower may have been called as a result
			 * of a call to pm_raise_power from within sd_pm_entry.
			 *
			 * If a timeout handler is already active then
			 * don't install another.
			 */
			mutex_enter(&un->un_pm_mutex);
			if (un->un_pm_timeid == NULL) {
				un->un_pm_timeid =
				    timeout(sd_pm_timeout_handler,
				    un, intvlp * drv_usectohz(1000000));
				mutex_exit(&un->un_pm_mutex);
				(void) pm_busy_component(SD_DEVINFO(un), 0);
			} else {
				mutex_exit(&un->un_pm_mutex);
			}
			if (got_semaphore_here != 0) {
				sema_v(&un->un_semoclose);
			}
			/*
			 * On exit put the state back to it's original value
			 * and broadcast to anyone waiting for the power
			 * change completion.
			 */
			mutex_enter(SD_MUTEX(un));
			un->un_state = state_before_pm;
			cv_broadcast(&un->un_suspend_cv);
			mutex_exit(SD_MUTEX(un));

			SD_TRACE(SD_LOG_IO_PM, un, "sdpower: exit, "
			    "trans check Failed, not ok to power cycle.\n");

			goto sdpower_failed;
		case -1:
			if (got_semaphore_here != 0) {
				sema_v(&un->un_semoclose);
			}
			/*
			 * On exit put the state back to it's original value
			 * and broadcast to anyone waiting for the power
			 * change completion.
			 */
			mutex_enter(SD_MUTEX(un));
			un->un_state = state_before_pm;
			cv_broadcast(&un->un_suspend_cv);
			mutex_exit(SD_MUTEX(un));
			SD_TRACE(SD_LOG_IO_PM, un,
			    "sdpower: exit, trans check command Failed.\n");

			goto sdpower_failed;
		}
	}

	if (level == SD_SPINDLE_OFF) {
		/*
		 * Save the last state... if the STOP FAILS we need it
		 * for restoring. (save_state is only set on this path and
		 * only consumed in the SD_SPINDLE_OFF case below.)
		 */
		mutex_enter(SD_MUTEX(un));
		save_state = un->un_last_state;
		/*
		 * There must not be any cmds. getting processed
		 * in the driver when we get here. Power to the
		 * device is potentially going off.
		 */
		ASSERT(un->un_ncmds_in_driver == 0);
		mutex_exit(SD_MUTEX(un));

		/*
		 * For now suspend the device completely before spindle is
		 * turned off
		 */
		if ((rval = sd_ddi_pm_suspend(un)) == DDI_FAILURE) {
			if (got_semaphore_here != 0) {
				sema_v(&un->un_semoclose);
			}
			/*
			 * On exit put the state back to it's original value
			 * and broadcast to anyone waiting for the power
			 * change completion.
			 */
			mutex_enter(SD_MUTEX(un));
			un->un_state = state_before_pm;
			cv_broadcast(&un->un_suspend_cv);
			mutex_exit(SD_MUTEX(un));
			SD_TRACE(SD_LOG_IO_PM, un,
			    "sdpower: exit, PM suspend Failed.\n");

			goto sdpower_failed;
		}
	}

	/*
	 * The transition from SPINDLE_OFF to SPINDLE_ON can happen in open,
	 * close, or strategy. Dump no longer uses this routine, it uses it's
	 * own code so it can be done in polled mode.
	 */

	medium_present = TRUE;

	/*
	 * When powering up, issue a TUR in case the device is at unit
	 * attention. Don't do retries. Bypass the PM layer, otherwise
	 * a deadlock on un_pm_busy_cv will occur.
	 */
	if (level == SD_SPINDLE_ON) {
		sval = sd_send_scsi_TEST_UNIT_READY(ssc,
		    SD_DONT_RETRY_TUR | SD_BYPASS_PM);
		if (sval != 0)
			sd_ssc_assessment(ssc, SD_FMT_IGNORE);
	}

	SD_TRACE(SD_LOG_IO_PM, un, "sdpower: sending \'%s\' unit\n",
	    ((level == SD_SPINDLE_ON) ? "START" : "STOP"));

	sval = sd_send_scsi_START_STOP_UNIT(ssc,
	    ((level == SD_SPINDLE_ON) ? SD_TARGET_START : SD_TARGET_STOP),
	    SD_PATH_DIRECT);
	if (sval != 0) {
		if (sval == EIO)
			sd_ssc_assessment(ssc, SD_FMT_STATUS_CHECK);
		else
			sd_ssc_assessment(ssc, SD_FMT_IGNORE);
	}

	/* Command failed, check for media present. */
	if ((sval == ENXIO) && un->un_f_has_removable_media) {
		medium_present = FALSE;
	}

	/*
	 * The conditions of interest here are:
	 *   if a spindle off with media present fails,
	 *	then restore the state and return an error.
	 *   else if a spindle on fails,
	 *	then return an error (there's no state to restore).
	 * In all other cases we setup for the new state
	 * and return success.
	 */
	switch (level) {
	case SD_SPINDLE_OFF:
		if ((medium_present == TRUE) && (sval != 0)) {
			/* The stop command from above failed */
			rval = DDI_FAILURE;
			/*
			 * The stop command failed, and we have media
			 * present. Put the level back by calling the
			 * sd_pm_resume() and set the state back to
			 * it's previous value.
			 */
			(void) sd_ddi_pm_resume(un);
			mutex_enter(SD_MUTEX(un));
			un->un_last_state = save_state;
			mutex_exit(SD_MUTEX(un));
			break;
		}
		/*
		 * The stop command from above succeeded.
		 */
		if (un->un_f_monitor_media_state) {
			/*
			 * Terminate watch thread in case of removable media
			 * devices going into low power state. This is as per
			 * the requirements of pm framework, otherwise commands
			 * will be generated for the device (through watch
			 * thread), even when the device is in low power state.
			 */
			mutex_enter(SD_MUTEX(un));
			un->un_f_watcht_stopped = FALSE;
			if (un->un_swr_token != NULL) {
				opaque_t temp_token = un->un_swr_token;
				un->un_f_watcht_stopped = TRUE;
				un->un_swr_token = NULL;
				mutex_exit(SD_MUTEX(un));
				(void) scsi_watch_request_terminate(temp_token,
				    SCSI_WATCH_TERMINATE_ALL_WAIT);
			} else {
				mutex_exit(SD_MUTEX(un));
			}
		}
		break;

	default:	/* The level requested is spindle on... */
		/*
		 * Legacy behavior: return success on a failed spinup
		 * if there is no media in the drive.
		 * Do this by looking at medium_present here.
		 */
		if ((sval != 0) && medium_present) {
			/* The start command from above failed */
			rval = DDI_FAILURE;
			break;
		}
		/*
		 * The start command from above succeeded
		 * Resume the devices now that we have
		 * started the disks
		 */
		(void) sd_ddi_pm_resume(un);

		/*
		 * Resume the watch thread since it was suspended
		 * when the device went into low power mode.
		 */
		if (un->un_f_monitor_media_state) {
			mutex_enter(SD_MUTEX(un));
			if (un->un_f_watcht_stopped == TRUE) {
				opaque_t temp_token;

				un->un_f_watcht_stopped = FALSE;
				mutex_exit(SD_MUTEX(un));
				temp_token = scsi_watch_request_submit(
				    SD_SCSI_DEVP(un),
				    sd_check_media_time,
				    SENSE_LENGTH, sd_media_watch_cb,
				    (caddr_t)dev);
				mutex_enter(SD_MUTEX(un));
				un->un_swr_token = temp_token;
			}
			mutex_exit(SD_MUTEX(un));
		}
	}
	if (got_semaphore_here != 0) {
		sema_v(&un->un_semoclose);
	}
	/*
	 * On exit put the state back to it's original value
	 * and broadcast to anyone waiting for the power
	 * change completion.
	 */
	mutex_enter(SD_MUTEX(un));
	un->un_state = state_before_pm;
	cv_broadcast(&un->un_suspend_cv);
	mutex_exit(SD_MUTEX(un));

	SD_TRACE(SD_LOG_IO_PM, un, "sdpower: exit, status = 0x%x\n", rval);

	sd_ssc_fini(ssc);
	return (rval);

sdpower_failed:

	/* Common failure exit: release the ssc before returning. */
	sd_ssc_fini(ssc);
	return (DDI_FAILURE);
}



/*
 * Function: sdattach
 *
 * Description: Driver's attach(9e) entry point function.
 *
 *   Arguments: devi - opaque device info handle
 *		cmd  - attach type
 *
 * Return Code: DDI_SUCCESS
 *		DDI_FAILURE
 *
 *     Context: Kernel thread context
 */

static int
sdattach(dev_info_t *devi, ddi_attach_cmd_t cmd)
{
	switch (cmd) {
	case DDI_ATTACH:
		return (sd_unit_attach(devi));
	case DDI_RESUME:
		return (sd_ddi_resume(devi));
	default:
		break;
	}
	return (DDI_FAILURE);
}


/*
 * Function: sddetach
 *
 * Description: Driver's detach(9E) entry point function.
 *
 *   Arguments: devi - opaque device info handle
 *		cmd  - detach type
 *
 * Return Code: DDI_SUCCESS
 *		DDI_FAILURE
 *
 *     Context: Kernel thread context
 */

static int
sddetach(dev_info_t *devi, ddi_detach_cmd_t cmd)
{
	switch (cmd) {
	case DDI_DETACH:
		return (sd_unit_detach(devi));
	case DDI_SUSPEND:
		return (sd_ddi_suspend(devi));
	default:
		break;
	}
	return (DDI_FAILURE);
}


/*
 * Function: sd_sync_with_callback
 *
 * Description: Prevents sd_unit_attach or sd_unit_detach from freeing the soft
 *		state while the callback routine is active.
 *
 *   Arguments: un: softstate structure for the instance
 *
 *     Context: Kernel thread context
 */

static void
sd_sync_with_callback(struct sd_lun *un)
{
	ASSERT(un != NULL);

	mutex_enter(SD_MUTEX(un));

	ASSERT(un->un_in_callback >= 0);

	/*
	 * Poll until all in-flight callbacks have drained, dropping the
	 * mutex each iteration so the callbacks can make progress.
	 */
	while (un->un_in_callback > 0) {
		mutex_exit(SD_MUTEX(un));
		delay(2);
		mutex_enter(SD_MUTEX(un));
	}

	mutex_exit(SD_MUTEX(un));
}

/*
 * Function: sd_unit_attach
 *
 * Description: Performs DDI_ATTACH processing for sdattach(). Allocates
 *		the soft state structure for the device and performs
 *		all necessary structure and device initializations.
 *
 *   Arguments: devi: the system's dev_info_t for the device.
 *
 * Return Code: DDI_SUCCESS if attach is successful.
 *		DDI_FAILURE if any part of the attach fails.
 *
 *     Context: Called at attach(9e) time for the DDI_ATTACH flag.
 *		Kernel thread context only. Can sleep.
 */

static int
sd_unit_attach(dev_info_t *devi)
{
	struct	scsi_device	*devp;
	struct	sd_lun		*un;
	char			*variantp;
	int	reservation_flag = SD_TARGET_IS_UNRESERVED;
	int	instance;
	int	rval;
	int	wc_enabled;
	int	tgt;
	uint64_t	capacity;
	uint_t		lbasize = 0;
	dev_info_t	*pdip = ddi_get_parent(devi);
	int		offbyone = 0;
	int		geom_label_valid = 0;
	sd_ssc_t	*ssc;
	int		status;
	struct sd_fm_internal	*sfip = NULL;
#if defined(__sparc)
	int	max_xfer_size;
#endif

	/*
	 * Retrieve the target driver's private data area. This was set
	 * up by the HBA.
	 */
	devp = ddi_get_driver_private(devi);

	/*
	 * Retrieve the target ID of the device.
6844 */ 6845 tgt = ddi_prop_get_int(DDI_DEV_T_ANY, devi, DDI_PROP_DONTPASS, 6846 SCSI_ADDR_PROP_TARGET, -1); 6847 6848 /* 6849 * Since we have no idea what state things were left in by the last 6850 * user of the device, set up some 'default' settings, ie. turn 'em 6851 * off. The scsi_ifsetcap calls force re-negotiations with the drive. 6852 * Do this before the scsi_probe, which sends an inquiry. 6853 * This is a fix for bug (4430280). 6854 * Of special importance is wide-xfer. The drive could have been left 6855 * in wide transfer mode by the last driver to communicate with it, 6856 * this includes us. If that's the case, and if the following is not 6857 * setup properly or we don't re-negotiate with the drive prior to 6858 * transferring data to/from the drive, it causes bus parity errors, 6859 * data overruns, and unexpected interrupts. This first occurred when 6860 * the fix for bug (4378686) was made. 6861 */ 6862 (void) scsi_ifsetcap(&devp->sd_address, "lun-reset", 0, 1); 6863 (void) scsi_ifsetcap(&devp->sd_address, "wide-xfer", 0, 1); 6864 (void) scsi_ifsetcap(&devp->sd_address, "auto-rqsense", 0, 1); 6865 6866 /* 6867 * Currently, scsi_ifsetcap sets tagged-qing capability for all LUNs 6868 * on a target. Setting it per lun instance actually sets the 6869 * capability of this target, which affects those luns already 6870 * attached on the same target. So during attach, we can only disable 6871 * this capability only when no other lun has been attached on this 6872 * target. By doing this, we assume a target has the same tagged-qing 6873 * capability for every lun. The condition can be removed when HBA 6874 * is changed to support per lun based tagged-qing capability. 6875 */ 6876 if (sd_scsi_get_target_lun_count(pdip, tgt) < 1) { 6877 (void) scsi_ifsetcap(&devp->sd_address, "tagged-qing", 0, 1); 6878 } 6879 6880 /* 6881 * Use scsi_probe() to issue an INQUIRY command to the device. 
6882 * This call will allocate and fill in the scsi_inquiry structure 6883 * and point the sd_inq member of the scsi_device structure to it. 6884 * If the attach succeeds, then this memory will not be de-allocated 6885 * (via scsi_unprobe()) until the instance is detached. 6886 */ 6887 if (scsi_probe(devp, SLEEP_FUNC) != SCSIPROBE_EXISTS) { 6888 goto probe_failed; 6889 } 6890 6891 /* 6892 * Check the device type as specified in the inquiry data and 6893 * claim it if it is of a type that we support. 6894 */ 6895 switch (devp->sd_inq->inq_dtype) { 6896 case DTYPE_DIRECT: 6897 break; 6898 case DTYPE_RODIRECT: 6899 break; 6900 case DTYPE_OPTICAL: 6901 break; 6902 case DTYPE_NOTPRESENT: 6903 default: 6904 /* Unsupported device type; fail the attach. */ 6905 goto probe_failed; 6906 } 6907 6908 /* 6909 * Allocate the soft state structure for this unit. 6910 * 6911 * We rely upon this memory being set to all zeroes by 6912 * ddi_soft_state_zalloc(). We assume that any member of the 6913 * soft state structure that is not explicitly initialized by 6914 * this routine will have a value of zero. 6915 */ 6916 instance = ddi_get_instance(devp->sd_dev); 6917 if (ddi_soft_state_zalloc(sd_state, instance) != DDI_SUCCESS) { 6918 goto probe_failed; 6919 } 6920 6921 /* 6922 * Retrieve a pointer to the newly-allocated soft state. 6923 * 6924 * This should NEVER fail if the ddi_soft_state_zalloc() call above 6925 * was successful, unless something has gone horribly wrong and the 6926 * ddi's soft state internals are corrupt (in which case it is 6927 * probably better to halt here than just fail the attach....) 6928 */ 6929 if ((un = ddi_get_soft_state(sd_state, instance)) == NULL) { 6930 panic("sd_unit_attach: NULL soft state on instance:0x%x", 6931 instance); 6932 /*NOTREACHED*/ 6933 } 6934 6935 /* 6936 * Link the back ptr of the driver soft state to the scsi_device 6937 * struct for this lun. 
6938 * Save a pointer to the softstate in the driver-private area of 6939 * the scsi_device struct. 6940 * Note: We cannot call SD_INFO, SD_TRACE, SD_ERROR, or SD_DIAG until 6941 * we first set un->un_sd below. 6942 */ 6943 un->un_sd = devp; 6944 devp->sd_private = (opaque_t)un; 6945 6946 /* 6947 * The following must be after devp is stored in the soft state struct. 6948 */ 6949 #ifdef SDDEBUG 6950 SD_TRACE(SD_LOG_ATTACH_DETACH, un, 6951 "%s_unit_attach: un:0x%p instance:%d\n", 6952 ddi_driver_name(devi), un, instance); 6953 #endif 6954 6955 /* 6956 * Set up the device type and node type (for the minor nodes). 6957 * By default we assume that the device can at least support the 6958 * Common Command Set. Call it a CD-ROM if it reports itself 6959 * as a RODIRECT device. 6960 */ 6961 switch (devp->sd_inq->inq_dtype) { 6962 case DTYPE_RODIRECT: 6963 un->un_node_type = DDI_NT_CD_CHAN; 6964 un->un_ctype = CTYPE_CDROM; 6965 break; 6966 case DTYPE_OPTICAL: 6967 un->un_node_type = DDI_NT_BLOCK_CHAN; 6968 un->un_ctype = CTYPE_ROD; 6969 break; 6970 default: 6971 un->un_node_type = DDI_NT_BLOCK_CHAN; 6972 un->un_ctype = CTYPE_CCS; 6973 break; 6974 } 6975 6976 /* 6977 * Try to read the interconnect type from the HBA. 6978 * 6979 * Note: This driver is currently compiled as two binaries, a parallel 6980 * scsi version (sd) and a fibre channel version (ssd). All functional 6981 * differences are determined at compile time. In the future a single 6982 * binary will be provided and the interconnect type will be used to 6983 * differentiate between fibre and parallel scsi behaviors. At that time 6984 * it will be necessary for all fibre channel HBAs to support this 6985 * property. 
6986 * 6987 * set un_f_is_fiber to TRUE ( default fiber ) 6988 */ 6989 un->un_f_is_fibre = TRUE; 6990 switch (scsi_ifgetcap(SD_ADDRESS(un), "interconnect-type", -1)) { 6991 case INTERCONNECT_SSA: 6992 un->un_interconnect_type = SD_INTERCONNECT_SSA; 6993 SD_INFO(SD_LOG_ATTACH_DETACH, un, 6994 "sd_unit_attach: un:0x%p SD_INTERCONNECT_SSA\n", un); 6995 break; 6996 case INTERCONNECT_PARALLEL: 6997 un->un_f_is_fibre = FALSE; 6998 un->un_interconnect_type = SD_INTERCONNECT_PARALLEL; 6999 SD_INFO(SD_LOG_ATTACH_DETACH, un, 7000 "sd_unit_attach: un:0x%p SD_INTERCONNECT_PARALLEL\n", un); 7001 break; 7002 case INTERCONNECT_SATA: 7003 un->un_f_is_fibre = FALSE; 7004 un->un_interconnect_type = SD_INTERCONNECT_SATA; 7005 SD_INFO(SD_LOG_ATTACH_DETACH, un, 7006 "sd_unit_attach: un:0x%p SD_INTERCONNECT_SATA\n", un); 7007 break; 7008 case INTERCONNECT_FIBRE: 7009 un->un_interconnect_type = SD_INTERCONNECT_FIBRE; 7010 SD_INFO(SD_LOG_ATTACH_DETACH, un, 7011 "sd_unit_attach: un:0x%p SD_INTERCONNECT_FIBRE\n", un); 7012 break; 7013 case INTERCONNECT_FABRIC: 7014 un->un_interconnect_type = SD_INTERCONNECT_FABRIC; 7015 un->un_node_type = DDI_NT_BLOCK_FABRIC; 7016 SD_INFO(SD_LOG_ATTACH_DETACH, un, 7017 "sd_unit_attach: un:0x%p SD_INTERCONNECT_FABRIC\n", un); 7018 break; 7019 default: 7020 #ifdef SD_DEFAULT_INTERCONNECT_TYPE 7021 /* 7022 * The HBA does not support the "interconnect-type" property 7023 * (or did not provide a recognized type). 7024 * 7025 * Note: This will be obsoleted when a single fibre channel 7026 * and parallel scsi driver is delivered. In the meantime the 7027 * interconnect type will be set to the platform default.If that 7028 * type is not parallel SCSI, it means that we should be 7029 * assuming "ssd" semantics. However, here this also means that 7030 * the FC HBA is not supporting the "interconnect-type" property 7031 * like we expect it to, so log this occurrence. 
7032 */ 7033 un->un_interconnect_type = SD_DEFAULT_INTERCONNECT_TYPE; 7034 if (!SD_IS_PARALLEL_SCSI(un)) { 7035 SD_INFO(SD_LOG_ATTACH_DETACH, un, 7036 "sd_unit_attach: un:0x%p Assuming " 7037 "INTERCONNECT_FIBRE\n", un); 7038 } else { 7039 SD_INFO(SD_LOG_ATTACH_DETACH, un, 7040 "sd_unit_attach: un:0x%p Assuming " 7041 "INTERCONNECT_PARALLEL\n", un); 7042 un->un_f_is_fibre = FALSE; 7043 } 7044 #else 7045 /* 7046 * Note: This source will be implemented when a single fibre 7047 * channel and parallel scsi driver is delivered. The default 7048 * will be to assume that if a device does not support the 7049 * "interconnect-type" property it is a parallel SCSI HBA and 7050 * we will set the interconnect type for parallel scsi. 7051 */ 7052 un->un_interconnect_type = SD_INTERCONNECT_PARALLEL; 7053 un->un_f_is_fibre = FALSE; 7054 #endif 7055 break; 7056 } 7057 7058 if (un->un_f_is_fibre == TRUE) { 7059 if (scsi_ifgetcap(SD_ADDRESS(un), "scsi-version", 1) == 7060 SCSI_VERSION_3) { 7061 switch (un->un_interconnect_type) { 7062 case SD_INTERCONNECT_FIBRE: 7063 case SD_INTERCONNECT_SSA: 7064 un->un_node_type = DDI_NT_BLOCK_WWN; 7065 break; 7066 default: 7067 break; 7068 } 7069 } 7070 } 7071 7072 /* 7073 * Initialize the Request Sense command for the target 7074 */ 7075 if (sd_alloc_rqs(devp, un) != DDI_SUCCESS) { 7076 goto alloc_rqs_failed; 7077 } 7078 7079 /* 7080 * Set un_retry_count with SD_RETRY_COUNT, this is ok for Sparc 7081 * with separate binary for sd and ssd. 7082 * 7083 * x86 has 1 binary, un_retry_count is set base on connection type. 7084 * The hardcoded values will go away when Sparc uses 1 binary 7085 * for sd and ssd. This hardcoded values need to match 7086 * SD_RETRY_COUNT in sddef.h 7087 * The value used is base on interconnect type. 7088 * fibre = 3, parallel = 5 7089 */ 7090 #if defined(__i386) || defined(__amd64) 7091 un->un_retry_count = un->un_f_is_fibre ? 
3 : 5; 7092 #else 7093 un->un_retry_count = SD_RETRY_COUNT; 7094 #endif 7095 7096 /* 7097 * Set the per disk retry count to the default number of retries 7098 * for disks and CDROMs. This value can be overridden by the 7099 * disk property list or an entry in sd.conf. 7100 */ 7101 un->un_notready_retry_count = 7102 ISCD(un) ? CD_NOT_READY_RETRY_COUNT(un) 7103 : DISK_NOT_READY_RETRY_COUNT(un); 7104 7105 /* 7106 * Set the busy retry count to the default value of un_retry_count. 7107 * This can be overridden by entries in sd.conf or the device 7108 * config table. 7109 */ 7110 un->un_busy_retry_count = un->un_retry_count; 7111 7112 /* 7113 * Init the reset threshold for retries. This number determines 7114 * how many retries must be performed before a reset can be issued 7115 * (for certain error conditions). This can be overridden by entries 7116 * in sd.conf or the device config table. 7117 */ 7118 un->un_reset_retry_count = (un->un_retry_count / 2); 7119 7120 /* 7121 * Set the victim_retry_count to the default un_retry_count 7122 */ 7123 un->un_victim_retry_count = (2 * un->un_retry_count); 7124 7125 /* 7126 * Set the reservation release timeout to the default value of 7127 * 5 seconds. This can be overridden by entries in ssd.conf or the 7128 * device config table. 7129 */ 7130 un->un_reserve_release_time = 5; 7131 7132 /* 7133 * Set up the default maximum transfer size. Note that this may 7134 * get updated later in the attach, when setting up default wide 7135 * operations for disks. 7136 */ 7137 #if defined(__i386) || defined(__amd64) 7138 un->un_max_xfer_size = (uint_t)SD_DEFAULT_MAX_XFER_SIZE; 7139 un->un_partial_dma_supported = 1; 7140 #else 7141 un->un_max_xfer_size = (uint_t)maxphys; 7142 #endif 7143 7144 /* 7145 * Get "allow bus device reset" property (defaults to "enabled" if 7146 * the property was not defined). This is to disable bus resets for 7147 * certain kinds of error recovery. 
Note: In the future when a run-time 7148 * fibre check is available the soft state flag should default to 7149 * enabled. 7150 */ 7151 if (un->un_f_is_fibre == TRUE) { 7152 un->un_f_allow_bus_device_reset = TRUE; 7153 } else { 7154 if (ddi_getprop(DDI_DEV_T_ANY, devi, DDI_PROP_DONTPASS, 7155 "allow-bus-device-reset", 1) != 0) { 7156 un->un_f_allow_bus_device_reset = TRUE; 7157 SD_INFO(SD_LOG_ATTACH_DETACH, un, 7158 "sd_unit_attach: un:0x%p Bus device reset " 7159 "enabled\n", un); 7160 } else { 7161 un->un_f_allow_bus_device_reset = FALSE; 7162 SD_INFO(SD_LOG_ATTACH_DETACH, un, 7163 "sd_unit_attach: un:0x%p Bus device reset " 7164 "disabled\n", un); 7165 } 7166 } 7167 7168 /* 7169 * Check if this is an ATAPI device. ATAPI devices use Group 1 7170 * Read/Write commands and Group 2 Mode Sense/Select commands. 7171 * 7172 * Note: The "obsolete" way of doing this is to check for the "atapi" 7173 * property. The new "variant" property with a value of "atapi" has been 7174 * introduced so that future 'variants' of standard SCSI behavior (like 7175 * atapi) could be specified by the underlying HBA drivers by supplying 7176 * a new value for the "variant" property, instead of having to define a 7177 * new property. 7178 */ 7179 if (ddi_prop_get_int(DDI_DEV_T_ANY, devi, 0, "atapi", -1) != -1) { 7180 un->un_f_cfg_is_atapi = TRUE; 7181 SD_INFO(SD_LOG_ATTACH_DETACH, un, 7182 "sd_unit_attach: un:0x%p Atapi device\n", un); 7183 } 7184 if (ddi_prop_lookup_string(DDI_DEV_T_ANY, devi, 0, "variant", 7185 &variantp) == DDI_PROP_SUCCESS) { 7186 if (strcmp(variantp, "atapi") == 0) { 7187 un->un_f_cfg_is_atapi = TRUE; 7188 SD_INFO(SD_LOG_ATTACH_DETACH, un, 7189 "sd_unit_attach: un:0x%p Atapi device\n", un); 7190 } 7191 ddi_prop_free(variantp); 7192 } 7193 7194 un->un_cmd_timeout = SD_IO_TIME; 7195 7196 un->un_busy_timeout = SD_BSY_TIMEOUT; 7197 7198 /* Info on current states, statuses, etc. 
(Updated frequently) */ 7199 un->un_state = SD_STATE_NORMAL; 7200 un->un_last_state = SD_STATE_NORMAL; 7201 7202 /* Control & status info for command throttling */ 7203 un->un_throttle = sd_max_throttle; 7204 un->un_saved_throttle = sd_max_throttle; 7205 un->un_min_throttle = sd_min_throttle; 7206 7207 if (un->un_f_is_fibre == TRUE) { 7208 un->un_f_use_adaptive_throttle = TRUE; 7209 } else { 7210 un->un_f_use_adaptive_throttle = FALSE; 7211 } 7212 7213 /* Removable media support. */ 7214 cv_init(&un->un_state_cv, NULL, CV_DRIVER, NULL); 7215 un->un_mediastate = DKIO_NONE; 7216 un->un_specified_mediastate = DKIO_NONE; 7217 7218 /* CVs for suspend/resume (PM or DR) */ 7219 cv_init(&un->un_suspend_cv, NULL, CV_DRIVER, NULL); 7220 cv_init(&un->un_disk_busy_cv, NULL, CV_DRIVER, NULL); 7221 7222 /* Power management support. */ 7223 un->un_power_level = SD_SPINDLE_UNINIT; 7224 7225 cv_init(&un->un_wcc_cv, NULL, CV_DRIVER, NULL); 7226 un->un_f_wcc_inprog = 0; 7227 7228 /* 7229 * The open/close semaphore is used to serialize threads executing 7230 * in the driver's open & close entry point routines for a given 7231 * instance. 7232 */ 7233 (void) sema_init(&un->un_semoclose, 1, NULL, SEMA_DRIVER, NULL); 7234 7235 /* 7236 * The conf file entry and softstate variable is a forceful override, 7237 * meaning a non-zero value must be entered to change the default. 7238 */ 7239 un->un_f_disksort_disabled = FALSE; 7240 7241 /* 7242 * Retrieve the properties from the static driver table or the driver 7243 * configuration file (.conf) for this unit and update the soft state 7244 * for the device as needed for the indicated properties. 7245 * Note: the property configuration needs to occur here as some of the 7246 * following routines may have dependencies on soft state flags set 7247 * as part of the driver property configuration. 
7248 */ 7249 sd_read_unit_properties(un); 7250 SD_TRACE(SD_LOG_ATTACH_DETACH, un, 7251 "sd_unit_attach: un:0x%p property configuration complete.\n", un); 7252 7253 /* 7254 * Only if a device has "hotpluggable" property, it is 7255 * treated as hotpluggable device. Otherwise, it is 7256 * regarded as non-hotpluggable one. 7257 */ 7258 if (ddi_prop_get_int(DDI_DEV_T_ANY, devi, 0, "hotpluggable", 7259 -1) != -1) { 7260 un->un_f_is_hotpluggable = TRUE; 7261 } 7262 7263 /* 7264 * set unit's attributes(flags) according to "hotpluggable" and 7265 * RMB bit in INQUIRY data. 7266 */ 7267 sd_set_unit_attributes(un, devi); 7268 7269 /* 7270 * By default, we mark the capacity, lbasize, and geometry 7271 * as invalid. Only if we successfully read a valid capacity 7272 * will we update the un_blockcount and un_tgt_blocksize with the 7273 * valid values (the geometry will be validated later). 7274 */ 7275 un->un_f_blockcount_is_valid = FALSE; 7276 un->un_f_tgt_blocksize_is_valid = FALSE; 7277 7278 /* 7279 * Use DEV_BSIZE and DEV_BSHIFT as defaults, until we can determine 7280 * otherwise. 7281 */ 7282 un->un_tgt_blocksize = un->un_sys_blocksize = DEV_BSIZE; 7283 un->un_blockcount = 0; 7284 7285 /* 7286 * Set up the per-instance info needed to determine the correct 7287 * CDBs and other info for issuing commands to the target. 7288 */ 7289 sd_init_cdb_limits(un); 7290 7291 /* 7292 * Set up the IO chains to use, based upon the target type. 
7293 */ 7294 if (un->un_f_non_devbsize_supported) { 7295 un->un_buf_chain_type = SD_CHAIN_INFO_RMMEDIA; 7296 } else { 7297 un->un_buf_chain_type = SD_CHAIN_INFO_DISK; 7298 } 7299 un->un_uscsi_chain_type = SD_CHAIN_INFO_USCSI_CMD; 7300 un->un_direct_chain_type = SD_CHAIN_INFO_DIRECT_CMD; 7301 un->un_priority_chain_type = SD_CHAIN_INFO_PRIORITY_CMD; 7302 7303 un->un_xbuf_attr = ddi_xbuf_attr_create(sizeof (struct sd_xbuf), 7304 sd_xbuf_strategy, un, sd_xbuf_active_limit, sd_xbuf_reserve_limit, 7305 ddi_driver_major(devi), DDI_XBUF_QTHREAD_DRIVER); 7306 ddi_xbuf_attr_register_devinfo(un->un_xbuf_attr, devi); 7307 7308 7309 if (ISCD(un)) { 7310 un->un_additional_codes = sd_additional_codes; 7311 } else { 7312 un->un_additional_codes = NULL; 7313 } 7314 7315 /* 7316 * Create the kstats here so they can be available for attach-time 7317 * routines that send commands to the unit (either polled or via 7318 * sd_send_scsi_cmd). 7319 * 7320 * Note: This is a critical sequence that needs to be maintained: 7321 * 1) Instantiate the kstats here, before any routines using the 7322 * iopath (i.e. sd_send_scsi_cmd). 7323 * 2) Instantiate and initialize the partition stats 7324 * (sd_set_pstats). 7325 * 3) Initialize the error stats (sd_set_errstats), following 7326 * sd_validate_geometry(),sd_register_devid(), 7327 * and sd_cache_control(). 
7328 */ 7329 7330 un->un_stats = kstat_create(sd_label, instance, 7331 NULL, "disk", KSTAT_TYPE_IO, 1, KSTAT_FLAG_PERSISTENT); 7332 if (un->un_stats != NULL) { 7333 un->un_stats->ks_lock = SD_MUTEX(un); 7334 kstat_install(un->un_stats); 7335 } 7336 SD_TRACE(SD_LOG_ATTACH_DETACH, un, 7337 "sd_unit_attach: un:0x%p un_stats created\n", un); 7338 7339 sd_create_errstats(un, instance); 7340 if (un->un_errstats == NULL) { 7341 goto create_errstats_failed; 7342 } 7343 SD_TRACE(SD_LOG_ATTACH_DETACH, un, 7344 "sd_unit_attach: un:0x%p errstats created\n", un); 7345 7346 /* 7347 * The following if/else code was relocated here from below as part 7348 * of the fix for bug (4430280). However with the default setup added 7349 * on entry to this routine, it's no longer absolutely necessary for 7350 * this to be before the call to sd_spin_up_unit. 7351 */ 7352 if (SD_IS_PARALLEL_SCSI(un) || SD_IS_SERIAL(un)) { 7353 int tq_trigger_flag = (((devp->sd_inq->inq_ansi == 4) || 7354 (devp->sd_inq->inq_ansi == 5)) && 7355 devp->sd_inq->inq_bque) || devp->sd_inq->inq_cmdque; 7356 7357 /* 7358 * If tagged queueing is supported by the target 7359 * and by the host adapter then we will enable it 7360 */ 7361 un->un_tagflags = 0; 7362 if ((devp->sd_inq->inq_rdf == RDF_SCSI2) && tq_trigger_flag && 7363 (un->un_f_arq_enabled == TRUE)) { 7364 if (scsi_ifsetcap(SD_ADDRESS(un), "tagged-qing", 7365 1, 1) == 1) { 7366 un->un_tagflags = FLAG_STAG; 7367 SD_INFO(SD_LOG_ATTACH_DETACH, un, 7368 "sd_unit_attach: un:0x%p tag queueing " 7369 "enabled\n", un); 7370 } else if (scsi_ifgetcap(SD_ADDRESS(un), 7371 "untagged-qing", 0) == 1) { 7372 un->un_f_opt_queueing = TRUE; 7373 un->un_saved_throttle = un->un_throttle = 7374 min(un->un_throttle, 3); 7375 } else { 7376 un->un_f_opt_queueing = FALSE; 7377 un->un_saved_throttle = un->un_throttle = 1; 7378 } 7379 } else if ((scsi_ifgetcap(SD_ADDRESS(un), "untagged-qing", 0) 7380 == 1) && (un->un_f_arq_enabled == TRUE)) { 7381 /* The Host Adapter supports internal 
queueing. */ 7382 un->un_f_opt_queueing = TRUE; 7383 un->un_saved_throttle = un->un_throttle = 7384 min(un->un_throttle, 3); 7385 } else { 7386 un->un_f_opt_queueing = FALSE; 7387 un->un_saved_throttle = un->un_throttle = 1; 7388 SD_INFO(SD_LOG_ATTACH_DETACH, un, 7389 "sd_unit_attach: un:0x%p no tag queueing\n", un); 7390 } 7391 7392 /* 7393 * Enable large transfers for SATA/SAS drives 7394 */ 7395 if (SD_IS_SERIAL(un)) { 7396 un->un_max_xfer_size = 7397 ddi_getprop(DDI_DEV_T_ANY, devi, 0, 7398 sd_max_xfer_size, SD_MAX_XFER_SIZE); 7399 SD_INFO(SD_LOG_ATTACH_DETACH, un, 7400 "sd_unit_attach: un:0x%p max transfer " 7401 "size=0x%x\n", un, un->un_max_xfer_size); 7402 7403 } 7404 7405 /* Setup or tear down default wide operations for disks */ 7406 7407 /* 7408 * Note: Legacy: it may be possible for both "sd_max_xfer_size" 7409 * and "ssd_max_xfer_size" to exist simultaneously on the same 7410 * system and be set to different values. In the future this 7411 * code may need to be updated when the ssd module is 7412 * obsoleted and removed from the system. 
(4299588) 7413 */ 7414 if (SD_IS_PARALLEL_SCSI(un) && 7415 (devp->sd_inq->inq_rdf == RDF_SCSI2) && 7416 (devp->sd_inq->inq_wbus16 || devp->sd_inq->inq_wbus32)) { 7417 if (scsi_ifsetcap(SD_ADDRESS(un), "wide-xfer", 7418 1, 1) == 1) { 7419 SD_INFO(SD_LOG_ATTACH_DETACH, un, 7420 "sd_unit_attach: un:0x%p Wide Transfer " 7421 "enabled\n", un); 7422 } 7423 7424 /* 7425 * If tagged queuing has also been enabled, then 7426 * enable large xfers 7427 */ 7428 if (un->un_saved_throttle == sd_max_throttle) { 7429 un->un_max_xfer_size = 7430 ddi_getprop(DDI_DEV_T_ANY, devi, 0, 7431 sd_max_xfer_size, SD_MAX_XFER_SIZE); 7432 SD_INFO(SD_LOG_ATTACH_DETACH, un, 7433 "sd_unit_attach: un:0x%p max transfer " 7434 "size=0x%x\n", un, un->un_max_xfer_size); 7435 } 7436 } else { 7437 if (scsi_ifsetcap(SD_ADDRESS(un), "wide-xfer", 7438 0, 1) == 1) { 7439 SD_INFO(SD_LOG_ATTACH_DETACH, un, 7440 "sd_unit_attach: un:0x%p " 7441 "Wide Transfer disabled\n", un); 7442 } 7443 } 7444 } else { 7445 un->un_tagflags = FLAG_STAG; 7446 un->un_max_xfer_size = ddi_getprop(DDI_DEV_T_ANY, 7447 devi, 0, sd_max_xfer_size, SD_MAX_XFER_SIZE); 7448 } 7449 7450 /* 7451 * If this target supports LUN reset, try to enable it. 7452 */ 7453 if (un->un_f_lun_reset_enabled) { 7454 if (scsi_ifsetcap(SD_ADDRESS(un), "lun-reset", 1, 1) == 1) { 7455 SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_unit_attach: " 7456 "un:0x%p lun_reset capability set\n", un); 7457 } else { 7458 SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_unit_attach: " 7459 "un:0x%p lun-reset capability not set\n", un); 7460 } 7461 } 7462 7463 /* 7464 * Adjust the maximum transfer size. This is to fix 7465 * the problem of partial DMA support on SPARC. Some 7466 * HBA driver, like aac, has very small dma_attr_maxxfer 7467 * size, which requires partial DMA support on SPARC. 7468 * In the future the SPARC pci nexus driver may solve 7469 * the problem instead of this fix. 
7470 */ 7471 #if defined(__sparc) 7472 max_xfer_size = scsi_ifgetcap(SD_ADDRESS(un), "dma-max", 1); 7473 if ((max_xfer_size > 0) && (max_xfer_size < un->un_max_xfer_size)) { 7474 un->un_max_xfer_size = max_xfer_size; 7475 un->un_partial_dma_supported = 1; 7476 } 7477 #endif 7478 7479 /* 7480 * Set PKT_DMA_PARTIAL flag. 7481 */ 7482 if (un->un_partial_dma_supported == 1) { 7483 un->un_pkt_flags = PKT_DMA_PARTIAL; 7484 } else { 7485 un->un_pkt_flags = 0; 7486 } 7487 7488 /* Initialize sd_ssc_t for internal uscsi commands */ 7489 ssc = sd_ssc_init(un); 7490 scsi_fm_init(devp); 7491 7492 /* 7493 * Allocate memory for SCSI FMA stuffs. 7494 */ 7495 un->un_fm_private = 7496 kmem_zalloc(sizeof (struct sd_fm_internal), KM_SLEEP); 7497 sfip = (struct sd_fm_internal *)un->un_fm_private; 7498 sfip->fm_ssc.ssc_uscsi_cmd = &sfip->fm_ucmd; 7499 sfip->fm_ssc.ssc_uscsi_info = &sfip->fm_uinfo; 7500 sfip->fm_ssc.ssc_un = un; 7501 7502 /* 7503 * At this point in the attach, we have enough info in the 7504 * soft state to be able to issue commands to the target. 7505 * 7506 * All command paths used below MUST issue their commands as 7507 * SD_PATH_DIRECT. This is important as intermediate layers 7508 * are not all initialized yet (such as PM). 7509 */ 7510 7511 /* 7512 * Send a TEST UNIT READY command to the device. This should clear 7513 * any outstanding UNIT ATTENTION that may be present. 7514 * 7515 * Note: Don't check for success, just track if there is a reservation, 7516 * this is a throw away command to clear any unit attentions. 7517 * 7518 * Note: This MUST be the first command issued to the target during 7519 * attach to ensure power on UNIT ATTENTIONS are cleared. 7520 * Pass in flag SD_DONT_RETRY_TUR to prevent the long delays associated 7521 * with attempts at spinning up a device with no media. 
7522 */ 7523 status = sd_send_scsi_TEST_UNIT_READY(ssc, SD_DONT_RETRY_TUR); 7524 if (status != 0) { 7525 if (status == EACCES) 7526 reservation_flag = SD_TARGET_IS_RESERVED; 7527 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 7528 } 7529 7530 /* 7531 * If the device is NOT a removable media device, attempt to spin 7532 * it up (using the START_STOP_UNIT command) and read its capacity 7533 * (using the READ CAPACITY command). Note, however, that either 7534 * of these could fail and in some cases we would continue with 7535 * the attach despite the failure (see below). 7536 */ 7537 if (un->un_f_descr_format_supported) { 7538 7539 switch (sd_spin_up_unit(ssc)) { 7540 case 0: 7541 /* 7542 * Spin-up was successful; now try to read the 7543 * capacity. If successful then save the results 7544 * and mark the capacity & lbasize as valid. 7545 */ 7546 SD_TRACE(SD_LOG_ATTACH_DETACH, un, 7547 "sd_unit_attach: un:0x%p spin-up successful\n", un); 7548 7549 status = sd_send_scsi_READ_CAPACITY(ssc, &capacity, 7550 &lbasize, SD_PATH_DIRECT); 7551 7552 switch (status) { 7553 case 0: { 7554 if (capacity > DK_MAX_BLOCKS) { 7555 #ifdef _LP64 7556 if ((capacity + 1) > 7557 SD_GROUP1_MAX_ADDRESS) { 7558 /* 7559 * Enable descriptor format 7560 * sense data so that we can 7561 * get 64 bit sense data 7562 * fields. 7563 */ 7564 sd_enable_descr_sense(ssc); 7565 } 7566 #else 7567 /* 32-bit kernels can't handle this */ 7568 scsi_log(SD_DEVINFO(un), 7569 sd_label, CE_WARN, 7570 "disk has %llu blocks, which " 7571 "is too large for a 32-bit " 7572 "kernel", capacity); 7573 7574 #if defined(__i386) || defined(__amd64) 7575 /* 7576 * 1TB disk was treated as (1T - 512)B 7577 * in the past, so that it might have 7578 * valid VTOC and solaris partitions, 7579 * we have to allow it to continue to 7580 * work. 
7581 */ 7582 if (capacity -1 > DK_MAX_BLOCKS) 7583 #endif 7584 goto spinup_failed; 7585 #endif 7586 } 7587 7588 /* 7589 * Here it's not necessary to check the case: 7590 * the capacity of the device is bigger than 7591 * what the max hba cdb can support. Because 7592 * sd_send_scsi_READ_CAPACITY will retrieve 7593 * the capacity by sending USCSI command, which 7594 * is constrained by the max hba cdb. Actually, 7595 * sd_send_scsi_READ_CAPACITY will return 7596 * EINVAL when using bigger cdb than required 7597 * cdb length. Will handle this case in 7598 * "case EINVAL". 7599 */ 7600 7601 /* 7602 * The following relies on 7603 * sd_send_scsi_READ_CAPACITY never 7604 * returning 0 for capacity and/or lbasize. 7605 */ 7606 sd_update_block_info(un, lbasize, capacity); 7607 7608 SD_INFO(SD_LOG_ATTACH_DETACH, un, 7609 "sd_unit_attach: un:0x%p capacity = %ld " 7610 "blocks; lbasize= %ld.\n", un, 7611 un->un_blockcount, un->un_tgt_blocksize); 7612 7613 break; 7614 } 7615 case EINVAL: 7616 /* 7617 * In the case where the max-cdb-length property 7618 * is smaller than the required CDB length for 7619 * a SCSI device, a target driver can fail to 7620 * attach to that device. 7621 */ 7622 scsi_log(SD_DEVINFO(un), 7623 sd_label, CE_WARN, 7624 "disk capacity is too large " 7625 "for current cdb length"); 7626 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 7627 7628 goto spinup_failed; 7629 case EACCES: 7630 /* 7631 * Should never get here if the spin-up 7632 * succeeded, but code it in anyway. 7633 * From here, just continue with the attach... 7634 */ 7635 SD_INFO(SD_LOG_ATTACH_DETACH, un, 7636 "sd_unit_attach: un:0x%p " 7637 "sd_send_scsi_READ_CAPACITY " 7638 "returned reservation conflict\n", un); 7639 reservation_flag = SD_TARGET_IS_RESERVED; 7640 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 7641 break; 7642 default: 7643 /* 7644 * Likewise, should never get here if the 7645 * spin-up succeeded. Just continue with 7646 * the attach... 
7647 */ 7648 if (status == EIO) 7649 sd_ssc_assessment(ssc, 7650 SD_FMT_STATUS_CHECK); 7651 else 7652 sd_ssc_assessment(ssc, 7653 SD_FMT_IGNORE); 7654 break; 7655 } 7656 break; 7657 case EACCES: 7658 /* 7659 * Device is reserved by another host. In this case 7660 * we could not spin it up or read the capacity, but 7661 * we continue with the attach anyway. 7662 */ 7663 SD_INFO(SD_LOG_ATTACH_DETACH, un, 7664 "sd_unit_attach: un:0x%p spin-up reservation " 7665 "conflict.\n", un); 7666 reservation_flag = SD_TARGET_IS_RESERVED; 7667 break; 7668 default: 7669 /* Fail the attach if the spin-up failed. */ 7670 SD_INFO(SD_LOG_ATTACH_DETACH, un, 7671 "sd_unit_attach: un:0x%p spin-up failed.", un); 7672 goto spinup_failed; 7673 } 7674 7675 } 7676 7677 /* 7678 * Check to see if this is a MMC drive 7679 */ 7680 if (ISCD(un)) { 7681 sd_set_mmc_caps(ssc); 7682 } 7683 7684 7685 /* 7686 * Add a zero-length attribute to tell the world we support 7687 * kernel ioctls (for layered drivers) 7688 */ 7689 (void) ddi_prop_create(DDI_DEV_T_NONE, devi, DDI_PROP_CANSLEEP, 7690 DDI_KERNEL_IOCTL, NULL, 0); 7691 7692 /* 7693 * Add a boolean property to tell the world we support 7694 * the B_FAILFAST flag (for layered drivers) 7695 */ 7696 (void) ddi_prop_create(DDI_DEV_T_NONE, devi, DDI_PROP_CANSLEEP, 7697 "ddi-failfast-supported", NULL, 0); 7698 7699 /* 7700 * Initialize power management 7701 */ 7702 mutex_init(&un->un_pm_mutex, NULL, MUTEX_DRIVER, NULL); 7703 cv_init(&un->un_pm_busy_cv, NULL, CV_DRIVER, NULL); 7704 sd_setup_pm(ssc, devi); 7705 if (un->un_f_pm_is_enabled == FALSE) { 7706 /* 7707 * For performance, point to a jump table that does 7708 * not include pm. 7709 * The direct and priority chains don't change with PM. 7710 * 7711 * Note: this is currently done based on individual device 7712 * capabilities. 
When an interface for determining system 7713 * power enabled state becomes available, or when additional 7714 * layers are added to the command chain, these values will 7715 * have to be re-evaluated for correctness. 7716 */ 7717 if (un->un_f_non_devbsize_supported) { 7718 un->un_buf_chain_type = SD_CHAIN_INFO_RMMEDIA_NO_PM; 7719 } else { 7720 un->un_buf_chain_type = SD_CHAIN_INFO_DISK_NO_PM; 7721 } 7722 un->un_uscsi_chain_type = SD_CHAIN_INFO_USCSI_CMD_NO_PM; 7723 } 7724 7725 /* 7726 * This property is set to 0 by HA software to avoid retries 7727 * on a reserved disk. (The preferred property name is 7728 * "retry-on-reservation-conflict") (1189689) 7729 * 7730 * Note: The use of a global here can have unintended consequences. A 7731 * per instance variable is preferable to match the capabilities of 7732 * different underlying hba's (4402600) 7733 */ 7734 sd_retry_on_reservation_conflict = ddi_getprop(DDI_DEV_T_ANY, devi, 7735 DDI_PROP_DONTPASS, "retry-on-reservation-conflict", 7736 sd_retry_on_reservation_conflict); 7737 if (sd_retry_on_reservation_conflict != 0) { 7738 sd_retry_on_reservation_conflict = ddi_getprop(DDI_DEV_T_ANY, 7739 devi, DDI_PROP_DONTPASS, sd_resv_conflict_name, 7740 sd_retry_on_reservation_conflict); 7741 } 7742 7743 /* Set up options for QFULL handling. */ 7744 if ((rval = ddi_getprop(DDI_DEV_T_ANY, devi, 0, 7745 "qfull-retries", -1)) != -1) { 7746 (void) scsi_ifsetcap(SD_ADDRESS(un), "qfull-retries", 7747 rval, 1); 7748 } 7749 if ((rval = ddi_getprop(DDI_DEV_T_ANY, devi, 0, 7750 "qfull-retry-interval", -1)) != -1) { 7751 (void) scsi_ifsetcap(SD_ADDRESS(un), "qfull-retry-interval", 7752 rval, 1); 7753 } 7754 7755 /* 7756 * This just prints a message that announces the existence of the 7757 * device. The message is always printed in the system logfile, but 7758 * only appears on the console if the system is booted with the 7759 * -v (verbose) argument. 
7760 */ 7761 ddi_report_dev(devi); 7762 7763 un->un_mediastate = DKIO_NONE; 7764 7765 cmlb_alloc_handle(&un->un_cmlbhandle); 7766 7767 #if defined(__i386) || defined(__amd64) 7768 /* 7769 * On x86, compensate for off-by-1 legacy error 7770 */ 7771 if (!un->un_f_has_removable_media && !un->un_f_is_hotpluggable && 7772 (lbasize == un->un_sys_blocksize)) 7773 offbyone = CMLB_OFF_BY_ONE; 7774 #endif 7775 7776 if (cmlb_attach(devi, &sd_tgops, (int)devp->sd_inq->inq_dtype, 7777 un->un_f_has_removable_media, un->un_f_is_hotpluggable, 7778 un->un_node_type, offbyone, un->un_cmlbhandle, 7779 (void *)SD_PATH_DIRECT) != 0) { 7780 goto cmlb_attach_failed; 7781 } 7782 7783 7784 /* 7785 * Read and validate the device's geometry (ie, disk label) 7786 * A new unformatted drive will not have a valid geometry, but 7787 * the driver needs to successfully attach to this device so 7788 * the drive can be formatted via ioctls. 7789 */ 7790 geom_label_valid = (cmlb_validate(un->un_cmlbhandle, 0, 7791 (void *)SD_PATH_DIRECT) == 0) ? 1: 0; 7792 7793 mutex_enter(SD_MUTEX(un)); 7794 7795 /* 7796 * Read and initialize the devid for the unit. 7797 */ 7798 if (un->un_f_devid_supported) { 7799 sd_register_devid(ssc, devi, reservation_flag); 7800 } 7801 mutex_exit(SD_MUTEX(un)); 7802 7803 #if (defined(__fibre)) 7804 /* 7805 * Register callbacks for fibre only. You can't do this solely 7806 * on the basis of the devid_type because this is hba specific. 7807 * We need to query our hba capabilities to find out whether to 7808 * register or not. 7809 */ 7810 if (un->un_f_is_fibre) { 7811 if (strcmp(un->un_node_type, DDI_NT_BLOCK_CHAN)) { 7812 sd_init_event_callbacks(un); 7813 SD_TRACE(SD_LOG_ATTACH_DETACH, un, 7814 "sd_unit_attach: un:0x%p event callbacks inserted", 7815 un); 7816 } 7817 } 7818 #endif 7819 7820 if (un->un_f_opt_disable_cache == TRUE) { 7821 /* 7822 * Disable both read cache and write cache. This is 7823 * the historic behavior of the keywords in the config file. 
7824 */ 7825 if (sd_cache_control(ssc, SD_CACHE_DISABLE, SD_CACHE_DISABLE) != 7826 0) { 7827 SD_ERROR(SD_LOG_ATTACH_DETACH, un, 7828 "sd_unit_attach: un:0x%p Could not disable " 7829 "caching", un); 7830 goto devid_failed; 7831 } 7832 } 7833 7834 /* 7835 * Check the value of the WCE bit now and 7836 * set un_f_write_cache_enabled accordingly. 7837 */ 7838 (void) sd_get_write_cache_enabled(ssc, &wc_enabled); 7839 mutex_enter(SD_MUTEX(un)); 7840 un->un_f_write_cache_enabled = (wc_enabled != 0); 7841 mutex_exit(SD_MUTEX(un)); 7842 7843 /* 7844 * Check the value of the NV_SUP bit and set 7845 * un_f_suppress_cache_flush accordingly. 7846 */ 7847 sd_get_nv_sup(ssc); 7848 7849 /* 7850 * Find out what type of reservation this disk supports. 7851 */ 7852 status = sd_send_scsi_PERSISTENT_RESERVE_IN(ssc, SD_READ_KEYS, 0, NULL); 7853 7854 switch (status) { 7855 case 0: 7856 /* 7857 * SCSI-3 reservations are supported. 7858 */ 7859 un->un_reservation_type = SD_SCSI3_RESERVATION; 7860 SD_INFO(SD_LOG_ATTACH_DETACH, un, 7861 "sd_unit_attach: un:0x%p SCSI-3 reservations\n", un); 7862 break; 7863 case ENOTSUP: 7864 /* 7865 * The PERSISTENT RESERVE IN command would not be recognized by 7866 * a SCSI-2 device, so assume the reservation type is SCSI-2. 7867 */ 7868 SD_INFO(SD_LOG_ATTACH_DETACH, un, 7869 "sd_unit_attach: un:0x%p SCSI-2 reservations\n", un); 7870 un->un_reservation_type = SD_SCSI2_RESERVATION; 7871 7872 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 7873 break; 7874 default: 7875 /* 7876 * default to SCSI-3 reservations 7877 */ 7878 SD_INFO(SD_LOG_ATTACH_DETACH, un, 7879 "sd_unit_attach: un:0x%p default SCSI3 reservations\n", un); 7880 un->un_reservation_type = SD_SCSI3_RESERVATION; 7881 7882 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 7883 break; 7884 } 7885 7886 /* 7887 * Set the pstat and error stat values here, so data obtained during the 7888 * previous attach-time routines is available. 
7889 * 7890 * Note: This is a critical sequence that needs to be maintained: 7891 * 1) Instantiate the kstats before any routines using the iopath 7892 * (i.e. sd_send_scsi_cmd). 7893 * 2) Initialize the error stats (sd_set_errstats) and partition 7894 * stats (sd_set_pstats)here, following 7895 * cmlb_validate_geometry(), sd_register_devid(), and 7896 * sd_cache_control(). 7897 */ 7898 7899 if (un->un_f_pkstats_enabled && geom_label_valid) { 7900 sd_set_pstats(un); 7901 SD_TRACE(SD_LOG_IO_PARTITION, un, 7902 "sd_unit_attach: un:0x%p pstats created and set\n", un); 7903 } 7904 7905 sd_set_errstats(un); 7906 SD_TRACE(SD_LOG_ATTACH_DETACH, un, 7907 "sd_unit_attach: un:0x%p errstats set\n", un); 7908 7909 7910 /* 7911 * After successfully attaching an instance, we record the information 7912 * of how many luns have been attached on the relative target and 7913 * controller for parallel SCSI. This information is used when sd tries 7914 * to set the tagged queuing capability in HBA. 7915 */ 7916 if (SD_IS_PARALLEL_SCSI(un) && (tgt >= 0) && (tgt < NTARGETS_WIDE)) { 7917 sd_scsi_update_lun_on_target(pdip, tgt, SD_SCSI_LUN_ATTACH); 7918 } 7919 7920 SD_TRACE(SD_LOG_ATTACH_DETACH, un, 7921 "sd_unit_attach: un:0x%p exit success\n", un); 7922 7923 /* Uninitialize sd_ssc_t pointer */ 7924 sd_ssc_fini(ssc); 7925 7926 return (DDI_SUCCESS); 7927 7928 /* 7929 * An error occurred during the attach; clean up & return failure. 7930 */ 7931 7932 devid_failed: 7933 7934 setup_pm_failed: 7935 ddi_remove_minor_node(devi, NULL); 7936 7937 cmlb_attach_failed: 7938 /* 7939 * Cleanup from the scsi_ifsetcap() calls (437868) 7940 */ 7941 (void) scsi_ifsetcap(SD_ADDRESS(un), "lun-reset", 0, 1); 7942 (void) scsi_ifsetcap(SD_ADDRESS(un), "wide-xfer", 0, 1); 7943 7944 /* 7945 * Refer to the comments of setting tagged-qing in the beginning of 7946 * sd_unit_attach. We can only disable tagged queuing when there is 7947 * no lun attached on the target. 
7948 */ 7949 if (sd_scsi_get_target_lun_count(pdip, tgt) < 1) { 7950 (void) scsi_ifsetcap(SD_ADDRESS(un), "tagged-qing", 0, 1); 7951 } 7952 7953 if (un->un_f_is_fibre == FALSE) { 7954 (void) scsi_ifsetcap(SD_ADDRESS(un), "auto-rqsense", 0, 1); 7955 } 7956 7957 spinup_failed: 7958 7959 /* Uninitialize sd_ssc_t pointer */ 7960 sd_ssc_fini(ssc); 7961 7962 mutex_enter(SD_MUTEX(un)); 7963 7964 /* Deallocate SCSI FMA memory spaces */ 7965 kmem_free(un->un_fm_private, sizeof (struct sd_fm_internal)); 7966 7967 /* Cancel callback for SD_PATH_DIRECT_PRIORITY cmd. restart */ 7968 if (un->un_direct_priority_timeid != NULL) { 7969 timeout_id_t temp_id = un->un_direct_priority_timeid; 7970 un->un_direct_priority_timeid = NULL; 7971 mutex_exit(SD_MUTEX(un)); 7972 (void) untimeout(temp_id); 7973 mutex_enter(SD_MUTEX(un)); 7974 } 7975 7976 /* Cancel any pending start/stop timeouts */ 7977 if (un->un_startstop_timeid != NULL) { 7978 timeout_id_t temp_id = un->un_startstop_timeid; 7979 un->un_startstop_timeid = NULL; 7980 mutex_exit(SD_MUTEX(un)); 7981 (void) untimeout(temp_id); 7982 mutex_enter(SD_MUTEX(un)); 7983 } 7984 7985 /* Cancel any pending reset-throttle timeouts */ 7986 if (un->un_reset_throttle_timeid != NULL) { 7987 timeout_id_t temp_id = un->un_reset_throttle_timeid; 7988 un->un_reset_throttle_timeid = NULL; 7989 mutex_exit(SD_MUTEX(un)); 7990 (void) untimeout(temp_id); 7991 mutex_enter(SD_MUTEX(un)); 7992 } 7993 7994 /* Cancel any pending retry timeouts */ 7995 if (un->un_retry_timeid != NULL) { 7996 timeout_id_t temp_id = un->un_retry_timeid; 7997 un->un_retry_timeid = NULL; 7998 mutex_exit(SD_MUTEX(un)); 7999 (void) untimeout(temp_id); 8000 mutex_enter(SD_MUTEX(un)); 8001 } 8002 8003 /* Cancel any pending delayed cv broadcast timeouts */ 8004 if (un->un_dcvb_timeid != NULL) { 8005 timeout_id_t temp_id = un->un_dcvb_timeid; 8006 un->un_dcvb_timeid = NULL; 8007 mutex_exit(SD_MUTEX(un)); 8008 (void) untimeout(temp_id); 8009 mutex_enter(SD_MUTEX(un)); 8010 } 8011 8012 
mutex_exit(SD_MUTEX(un)); 8013 8014 /* There should not be any in-progress I/O so ASSERT this check */ 8015 ASSERT(un->un_ncmds_in_transport == 0); 8016 ASSERT(un->un_ncmds_in_driver == 0); 8017 8018 /* Do not free the softstate if the callback routine is active */ 8019 sd_sync_with_callback(un); 8020 8021 /* 8022 * Partition stats apparently are not used with removables. These would 8023 * not have been created during attach, so no need to clean them up... 8024 */ 8025 if (un->un_errstats != NULL) { 8026 kstat_delete(un->un_errstats); 8027 un->un_errstats = NULL; 8028 } 8029 8030 create_errstats_failed: 8031 8032 if (un->un_stats != NULL) { 8033 kstat_delete(un->un_stats); 8034 un->un_stats = NULL; 8035 } 8036 8037 ddi_xbuf_attr_unregister_devinfo(un->un_xbuf_attr, devi); 8038 ddi_xbuf_attr_destroy(un->un_xbuf_attr); 8039 8040 ddi_prop_remove_all(devi); 8041 sema_destroy(&un->un_semoclose); 8042 cv_destroy(&un->un_state_cv); 8043 8044 getrbuf_failed: 8045 8046 sd_free_rqs(un); 8047 8048 alloc_rqs_failed: 8049 8050 devp->sd_private = NULL; 8051 bzero(un, sizeof (struct sd_lun)); /* Clear any stale data! */ 8052 8053 get_softstate_failed: 8054 /* 8055 * Note: the man pages are unclear as to whether or not doing a 8056 * ddi_soft_state_free(sd_state, instance) is the right way to 8057 * clean up after the ddi_soft_state_zalloc() if the subsequent 8058 * ddi_get_soft_state() fails. The implication seems to be 8059 * that the get_soft_state cannot fail if the zalloc succeeds. 8060 */ 8061 ddi_soft_state_free(sd_state, instance); 8062 8063 probe_failed: 8064 scsi_unprobe(devp); 8065 8066 return (DDI_FAILURE); 8067 } 8068 8069 8070 /* 8071 * Function: sd_unit_detach 8072 * 8073 * Description: Performs DDI_DETACH processing for sddetach(). 
 *
 * Return Code:	DDI_SUCCESS
 *		DDI_FAILURE
 *
 * Context:	Kernel thread context
 */

static int
sd_unit_detach(dev_info_t *devi)
{
	struct scsi_device	*devp;
	struct sd_lun		*un;
	int			i;
	int			tgt;
	dev_t			dev;
	dev_info_t		*pdip = ddi_get_parent(devi);
	int			instance = ddi_get_instance(devi);

	mutex_enter(&sd_detach_mutex);

	/*
	 * Fail the detach for any of the following:
	 *  - Unable to get the sd_lun struct for the instance
	 *  - A layered driver has an outstanding open on the instance
	 *  - Another thread is already detaching this instance
	 *  - Another thread is currently performing an open
	 */
	devp = ddi_get_driver_private(devi);
	if ((devp == NULL) ||
	    ((un = (struct sd_lun *)devp->sd_private) == NULL) ||
	    (un->un_ncmds_in_driver != 0) || (un->un_layer_count != 0) ||
	    (un->un_detach_count != 0) || (un->un_opens_in_progress != 0)) {
		mutex_exit(&sd_detach_mutex);
		return (DDI_FAILURE);
	}

	SD_TRACE(SD_LOG_ATTACH_DETACH, un, "sd_unit_detach: entry 0x%p\n", un);

	/*
	 * Mark this instance as currently in a detach, to inhibit any
	 * opens from a layered driver.
	 */
	un->un_detach_count++;
	mutex_exit(&sd_detach_mutex);

	/* Target number, used for per-target lun accounting at the bottom */
	tgt = ddi_prop_get_int(DDI_DEV_T_ANY, devi, DDI_PROP_DONTPASS,
	    SCSI_ADDR_PROP_TARGET, -1);

	dev = sd_make_device(SD_DEVINFO(un));

#ifndef lint
	_NOTE(COMPETING_THREADS_NOW);
#endif

	mutex_enter(SD_MUTEX(un));

	/*
	 * Fail the detach if there are any outstanding layered
	 * opens on this device.
	 */
	for (i = 0; i < NDKMAP; i++) {
		if (un->un_ocmap.lyropen[i] != 0) {
			goto err_notclosed;
		}
	}

	/*
	 * Verify there are NO outstanding commands issued to this device.
	 * ie, un_ncmds_in_transport == 0.
	 * It's possible to have outstanding commands through the physio
	 * code path, even though everything's closed.
	 */
	if ((un->un_ncmds_in_transport != 0) || (un->un_retry_timeid != NULL) ||
	    (un->un_direct_priority_timeid != NULL) ||
	    (un->un_state == SD_STATE_RWAIT)) {
		mutex_exit(SD_MUTEX(un));
		SD_ERROR(SD_LOG_ATTACH_DETACH, un,
		    "sd_dr_detach: Detach failure due to outstanding cmds\n");
		goto err_stillbusy;
	}

	/*
	 * If we have the device reserved, release the reservation.
	 */
	if ((un->un_resvd_status & SD_RESERVE) &&
	    !(un->un_resvd_status & SD_LOST_RESERVE)) {
		mutex_exit(SD_MUTEX(un));
		/*
		 * Note: sd_reserve_release sends a command to the device
		 * via the sd_ioctlcmd() path, and can sleep.
		 */
		if (sd_reserve_release(dev, SD_RELEASE) != 0) {
			SD_ERROR(SD_LOG_ATTACH_DETACH, un,
			    "sd_dr_detach: Cannot release reservation \n");
		}
	} else {
		mutex_exit(SD_MUTEX(un));
	}

	/*
	 * Untimeout any reserve recover, throttle reset, restart unit
	 * and delayed broadcast timeout threads. Protect the timeout pointer
	 * from getting nulled by their callback functions.
	 * Note the pattern used throughout: grab the id under SD_MUTEX,
	 * NULL the field, then drop the mutex before calling untimeout()
	 * so the callback (which may take SD_MUTEX) cannot deadlock with us.
	 */
	mutex_enter(SD_MUTEX(un));
	if (un->un_resvd_timeid != NULL) {
		timeout_id_t temp_id = un->un_resvd_timeid;
		un->un_resvd_timeid = NULL;
		mutex_exit(SD_MUTEX(un));
		(void) untimeout(temp_id);
		mutex_enter(SD_MUTEX(un));
	}

	if (un->un_reset_throttle_timeid != NULL) {
		timeout_id_t temp_id = un->un_reset_throttle_timeid;
		un->un_reset_throttle_timeid = NULL;
		mutex_exit(SD_MUTEX(un));
		(void) untimeout(temp_id);
		mutex_enter(SD_MUTEX(un));
	}

	if (un->un_startstop_timeid != NULL) {
		timeout_id_t temp_id = un->un_startstop_timeid;
		un->un_startstop_timeid = NULL;
		mutex_exit(SD_MUTEX(un));
		(void) untimeout(temp_id);
		mutex_enter(SD_MUTEX(un));
	}

	if (un->un_dcvb_timeid != NULL) {
		timeout_id_t temp_id = un->un_dcvb_timeid;
		un->un_dcvb_timeid = NULL;
		mutex_exit(SD_MUTEX(un));
		(void) untimeout(temp_id);
	} else {
		mutex_exit(SD_MUTEX(un));
	}

	/* Remove any pending reservation reclaim requests for this device */
	sd_rmv_resv_reclaim_req(dev);

	mutex_enter(SD_MUTEX(un));

	/* Cancel any pending callbacks for SD_PATH_DIRECT_PRIORITY cmd. */
	if (un->un_direct_priority_timeid != NULL) {
		timeout_id_t temp_id = un->un_direct_priority_timeid;
		un->un_direct_priority_timeid = NULL;
		mutex_exit(SD_MUTEX(un));
		(void) untimeout(temp_id);
		mutex_enter(SD_MUTEX(un));
	}

	/* Cancel any active multi-host disk watch thread requests */
	if (un->un_mhd_token != NULL) {
		mutex_exit(SD_MUTEX(un));
		_NOTE(DATA_READABLE_WITHOUT_LOCK(sd_lun::un_mhd_token));
		if (scsi_watch_request_terminate(un->un_mhd_token,
		    SCSI_WATCH_TERMINATE_NOWAIT)) {
			SD_ERROR(SD_LOG_ATTACH_DETACH, un,
			    "sd_dr_detach: Cannot cancel mhd watch request\n");
			/*
			 * Note: We are returning here after having removed
			 * some driver timeouts above. This is consistent with
			 * the legacy implementation but perhaps the watch
			 * terminate call should be made with the wait flag set.
			 */
			goto err_stillbusy;
		}
		mutex_enter(SD_MUTEX(un));
		un->un_mhd_token = NULL;
	}

	if (un->un_swr_token != NULL) {
		mutex_exit(SD_MUTEX(un));
		_NOTE(DATA_READABLE_WITHOUT_LOCK(sd_lun::un_swr_token));
		if (scsi_watch_request_terminate(un->un_swr_token,
		    SCSI_WATCH_TERMINATE_NOWAIT)) {
			SD_ERROR(SD_LOG_ATTACH_DETACH, un,
			    "sd_dr_detach: Cannot cancel swr watch request\n");
			/*
			 * Note: We are returning here after having removed
			 * some driver timeouts above. This is consistent with
			 * the legacy implementation but perhaps the watch
			 * terminate call should be made with the wait flag set.
			 */
			goto err_stillbusy;
		}
		mutex_enter(SD_MUTEX(un));
		un->un_swr_token = NULL;
	}

	mutex_exit(SD_MUTEX(un));

	/*
	 * Clear any scsi_reset_notifies. We clear the reset notifies
	 * if we have not registered one.
	 * Note: The sd_mhd_reset_notify_cb() fn tries to acquire SD_MUTEX!
	 */
	(void) scsi_reset_notify(SD_ADDRESS(un), SCSI_RESET_CANCEL,
	    sd_mhd_reset_notify_cb, (caddr_t)un);

	/*
	 * protect the timeout pointers from getting nulled by
	 * their callback functions during the cancellation process.
	 * In such a scenario untimeout can be invoked with a null value.
	 */
	_NOTE(NO_COMPETING_THREADS_NOW);

	mutex_enter(&un->un_pm_mutex);
	if (un->un_pm_idle_timeid != NULL) {
		timeout_id_t temp_id = un->un_pm_idle_timeid;
		un->un_pm_idle_timeid = NULL;
		mutex_exit(&un->un_pm_mutex);

		/*
		 * Timeout is active; cancel it.
		 * Note that it'll never be active on a device
		 * that does not support PM therefore we don't
		 * have to check before calling pm_idle_component.
		 */
		(void) untimeout(temp_id);
		(void) pm_idle_component(SD_DEVINFO(un), 0);
		mutex_enter(&un->un_pm_mutex);
	}

	/*
	 * Check whether there is already a timeout scheduled for power
	 * management. If yes then don't lower the power here, that's
	 * the timeout handler's job.
	 */
	if (un->un_pm_timeid != NULL) {
		timeout_id_t temp_id = un->un_pm_timeid;
		un->un_pm_timeid = NULL;
		mutex_exit(&un->un_pm_mutex);
		/*
		 * Timeout is active; cancel it.
		 * Note that it'll never be active on a device
		 * that does not support PM therefore we don't
		 * have to check before calling pm_idle_component.
		 */
		(void) untimeout(temp_id);
		(void) pm_idle_component(SD_DEVINFO(un), 0);

	} else {
		mutex_exit(&un->un_pm_mutex);
		if ((un->un_f_pm_is_enabled == TRUE) &&
		    (pm_lower_power(SD_DEVINFO(un), 0, SD_SPINDLE_OFF) !=
		    DDI_SUCCESS)) {
			SD_ERROR(SD_LOG_ATTACH_DETACH, un,
			    "sd_dr_detach: Lower power request failed, ignoring.\n");
			/*
			 * Fix for bug: 4297749, item # 13
			 * The above test now includes a check to see if PM is
			 * supported by this device before calling
			 * pm_lower_power().
			 * Note, the following is not dead code. The call to
			 * pm_lower_power above will generate a call back into
			 * our sdpower routine which might result in a timeout
			 * handler getting activated. Therefore the following
			 * code is valid and necessary.
			 */
			mutex_enter(&un->un_pm_mutex);
			if (un->un_pm_timeid != NULL) {
				timeout_id_t temp_id = un->un_pm_timeid;
				un->un_pm_timeid = NULL;
				mutex_exit(&un->un_pm_mutex);
				(void) untimeout(temp_id);
				(void) pm_idle_component(SD_DEVINFO(un), 0);
			} else {
				mutex_exit(&un->un_pm_mutex);
			}
		}
	}

	/*
	 * Cleanup from the scsi_ifsetcap() calls (437868)
	 * Relocated here from above to be after the call to
	 * pm_lower_power, which was getting errors.
	 */
	(void) scsi_ifsetcap(SD_ADDRESS(un), "lun-reset", 0, 1);
	(void) scsi_ifsetcap(SD_ADDRESS(un), "wide-xfer", 0, 1);

	/*
	 * Currently, tagged queuing is supported per target based by HBA.
	 * Setting this per lun instance actually sets the capability of this
	 * target in HBA, which affects those luns already attached on the
	 * same target. So during detach, we can only disable this capability
	 * when this is the only lun left on this target. By doing
	 * this, we assume a target has the same tagged queuing capability
	 * for every lun. The condition can be removed when HBA is changed to
	 * support per lun based tagged queuing capability.
	 */
	if (sd_scsi_get_target_lun_count(pdip, tgt) <= 1) {
		(void) scsi_ifsetcap(SD_ADDRESS(un), "tagged-qing", 0, 1);
	}

	if (un->un_f_is_fibre == FALSE) {
		(void) scsi_ifsetcap(SD_ADDRESS(un), "auto-rqsense", 0, 1);
	}

	/*
	 * Remove any event callbacks, fibre only
	 */
	if (un->un_f_is_fibre == TRUE) {
		if ((un->un_insert_event != NULL) &&
		    (ddi_remove_event_handler(un->un_insert_cb_id) !=
		    DDI_SUCCESS)) {
			/*
			 * Note: We are returning here after having done
			 * substantial cleanup above. This is consistent
			 * with the legacy implementation but this may not
			 * be the right thing to do.
			 */
			SD_ERROR(SD_LOG_ATTACH_DETACH, un,
			    "sd_dr_detach: Cannot cancel insert event\n");
			goto err_remove_event;
		}
		un->un_insert_event = NULL;

		if ((un->un_remove_event != NULL) &&
		    (ddi_remove_event_handler(un->un_remove_cb_id) !=
		    DDI_SUCCESS)) {
			/*
			 * Note: We are returning here after having done
			 * substantial cleanup above. This is consistent
			 * with the legacy implementation but this may not
			 * be the right thing to do.
			 */
			SD_ERROR(SD_LOG_ATTACH_DETACH, un,
			    "sd_dr_detach: Cannot cancel remove event\n");
			goto err_remove_event;
		}
		un->un_remove_event = NULL;
	}

	/* Do not free the softstate if the callback routine is active */
	sd_sync_with_callback(un);

	cmlb_detach(un->un_cmlbhandle, (void *)SD_PATH_DIRECT);
	cmlb_free_handle(&un->un_cmlbhandle);

	/*
	 * Hold the detach mutex here, to make sure that no other threads ever
	 * can access a (partially) freed soft state structure.
	 */
	mutex_enter(&sd_detach_mutex);

	/*
	 * Clean up the soft state struct.
	 * Cleanup is done in reverse order of allocs/inits.
	 * At this point there should be no competing threads anymore.
	 */

	scsi_fm_fini(devp);

	/*
	 * Deallocate memory for SCSI FMA.
	 */
	kmem_free(un->un_fm_private, sizeof (struct sd_fm_internal));

	/* Unregister and free device id. */
	ddi_devid_unregister(devi);
	if (un->un_devid) {
		ddi_devid_free(un->un_devid);
		un->un_devid = NULL;
	}

	/*
	 * Destroy wmap cache if it exists.
	 */
	if (un->un_wm_cache != NULL) {
		kmem_cache_destroy(un->un_wm_cache);
		un->un_wm_cache = NULL;
	}

	/*
	 * kstat cleanup is done in detach for all device types (4363169).
	 * We do not want to fail detach if the device kstats are not deleted
	 * since there is a confusion about the devo_refcnt for the device.
	 * We just delete the kstats and let detach complete successfully.
	 */
	if (un->un_stats != NULL) {
		kstat_delete(un->un_stats);
		un->un_stats = NULL;
	}
	if (un->un_errstats != NULL) {
		kstat_delete(un->un_errstats);
		un->un_errstats = NULL;
	}

	/* Remove partition stats */
	if (un->un_f_pkstats_enabled) {
		for (i = 0; i < NSDMAP; i++) {
			if (un->un_pstats[i] != NULL) {
				kstat_delete(un->un_pstats[i]);
				un->un_pstats[i] = NULL;
			}
		}
	}

	/* Remove xbuf registration */
	ddi_xbuf_attr_unregister_devinfo(un->un_xbuf_attr, devi);
	ddi_xbuf_attr_destroy(un->un_xbuf_attr);

	/* Remove driver properties */
	ddi_prop_remove_all(devi);

	mutex_destroy(&un->un_pm_mutex);
	cv_destroy(&un->un_pm_busy_cv);

	cv_destroy(&un->un_wcc_cv);

	/* Open/close semaphore */
	sema_destroy(&un->un_semoclose);

	/* Removable media condvar. */
	cv_destroy(&un->un_state_cv);

	/* Suspend/resume condvar. */
	cv_destroy(&un->un_suspend_cv);
	cv_destroy(&un->un_disk_busy_cv);

	sd_free_rqs(un);

	/* Free up soft state */
	devp->sd_private = NULL;

	bzero(un, sizeof (struct sd_lun));
	ddi_soft_state_free(sd_state, instance);

	mutex_exit(&sd_detach_mutex);

	/* This frees up the INQUIRY data associated with the device. */
	scsi_unprobe(devp);

	/*
	 * After successfully detaching an instance, we update the information
	 * of how many luns have been attached in the relative target and
	 * controller for parallel SCSI. This information is used when sd tries
	 * to set the tagged queuing capability in HBA.
	 * Since un has been released, we can't use SD_IS_PARALLEL_SCSI(un) to
	 * check if the device is parallel SCSI. However, we don't need to
	 * check here because we've already checked during attach. No device
	 * that is not parallel SCSI is in the chain.
	 */
	if ((tgt >= 0) && (tgt < NTARGETS_WIDE)) {
		sd_scsi_update_lun_on_target(pdip, tgt, SD_SCSI_LUN_DETACH);
	}

	return (DDI_SUCCESS);

	/* Error exits: undo the in-detach marking so a later retry can work */
err_notclosed:
	mutex_exit(SD_MUTEX(un));

err_stillbusy:
	_NOTE(NO_COMPETING_THREADS_NOW);

err_remove_event:
	mutex_enter(&sd_detach_mutex);
	un->un_detach_count--;
	mutex_exit(&sd_detach_mutex);

	SD_TRACE(SD_LOG_ATTACH_DETACH, un, "sd_unit_detach: exit failure\n");
	return (DDI_FAILURE);
}


/*
 * Function:	sd_create_errstats
 *
 * Description:	This routine instantiates the device error stats.
 *
 *		Note: During attach the stats are instantiated first so they are
 *		available for attach-time routines that utilize the driver
 *		iopath to send commands to the device. The stats are initialized
 *		separately so data obtained during some attach-time routines is
 *		available.
 *		(4362483)
 *
 * Arguments:	un - driver soft state (unit) structure
 *		instance - driver instance
 *
 * Context:	Kernel thread context
 */

static void
sd_create_errstats(struct sd_lun *un, int instance)
{
	struct	sd_errstats	*stp;
	char	kstatmodule_err[KSTAT_STRLEN];
	char	kstatname[KSTAT_STRLEN];
	int	ndata = (sizeof (struct sd_errstats) / sizeof (kstat_named_t));

	ASSERT(un != NULL);

	/* Already created (e.g. on a reattach); nothing to do. */
	if (un->un_errstats != NULL) {
		return;
	}

	(void) snprintf(kstatmodule_err, sizeof (kstatmodule_err),
	    "%serr", sd_label);
	(void) snprintf(kstatname, sizeof (kstatname),
	    "%s%d,err", sd_label, instance);

	un->un_errstats = kstat_create(kstatmodule_err, instance, kstatname,
	    "device_error", KSTAT_TYPE_NAMED, ndata, KSTAT_FLAG_PERSISTENT);

	if (un->un_errstats == NULL) {
		SD_ERROR(SD_LOG_ATTACH_DETACH, un,
		    "sd_create_errstats: Failed kstat_create\n");
		return;
	}

	stp = (struct sd_errstats *)un->un_errstats->ks_data;
	kstat_named_init(&stp->sd_softerrs, "Soft Errors",
	    KSTAT_DATA_UINT32);
	kstat_named_init(&stp->sd_harderrs, "Hard Errors",
	    KSTAT_DATA_UINT32);
	kstat_named_init(&stp->sd_transerrs, "Transport Errors",
	    KSTAT_DATA_UINT32);
	kstat_named_init(&stp->sd_vid, "Vendor",
	    KSTAT_DATA_CHAR);
	kstat_named_init(&stp->sd_pid, "Product",
	    KSTAT_DATA_CHAR);
	kstat_named_init(&stp->sd_revision, "Revision",
	    KSTAT_DATA_CHAR);
	kstat_named_init(&stp->sd_serial, "Serial No",
	    KSTAT_DATA_CHAR);
	kstat_named_init(&stp->sd_capacity, "Size",
	    KSTAT_DATA_ULONGLONG);
	kstat_named_init(&stp->sd_rq_media_err, "Media Error",
	    KSTAT_DATA_UINT32);
	kstat_named_init(&stp->sd_rq_ntrdy_err, "Device Not Ready",
	    KSTAT_DATA_UINT32);
	kstat_named_init(&stp->sd_rq_nodev_err, "No Device",
	    KSTAT_DATA_UINT32);
	kstat_named_init(&stp->sd_rq_recov_err, "Recoverable",
	    KSTAT_DATA_UINT32);
	kstat_named_init(&stp->sd_rq_illrq_err, "Illegal Request",
	    KSTAT_DATA_UINT32);
	kstat_named_init(&stp->sd_rq_pfa_err, "Predictive Failure Analysis",
	    KSTAT_DATA_UINT32);

	un->un_errstats->ks_private = un;
	un->un_errstats->ks_update = nulldev;

	kstat_install(un->un_errstats);
}


/*
 * Function:	sd_set_errstats
 *
 * Description:	This routine sets the value of the vendor id, product id,
 *		revision, serial number, and capacity device error stats.
 *
 *		Note: During attach the stats are instantiated first so they are
 *		available for attach-time routines that utilize the driver
 *		iopath to send commands to the device. The stats are initialized
 *		separately so data obtained during some attach-time routines is
 *		available. (4362483)
 *
 * Arguments:	un - driver soft state (unit) structure
 *
 * Context:	Kernel thread context
 */

static void
sd_set_errstats(struct sd_lun *un)
{
	struct	sd_errstats	*stp;

	ASSERT(un != NULL);
	ASSERT(un->un_errstats != NULL);
	stp = (struct sd_errstats *)un->un_errstats->ks_data;
	ASSERT(stp != NULL);
	/*
	 * NOTE(review): kstat char values are fixed-width fields; these
	 * strncpy calls intentionally copy the unterminated INQUIRY bytes.
	 */
	(void) strncpy(stp->sd_vid.value.c, un->un_sd->sd_inq->inq_vid, 8);
	(void) strncpy(stp->sd_pid.value.c, un->un_sd->sd_inq->inq_pid, 16);
	(void) strncpy(stp->sd_revision.value.c,
	    un->un_sd->sd_inq->inq_revision, 4);

	/*
	 * All the errstats are persistent across detach/attach,
	 * so reset all the errstats here in case of the hot
	 * replacement of disk drives, except for not changed
	 * Sun qualified drives.
	 */
	if ((bcmp(&SD_INQUIRY(un)->inq_pid[9], "SUN", 3) != 0) ||
	    (bcmp(&SD_INQUIRY(un)->inq_serial, stp->sd_serial.value.c,
	    sizeof (SD_INQUIRY(un)->inq_serial)) != 0)) {
		stp->sd_softerrs.value.ui32 = 0;
		stp->sd_harderrs.value.ui32 = 0;
		stp->sd_transerrs.value.ui32 = 0;
		stp->sd_rq_media_err.value.ui32 = 0;
		stp->sd_rq_ntrdy_err.value.ui32 = 0;
		stp->sd_rq_nodev_err.value.ui32 = 0;
		stp->sd_rq_recov_err.value.ui32 = 0;
		stp->sd_rq_illrq_err.value.ui32 = 0;
		stp->sd_rq_pfa_err.value.ui32 = 0;
	}

	/*
	 * Set the "Serial No" kstat for Sun qualified drives (indicated by
	 * "SUN" in bytes 25-27 of the inquiry data (bytes 9-11 of the pid))
	 * (4376302)
	 */
	if (bcmp(&SD_INQUIRY(un)->inq_pid[9], "SUN", 3) == 0) {
		bcopy(&SD_INQUIRY(un)->inq_serial, stp->sd_serial.value.c,
		    sizeof (SD_INQUIRY(un)->inq_serial));
	}

	if (un->un_f_blockcount_is_valid != TRUE) {
		/*
		 * Set capacity error stat to 0 for no media. This ensures
		 * a valid capacity is displayed in response to 'iostat -E'
		 * when no media is present in the device.
		 */
		stp->sd_capacity.value.ui64 = 0;
	} else {
		/*
		 * Multiply un_blockcount by un->un_sys_blocksize to get
		 * capacity.
		 *
		 * Note: for non-512 blocksize devices "un_blockcount" has been
		 * "scaled" in sd_send_scsi_READ_CAPACITY by multiplying by
		 * (un_tgt_blocksize / un->un_sys_blocksize).
		 */
		stp->sd_capacity.value.ui64 = (uint64_t)
		    ((uint64_t)un->un_blockcount * un->un_sys_blocksize);
	}
}


/*
 * Function:	sd_set_pstats
 *
 * Description:	This routine instantiates and initializes the partition
 *		stats for each partition with more than zero blocks.
 *		(4363169)
 *
 * Arguments:	un - driver soft state (unit) structure
 *
 * Context:	Kernel thread context
 */

static void
sd_set_pstats(struct sd_lun *un)
{
	char	kstatname[KSTAT_STRLEN];
	int	instance;
	int	i;
	diskaddr_t	nblks = 0;
	char	*partname = NULL;

	ASSERT(un != NULL);

	instance = ddi_get_instance(SD_DEVINFO(un));

	/* Note:x86: is this a VTOC8/VTOC16 difference? */
	for (i = 0; i < NSDMAP; i++) {

		/* Skip partitions whose geometry cannot be obtained */
		if (cmlb_partinfo(un->un_cmlbhandle, i,
		    &nblks, NULL, &partname, NULL, (void *)SD_PATH_DIRECT) != 0)
			continue;
		mutex_enter(SD_MUTEX(un));

		/* Only create a kstat once, and only for non-empty slices */
		if ((un->un_pstats[i] == NULL) &&
		    (nblks != 0)) {

			(void) snprintf(kstatname, sizeof (kstatname),
			    "%s%d,%s", sd_label, instance,
			    partname);

			un->un_pstats[i] = kstat_create(sd_label,
			    instance, kstatname, "partition", KSTAT_TYPE_IO,
			    1, KSTAT_FLAG_PERSISTENT);
			if (un->un_pstats[i] != NULL) {
				un->un_pstats[i]->ks_lock = SD_MUTEX(un);
				kstat_install(un->un_pstats[i]);
			}
		}
		mutex_exit(SD_MUTEX(un));
	}
}


#if (defined(__fibre))
/*
 * Function:	sd_init_event_callbacks
 *
 * Description:	This routine initializes the insertion and removal event
 *		callbacks.
 *		(fibre only)
 *
 * Arguments:	un - driver soft state (unit) structure
 *
 * Context:	Kernel thread context
 */

static void
sd_init_event_callbacks(struct sd_lun *un)
{
	ASSERT(un != NULL);

	if ((un->un_insert_event == NULL) &&
	    (ddi_get_eventcookie(SD_DEVINFO(un), FCAL_INSERT_EVENT,
	    &un->un_insert_event) == DDI_SUCCESS)) {
		/*
		 * Add the callback for an insertion event
		 */
		(void) ddi_add_event_handler(SD_DEVINFO(un),
		    un->un_insert_event, sd_event_callback, (void *)un,
		    &(un->un_insert_cb_id));
	}

	if ((un->un_remove_event == NULL) &&
	    (ddi_get_eventcookie(SD_DEVINFO(un), FCAL_REMOVE_EVENT,
	    &un->un_remove_event) == DDI_SUCCESS)) {
		/*
		 * Add the callback for a removal event
		 */
		(void) ddi_add_event_handler(SD_DEVINFO(un),
		    un->un_remove_event, sd_event_callback, (void *)un,
		    &(un->un_remove_cb_id));
	}
}


/*
 * Function:	sd_event_callback
 *
 * Description:	This routine handles insert/remove events (photon). The
 *		state is changed to OFFLINE which can be used to suppress
 *		error msgs. (fibre only)
 *
 * Arguments:	un - driver soft state (unit) structure
 *
 * Context:	Callout thread context
 */
/* ARGSUSED */
static void
sd_event_callback(dev_info_t *dip, ddi_eventcookie_t event, void *arg,
    void *bus_impldata)
{
	struct sd_lun *un = (struct sd_lun *)arg;

	_NOTE(DATA_READABLE_WITHOUT_LOCK(sd_lun::un_insert_event));
	if (event == un->un_insert_event) {
		SD_TRACE(SD_LOG_COMMON, un, "sd_event_callback: insert event");
		mutex_enter(SD_MUTEX(un));
		if (un->un_state == SD_STATE_OFFLINE) {
			if (un->un_last_state != SD_STATE_SUSPENDED) {
				un->un_state = un->un_last_state;
			} else {
				/*
				 * We have gone through SUSPEND/RESUME while
				 * we were offline. Restore the last state
				 */
				un->un_state = un->un_save_state;
			}
		}
		mutex_exit(SD_MUTEX(un));

		_NOTE(DATA_READABLE_WITHOUT_LOCK(sd_lun::un_remove_event));
	} else if (event == un->un_remove_event) {
		SD_TRACE(SD_LOG_COMMON, un, "sd_event_callback: remove event");
		mutex_enter(SD_MUTEX(un));
		/*
		 * We need to handle an event callback that occurs during
		 * the suspend operation, since we don't prevent it.
		 */
		if (un->un_state != SD_STATE_OFFLINE) {
			if (un->un_state != SD_STATE_SUSPENDED) {
				New_state(un, SD_STATE_OFFLINE);
			} else {
				un->un_last_state = SD_STATE_OFFLINE;
			}
		}
		mutex_exit(SD_MUTEX(un));
	} else {
		scsi_log(SD_DEVINFO(un), sd_label, CE_NOTE,
		    "!Unknown event\n");
	}

}
#endif

/*
 * Function:	sd_cache_control()
 *
 * Description:	This routine is the driver entry point for setting
 *		read and write caching by modifying the WCE (write cache
 *		enable) and RCD (read cache disable) bits of mode
 *		page 8 (MODEPAGE_CACHING).
 *
 * Arguments:	ssc		- ssc contains pointer to driver soft state
 *				  (unit) structure for this target.
8869 * rcd_flag - flag for controlling the read cache 8870 * wce_flag - flag for controlling the write cache 8871 * 8872 * Return Code: EIO 8873 * code returned by sd_send_scsi_MODE_SENSE and 8874 * sd_send_scsi_MODE_SELECT 8875 * 8876 * Context: Kernel Thread 8877 */ 8878 8879 static int 8880 sd_cache_control(sd_ssc_t *ssc, int rcd_flag, int wce_flag) 8881 { 8882 struct mode_caching *mode_caching_page; 8883 uchar_t *header; 8884 size_t buflen; 8885 int hdrlen; 8886 int bd_len; 8887 int rval = 0; 8888 struct mode_header_grp2 *mhp; 8889 struct sd_lun *un; 8890 int status; 8891 8892 ASSERT(ssc != NULL); 8893 un = ssc->ssc_un; 8894 ASSERT(un != NULL); 8895 8896 /* 8897 * Do a test unit ready, otherwise a mode sense may not work if this 8898 * is the first command sent to the device after boot. 8899 */ 8900 status = sd_send_scsi_TEST_UNIT_READY(ssc, 0); 8901 if (status != 0) 8902 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 8903 8904 if (un->un_f_cfg_is_atapi == TRUE) { 8905 hdrlen = MODE_HEADER_LENGTH_GRP2; 8906 } else { 8907 hdrlen = MODE_HEADER_LENGTH; 8908 } 8909 8910 /* 8911 * Allocate memory for the retrieved mode page and its headers. Set 8912 * a pointer to the page itself. Use mode_cache_scsi3 to insure 8913 * we get all of the mode sense data otherwise, the mode select 8914 * will fail. mode_cache_scsi3 is a superset of mode_caching. 8915 */ 8916 buflen = hdrlen + MODE_BLK_DESC_LENGTH + 8917 sizeof (struct mode_cache_scsi3); 8918 8919 header = kmem_zalloc(buflen, KM_SLEEP); 8920 8921 /* Get the information from the device. 
*/ 8922 if (un->un_f_cfg_is_atapi == TRUE) { 8923 rval = sd_send_scsi_MODE_SENSE(ssc, CDB_GROUP1, header, buflen, 8924 MODEPAGE_CACHING, SD_PATH_DIRECT); 8925 } else { 8926 rval = sd_send_scsi_MODE_SENSE(ssc, CDB_GROUP0, header, buflen, 8927 MODEPAGE_CACHING, SD_PATH_DIRECT); 8928 } 8929 8930 if (rval != 0) { 8931 SD_ERROR(SD_LOG_IOCTL_RMMEDIA, un, 8932 "sd_cache_control: Mode Sense Failed\n"); 8933 goto mode_sense_failed; 8934 } 8935 8936 /* 8937 * Determine size of Block Descriptors in order to locate 8938 * the mode page data. ATAPI devices return 0, SCSI devices 8939 * should return MODE_BLK_DESC_LENGTH. 8940 */ 8941 if (un->un_f_cfg_is_atapi == TRUE) { 8942 mhp = (struct mode_header_grp2 *)header; 8943 bd_len = (mhp->bdesc_length_hi << 8) | mhp->bdesc_length_lo; 8944 } else { 8945 bd_len = ((struct mode_header *)header)->bdesc_length; 8946 } 8947 8948 if (bd_len > MODE_BLK_DESC_LENGTH) { 8949 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 8950 "sd_cache_control: Mode Sense returned invalid " 8951 "block descriptor length\n"); 8952 sd_ssc_set_info(ssc, SSC_FLAGS_INVALID_DATA, 8953 "sd_cache_control: Mode Sense returned invalid " 8954 "block descriptor length"); 8955 rval = EIO; 8956 goto mode_sense_failed; 8957 } 8958 8959 mode_caching_page = (struct mode_caching *)(header + hdrlen + bd_len); 8960 if (mode_caching_page->mode_page.code != MODEPAGE_CACHING) { 8961 SD_ERROR(SD_LOG_COMMON, un, "sd_cache_control: Mode Sense" 8962 " caching page code mismatch %d\n", 8963 mode_caching_page->mode_page.code); 8964 sd_ssc_set_info(ssc, SSC_FLAGS_INVALID_DATA, 8965 "sd_cache_control: Mode Sense caching page code " 8966 "mismatch %d", mode_caching_page->mode_page.code); 8967 rval = EIO; 8968 goto mode_sense_failed; 8969 } 8970 8971 /* Check the relevant bits on successful mode sense. 
*/ 8972 if ((mode_caching_page->rcd && rcd_flag == SD_CACHE_ENABLE) || 8973 (!mode_caching_page->rcd && rcd_flag == SD_CACHE_DISABLE) || 8974 (mode_caching_page->wce && wce_flag == SD_CACHE_DISABLE) || 8975 (!mode_caching_page->wce && wce_flag == SD_CACHE_ENABLE)) { 8976 8977 size_t sbuflen; 8978 uchar_t save_pg; 8979 8980 /* 8981 * Construct select buffer length based on the 8982 * length of the sense data returned. 8983 */ 8984 sbuflen = hdrlen + MODE_BLK_DESC_LENGTH + 8985 sizeof (struct mode_page) + 8986 (int)mode_caching_page->mode_page.length; 8987 8988 /* 8989 * Set the caching bits as requested. 8990 */ 8991 if (rcd_flag == SD_CACHE_ENABLE) 8992 mode_caching_page->rcd = 0; 8993 else if (rcd_flag == SD_CACHE_DISABLE) 8994 mode_caching_page->rcd = 1; 8995 8996 if (wce_flag == SD_CACHE_ENABLE) 8997 mode_caching_page->wce = 1; 8998 else if (wce_flag == SD_CACHE_DISABLE) 8999 mode_caching_page->wce = 0; 9000 9001 /* 9002 * Save the page if the mode sense says the 9003 * drive supports it. 9004 */ 9005 save_pg = mode_caching_page->mode_page.ps ? 9006 SD_SAVE_PAGE : SD_DONTSAVE_PAGE; 9007 9008 /* Clear reserved bits before mode select. */ 9009 mode_caching_page->mode_page.ps = 0; 9010 9011 /* 9012 * Clear out mode header for mode select. 9013 * The rest of the retrieved page will be reused. 
9014 */ 9015 bzero(header, hdrlen); 9016 9017 if (un->un_f_cfg_is_atapi == TRUE) { 9018 mhp = (struct mode_header_grp2 *)header; 9019 mhp->bdesc_length_hi = bd_len >> 8; 9020 mhp->bdesc_length_lo = (uchar_t)bd_len & 0xff; 9021 } else { 9022 ((struct mode_header *)header)->bdesc_length = bd_len; 9023 } 9024 9025 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 9026 9027 /* Issue mode select to change the cache settings */ 9028 if (un->un_f_cfg_is_atapi == TRUE) { 9029 rval = sd_send_scsi_MODE_SELECT(ssc, CDB_GROUP1, header, 9030 sbuflen, save_pg, SD_PATH_DIRECT); 9031 } else { 9032 rval = sd_send_scsi_MODE_SELECT(ssc, CDB_GROUP0, header, 9033 sbuflen, save_pg, SD_PATH_DIRECT); 9034 } 9035 9036 } 9037 9038 9039 mode_sense_failed: 9040 9041 kmem_free(header, buflen); 9042 9043 if (rval != 0) { 9044 if (rval == EIO) 9045 sd_ssc_assessment(ssc, SD_FMT_STATUS_CHECK); 9046 else 9047 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 9048 } 9049 return (rval); 9050 } 9051 9052 9053 /* 9054 * Function: sd_get_write_cache_enabled() 9055 * 9056 * Description: This routine is the driver entry point for determining if 9057 * write caching is enabled. It examines the WCE (write cache 9058 * enable) bits of mode page 8 (MODEPAGE_CACHING). 9059 * 9060 * Arguments: ssc - ssc contains pointer to driver soft state (unit) 9061 * structure for this target. 
9062 * is_enabled - pointer to int where write cache enabled state 9063 * is returned (non-zero -> write cache enabled) 9064 * 9065 * 9066 * Return Code: EIO 9067 * code returned by sd_send_scsi_MODE_SENSE 9068 * 9069 * Context: Kernel Thread 9070 * 9071 * NOTE: If ioctl is added to disable write cache, this sequence should 9072 * be followed so that no locking is required for accesses to 9073 * un->un_f_write_cache_enabled: 9074 * do mode select to clear wce 9075 * do synchronize cache to flush cache 9076 * set un->un_f_write_cache_enabled = FALSE 9077 * 9078 * Conversely, an ioctl to enable the write cache should be done 9079 * in this order: 9080 * set un->un_f_write_cache_enabled = TRUE 9081 * do mode select to set wce 9082 */ 9083 9084 static int 9085 sd_get_write_cache_enabled(sd_ssc_t *ssc, int *is_enabled) 9086 { 9087 struct mode_caching *mode_caching_page; 9088 uchar_t *header; 9089 size_t buflen; 9090 int hdrlen; 9091 int bd_len; 9092 int rval = 0; 9093 struct sd_lun *un; 9094 int status; 9095 9096 ASSERT(ssc != NULL); 9097 un = ssc->ssc_un; 9098 ASSERT(un != NULL); 9099 ASSERT(is_enabled != NULL); 9100 9101 /* in case of error, flag as enabled */ 9102 *is_enabled = TRUE; 9103 9104 /* 9105 * Do a test unit ready, otherwise a mode sense may not work if this 9106 * is the first command sent to the device after boot. 9107 */ 9108 status = sd_send_scsi_TEST_UNIT_READY(ssc, 0); 9109 9110 if (status != 0) 9111 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 9112 9113 if (un->un_f_cfg_is_atapi == TRUE) { 9114 hdrlen = MODE_HEADER_LENGTH_GRP2; 9115 } else { 9116 hdrlen = MODE_HEADER_LENGTH; 9117 } 9118 9119 /* 9120 * Allocate memory for the retrieved mode page and its headers. Set 9121 * a pointer to the page itself. 9122 */ 9123 buflen = hdrlen + MODE_BLK_DESC_LENGTH + sizeof (struct mode_caching); 9124 header = kmem_zalloc(buflen, KM_SLEEP); 9125 9126 /* Get the information from the device. 
*/ 9127 if (un->un_f_cfg_is_atapi == TRUE) { 9128 rval = sd_send_scsi_MODE_SENSE(ssc, CDB_GROUP1, header, buflen, 9129 MODEPAGE_CACHING, SD_PATH_DIRECT); 9130 } else { 9131 rval = sd_send_scsi_MODE_SENSE(ssc, CDB_GROUP0, header, buflen, 9132 MODEPAGE_CACHING, SD_PATH_DIRECT); 9133 } 9134 9135 if (rval != 0) { 9136 SD_ERROR(SD_LOG_IOCTL_RMMEDIA, un, 9137 "sd_get_write_cache_enabled: Mode Sense Failed\n"); 9138 goto mode_sense_failed; 9139 } 9140 9141 /* 9142 * Determine size of Block Descriptors in order to locate 9143 * the mode page data. ATAPI devices return 0, SCSI devices 9144 * should return MODE_BLK_DESC_LENGTH. 9145 */ 9146 if (un->un_f_cfg_is_atapi == TRUE) { 9147 struct mode_header_grp2 *mhp; 9148 mhp = (struct mode_header_grp2 *)header; 9149 bd_len = (mhp->bdesc_length_hi << 8) | mhp->bdesc_length_lo; 9150 } else { 9151 bd_len = ((struct mode_header *)header)->bdesc_length; 9152 } 9153 9154 if (bd_len > MODE_BLK_DESC_LENGTH) { 9155 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 9156 "sd_get_write_cache_enabled: Mode Sense returned invalid " 9157 "block descriptor length\n"); 9158 /* FMA should make upset complain here */ 9159 sd_ssc_set_info(ssc, SSC_FLAGS_INVALID_DATA, 9160 "sd_get_write_cache_enabled: Mode Sense returned invalid " 9161 "block descriptor length %d", bd_len); 9162 rval = EIO; 9163 goto mode_sense_failed; 9164 } 9165 9166 mode_caching_page = (struct mode_caching *)(header + hdrlen + bd_len); 9167 if (mode_caching_page->mode_page.code != MODEPAGE_CACHING) { 9168 SD_ERROR(SD_LOG_COMMON, un, "sd_cache_control: Mode Sense" 9169 " caching page code mismatch %d\n", 9170 mode_caching_page->mode_page.code); 9171 /* FMA could make upset complain here */ 9172 sd_ssc_set_info(ssc, SSC_FLAGS_INVALID_DATA, 9173 "sd_cache_control: Mode Sense caching page code " 9174 "mismatch %d", mode_caching_page->mode_page.code); 9175 rval = EIO; 9176 goto mode_sense_failed; 9177 } 9178 *is_enabled = mode_caching_page->wce; 9179 9180 mode_sense_failed: 9181 if (rval == 
0) { 9182 sd_ssc_assessment(ssc, SD_FMT_STANDARD); 9183 } else if (rval == EIO) { 9184 /* 9185 * Some disks do not support mode sense(6), we 9186 * should ignore this kind of error(sense key is 9187 * 0x5 - illegal request). 9188 */ 9189 uint8_t *sensep; 9190 int senlen; 9191 9192 sensep = (uint8_t *)ssc->ssc_uscsi_cmd->uscsi_rqbuf; 9193 senlen = (int)(ssc->ssc_uscsi_cmd->uscsi_rqlen - 9194 ssc->ssc_uscsi_cmd->uscsi_rqresid); 9195 9196 if (senlen > 0 && 9197 scsi_sense_key(sensep) == KEY_ILLEGAL_REQUEST) { 9198 sd_ssc_assessment(ssc, SD_FMT_IGNORE_COMPROMISE); 9199 } else { 9200 sd_ssc_assessment(ssc, SD_FMT_STATUS_CHECK); 9201 } 9202 } else { 9203 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 9204 } 9205 kmem_free(header, buflen); 9206 return (rval); 9207 } 9208 9209 /* 9210 * Function: sd_get_nv_sup() 9211 * 9212 * Description: This routine is the driver entry point for 9213 * determining whether non-volatile cache is supported. This 9214 * determination process works as follows: 9215 * 9216 * 1. sd first queries sd.conf on whether 9217 * suppress_cache_flush bit is set for this device. 9218 * 9219 * 2. if not there, then queries the internal disk table. 9220 * 9221 * 3. if either sd.conf or internal disk table specifies 9222 * cache flush be suppressed, we don't bother checking 9223 * NV_SUP bit. 9224 * 9225 * If SUPPRESS_CACHE_FLUSH bit is not set to 1, sd queries 9226 * the optional INQUIRY VPD page 0x86. If the device 9227 * supports VPD page 0x86, sd examines the NV_SUP 9228 * (non-volatile cache support) bit in the INQUIRY VPD page 9229 * 0x86: 9230 * o If NV_SUP bit is set, sd assumes the device has a 9231 * non-volatile cache and set the 9232 * un_f_sync_nv_supported to TRUE. 9233 * o Otherwise cache is not non-volatile, 9234 * un_f_sync_nv_supported is set to FALSE. 
9235 * 9236 * Arguments: un - driver soft state (unit) structure 9237 * 9238 * Return Code: 9239 * 9240 * Context: Kernel Thread 9241 */ 9242 9243 static void 9244 sd_get_nv_sup(sd_ssc_t *ssc) 9245 { 9246 int rval = 0; 9247 uchar_t *inq86 = NULL; 9248 size_t inq86_len = MAX_INQUIRY_SIZE; 9249 size_t inq86_resid = 0; 9250 struct dk_callback *dkc; 9251 struct sd_lun *un; 9252 9253 ASSERT(ssc != NULL); 9254 un = ssc->ssc_un; 9255 ASSERT(un != NULL); 9256 9257 mutex_enter(SD_MUTEX(un)); 9258 9259 /* 9260 * Be conservative on the device's support of 9261 * SYNC_NV bit: un_f_sync_nv_supported is 9262 * initialized to be false. 9263 */ 9264 un->un_f_sync_nv_supported = FALSE; 9265 9266 /* 9267 * If either sd.conf or internal disk table 9268 * specifies cache flush be suppressed, then 9269 * we don't bother checking NV_SUP bit. 9270 */ 9271 if (un->un_f_suppress_cache_flush == TRUE) { 9272 mutex_exit(SD_MUTEX(un)); 9273 return; 9274 } 9275 9276 if (sd_check_vpd_page_support(ssc) == 0 && 9277 un->un_vpd_page_mask & SD_VPD_EXTENDED_DATA_PG) { 9278 mutex_exit(SD_MUTEX(un)); 9279 /* collect page 86 data if available */ 9280 inq86 = kmem_zalloc(inq86_len, KM_SLEEP); 9281 9282 rval = sd_send_scsi_INQUIRY(ssc, inq86, inq86_len, 9283 0x01, 0x86, &inq86_resid); 9284 9285 if (rval == 0 && (inq86_len - inq86_resid > 6)) { 9286 SD_TRACE(SD_LOG_COMMON, un, 9287 "sd_get_nv_sup: \ 9288 successfully get VPD page: %x \ 9289 PAGE LENGTH: %x BYTE 6: %x\n", 9290 inq86[1], inq86[3], inq86[6]); 9291 9292 mutex_enter(SD_MUTEX(un)); 9293 /* 9294 * check the value of NV_SUP bit: only if the device 9295 * reports NV_SUP bit to be 1, the 9296 * un_f_sync_nv_supported bit will be set to true. 
9297 */ 9298 if (inq86[6] & SD_VPD_NV_SUP) { 9299 un->un_f_sync_nv_supported = TRUE; 9300 } 9301 mutex_exit(SD_MUTEX(un)); 9302 } else if (rval != 0) { 9303 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 9304 } 9305 9306 kmem_free(inq86, inq86_len); 9307 } else { 9308 mutex_exit(SD_MUTEX(un)); 9309 } 9310 9311 /* 9312 * Send a SYNC CACHE command to check whether 9313 * SYNC_NV bit is supported. This command should have 9314 * un_f_sync_nv_supported set to correct value. 9315 */ 9316 mutex_enter(SD_MUTEX(un)); 9317 if (un->un_f_sync_nv_supported) { 9318 mutex_exit(SD_MUTEX(un)); 9319 dkc = kmem_zalloc(sizeof (struct dk_callback), KM_SLEEP); 9320 dkc->dkc_flag = FLUSH_VOLATILE; 9321 (void) sd_send_scsi_SYNCHRONIZE_CACHE(un, dkc); 9322 9323 /* 9324 * Send a TEST UNIT READY command to the device. This should 9325 * clear any outstanding UNIT ATTENTION that may be present. 9326 */ 9327 rval = sd_send_scsi_TEST_UNIT_READY(ssc, SD_DONT_RETRY_TUR); 9328 if (rval != 0) 9329 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 9330 9331 kmem_free(dkc, sizeof (struct dk_callback)); 9332 } else { 9333 mutex_exit(SD_MUTEX(un)); 9334 } 9335 9336 SD_TRACE(SD_LOG_COMMON, un, "sd_get_nv_sup: \ 9337 un_f_suppress_cache_flush is set to %d\n", 9338 un->un_f_suppress_cache_flush); 9339 } 9340 9341 /* 9342 * Function: sd_make_device 9343 * 9344 * Description: Utility routine to return the Solaris device number from 9345 * the data in the device's dev_info structure. 9346 * 9347 * Return Code: The Solaris device number 9348 * 9349 * Context: Any 9350 */ 9351 9352 static dev_t 9353 sd_make_device(dev_info_t *devi) 9354 { 9355 return (makedevice(ddi_name_to_major(ddi_get_name(devi)), 9356 ddi_get_instance(devi) << SDUNIT_SHIFT)); 9357 } 9358 9359 9360 /* 9361 * Function: sd_pm_entry 9362 * 9363 * Description: Called at the start of a new command to manage power 9364 * and busy status of a device. 
This includes determining whether
 *		the current power state of the device is sufficient for
 *		performing the command or whether it must be changed.
 *		The PM framework is notified appropriately.
 *		Only with a return status of DDI_SUCCESS will the
 *		component be busy to the framework.
 *
 *		All callers of sd_pm_entry must check the return status
 *		and only call sd_pm_exit if it was DDI_SUCCESS. A status
 *		of DDI_FAILURE indicates the device failed to power up.
 *		In this case un_pm_count has been adjusted so the result
 *		on exit is still powered down, ie. count is less than 0.
 *		Calling sd_pm_exit with this count value hits an ASSERT.
 *
 * Return Code: DDI_SUCCESS or DDI_FAILURE
 *
 * Context: Kernel thread context.
 */

static int
sd_pm_entry(struct sd_lun *un)
{
	int return_status = DDI_SUCCESS;

	/* Caller must not hold either lock; both are taken below. */
	ASSERT(!mutex_owned(SD_MUTEX(un)));
	ASSERT(!mutex_owned(&un->un_pm_mutex));

	SD_TRACE(SD_LOG_IO_PM, un, "sd_pm_entry: entry\n");

	if (un->un_f_pm_is_enabled == FALSE) {
		SD_TRACE(SD_LOG_IO_PM, un,
		    "sd_pm_entry: exiting, PM not enabled\n");
		return (return_status);
	}

	/*
	 * Just increment a counter if PM is enabled. On the transition from
	 * 0 ==> 1, mark the device as busy.  The iodone side will decrement
	 * the count with each IO and mark the device as idle when the count
	 * hits 0.
	 *
	 * If the count is less than 0 the device is powered down. If a powered
	 * down device is successfully powered up then the count must be
	 * incremented to reflect the power up. Note that it'll get incremented
	 * a second time to become busy.
	 *
	 * Because the following has the potential to change the device state
	 * and must release the un_pm_mutex to do so, only one thread can be
	 * allowed through at a time.
	 */

	/* un_pm_busy + cv serialize the power-up section below. */
	mutex_enter(&un->un_pm_mutex);
	while (un->un_pm_busy == TRUE) {
		cv_wait(&un->un_pm_busy_cv, &un->un_pm_mutex);
	}
	un->un_pm_busy = TRUE;

	if (un->un_pm_count < 1) {

		SD_TRACE(SD_LOG_IO_PM, un, "sd_pm_entry: busy component\n");

		/*
		 * Indicate we are now busy so the framework won't attempt to
		 * power down the device. This call will only fail if either
		 * we passed a bad component number or the device has no
		 * components. Neither of these should ever happen.
		 */
		mutex_exit(&un->un_pm_mutex);
		return_status = pm_busy_component(SD_DEVINFO(un), 0);
		ASSERT(return_status == DDI_SUCCESS);

		mutex_enter(&un->un_pm_mutex);

		if (un->un_pm_count < 0) {
			/* Negative count means the device is powered down. */
			mutex_exit(&un->un_pm_mutex);

			SD_TRACE(SD_LOG_IO_PM, un,
			    "sd_pm_entry: power up component\n");

			/*
			 * pm_raise_power will cause sdpower to be called
			 * which brings the device power level to the
			 * desired state, ON in this case. If successful,
			 * un_pm_count and un_power_level will be updated
			 * appropriately.
			 */
			return_status = pm_raise_power(SD_DEVINFO(un), 0,
			    SD_SPINDLE_ON);

			mutex_enter(&un->un_pm_mutex);

			if (return_status != DDI_SUCCESS) {
				/*
				 * Power up failed.
				 * Idle the device and adjust the count
				 * so the result on exit is that we're
				 * still powered down, ie. count is less than 0.
				 */
				SD_TRACE(SD_LOG_IO_PM, un,
				    "sd_pm_entry: power up failed,"
				    " idle the component\n");

				(void) pm_idle_component(SD_DEVINFO(un), 0);
				un->un_pm_count--;
			} else {
				/*
				 * Device is powered up, verify the
				 * count is non-negative.
				 * This is debug only.
				 */
				ASSERT(un->un_pm_count == 0);
			}
		}

		if (return_status == DDI_SUCCESS) {
			/*
			 * For performance, now that the device has been tagged
			 * as busy, and it's known to be powered up, update the
			 * chain types to use jump tables that do not include
			 * pm. This significantly lowers the overhead and
			 * therefore improves performance.
			 */

			mutex_exit(&un->un_pm_mutex);
			mutex_enter(SD_MUTEX(un));
			SD_TRACE(SD_LOG_IO_PM, un,
			    "sd_pm_entry: changing uscsi_chain_type from %d\n",
			    un->un_uscsi_chain_type);

			if (un->un_f_non_devbsize_supported) {
				un->un_buf_chain_type =
				    SD_CHAIN_INFO_RMMEDIA_NO_PM;
			} else {
				un->un_buf_chain_type =
				    SD_CHAIN_INFO_DISK_NO_PM;
			}
			un->un_uscsi_chain_type = SD_CHAIN_INFO_USCSI_CMD_NO_PM;

			SD_TRACE(SD_LOG_IO_PM, un,
			    "    changed uscsi_chain_type to   %d\n",
			    un->un_uscsi_chain_type);
			mutex_exit(SD_MUTEX(un));
			mutex_enter(&un->un_pm_mutex);

			if (un->un_pm_idle_timeid == NULL) {
				/* 300 ms. */
				un->un_pm_idle_timeid =
				    timeout(sd_pm_idletimeout_handler, un,
				    (drv_usectohz((clock_t)300000)));
				/*
				 * Include an extra call to busy which keeps the
				 * device busy with-respect-to the PM layer
				 * until the timer fires, at which time it'll
				 * get the extra idle call.
				 */
				(void) pm_busy_component(SD_DEVINFO(un), 0);
			}
		}
	}
	un->un_pm_busy = FALSE;
	/* Next... */
	cv_signal(&un->un_pm_busy_cv);

	un->un_pm_count++;

	SD_TRACE(SD_LOG_IO_PM, un,
	    "sd_pm_entry: exiting, un_pm_count = %d\n", un->un_pm_count);

	mutex_exit(&un->un_pm_mutex);

	return (return_status);
}


/*
 * Function: sd_pm_exit
 *
 * Description: Called at the completion of a command to manage busy
 *		status for the device. If the device becomes idle the
 *		PM framework is notified.
 *
 * Context: Kernel thread context
 */

static void
sd_pm_exit(struct sd_lun *un)
{
	ASSERT(!mutex_owned(SD_MUTEX(un)));
	ASSERT(!mutex_owned(&un->un_pm_mutex));

	SD_TRACE(SD_LOG_IO_PM, un, "sd_pm_exit: entry\n");

	/*
	 * After attach the following flag is only read, so don't
	 * take the penalty of acquiring a mutex for it.
	 */
	if (un->un_f_pm_is_enabled == TRUE) {

		mutex_enter(&un->un_pm_mutex);
		un->un_pm_count--;

		SD_TRACE(SD_LOG_IO_PM, un,
		    "sd_pm_exit: un_pm_count = %d\n", un->un_pm_count);

		/* A negative count here would violate the sd_pm_entry rules. */
		ASSERT(un->un_pm_count >= 0);
		if (un->un_pm_count == 0) {
			mutex_exit(&un->un_pm_mutex);

			SD_TRACE(SD_LOG_IO_PM, un,
			    "sd_pm_exit: idle component\n");

			(void) pm_idle_component(SD_DEVINFO(un), 0);

		} else {
			mutex_exit(&un->un_pm_mutex);
		}
	}

	SD_TRACE(SD_LOG_IO_PM, un, "sd_pm_exit: exiting\n");
}


/*
 * Function: sdopen
 *
 * Description: Driver's open(9e) entry point function.
 *
 * Arguments: dev_p - pointer to device number
 *		flag - how to open file (FEXCL, FNDELAY, FREAD, FWRITE)
 *		otyp - open type (OTYP_BLK, OTYP_CHR, OTYP_LYR)
 *		cred_p - user credential pointer
 *
 * Return Code: EINVAL
 *		ENXIO
 *		EIO
 *		EROFS
 *		EBUSY
 *
 * Context: Kernel thread context
 */
/* ARGSUSED */
static int
sdopen(dev_t *dev_p, int flag, int otyp, cred_t *cred_p)
{
	struct sd_lun	*un;
	int		nodelay;
	int		part;
	uint64_t	partmask;
	int		instance;
	dev_t		dev;
	int		rval = EIO;
	diskaddr_t	nblks = 0;
	diskaddr_t	label_cap;

	/* Validate the open type */
	if (otyp >= OTYPCNT) {
		return (EINVAL);
	}

	dev = *dev_p;
	instance = SDUNIT(dev);
	mutex_enter(&sd_detach_mutex);

	/*
	 * Fail the open if there is no softstate for the instance, or
	 * if another thread somewhere is trying to detach the instance.
	 */
	if (((un = ddi_get_soft_state(sd_state, instance)) == NULL) ||
	    (un->un_detach_count != 0)) {
		mutex_exit(&sd_detach_mutex);
		/*
		 * The probe cache only needs to be cleared when open (9e) fails
		 * with ENXIO (4238046).
		 */
		/*
		 * un-conditionally clearing probe cache is ok with
		 * separate sd/ssd binaries
		 * x86 platform can be an issue with both parallel
		 * and fibre in 1 binary
		 */
		sd_scsi_clear_probe_cache();
		return (ENXIO);
	}

	/*
	 * The un_layer_count is to prevent another thread in specfs from
	 * trying to detach the instance, which can happen when we are
	 * called from a higher-layer driver instead of thru specfs.
	 * This will not be needed when DDI provides a layered driver
	 * interface that allows specfs to know that an instance is in
	 * use by a layered driver & should not be detached.
	 *
	 * Note: the semantics for layered driver opens are exactly one
	 * close for every open.
	 */
	if (otyp == OTYP_LYR) {
		un->un_layer_count++;
	}

	/*
	 * Keep a count of the current # of opens in progress. This is because
	 * some layered drivers try to call us as a regular open. This can
	 * cause problems that we cannot prevent, however by keeping this count
	 * we can at least keep our open and detach routines from racing against
	 * each other under such conditions.
	 */
	un->un_opens_in_progress++;
	mutex_exit(&sd_detach_mutex);

	nodelay  = (flag & (FNDELAY | FNONBLOCK));
	part	 = SDPART(dev);
	partmask = 1 << part;

	/*
	 * We use a semaphore here in order to serialize
	 * open and close requests on the device.
	 */
	sema_p(&un->un_semoclose);

	mutex_enter(SD_MUTEX(un));

	/*
	 * All device accesses go thru sdstrategy() where we check
	 * on suspend status but there could be a scsi_poll command,
	 * which bypasses sdstrategy(), so we need to check pm
	 * status.
	 */

	if (!nodelay) {
		/* Wait out suspend / power-change before touching the device */
		while ((un->un_state == SD_STATE_SUSPENDED) ||
		    (un->un_state == SD_STATE_PM_CHANGING)) {
			cv_wait(&un->un_suspend_cv, SD_MUTEX(un));
		}

		mutex_exit(SD_MUTEX(un));
		if (sd_pm_entry(un) != DDI_SUCCESS) {
			rval = EIO;
			SD_ERROR(SD_LOG_OPEN_CLOSE, un,
			    "sdopen: sd_pm_entry failed\n");
			goto open_failed_with_pm;
		}
		mutex_enter(SD_MUTEX(un));
	}

	/* check for previous exclusive open */
	SD_TRACE(SD_LOG_OPEN_CLOSE, un, "sdopen: un=%p\n", (void *)un);
	SD_TRACE(SD_LOG_OPEN_CLOSE, un,
	    "sdopen: exclopen=%x, flag=%x, regopen=%x\n",
	    un->un_exclopen, flag, un->un_ocmap.regopen[otyp]);

	if (un->un_exclopen & (partmask)) {
		goto excl_open_fail;
	}

	if (flag & FEXCL) {
		/* FEXCL requires that no other open of any type be active */
		int i;
		if (un->un_ocmap.lyropen[part]) {
			goto excl_open_fail;
		}
		for (i = 0; i < (OTYPCNT - 1); i++) {
			if (un->un_ocmap.regopen[i] & (partmask)) {
				goto excl_open_fail;
			}
		}
	}

	/*
	 * Check the write permission if this is a removable media device,
	 * NDELAY has not been set, and writable permission is requested.
	 *
	 * Note: If NDELAY was set and this is write-protected media the WRITE
	 * attempt will fail with EIO as part of the I/O processing. This is a
	 * more permissive implementation that allows the open to succeed and
	 * WRITE attempts to fail when appropriate.
	 */
	if (un->un_f_chk_wp_open) {
		if ((flag & FWRITE) && (!nodelay)) {
			mutex_exit(SD_MUTEX(un));
			/*
			 * Defer the check for write permission on writable
			 * DVD drive till sdstrategy and will not fail open even
			 * if FWRITE is set as the device can be writable
			 * depending upon the media and the media can change
			 * after the call to open().
			 */
			if (un->un_f_dvdram_writable_device == FALSE) {
				if (ISCD(un) || sr_check_wp(dev)) {
					rval = EROFS;
					mutex_enter(SD_MUTEX(un));
					SD_ERROR(SD_LOG_OPEN_CLOSE, un, "sdopen: "
					    "write to cd or write protected media\n");
					goto open_fail;
				}
			}
			mutex_enter(SD_MUTEX(un));
		}
	}

	/*
	 * If opening in NDELAY/NONBLOCK mode, just return.
	 * Check if disk is ready and has a valid geometry later.
	 */
	if (!nodelay) {
		sd_ssc_t	*ssc;

		mutex_exit(SD_MUTEX(un));
		ssc = sd_ssc_init(un);
		rval = sd_ready_and_valid(ssc, part);
		sd_ssc_fini(ssc);
		mutex_enter(SD_MUTEX(un));
		/*
		 * Fail if device is not ready or if the number of disk
		 * blocks is zero or negative for non CD devices.
		 */

		nblks = 0;

		if (rval == SD_READY_VALID && (!ISCD(un))) {
			/* if cmlb_partinfo fails, nblks remains 0 */
			mutex_exit(SD_MUTEX(un));
			(void) cmlb_partinfo(un->un_cmlbhandle, part, &nblks,
			    NULL, NULL, NULL, (void *)SD_PATH_DIRECT);
			mutex_enter(SD_MUTEX(un));
		}

		if ((rval != SD_READY_VALID) ||
		    (!ISCD(un) && nblks <= 0)) {
			rval = un->un_f_has_removable_media ? ENXIO : EIO;
			SD_ERROR(SD_LOG_OPEN_CLOSE, un, "sdopen: "
			    "device not ready or invalid disk block value\n");
			goto open_fail;
		}
#if defined(__i386) || defined(__amd64)
	} else {
		uchar_t *cp;
		/*
		 * x86 requires special nodelay handling, so that p0 is
		 * always defined and accessible.
		 * Invalidate geometry only if device is not already open.
		 */
		cp = &un->un_ocmap.chkd[0];
		while (cp < &un->un_ocmap.chkd[OCSIZE]) {
			if (*cp != (uchar_t)0) {
				break;
			}
			cp++;
		}
		if (cp == &un->un_ocmap.chkd[OCSIZE]) {
			/* No opens of any type: invalidate cached geometry */
			mutex_exit(SD_MUTEX(un));
			cmlb_invalidate(un->un_cmlbhandle,
			    (void *)SD_PATH_DIRECT);
			mutex_enter(SD_MUTEX(un));
		}

#endif
	}

	/* Record this open in the per-type open map */
	if (otyp == OTYP_LYR) {
		un->un_ocmap.lyropen[part]++;
	} else {
		un->un_ocmap.regopen[otyp] |= partmask;
	}

	/* Set up open and exclusive open flags */
	if (flag & FEXCL) {
		un->un_exclopen |= (partmask);
	}

	/*
	 * If the lun is EFI labeled and lun capacity is greater than the
	 * capacity contained in the label, log a sys-event to notify the
	 * interested module.
	 * To avoid an infinite loop of logging sys-event, we only log the
	 * event when the lun is not opened in NDELAY mode. The event handler
	 * should open the lun in NDELAY mode.
	 */
	if (!(flag & FNDELAY)) {
		mutex_exit(SD_MUTEX(un));
		if (cmlb_efi_label_capacity(un->un_cmlbhandle, &label_cap,
		    (void*)SD_PATH_DIRECT) == 0) {
			mutex_enter(SD_MUTEX(un));
			if (un->un_f_blockcount_is_valid &&
			    un->un_blockcount > label_cap) {
				mutex_exit(SD_MUTEX(un));
				sd_log_lun_expansion_event(un,
				    (nodelay ? KM_NOSLEEP : KM_SLEEP));
				mutex_enter(SD_MUTEX(un));
			}
		} else {
			mutex_enter(SD_MUTEX(un));
		}
	}

	SD_TRACE(SD_LOG_OPEN_CLOSE, un, "sdopen: "
	    "open of part %d type %d\n", part, otyp);

	mutex_exit(SD_MUTEX(un));
	if (!nodelay) {
		sd_pm_exit(un);
	}

	sema_v(&un->un_semoclose);

	mutex_enter(&sd_detach_mutex);
	un->un_opens_in_progress--;
	mutex_exit(&sd_detach_mutex);

	SD_TRACE(SD_LOG_OPEN_CLOSE, un, "sdopen: exit success\n");
	/*
	 * NOTE(review): open(9e) is expected to return an errno value;
	 * DDI_SUCCESS here relies on it being 0 -- confirm.
	 */
	return (DDI_SUCCESS);

excl_open_fail:
	SD_ERROR(SD_LOG_OPEN_CLOSE, un, "sdopen: fail exclusive open\n");
	rval = EBUSY;

open_fail:
	mutex_exit(SD_MUTEX(un));

	/*
	 * On a failed open we must exit the pm management.
	 */
	if (!nodelay) {
		sd_pm_exit(un);
	}
open_failed_with_pm:
	sema_v(&un->un_semoclose);

	mutex_enter(&sd_detach_mutex);
	un->un_opens_in_progress--;
	if (otyp == OTYP_LYR) {
		un->un_layer_count--;
	}
	mutex_exit(&sd_detach_mutex);

	return (rval);
}


/*
 * Function: sdclose
 *
 * Description: Driver's close(9e) entry point function.
 *
 * Arguments:	dev - device number
 *		flag - file status flag, informational only
 *		otyp - close type (OTYP_BLK, OTYP_CHR, OTYP_LYR)
 *		cred_p - user credential pointer
 *
 * Return Code:	ENXIO
 *
 * Context:	Kernel thread context
 */
/* ARGSUSED */
static int
sdclose(dev_t dev, int flag, int otyp, cred_t *cred_p)
{
	struct sd_lun	*un;
	uchar_t		*cp;
	int		part;
	int		nodelay;
	int		rval = 0;

	/* Validate the open type */
	if (otyp >= OTYPCNT) {
		return (ENXIO);
	}

	if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) {
		return (ENXIO);
	}

	part = SDPART(dev);
	nodelay = flag & (FNDELAY | FNONBLOCK);

	SD_TRACE(SD_LOG_OPEN_CLOSE, un,
	    "sdclose: close of part %d type %d\n", part, otyp);

	/*
	 * We use a semaphore here in order to serialize
	 * open and close requests on the device.
	 */
	sema_p(&un->un_semoclose);

	mutex_enter(SD_MUTEX(un));

	/* Don't proceed if power is being changed. */
	while (un->un_state == SD_STATE_PM_CHANGING) {
		cv_wait(&un->un_suspend_cv, SD_MUTEX(un));
	}

	/* Drop any exclusive-open claim held on this partition. */
	if (un->un_exclopen & (1 << part)) {
		un->un_exclopen &= ~(1 << part);
	}

	/* Update the open partition map */
	if (otyp == OTYP_LYR) {
		un->un_ocmap.lyropen[part] -= 1;
	} else {
		un->un_ocmap.regopen[otyp] &= ~(1 << part);
	}

	/*
	 * Scan the open map; if every byte is clear, this was the last
	 * close on the unit (no partition open under any open type).
	 */
	cp = &un->un_ocmap.chkd[0];
	while (cp < &un->un_ocmap.chkd[OCSIZE]) {
		if (*cp != NULL) {
			break;
		}
		cp++;
	}

	if (cp == &un->un_ocmap.chkd[OCSIZE]) {
		SD_TRACE(SD_LOG_OPEN_CLOSE, un, "sdclose: last close\n");

		/*
		 * We avoid persistence upon the last close, and set
		 * the throttle back to the maximum.
		 */
		un->un_throttle = un->un_saved_throttle;

		if (un->un_state == SD_STATE_OFFLINE) {
			if (un->un_f_is_fibre == FALSE) {
				scsi_log(SD_DEVINFO(un), sd_label,
				    CE_WARN, "offline\n");
			}
			/* Drop SD_MUTEX across cmlb call; cmlb has own locks */
			mutex_exit(SD_MUTEX(un));
			cmlb_invalidate(un->un_cmlbhandle,
			    (void *)SD_PATH_DIRECT);
			mutex_enter(SD_MUTEX(un));

		} else {
			/*
			 * Flush any outstanding writes in NVRAM cache.
			 * Note: SYNCHRONIZE CACHE is an optional SCSI-2
			 * cmd, it may not work for non-Pluto devices.
			 * SYNCHRONIZE CACHE is not required for removables,
			 * except DVD-RAM drives.
			 *
			 * Also note: because SYNCHRONIZE CACHE is currently
			 * the only command issued here that requires the
			 * drive be powered up, only do the power up before
			 * sending the Sync Cache command. If additional
			 * commands are added which require a powered up
			 * drive, the following sequence may have to change.
			 *
			 * And finally, note that parallel SCSI on SPARC
			 * only issues a Sync Cache to DVD-RAM, a newly
			 * supported device.
			 */
#if defined(__i386) || defined(__amd64)
			if ((un->un_f_sync_cache_supported &&
			    un->un_f_sync_cache_required) ||
			    un->un_f_dvdram_writable_device == TRUE) {
#else
			if (un->un_f_dvdram_writable_device == TRUE) {
#endif
				mutex_exit(SD_MUTEX(un));
				if (sd_pm_entry(un) == DDI_SUCCESS) {
					rval =
					    sd_send_scsi_SYNCHRONIZE_CACHE(un,
					    NULL);
					/* ignore error if not supported */
					if (rval == ENOTSUP) {
						rval = 0;
					} else if (rval != 0) {
						rval = EIO;
					}
					sd_pm_exit(un);
				} else {
					rval = EIO;
				}
				mutex_enter(SD_MUTEX(un));
			}

			/*
			 * For devices which support DOOR_LOCK, send an ALLOW
			 * MEDIA REMOVAL command, but don't get upset if it
			 * fails. We need to raise the power of the drive
			 * before we can call sd_send_scsi_DOORLOCK().
			 */
			if (un->un_f_doorlock_supported) {
				mutex_exit(SD_MUTEX(un));
				if (sd_pm_entry(un) == DDI_SUCCESS) {
					sd_ssc_t	*ssc;

					ssc = sd_ssc_init(un);
					rval = sd_send_scsi_DOORLOCK(ssc,
					    SD_REMOVAL_ALLOW, SD_PATH_DIRECT);
					if (rval != 0)
						sd_ssc_assessment(ssc,
						    SD_FMT_IGNORE);
					sd_ssc_fini(ssc);

					sd_pm_exit(un);
					/*
					 * For CDs, a failed unlock on a
					 * nodelay close is reported to the
					 * caller as ENXIO.
					 */
					if (ISCD(un) && (rval != 0) &&
					    (nodelay != 0)) {
						rval = ENXIO;
					}
				} else {
					rval = EIO;
				}
				mutex_enter(SD_MUTEX(un));
			}

			/*
			 * If a device has removable media, invalidate all
			 * parameters related to media, such as geometry,
			 * blocksize, and blockcount.
			 */
			if (un->un_f_has_removable_media) {
				sr_ejected(un);
			}

			/*
			 * Destroy the cache (if it exists) which was
			 * allocated for the write maps, since this is
			 * the last close for this media.
			 */
			if (un->un_wm_cache) {
				/*
				 * Check if there are pending commands,
				 * and if there are, give a warning and
				 * do not destroy the cache.
				 */
				if (un->un_ncmds_in_driver > 0) {
					scsi_log(SD_DEVINFO(un),
					    sd_label, CE_WARN,
					    "Unable to clean up memory "
					    "because of pending I/O\n");
				} else {
					kmem_cache_destroy(
					    un->un_wm_cache);
					un->un_wm_cache = NULL;
				}
			}
		}
	}

	mutex_exit(SD_MUTEX(un));
	sema_v(&un->un_semoclose);

	if (otyp == OTYP_LYR) {
		mutex_enter(&sd_detach_mutex);
		/*
		 * The detach routine may run when the layer count
		 * drops to zero.
		 */
		un->un_layer_count--;
		mutex_exit(&sd_detach_mutex);
	}

	return (rval);
}


/*
 * Function:	sd_ready_and_valid
 *
 * Description: Test if device is ready and has a valid geometry.
 *
 * Arguments:	ssc - sd_ssc_t will contain un
 *		part - partition number being opened/validated
 *
 * Return Code:	SD_READY_VALID		ready and valid label
 *		SD_NOT_READY_VALID	not ready, no label
 *		SD_RESERVED_BY_OTHERS	reservation conflict
 *
 *		NOTE(review): the wmap-cache allocation failure path below
 *		returns ENOMEM, which is not one of the documented SD_*
 *		codes; callers comparing against SD_READY_VALID still treat
 *		it as a failure — confirm before relying on the exact value.
 *
 * Context:	Never called at interrupt context.
 */

static int
sd_ready_and_valid(sd_ssc_t *ssc, int part)
{
	struct sd_errstats	*stp;
	uint64_t		capacity;
	uint_t			lbasize;
	int			rval = SD_READY_VALID;
	char			name_str[48];
	int			is_valid;
	struct sd_lun		*un;
	int			status;

	ASSERT(ssc != NULL);
	un = ssc->ssc_un;
	ASSERT(un != NULL);
	ASSERT(!mutex_owned(SD_MUTEX(un)));

	mutex_enter(SD_MUTEX(un));
	/*
	 * If a device has removable media, we must check if media is
	 * ready when checking if this device is ready and valid.
	 */
	if (un->un_f_has_removable_media) {
		mutex_exit(SD_MUTEX(un));
		status = sd_send_scsi_TEST_UNIT_READY(ssc, 0);

		if (status != 0) {
			rval = SD_NOT_READY_VALID;
			mutex_enter(SD_MUTEX(un));

			/* Ignore all failed status for removable media */
			sd_ssc_assessment(ssc, SD_FMT_IGNORE);

			goto done;
		}

		is_valid = SD_IS_VALID_LABEL(un);
		mutex_enter(SD_MUTEX(un));
		if (!is_valid ||
		    (un->un_f_blockcount_is_valid == FALSE) ||
		    (un->un_f_tgt_blocksize_is_valid == FALSE)) {

			/* capacity has to be read every open. */
			mutex_exit(SD_MUTEX(un));
			status = sd_send_scsi_READ_CAPACITY(ssc, &capacity,
			    &lbasize, SD_PATH_DIRECT);

			if (status != 0) {
				sd_ssc_assessment(ssc, SD_FMT_IGNORE);

				cmlb_invalidate(un->un_cmlbhandle,
				    (void *)SD_PATH_DIRECT);
				mutex_enter(SD_MUTEX(un));
				rval = SD_NOT_READY_VALID;

				goto done;
			} else {
				mutex_enter(SD_MUTEX(un));
				sd_update_block_info(un, lbasize, capacity);
			}
		}

		/*
		 * Check if the media in the device is writable or not.
		 */
		if (!is_valid && ISCD(un)) {
			sd_check_for_writable_cd(ssc, SD_PATH_DIRECT);
		}

	} else {
		/*
		 * Do a test unit ready to clear any unit attention from non-cd
		 * devices.
		 */
		mutex_exit(SD_MUTEX(un));

		status = sd_send_scsi_TEST_UNIT_READY(ssc, 0);
		if (status != 0) {
			sd_ssc_assessment(ssc, SD_FMT_IGNORE);
		}

		mutex_enter(SD_MUTEX(un));
	}


	/*
	 * If this is a non 512 block device, allocate space for
	 * the wmap cache. This is being done here since every time
	 * a media is changed this routine will be called and the
	 * block size is a function of media rather than device.
	 */
	if (un->un_f_non_devbsize_supported && NOT_DEVBSIZE(un)) {
		if (!(un->un_wm_cache)) {
			(void) snprintf(name_str, sizeof (name_str),
			    "%s%d_cache",
			    ddi_driver_name(SD_DEVINFO(un)),
			    ddi_get_instance(SD_DEVINFO(un)));
			un->un_wm_cache = kmem_cache_create(
			    name_str, sizeof (struct sd_w_map),
			    8, sd_wm_cache_constructor,
			    sd_wm_cache_destructor, NULL,
			    (void *)un, NULL, 0);
			if (!(un->un_wm_cache)) {
				/* See NOTE(review) in the header above. */
				rval = ENOMEM;
				goto done;
			}
		}
	}

	if (un->un_state == SD_STATE_NORMAL) {
		/*
		 * If the target is not yet ready here (defined by a TUR
		 * failure), invalidate the geometry and print an 'offline'
		 * message. This is a legacy message, as the state of the
		 * target is not actually changed to SD_STATE_OFFLINE.
		 *
		 * If the TUR fails for EACCES (Reservation Conflict),
		 * SD_RESERVED_BY_OTHERS will be returned to indicate
		 * reservation conflict. If the TUR fails for other
		 * reasons, SD_NOT_READY_VALID will be returned.
		 */
		int err;

		mutex_exit(SD_MUTEX(un));
		err = sd_send_scsi_TEST_UNIT_READY(ssc, 0);
		mutex_enter(SD_MUTEX(un));

		if (err != 0) {
			mutex_exit(SD_MUTEX(un));
			cmlb_invalidate(un->un_cmlbhandle,
			    (void *)SD_PATH_DIRECT);
			mutex_enter(SD_MUTEX(un));
			if (err == EACCES) {
				scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
				    "reservation conflict\n");
				rval = SD_RESERVED_BY_OTHERS;
				sd_ssc_assessment(ssc, SD_FMT_IGNORE);
			} else {
				scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
				    "drive offline\n");
				rval = SD_NOT_READY_VALID;
				sd_ssc_assessment(ssc, SD_FMT_STATUS_CHECK);
			}
			goto done;
		}
	}

	if (un->un_f_format_in_progress == FALSE) {
		mutex_exit(SD_MUTEX(un));

		/* Validate that the partition exists in the current label. */
		if (cmlb_partinfo(un->un_cmlbhandle, part, NULL, NULL, NULL,
		    NULL, (void *) SD_PATH_DIRECT) != 0) {
			rval = SD_NOT_READY_VALID;
			mutex_enter(SD_MUTEX(un));

			goto done;
		}
		if (un->un_f_pkstats_enabled) {
			sd_set_pstats(un);
			SD_TRACE(SD_LOG_IO_PARTITION, un,
			    "sd_ready_and_valid: un:0x%p pstats created and "
			    "set\n", un);
		}
		mutex_enter(SD_MUTEX(un));
	}

	/*
	 * If this device supports the DOOR_LOCK command, try and send
	 * this command to PREVENT MEDIA REMOVAL, but don't get upset
	 * if it fails. For a CD, however, it is an error.
	 */
	if (un->un_f_doorlock_supported) {
		mutex_exit(SD_MUTEX(un));
		status = sd_send_scsi_DOORLOCK(ssc, SD_REMOVAL_PREVENT,
		    SD_PATH_DIRECT);

		if ((status != 0) && ISCD(un)) {
			rval = SD_NOT_READY_VALID;
			mutex_enter(SD_MUTEX(un));

			sd_ssc_assessment(ssc, SD_FMT_IGNORE);

			goto done;
		} else if (status != 0)
			sd_ssc_assessment(ssc, SD_FMT_IGNORE);
		mutex_enter(SD_MUTEX(un));
	}

	/* The state has changed, inform the media watch routines */
	un->un_mediastate = DKIO_INSERTED;
	cv_broadcast(&un->un_state_cv);
	rval = SD_READY_VALID;

done:

	/*
	 * Initialize the capacity kstat value, if no media previously
	 * (capacity kstat is 0) and a media has been inserted
	 * (un_blockcount > 0).
	 */
	if (un->un_errstats != NULL) {
		stp = (struct sd_errstats *)un->un_errstats->ks_data;
		if ((stp->sd_capacity.value.ui64 == 0) &&
		    (un->un_f_blockcount_is_valid == TRUE)) {
			stp->sd_capacity.value.ui64 =
			    (uint64_t)((uint64_t)un->un_blockcount *
			    un->un_sys_blocksize);
		}
	}

	mutex_exit(SD_MUTEX(un));
	return (rval);
}


/*
 * Function:	sdmin
 *
 * Description: Routine to limit the size of a data transfer. Used in
 *		conjunction with physio(9F).
 *
 * Arguments:	bp - pointer to the indicated buf(9S) struct.
 *
 * Context:	Kernel thread context.
10355 */ 10356 10357 static void 10358 sdmin(struct buf *bp) 10359 { 10360 struct sd_lun *un; 10361 int instance; 10362 10363 instance = SDUNIT(bp->b_edev); 10364 10365 un = ddi_get_soft_state(sd_state, instance); 10366 ASSERT(un != NULL); 10367 10368 if (bp->b_bcount > un->un_max_xfer_size) { 10369 bp->b_bcount = un->un_max_xfer_size; 10370 } 10371 } 10372 10373 10374 /* 10375 * Function: sdread 10376 * 10377 * Description: Driver's read(9e) entry point function. 10378 * 10379 * Arguments: dev - device number 10380 * uio - structure pointer describing where data is to be stored 10381 * in user's space 10382 * cred_p - user credential pointer 10383 * 10384 * Return Code: ENXIO 10385 * EIO 10386 * EINVAL 10387 * value returned by physio 10388 * 10389 * Context: Kernel thread context. 10390 */ 10391 /* ARGSUSED */ 10392 static int 10393 sdread(dev_t dev, struct uio *uio, cred_t *cred_p) 10394 { 10395 struct sd_lun *un = NULL; 10396 int secmask; 10397 int err = 0; 10398 sd_ssc_t *ssc; 10399 10400 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 10401 return (ENXIO); 10402 } 10403 10404 ASSERT(!mutex_owned(SD_MUTEX(un))); 10405 10406 10407 if (!SD_IS_VALID_LABEL(un) && !ISCD(un)) { 10408 mutex_enter(SD_MUTEX(un)); 10409 /* 10410 * Because the call to sd_ready_and_valid will issue I/O we 10411 * must wait here if either the device is suspended or 10412 * if it's power level is changing. 
10413 */ 10414 while ((un->un_state == SD_STATE_SUSPENDED) || 10415 (un->un_state == SD_STATE_PM_CHANGING)) { 10416 cv_wait(&un->un_suspend_cv, SD_MUTEX(un)); 10417 } 10418 un->un_ncmds_in_driver++; 10419 mutex_exit(SD_MUTEX(un)); 10420 10421 /* Initialize sd_ssc_t for internal uscsi commands */ 10422 ssc = sd_ssc_init(un); 10423 if ((sd_ready_and_valid(ssc, SDPART(dev))) != SD_READY_VALID) { 10424 err = EIO; 10425 } else { 10426 err = 0; 10427 } 10428 sd_ssc_fini(ssc); 10429 10430 mutex_enter(SD_MUTEX(un)); 10431 un->un_ncmds_in_driver--; 10432 ASSERT(un->un_ncmds_in_driver >= 0); 10433 mutex_exit(SD_MUTEX(un)); 10434 if (err != 0) 10435 return (err); 10436 } 10437 10438 /* 10439 * Read requests are restricted to multiples of the system block size. 10440 */ 10441 secmask = un->un_sys_blocksize - 1; 10442 10443 if (uio->uio_loffset & ((offset_t)(secmask))) { 10444 SD_ERROR(SD_LOG_READ_WRITE, un, 10445 "sdread: file offset not modulo %d\n", 10446 un->un_sys_blocksize); 10447 err = EINVAL; 10448 } else if (uio->uio_iov->iov_len & (secmask)) { 10449 SD_ERROR(SD_LOG_READ_WRITE, un, 10450 "sdread: transfer length not modulo %d\n", 10451 un->un_sys_blocksize); 10452 err = EINVAL; 10453 } else { 10454 err = physio(sdstrategy, NULL, dev, B_READ, sdmin, uio); 10455 } 10456 10457 return (err); 10458 } 10459 10460 10461 /* 10462 * Function: sdwrite 10463 * 10464 * Description: Driver's write(9e) entry point function. 10465 * 10466 * Arguments: dev - device number 10467 * uio - structure pointer describing where data is stored in 10468 * user's space 10469 * cred_p - user credential pointer 10470 * 10471 * Return Code: ENXIO 10472 * EIO 10473 * EINVAL 10474 * value returned by physio 10475 * 10476 * Context: Kernel thread context. 
10477 */ 10478 /* ARGSUSED */ 10479 static int 10480 sdwrite(dev_t dev, struct uio *uio, cred_t *cred_p) 10481 { 10482 struct sd_lun *un = NULL; 10483 int secmask; 10484 int err = 0; 10485 sd_ssc_t *ssc; 10486 10487 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 10488 return (ENXIO); 10489 } 10490 10491 ASSERT(!mutex_owned(SD_MUTEX(un))); 10492 10493 if (!SD_IS_VALID_LABEL(un) && !ISCD(un)) { 10494 mutex_enter(SD_MUTEX(un)); 10495 /* 10496 * Because the call to sd_ready_and_valid will issue I/O we 10497 * must wait here if either the device is suspended or 10498 * if it's power level is changing. 10499 */ 10500 while ((un->un_state == SD_STATE_SUSPENDED) || 10501 (un->un_state == SD_STATE_PM_CHANGING)) { 10502 cv_wait(&un->un_suspend_cv, SD_MUTEX(un)); 10503 } 10504 un->un_ncmds_in_driver++; 10505 mutex_exit(SD_MUTEX(un)); 10506 10507 /* Initialize sd_ssc_t for internal uscsi commands */ 10508 ssc = sd_ssc_init(un); 10509 if ((sd_ready_and_valid(ssc, SDPART(dev))) != SD_READY_VALID) { 10510 err = EIO; 10511 } else { 10512 err = 0; 10513 } 10514 sd_ssc_fini(ssc); 10515 10516 mutex_enter(SD_MUTEX(un)); 10517 un->un_ncmds_in_driver--; 10518 ASSERT(un->un_ncmds_in_driver >= 0); 10519 mutex_exit(SD_MUTEX(un)); 10520 if (err != 0) 10521 return (err); 10522 } 10523 10524 /* 10525 * Write requests are restricted to multiples of the system block size. 
10526 */ 10527 secmask = un->un_sys_blocksize - 1; 10528 10529 if (uio->uio_loffset & ((offset_t)(secmask))) { 10530 SD_ERROR(SD_LOG_READ_WRITE, un, 10531 "sdwrite: file offset not modulo %d\n", 10532 un->un_sys_blocksize); 10533 err = EINVAL; 10534 } else if (uio->uio_iov->iov_len & (secmask)) { 10535 SD_ERROR(SD_LOG_READ_WRITE, un, 10536 "sdwrite: transfer length not modulo %d\n", 10537 un->un_sys_blocksize); 10538 err = EINVAL; 10539 } else { 10540 err = physio(sdstrategy, NULL, dev, B_WRITE, sdmin, uio); 10541 } 10542 10543 return (err); 10544 } 10545 10546 10547 /* 10548 * Function: sdaread 10549 * 10550 * Description: Driver's aread(9e) entry point function. 10551 * 10552 * Arguments: dev - device number 10553 * aio - structure pointer describing where data is to be stored 10554 * cred_p - user credential pointer 10555 * 10556 * Return Code: ENXIO 10557 * EIO 10558 * EINVAL 10559 * value returned by aphysio 10560 * 10561 * Context: Kernel thread context. 10562 */ 10563 /* ARGSUSED */ 10564 static int 10565 sdaread(dev_t dev, struct aio_req *aio, cred_t *cred_p) 10566 { 10567 struct sd_lun *un = NULL; 10568 struct uio *uio = aio->aio_uio; 10569 int secmask; 10570 int err = 0; 10571 sd_ssc_t *ssc; 10572 10573 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 10574 return (ENXIO); 10575 } 10576 10577 ASSERT(!mutex_owned(SD_MUTEX(un))); 10578 10579 if (!SD_IS_VALID_LABEL(un) && !ISCD(un)) { 10580 mutex_enter(SD_MUTEX(un)); 10581 /* 10582 * Because the call to sd_ready_and_valid will issue I/O we 10583 * must wait here if either the device is suspended or 10584 * if it's power level is changing. 
10585 */ 10586 while ((un->un_state == SD_STATE_SUSPENDED) || 10587 (un->un_state == SD_STATE_PM_CHANGING)) { 10588 cv_wait(&un->un_suspend_cv, SD_MUTEX(un)); 10589 } 10590 un->un_ncmds_in_driver++; 10591 mutex_exit(SD_MUTEX(un)); 10592 10593 /* Initialize sd_ssc_t for internal uscsi commands */ 10594 ssc = sd_ssc_init(un); 10595 if ((sd_ready_and_valid(ssc, SDPART(dev))) != SD_READY_VALID) { 10596 err = EIO; 10597 } else { 10598 err = 0; 10599 } 10600 sd_ssc_fini(ssc); 10601 10602 mutex_enter(SD_MUTEX(un)); 10603 un->un_ncmds_in_driver--; 10604 ASSERT(un->un_ncmds_in_driver >= 0); 10605 mutex_exit(SD_MUTEX(un)); 10606 if (err != 0) 10607 return (err); 10608 } 10609 10610 /* 10611 * Read requests are restricted to multiples of the system block size. 10612 */ 10613 secmask = un->un_sys_blocksize - 1; 10614 10615 if (uio->uio_loffset & ((offset_t)(secmask))) { 10616 SD_ERROR(SD_LOG_READ_WRITE, un, 10617 "sdaread: file offset not modulo %d\n", 10618 un->un_sys_blocksize); 10619 err = EINVAL; 10620 } else if (uio->uio_iov->iov_len & (secmask)) { 10621 SD_ERROR(SD_LOG_READ_WRITE, un, 10622 "sdaread: transfer length not modulo %d\n", 10623 un->un_sys_blocksize); 10624 err = EINVAL; 10625 } else { 10626 err = aphysio(sdstrategy, anocancel, dev, B_READ, sdmin, aio); 10627 } 10628 10629 return (err); 10630 } 10631 10632 10633 /* 10634 * Function: sdawrite 10635 * 10636 * Description: Driver's awrite(9e) entry point function. 10637 * 10638 * Arguments: dev - device number 10639 * aio - structure pointer describing where data is stored 10640 * cred_p - user credential pointer 10641 * 10642 * Return Code: ENXIO 10643 * EIO 10644 * EINVAL 10645 * value returned by aphysio 10646 * 10647 * Context: Kernel thread context. 
 */
/* ARGSUSED */
static int
sdawrite(dev_t dev, struct aio_req *aio, cred_t *cred_p)
{
	struct sd_lun	*un = NULL;
	struct uio	*uio = aio->aio_uio;
	int		secmask;
	int		err = 0;
	sd_ssc_t	*ssc;

	if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) {
		return (ENXIO);
	}

	ASSERT(!mutex_owned(SD_MUTEX(un)));

	if (!SD_IS_VALID_LABEL(un) && !ISCD(un)) {
		mutex_enter(SD_MUTEX(un));
		/*
		 * Because the call to sd_ready_and_valid will issue I/O we
		 * must wait here if either the device is suspended or
		 * if its power level is changing.
		 */
		while ((un->un_state == SD_STATE_SUSPENDED) ||
		    (un->un_state == SD_STATE_PM_CHANGING)) {
			cv_wait(&un->un_suspend_cv, SD_MUTEX(un));
		}
		/* Account for the command so DDI_SUSPEND can drain us. */
		un->un_ncmds_in_driver++;
		mutex_exit(SD_MUTEX(un));

		/* Initialize sd_ssc_t for internal uscsi commands */
		ssc = sd_ssc_init(un);
		if ((sd_ready_and_valid(ssc, SDPART(dev))) != SD_READY_VALID) {
			err = EIO;
		} else {
			err = 0;
		}
		sd_ssc_fini(ssc);

		mutex_enter(SD_MUTEX(un));
		un->un_ncmds_in_driver--;
		ASSERT(un->un_ncmds_in_driver >= 0);
		mutex_exit(SD_MUTEX(un));
		if (err != 0)
			return (err);
	}

	/*
	 * Write requests are restricted to multiples of the system block
	 * size; both the file offset and the transfer length must be aligned.
	 */
	secmask = un->un_sys_blocksize - 1;

	if (uio->uio_loffset & ((offset_t)(secmask))) {
		SD_ERROR(SD_LOG_READ_WRITE, un,
		    "sdawrite: file offset not modulo %d\n",
		    un->un_sys_blocksize);
		err = EINVAL;
	} else if (uio->uio_iov->iov_len & (secmask)) {
		SD_ERROR(SD_LOG_READ_WRITE, un,
		    "sdawrite: transfer length not modulo %d\n",
		    un->un_sys_blocksize);
		err = EINVAL;
	} else {
		err = aphysio(sdstrategy, anocancel, dev, B_WRITE, sdmin, aio);
	}

	return (err);
}




/*
 * Driver IO processing follows the following sequence:
 *
 *     sdioctl(9E)     sdstrategy(9E)         biodone(9F)
 *         |                |                     ^
 *         v                v                     |
 * sd_send_scsi_cmd()  ddi_xbuf_qstrategy()       +-------------------+
 *         |                |                     |                   |
 *         v                |                     |                   |
 * sd_uscsi_strategy() sd_xbuf_strategy()   sd_buf_iodone()   sd_uscsi_iodone()
 *         |                |                     ^                   ^
 *         v                v                     |                   |
 * SD_BEGIN_IOSTART()  SD_BEGIN_IOSTART()         |                   |
 *         |                |                     |                   |
 *     +---+                |                     +------------+      +-------+
 *     |                    |                                  |              |
 *     |   SD_NEXT_IOSTART()|                  SD_NEXT_IODONE()|              |
 *     |                    v                                  |              |
 *     |      sd_mapblockaddr_iostart()     sd_mapblockaddr_iodone()          |
 *     |                    |                                  ^              |
 *     |   SD_NEXT_IOSTART()|                  SD_NEXT_IODONE()|              |
 *     |                    v                                  |              |
 *     |      sd_mapblocksize_iostart()     sd_mapblocksize_iodone()          |
 *     |                    |                                  ^              |
 *     |   SD_NEXT_IOSTART()|                  SD_NEXT_IODONE()|              |
 *     |                    v                                  |              |
 *     |        sd_checksum_iostart()         sd_checksum_iodone()            |
 *     |                    |                                  ^              |
 *     +-> SD_NEXT_IOSTART()|                  SD_NEXT_IODONE()+------------->+
 *     |                    v                                  |              |
 *     |          sd_pm_iostart()               sd_pm_iodone()                |
 *     |                    |                                  ^              |
 *     |                    |                                  |              |
 *     +-> SD_NEXT_IOSTART()|              SD_BEGIN_IODONE()--+---------------+
 *     |                    v                                 |
 *     |        sd_core_iostart()                             |
 *     |                    |                                 |
 *     |                    |                      +------>(*destroypkt)()
 *     +-> sd_start_cmds() <-+                     |          |
 *     |                    |                      |          v
 *     |                    |                      |  scsi_destroy_pkt(9F)
 *     |                    |                      |
 *     +->(*initpkt)()      +- sdintr()
 *     |  |                 |  |
 *     |  +-> 
 * Note: It needs to be explored if we can use a single taskq in future
 */
#define	SD_WMR_TASKQ_NUMTHREADS	1
static taskq_t	*sd_wmr_tq = NULL;
_NOTE(SCHEME_PROTECTS_DATA("stable data", sd_wmr_tq))

/*
 * Function:	sd_taskq_create
 *
 * Description: Create taskq thread(s) and preallocate task entries
 *
 * Return Code: Returns a pointer to the allocated taskq_t.
 *
 * Context:	Can sleep. Requires blockable context.
 *
 * Notes:	- The taskq() facility currently is NOT part of the DDI.
 *		  (definitely NOT recommended for 3rd-party drivers!) :-)
 *		- taskq_create() will block for memory, also it will panic
 *		  if it cannot create the requested number of threads.
 *		- Currently taskq_create() creates threads that cannot be
 *		  swapped.
 *		- We use TASKQ_PREPOPULATE to ensure we have an adequate
 *		  supply of taskq entries at interrupt time (ie, so that we
 *		  do not have to sleep for memory)
 */

static void
sd_taskq_create(void)
{
	char	taskq_name[TASKQ_NAMELEN];

	/* Must only be called once, at _init(9E) time. */
	ASSERT(sd_tq == NULL);
	ASSERT(sd_wmr_tq == NULL);

	/* General-purpose driver taskq: "<label>_drv_taskq" */
	(void) snprintf(taskq_name, sizeof (taskq_name),
	    "%s_drv_taskq", sd_label);
	sd_tq = (taskq_create(taskq_name, SD_TASKQ_NUMTHREADS,
	    (v.v_maxsyspri - 2), sd_taskq_minalloc, sd_taskq_maxalloc,
	    TASKQ_PREPOPULATE));

	/* Read-modify-write taskq for non-512 block devices */
	(void) snprintf(taskq_name, sizeof (taskq_name),
	    "%s_rmw_taskq", sd_label);
	sd_wmr_tq = (taskq_create(taskq_name, SD_WMR_TASKQ_NUMTHREADS,
	    (v.v_maxsyspri - 2), sd_taskq_minalloc, sd_taskq_maxalloc,
	    TASKQ_PREPOPULATE));
}


/*
 * Function:	sd_taskq_delete
 *
 * Description: Complementary cleanup routine for sd_taskq_create().
 *
 * Context:	Kernel thread context.
 */

static void
sd_taskq_delete(void)
{
	/* Must only be called once, at _fini(9E) time. */
	ASSERT(sd_tq != NULL);
	ASSERT(sd_wmr_tq != NULL);
	taskq_destroy(sd_tq);
	taskq_destroy(sd_wmr_tq);
	sd_tq = NULL;
	sd_wmr_tq = NULL;
}


/*
 * Function:	sdstrategy
 *
 * Description: Driver's strategy (9E) entry point function.
 *
 * Arguments:	bp - pointer to buf(9S)
 *
 * Return Code: Always returns zero
 *
 * Context:	Kernel thread context.
 */

static int
sdstrategy(struct buf *bp)
{
	struct sd_lun *un;

	un = ddi_get_soft_state(sd_state, SD_GET_INSTANCE_FROM_BUF(bp));
	if (un == NULL) {
		/* No unit: fail the buf rather than panic. */
		bioerror(bp, EIO);
		bp->b_resid = bp->b_bcount;
		biodone(bp);
		return (0);
	}
	/* As was done in the past, fail new cmds. if state is dumping. */
	if (un->un_state == SD_STATE_DUMPING) {
		bioerror(bp, ENXIO);
		bp->b_resid = bp->b_bcount;
		biodone(bp);
		return (0);
	}

	ASSERT(!mutex_owned(SD_MUTEX(un)));

	/*
	 * Commands may sneak in while we released the mutex in
	 * DDI_SUSPEND, we should block new commands. However, old
	 * commands that are still in the driver at this point should
	 * still be allowed to drain.
	 */
	mutex_enter(SD_MUTEX(un));
	/*
	 * Must wait here if either the device is suspended or
	 * if its power level is changing.
	 */
	while ((un->un_state == SD_STATE_SUSPENDED) ||
	    (un->un_state == SD_STATE_PM_CHANGING)) {
		cv_wait(&un->un_suspend_cv, SD_MUTEX(un));
	}

	un->un_ncmds_in_driver++;

	/*
	 * atapi: Since we are running the CD for now in PIO mode we need to
	 * call bp_mapin here to avoid bp_mapin being called in interrupt
	 * context under the HBA's init_pkt routine.
	 */
	if (un->un_f_cfg_is_atapi == TRUE) {
		mutex_exit(SD_MUTEX(un));
		bp_mapin(bp);
		mutex_enter(SD_MUTEX(un));
	}
	SD_INFO(SD_LOG_IO, un, "sdstrategy: un_ncmds_in_driver = %ld\n",
	    un->un_ncmds_in_driver);

	/* A write indicates a (possibly) dirty cache that may need a flush. */
	if (bp->b_flags & B_WRITE)
		un->un_f_sync_cache_required = TRUE;

	mutex_exit(SD_MUTEX(un));

	/*
	 * This will (eventually) allocate the sd_xbuf area and
	 * call sd_xbuf_strategy(). We just want to return the
	 * result of ddi_xbuf_qstrategy so that we have an opt-
	 * imized tail call which saves us a stack frame.
	 */
	return (ddi_xbuf_qstrategy(bp, un->un_xbuf_attr));
}


/*
 * Function:	sd_xbuf_strategy
 *
 * Description: Function for initiating IO operations via the
 *		ddi_xbuf_qstrategy() mechanism.
 *
 * Context:	Kernel thread context.
 */

static void
sd_xbuf_strategy(struct buf *bp, ddi_xbuf_t xp, void *arg)
{
	struct sd_lun *un = arg;

	ASSERT(bp != NULL);
	ASSERT(xp != NULL);
	ASSERT(un != NULL);
	ASSERT(!mutex_owned(SD_MUTEX(un)));

	/*
	 * Initialize the fields in the xbuf and save a pointer to the
	 * xbuf in bp->b_private.
	 */
	sd_xbuf_init(un, bp, xp, SD_CHAIN_BUFIO, NULL);

	/* Send the buf down the iostart chain */
	SD_BEGIN_IOSTART(((struct sd_xbuf *)xp)->xb_chain_iostart, un, bp);
}


/*
 * Function:	sd_xbuf_init
 *
 * Description: Prepare the given sd_xbuf struct for use.
 *
 * Arguments: un - ptr to softstate
 *		bp - ptr to associated buf(9S)
 *		xp - ptr to associated sd_xbuf
 *		chain_type - IO chain type to use:
 *			SD_CHAIN_NULL
 *			SD_CHAIN_BUFIO
 *			SD_CHAIN_USCSI
 *			SD_CHAIN_DIRECT
 *			SD_CHAIN_DIRECT_PRIORITY
 *		pktinfop - ptr to private data struct for scsi_pkt(9S)
 *			initialization; may be NULL if none.
 *
 * Context: Kernel thread context
 */

static void
sd_xbuf_init(struct sd_lun *un, struct buf *bp, struct sd_xbuf *xp,
	uchar_t chain_type, void *pktinfop)
{
	int index;

	ASSERT(un != NULL);
	ASSERT(bp != NULL);
	ASSERT(xp != NULL);

	SD_INFO(SD_LOG_IO, un, "sd_xbuf_init: buf:0x%p chain type:0x%x\n",
	    bp, chain_type);

	xp->xb_un = un;
	xp->xb_pktp = NULL;
	xp->xb_pktinfo = pktinfop;
	/*
	 * Save the caller's b_private; it is restored by the iodone side
	 * (e.g. sd_uscsi_iodone) before the buf is returned.
	 */
	xp->xb_private = bp->b_private;
	xp->xb_blkno = (daddr_t)bp->b_blkno;

	/*
	 * Set up the iostart and iodone chain indexes in the xbuf, based
	 * upon the specified chain type to use.
	 */
	switch (chain_type) {
	case SD_CHAIN_NULL:
		/*
		 * Fall thru to just use the values for the buf type, even
		 * tho for the NULL chain these values will never be used.
		 */
		/* FALLTHRU */
	case SD_CHAIN_BUFIO:
		index = un->un_buf_chain_type;
		break;
	case SD_CHAIN_USCSI:
		index = un->un_uscsi_chain_type;
		break;
	case SD_CHAIN_DIRECT:
		index = un->un_direct_chain_type;
		break;
	case SD_CHAIN_DIRECT_PRIORITY:
		index = un->un_priority_chain_type;
		break;
	default:
		/* We're really broken if we ever get here... */
		panic("sd_xbuf_init: illegal chain type!");
		/*NOTREACHED*/
	}

	xp->xb_chain_iostart = sd_chain_index_map[index].sci_iostart_index;
	xp->xb_chain_iodone = sd_chain_index_map[index].sci_iodone_index;

	/*
	 * It might be a bit easier to simply bzero the entire xbuf above,
	 * but it turns out that since we init a fair number of members anyway,
	 * we save a fair number cycles by doing explicit assignment of zero.
	 */
	xp->xb_pkt_flags = 0;
	xp->xb_dma_resid = 0;
	xp->xb_retry_count = 0;
	xp->xb_victim_retry_count = 0;
	xp->xb_ua_retry_count = 0;
	xp->xb_nr_retry_count = 0;
	xp->xb_sense_bp = NULL;
	xp->xb_sense_status = 0;
	xp->xb_sense_state = 0;
	xp->xb_sense_resid = 0;
	xp->xb_ena = 0;

	bp->b_private = xp;
	bp->b_flags &= ~(B_DONE | B_ERROR);
	bp->b_resid = 0;
	bp->av_forw = NULL;
	bp->av_back = NULL;
	bioerror(bp, 0);

	SD_INFO(SD_LOG_IO, un, "sd_xbuf_init: done.\n");
}


/*
 * Function: sd_uscsi_strategy
 *
 * Description: Wrapper for calling into the USCSI chain via physio(9F)
 *
 * Arguments: bp - buf struct ptr
 *
 * Return Code: Always returns 0
 *
 * Context: Kernel thread context
 */

static int
sd_uscsi_strategy(struct buf *bp)
{
	struct sd_lun		*un;
	struct sd_uscsi_info	*uip;
	struct sd_xbuf		*xp;
	uchar_t			chain_type;
	uchar_t			cmd;

	ASSERT(bp != NULL);

	un = ddi_get_soft_state(sd_state, SD_GET_INSTANCE_FROM_BUF(bp));
	if (un == NULL) {
		bioerror(bp, EIO);
		bp->b_resid = bp->b_bcount;
		biodone(bp);
		return (0);
	}

	ASSERT(!mutex_owned(SD_MUTEX(un)));

	SD_TRACE(SD_LOG_IO, un, "sd_uscsi_strategy: entry: buf:0x%p\n", bp);

	/*
	 * A pointer to a struct sd_uscsi_info is expected in bp->b_private
	 */
	ASSERT(bp->b_private != NULL);
	uip = (struct sd_uscsi_info *)bp->b_private;
	cmd = ((struct uscsi_cmd *)(uip->ui_cmdp))->uscsi_cdb[0];

	mutex_enter(SD_MUTEX(un));
	/*
	 * atapi: Since we are running the CD for now in PIO mode we need to
	 * call bp_mapin here to avoid bp_mapin called interrupt context under
	 * the HBA's init_pkt routine.
	 */
	if (un->un_f_cfg_is_atapi == TRUE) {
		mutex_exit(SD_MUTEX(un));
		bp_mapin(bp);
		mutex_enter(SD_MUTEX(un));
	}
	un->un_ncmds_in_driver++;
	SD_INFO(SD_LOG_IO, un, "sd_uscsi_strategy: un_ncmds_in_driver = %ld\n",
	    un->un_ncmds_in_driver);

	/*
	 * A data-out command may dirty the media; remember that a cache
	 * flush will be needed. Mode selects are excluded since they do
	 * not write user data to the media.
	 */
	if ((bp->b_flags & B_WRITE) && (bp->b_bcount != 0) &&
	    (cmd != SCMD_MODE_SELECT) && (cmd != SCMD_MODE_SELECT_G1))
		un->un_f_sync_cache_required = TRUE;

	mutex_exit(SD_MUTEX(un));

	switch (uip->ui_flags) {
	case SD_PATH_DIRECT:
		chain_type = SD_CHAIN_DIRECT;
		break;
	case SD_PATH_DIRECT_PRIORITY:
		chain_type = SD_CHAIN_DIRECT_PRIORITY;
		break;
	default:
		chain_type = SD_CHAIN_USCSI;
		break;
	}

	/*
	 * We may allocate extra buf for external USCSI commands. If the
	 * application asks for bigger than 20-byte sense data via USCSI,
	 * SCSA layer will allocate 252 bytes sense buf for that command.
	 */
	if (((struct uscsi_cmd *)(uip->ui_cmdp))->uscsi_rqlen >
	    SENSE_LENGTH) {
		xp = kmem_zalloc(sizeof (struct sd_xbuf) - SENSE_LENGTH +
		    MAX_SENSE_LENGTH, KM_SLEEP);
	} else {
		xp = kmem_zalloc(sizeof (struct sd_xbuf), KM_SLEEP);
	}

	sd_xbuf_init(un, bp, xp, chain_type, uip->ui_cmdp);

	/* Use the index obtained within xbuf_init */
	SD_BEGIN_IOSTART(xp->xb_chain_iostart, un, bp);

	SD_TRACE(SD_LOG_IO, un, "sd_uscsi_strategy: exit: buf:0x%p\n", bp);

	return (0);
}

/*
 * Function: sd_send_scsi_cmd
 *
 * Description: Runs a USCSI command for user (when called thru sdioctl),
 *		or for the driver
 *
 * Arguments: dev - the dev_t for the device
 *		incmd - ptr to a valid uscsi_cmd struct
 *		flag - bit flag, indicating open settings, 32/64 bit type
 *		dataspace - UIO_USERSPACE or UIO_SYSSPACE
 *		path_flag - SD_PATH_DIRECT to use the USCSI "direct" chain and
 *			the normal command waitq, or SD_PATH_DIRECT_PRIORITY
 *			to use the USCSI "direct" chain and bypass the normal
 *			command waitq.
 *
 * Return Code: 0 - successful completion of the given command
 *		EIO - scsi_uscsi_handle_command() failed
 *		ENXIO - soft state not found for specified dev
 *		EINVAL
 *		EFAULT - copyin/copyout error
 *		return code of scsi_uscsi_handle_command():
 *			EIO
 *			ENXIO
 *			EACCES
 *
 * Context: Waits for command to complete. Can sleep.
 */

static int
sd_send_scsi_cmd(dev_t dev, struct uscsi_cmd *incmd, int flag,
	enum uio_seg dataspace, int path_flag)
{
	struct sd_lun	*un;
	sd_ssc_t	*ssc;
	int		rval;

	un = ddi_get_soft_state(sd_state, SDUNIT(dev));
	if (un == NULL) {
		return (ENXIO);
	}

	/*
	 * Using sd_ssc_send to handle uscsi cmd; the ssc wrapper carries
	 * the uscsi_cmd and sd_uscsi_info needed for later assessment.
	 */
	ssc = sd_ssc_init(un);
	rval = sd_ssc_send(ssc, incmd, flag, dataspace, path_flag);
	sd_ssc_fini(ssc);

	return (rval);
}

/*
 * Function: sd_ssc_init
 *
 * Description: Uscsi end-user call this function to initialize necessary
 *		fields, such as uscsi_cmd and sd_uscsi_info struct.
 *
 *		The return value of sd_send_scsi_cmd will be treated as a
 *		fault in various conditions. Even if it is not zero, some
 *		callers may ignore the return value. That is to say, we can
 *		not make an accurate assessment in sdintr, since if a
 *		command is failed in sdintr it does not mean the caller of
 *		sd_send_scsi_cmd will treat it as a real failure.
 *
 *		To avoid printing too many error logs for a failed uscsi
 *		packet that the caller may not treat as a failure, the
 *		sd will keep silent for handling all uscsi commands.
 *
 *		During detach->attach and attach-open, for some types of
 *		problems, the driver should be providing information about
 *		the problem encountered. Device use USCSI_SILENT, which
 *		suppresses all driver information. The result is that no
 *		information about the problem is available. Being
 *		completely silent during this time is inappropriate. The
 *		driver needs a more selective filter than USCSI_SILENT, so
 *		that information related to faults is provided.
 *
 *		To make the accurate assessment, the caller of
 *		sd_send_scsi_USCSI_CMD should take the ownership and
 *		get necessary information to print error messages.
 *
 *		If we want to print necessary info of uscsi command, we need to
 *		keep the uscsi_cmd and sd_uscsi_info till we can make the
 *		assessment. We use sd_ssc_init to alloc necessary
 *		structs for sending an uscsi command and we are also
 *		responsible for free the memory by calling
 *		sd_ssc_fini.
 *
 *		The calling sequences will look like:
 *		sd_ssc_init->
 *
 *			...
 *
 *			sd_send_scsi_USCSI_CMD->
 *				sd_ssc_send->	- - - sdintr
 *			...
 *
 *			if we think the return value should be treated as a
 *			failure, we make the assessment here and print out
 *			necessary by retrieving uscsi_cmd and sd_uscsi_info'
 *
 *			...
 *
 *		sd_ssc_fini
 *
 *
 * Arguments: un - pointer to driver soft state (unit) structure for this
 *		target.
 *
 * Return code: sd_ssc_t - pointer to allocated sd_ssc_t struct, it contains
 *		uscsi_cmd and sd_uscsi_info. (All allocations use KM_SLEEP,
 *		so this never returns NULL.)
 *
 * Context: Kernel Thread.
11332 */ 11333 static sd_ssc_t * 11334 sd_ssc_init(struct sd_lun *un) 11335 { 11336 sd_ssc_t *ssc; 11337 struct uscsi_cmd *ucmdp; 11338 struct sd_uscsi_info *uip; 11339 11340 ASSERT(un != NULL); 11341 ASSERT(!mutex_owned(SD_MUTEX(un))); 11342 11343 /* 11344 * Allocate sd_ssc_t structure 11345 */ 11346 ssc = kmem_zalloc(sizeof (sd_ssc_t), KM_SLEEP); 11347 11348 /* 11349 * Allocate uscsi_cmd by calling scsi_uscsi_alloc common routine 11350 */ 11351 ucmdp = scsi_uscsi_alloc(); 11352 11353 /* 11354 * Allocate sd_uscsi_info structure 11355 */ 11356 uip = kmem_zalloc(sizeof (struct sd_uscsi_info), KM_SLEEP); 11357 11358 ssc->ssc_uscsi_cmd = ucmdp; 11359 ssc->ssc_uscsi_info = uip; 11360 ssc->ssc_un = un; 11361 11362 return (ssc); 11363 } 11364 11365 /* 11366 * Function: sd_ssc_fini 11367 * 11368 * Description: To free sd_ssc_t and it's hanging off 11369 * 11370 * Arguments: ssc - struct pointer of sd_ssc_t. 11371 */ 11372 static void 11373 sd_ssc_fini(sd_ssc_t *ssc) 11374 { 11375 scsi_uscsi_free(ssc->ssc_uscsi_cmd); 11376 11377 if (ssc->ssc_uscsi_info != NULL) { 11378 kmem_free(ssc->ssc_uscsi_info, sizeof (struct sd_uscsi_info)); 11379 ssc->ssc_uscsi_info = NULL; 11380 } 11381 11382 kmem_free(ssc, sizeof (sd_ssc_t)); 11383 ssc = NULL; 11384 } 11385 11386 /* 11387 * Function: sd_ssc_send 11388 * 11389 * Description: Runs a USCSI command for user when called through sdioctl, 11390 * or for the driver. 11391 * 11392 * Arguments: ssc - the struct of sd_ssc_t will bring uscsi_cmd and 11393 * sd_uscsi_info in. 11394 * incmd - ptr to a valid uscsi_cmd struct 11395 * flag - bit flag, indicating open settings, 32/64 bit type 11396 * dataspace - UIO_USERSPACE or UIO_SYSSPACE 11397 * path_flag - SD_PATH_DIRECT to use the USCSI "direct" chain and 11398 * the normal command waitq, or SD_PATH_DIRECT_PRIORITY 11399 * to use the USCSI "direct" chain and bypass the normal 11400 * command waitq. 
11401 * 11402 * Return Code: 0 - successful completion of the given command 11403 * EIO - scsi_uscsi_handle_command() failed 11404 * ENXIO - soft state not found for specified dev 11405 * EINVAL 11406 * EFAULT - copyin/copyout error 11407 * return code of scsi_uscsi_handle_command(): 11408 * EIO 11409 * ENXIO 11410 * EACCES 11411 * 11412 * Context: Kernel Thread; 11413 * Waits for command to complete. Can sleep. 11414 */ 11415 static int 11416 sd_ssc_send(sd_ssc_t *ssc, struct uscsi_cmd *incmd, int flag, 11417 enum uio_seg dataspace, int path_flag) 11418 { 11419 struct sd_uscsi_info *uip; 11420 struct uscsi_cmd *uscmd = ssc->ssc_uscsi_cmd; 11421 struct sd_lun *un; 11422 dev_t dev; 11423 11424 int format = 0; 11425 int rval; 11426 11427 11428 ASSERT(ssc != NULL); 11429 un = ssc->ssc_un; 11430 ASSERT(un != NULL); 11431 ASSERT(!mutex_owned(SD_MUTEX(un))); 11432 ASSERT(!(ssc->ssc_flags & SSC_FLAGS_NEED_ASSESSMENT)); 11433 /* 11434 * We need to make sure sd_ssc_send will have sd_ssc_assessment 11435 * followed to avoid missing any point of telemetry. 
11436 */ 11437 ssc->ssc_flags |= SSC_FLAGS_NEED_ASSESSMENT; 11438 11439 if (uscmd == NULL) { 11440 return (ENXIO); 11441 } 11442 11443 11444 #ifdef SDDEBUG 11445 switch (dataspace) { 11446 case UIO_USERSPACE: 11447 SD_TRACE(SD_LOG_IO, un, 11448 "sd_ssc_send: entry: un:0x%p UIO_USERSPACE\n", un); 11449 break; 11450 case UIO_SYSSPACE: 11451 SD_TRACE(SD_LOG_IO, un, 11452 "sd_ssc_send: entry: un:0x%p UIO_SYSSPACE\n", un); 11453 break; 11454 default: 11455 SD_TRACE(SD_LOG_IO, un, 11456 "sd_ssc_send: entry: un:0x%p UNEXPECTED SPACE\n", un); 11457 break; 11458 } 11459 #endif 11460 11461 rval = scsi_uscsi_copyin((intptr_t)incmd, flag, 11462 SD_ADDRESS(un), &uscmd); 11463 if (rval != 0) { 11464 SD_TRACE(SD_LOG_IO, un, "sd_sense_scsi_cmd: " 11465 "scsi_uscsi_alloc_and_copyin failed\n", un); 11466 return (rval); 11467 } 11468 11469 if ((uscmd->uscsi_cdb != NULL) && 11470 (uscmd->uscsi_cdb[0] == SCMD_FORMAT)) { 11471 mutex_enter(SD_MUTEX(un)); 11472 un->un_f_format_in_progress = TRUE; 11473 mutex_exit(SD_MUTEX(un)); 11474 format = 1; 11475 } 11476 11477 /* 11478 * Allocate an sd_uscsi_info struct and fill it with the info 11479 * needed by sd_initpkt_for_uscsi(). Then put the pointer into 11480 * b_private in the buf for sd_initpkt_for_uscsi(). Note that 11481 * since we allocate the buf here in this function, we do not 11482 * need to preserve the prior contents of b_private. 11483 * The sd_uscsi_info struct is also used by sd_uscsi_strategy() 11484 */ 11485 uip = ssc->ssc_uscsi_info; 11486 uip->ui_flags = path_flag; 11487 uip->ui_cmdp = uscmd; 11488 11489 /* 11490 * Commands sent with priority are intended for error recovery 11491 * situations, and do not have retries performed. 
11492 */ 11493 if (path_flag == SD_PATH_DIRECT_PRIORITY) { 11494 uscmd->uscsi_flags |= USCSI_DIAGNOSE; 11495 } 11496 uscmd->uscsi_flags &= ~USCSI_NOINTR; 11497 11498 dev = SD_GET_DEV(un); 11499 rval = scsi_uscsi_handle_cmd(dev, dataspace, uscmd, 11500 sd_uscsi_strategy, NULL, uip); 11501 11502 /* 11503 * mark ssc_flags right after handle_cmd to make sure 11504 * the uscsi has been sent 11505 */ 11506 ssc->ssc_flags |= SSC_FLAGS_CMD_ISSUED; 11507 11508 #ifdef SDDEBUG 11509 SD_INFO(SD_LOG_IO, un, "sd_ssc_send: " 11510 "uscsi_status: 0x%02x uscsi_resid:0x%x\n", 11511 uscmd->uscsi_status, uscmd->uscsi_resid); 11512 if (uscmd->uscsi_bufaddr != NULL) { 11513 SD_INFO(SD_LOG_IO, un, "sd_ssc_send: " 11514 "uscmd->uscsi_bufaddr: 0x%p uscmd->uscsi_buflen:%d\n", 11515 uscmd->uscsi_bufaddr, uscmd->uscsi_buflen); 11516 if (dataspace == UIO_SYSSPACE) { 11517 SD_DUMP_MEMORY(un, SD_LOG_IO, 11518 "data", (uchar_t *)uscmd->uscsi_bufaddr, 11519 uscmd->uscsi_buflen, SD_LOG_HEX); 11520 } 11521 } 11522 #endif 11523 11524 if (format == 1) { 11525 mutex_enter(SD_MUTEX(un)); 11526 un->un_f_format_in_progress = FALSE; 11527 mutex_exit(SD_MUTEX(un)); 11528 } 11529 11530 (void) scsi_uscsi_copyout((intptr_t)incmd, uscmd); 11531 11532 return (rval); 11533 } 11534 11535 /* 11536 * Function: sd_ssc_print 11537 * 11538 * Description: Print information available to the console. 11539 * 11540 * Arguments: ssc - the struct of sd_ssc_t will bring uscsi_cmd and 11541 * sd_uscsi_info in. 11542 * sd_severity - log level. 11543 * Context: Kernel thread or interrupt context. 
 */
static void
sd_ssc_print(sd_ssc_t *ssc, int sd_severity)
{
	struct uscsi_cmd	*ucmdp;
	struct scsi_device	*devp;
	dev_info_t		*devinfo;
	uchar_t			*sensep;
	int			senlen;
	union scsi_cdb		*cdbp;
	uchar_t			com;
	extern struct scsi_key_strings scsi_cmds[];

	ASSERT(ssc != NULL);

	ucmdp = ssc->ssc_uscsi_cmd;
	devp = SD_SCSI_DEVP(ssc->ssc_un);
	devinfo = SD_DEVINFO(ssc->ssc_un);
	ASSERT(ucmdp != NULL);
	ASSERT(devp != NULL);
	ASSERT(devinfo != NULL);
	sensep = (uint8_t *)ucmdp->uscsi_rqbuf;
	/* Actual sense bytes returned = requested length - residual. */
	senlen = ucmdp->uscsi_rqlen - ucmdp->uscsi_rqresid;
	cdbp = (union scsi_cdb *)ucmdp->uscsi_cdb;

	/* In certain case (like DOORLOCK), the cdb could be NULL. */
	if (cdbp == NULL)
		return;
	/* We don't print log if no sense data available. */
	if (senlen == 0)
		sensep = NULL;
	com = cdbp->scc_cmd;
	scsi_generic_errmsg(devp, sd_label, sd_severity, 0, 0, com,
	    scsi_cmds, sensep, ssc->ssc_un->un_additional_codes, NULL);
}

/*
 * Function: sd_ssc_assessment
 *
 * Description: We use this function to make an assessment at the point
 *		where SD driver may encounter a potential error.
 *
 * Arguments: ssc - the struct of sd_ssc_t will bring uscsi_cmd and
 *		sd_uscsi_info in.
 *		tp_assess - a hint of strategy for ereport posting.
 *		Possible values of tp_assess include:
 *		SD_FMT_IGNORE - we don't post any ereport because we're
 *		sure that it is ok to ignore the underlying problems.
 *		SD_FMT_IGNORE_COMPROMISE - we don't post any ereport for now
 *		but it might be not correct to ignore the underlying hardware
 *		error.
 *		SD_FMT_STATUS_CHECK - we will post an ereport with the
 *		payload driver-assessment of value "fail" or
 *		"fatal" (depending on what information we have here). This
 *		assessment value is usually set when SD driver thinks there
 *		is a potential error occurred (typically, when return value
 *		of the SCSI command is EIO).
 *		SD_FMT_STANDARD - we will post an ereport with the payload
 *		driver-assessment of value "info". This assessment value is
 *		set when the SCSI command returned successfully and with
 *		sense data sent back.
 *
 * Context: Kernel thread.
 */
static void
sd_ssc_assessment(sd_ssc_t *ssc, enum sd_type_assessment tp_assess)
{
	int senlen = 0;
	struct uscsi_cmd *ucmdp = NULL;
	struct sd_lun *un;

	ASSERT(ssc != NULL);
	/* Every sd_ssc_send() must be paired with exactly one assessment. */
	ASSERT(ssc->ssc_flags & SSC_FLAGS_NEED_ASSESSMENT);

	ssc->ssc_flags &= ~SSC_FLAGS_NEED_ASSESSMENT;
	un = ssc->ssc_un;
	ASSERT(un != NULL);

	/*
	 * We don't handle CD-ROM, and removable media
	 */
	if (ISCD(un) || un->un_f_has_removable_media) {
		ssc->ssc_flags &= ~SSC_FLAGS_CMD_ISSUED;
		ssc->ssc_flags &= ~SSC_FLAGS_INVALID_DATA;
		return;
	}

	/*
	 * Only handle an issued command which is waiting for assessment.
	 */
	if (!(ssc->ssc_flags & SSC_FLAGS_CMD_ISSUED)) {
		sd_ssc_print(ssc, SCSI_ERR_INFO);
		return;
	} else
		ssc->ssc_flags &= ~SSC_FLAGS_CMD_ISSUED;

	ucmdp = ssc->ssc_uscsi_cmd;
	ASSERT(ucmdp != NULL);

	/*
	 * We will not deal with non-retryable commands here.
	 */
	if (ucmdp->uscsi_flags & USCSI_DIAGNOSE) {
		ssc->ssc_flags &= ~SSC_FLAGS_INVALID_DATA;
		return;
	}

	switch (tp_assess) {
	case SD_FMT_IGNORE:
	case SD_FMT_IGNORE_COMPROMISE:
		ssc->ssc_flags &= ~SSC_FLAGS_INVALID_DATA;
		break;
	case SD_FMT_STATUS_CHECK:
		/*
		 * For a failed command (including the succeeded command
		 * with invalid data sent back).
		 */
		sd_ssc_post(ssc, SD_FM_DRV_FATAL);
		break;
	case SD_FMT_STANDARD:
		/*
		 * Always for the succeeded commands probably with sense
		 * data sent back.
		 * Limitation:
		 *	We can only handle a succeeded command with sense
		 *	data sent back when auto-request-sense is enabled.
		 */
		senlen = ssc->ssc_uscsi_cmd->uscsi_rqlen -
		    ssc->ssc_uscsi_cmd->uscsi_rqresid;
		if ((ssc->ssc_uscsi_info->ui_pkt_state & STATE_ARQ_DONE) &&
		    (un->un_f_arq_enabled == TRUE) &&
		    senlen > 0 &&
		    ssc->ssc_uscsi_cmd->uscsi_rqbuf != NULL) {
			sd_ssc_post(ssc, SD_FM_DRV_NOTICE);
		}
		break;
	default:
		/*
		 * Should be a software error.
		 */
		scsi_log(SD_DEVINFO(un), sd_label, CE_CONT,
		    "sd_ssc_assessment got wrong \
		    sd_type_assessment %d\n", tp_assess);
		break;
	}
}

/*
 * Function: sd_ssc_post
 *
 * Description: 1. read the driver property to get fm-scsi-log flag.
 *		2. print log if fm_log_capable is non-zero.
 *		3. call sd_ssc_ereport_post to post ereport if possible.
 *
 * Context: May be called from kernel thread or interrupt context.
 */
static void
sd_ssc_post(sd_ssc_t *ssc, enum sd_driver_assessment sd_assess)
{
	struct sd_lun	*un;
	int		fm_scsi_log = 0;
	int		sd_severity;

	ASSERT(ssc != NULL);
	un = ssc->ssc_un;
	ASSERT(un != NULL);

	/*
	 * "fm-scsi-log" is an optional tunable; when absent the default
	 * of 0 means no console/system log message -- only the ereport
	 * is posted.
	 */
	fm_scsi_log = ddi_prop_get_int(DDI_DEV_T_ANY, SD_DEVINFO(un),
	    DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, "fm-scsi-log", 0);

	if (fm_scsi_log != 0) {
		/* Map the driver assessment onto a scsi_log severity. */
		switch (sd_assess) {
		case SD_FM_DRV_FATAL:
			sd_severity = SCSI_ERR_FATAL;
			break;
		case SD_FM_DRV_RECOVERY:
			sd_severity = SCSI_ERR_RECOVERED;
			break;
		case SD_FM_DRV_RETRY:
			sd_severity = SCSI_ERR_RETRYABLE;
			break;
		case SD_FM_DRV_NOTICE:
			sd_severity = SCSI_ERR_INFO;
			break;
		default:
			sd_severity = SCSI_ERR_UNKNOWN;
		}
		/* print log */
		sd_ssc_print(ssc, sd_severity);
	}

	/* always post ereport */
	sd_ssc_ereport_post(ssc, sd_assess);
}

/*
 * Function: sd_ssc_set_info
 *
 * Description: Mark ssc_flags and set ssc_info which would be the
 *		payload of uderr ereport. This function will cause
 *		sd_ssc_ereport_post to post uderr ereport only.
 *
 * Context: Kernel thread or interrupt context
 */
static void
sd_ssc_set_info(sd_ssc_t *ssc, int ssc_flags, const char *fmt, ...)
{
	va_list	ap;

	ASSERT(ssc != NULL);

	ssc->ssc_flags |= ssc_flags;
	/*
	 * Format the caller's message into the fixed-size ssc_info
	 * buffer; vsnprintf truncates safely if it does not fit.
	 */
	va_start(ap, fmt);
	(void) vsnprintf(ssc->ssc_info, sizeof (ssc->ssc_info), fmt, ap);
	va_end(ap);
}

/*
 * Function: sd_buf_iodone
 *
 * Description: Frees the sd_xbuf & returns the buf to its originator.
 *
 * Context: May be called from interrupt context.
 */
/* ARGSUSED */
static void
sd_buf_iodone(int index, struct sd_lun *un, struct buf *bp)
{
	struct sd_xbuf *xp;

	ASSERT(un != NULL);
	ASSERT(bp != NULL);
	ASSERT(!mutex_owned(SD_MUTEX(un)));

	SD_TRACE(SD_LOG_IO_CORE, un, "sd_buf_iodone: entry.\n");

	xp = SD_GET_XBUF(bp);
	ASSERT(xp != NULL);

	mutex_enter(SD_MUTEX(un));

	/*
	 * Grab time when the cmd completed.
	 * This is used for determining if the system has been
	 * idle long enough to make it idle to the PM framework.
	 * This is for lowering the overhead, and therefore improving
	 * performance per I/O operation.
	 */
	un->un_pm_idle_time = ddi_get_time();

	un->un_ncmds_in_driver--;
	ASSERT(un->un_ncmds_in_driver >= 0);
	SD_INFO(SD_LOG_IO, un, "sd_buf_iodone: un_ncmds_in_driver = %ld\n",
	    un->un_ncmds_in_driver);

	mutex_exit(SD_MUTEX(un));

	ddi_xbuf_done(bp, un->un_xbuf_attr);	/* xbuf is gone after this */
	biodone(bp);				/* bp is gone after this */

	SD_TRACE(SD_LOG_IO_CORE, un, "sd_buf_iodone: exit.\n");
}


/*
 * Function: sd_uscsi_iodone
 *
 * Description: Frees the sd_xbuf & returns the buf to its originator.
 *
 * Context: May be called from interrupt context.
 */
/* ARGSUSED */
static void
sd_uscsi_iodone(int index, struct sd_lun *un, struct buf *bp)
{
	struct sd_xbuf *xp;

	ASSERT(un != NULL);
	ASSERT(bp != NULL);

	xp = SD_GET_XBUF(bp);
	ASSERT(xp != NULL);
	ASSERT(!mutex_owned(SD_MUTEX(un)));

	SD_INFO(SD_LOG_IO, un, "sd_uscsi_iodone: entry.\n");

	/* Restore the caller's b_private saved by sd_xbuf_init(). */
	bp->b_private = xp->xb_private;

	mutex_enter(SD_MUTEX(un));

	/*
	 * Grab time when the cmd completed.
	 * This is used for determining if the system has been
	 * idle long enough to make it idle to the PM framework.
	 * This is for lowering the overhead, and therefore improving
	 * performance per I/O operation.
	 */
	un->un_pm_idle_time = ddi_get_time();

	un->un_ncmds_in_driver--;
	ASSERT(un->un_ncmds_in_driver >= 0);
	SD_INFO(SD_LOG_IO, un, "sd_uscsi_iodone: un_ncmds_in_driver = %ld\n",
	    un->un_ncmds_in_driver);

	mutex_exit(SD_MUTEX(un));

	/*
	 * The xbuf was over-allocated for a large sense request in
	 * sd_uscsi_strategy(); the free size must match exactly.
	 */
	if (((struct uscsi_cmd *)(xp->xb_pktinfo))->uscsi_rqlen >
	    SENSE_LENGTH) {
		kmem_free(xp, sizeof (struct sd_xbuf) - SENSE_LENGTH +
		    MAX_SENSE_LENGTH);
	} else {
		kmem_free(xp, sizeof (struct sd_xbuf));
	}

	biodone(bp);

	SD_INFO(SD_LOG_IO, un, "sd_uscsi_iodone: exit.\n");
}


/*
 * Function: sd_mapblockaddr_iostart
 *
 * Description: Verify request lies within the partition limits for
 *		the indicated minor device. Issue "overrun" buf if
 *		request would exceed partition range. Converts
 *		partition-relative block address to absolute.
 *
 * Context: Can sleep
 *
 * Issues: This follows what the old code did, in terms of accessing
 *		some of the partition info in the unit struct without holding
 *		the mutex. This is a general issue, if the partition info
 *		can be altered while IO is in progress... as soon as we send
 *		a buf, its partitioning can be invalid before it gets to the
 *		device. Probably the right fix is to move partitioning out
 *		of the driver entirely.
 */

static void
sd_mapblockaddr_iostart(int index, struct sd_lun *un, struct buf *bp)
{
	diskaddr_t	nblocks;	/* #blocks in the given partition */
	daddr_t	blocknum;	/* Block number specified by the buf */
	size_t	requested_nblocks;
	size_t	available_nblocks;
	int	partition;
	diskaddr_t	partition_offset;
	struct sd_xbuf *xp;

	ASSERT(un != NULL);
	ASSERT(bp != NULL);
	ASSERT(!mutex_owned(SD_MUTEX(un)));

	SD_TRACE(SD_LOG_IO_PARTITION, un,
	    "sd_mapblockaddr_iostart: entry: buf:0x%p\n", bp);

	xp = SD_GET_XBUF(bp);
	ASSERT(xp != NULL);

	/*
	 * If the geometry is not indicated as valid, attempt to access
	 * the unit & verify the geometry/label. This can be the case for
	 * removable-media devices, or if the device was opened in
	 * NDELAY/NONBLOCK mode.
	 */
	partition = SDPART(bp->b_edev);

	if (!SD_IS_VALID_LABEL(un)) {
		sd_ssc_t *ssc;
		/*
		 * Initialize sd_ssc_t for internal uscsi commands.
		 * In case of potential performance issue, we need
		 * to alloc memory only if there is invalid label.
		 */
		ssc = sd_ssc_init(un);

		if (sd_ready_and_valid(ssc, partition) != SD_READY_VALID) {
			/*
			 * For removable devices it is possible to start an
			 * I/O without a media by opening the device in nodelay
			 * mode. Also for writable CDs there can be many
			 * scenarios where there is no geometry yet but volume
			 * manager is trying to issue a read() just because
			 * it can see TOC on the CD. So do not print a message
			 * for removables.
			 */
			if (!un->un_f_has_removable_media) {
				scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
				    "i/o to invalid geometry\n");
			}
			bioerror(bp, EIO);
			bp->b_resid = bp->b_bcount;
			SD_BEGIN_IODONE(index, un, bp);

			sd_ssc_fini(ssc);
			return;
		}
		sd_ssc_fini(ssc);
	}

	nblocks = 0;
	(void) cmlb_partinfo(un->un_cmlbhandle, partition,
	    &nblocks, &partition_offset, NULL, NULL, (void *)SD_PATH_DIRECT);

	/*
	 * blocknum is the starting block number of the request. At this
	 * point it is still relative to the start of the minor device.
	 */
	blocknum = xp->xb_blkno;

	/*
	 * Legacy: If the starting block number is one past the last block
	 * in the partition, do not set B_ERROR in the buf.
	 */
	if (blocknum == nblocks)  {
		goto error_exit;
	}

	/*
	 * Confirm that the first block of the request lies within the
	 * partition limits. Also the requested number of bytes must be
	 * a multiple of the system block size.
	 */
	if ((blocknum < 0) || (blocknum >= nblocks) ||
	    ((bp->b_bcount & (un->un_sys_blocksize - 1)) != 0)) {
		bp->b_flags |= B_ERROR;
		goto error_exit;
	}

	/*
	 * If the requested # blocks exceeds the available # blocks, that
	 * is an overrun of the partition.
	 */
	requested_nblocks = SD_BYTES2SYSBLOCKS(un, bp->b_bcount);
	available_nblocks = (size_t)(nblocks - blocknum);
	ASSERT(nblocks >= blocknum);

	if (requested_nblocks > available_nblocks) {
		/*
		 * Allocate an "overrun" buf to allow the request to proceed
		 * for the amount of space available in the partition. The
		 * amount not transferred will be added into the b_resid
		 * when the operation is complete. The overrun buf
		 * replaces the original buf here, and the original buf
		 * is saved inside the overrun buf, for later use.
		 */
		size_t resid = SD_SYSBLOCKS2BYTES(un,
		    (offset_t)(requested_nblocks - available_nblocks));
		size_t count = bp->b_bcount - resid;
		/*
		 * Note: count is an unsigned entity thus it'll NEVER
		 * be less than 0 so ASSERT the original values are
		 * correct.
		 */
		ASSERT(bp->b_bcount >= resid);

		bp = sd_bioclone_alloc(bp, count, blocknum,
		    (int (*)(struct buf *)) sd_mapblockaddr_iodone);
		xp = SD_GET_XBUF(bp); /* Update for 'new' bp! */
		ASSERT(xp != NULL);
	}

	/* At this point there should be no residual for this buf. */
	ASSERT(bp->b_resid == 0);

	/* Convert the block number to an absolute address. */
	xp->xb_blkno += partition_offset;

	SD_NEXT_IOSTART(index, un, bp);

	SD_TRACE(SD_LOG_IO_PARTITION, un,
	    "sd_mapblockaddr_iostart: exit 0: buf:0x%p\n", bp);

	return;

error_exit:
	bp->b_resid = bp->b_bcount;
	SD_BEGIN_IODONE(index, un, bp);
	SD_TRACE(SD_LOG_IO_PARTITION, un,
	    "sd_mapblockaddr_iostart: exit 1: buf:0x%p\n", bp);
}


/*
 * Function: sd_mapblockaddr_iodone
 *
 * Description: Completion-side processing for partition management.
 *
 * Context: May be called under interrupt context
 */

static void
sd_mapblockaddr_iodone(int index, struct sd_lun *un, struct buf *bp)
{
	/* int partition; */	/* Not used, see below. */
	ASSERT(un != NULL);
	ASSERT(bp != NULL);
	ASSERT(!mutex_owned(SD_MUTEX(un)));

	SD_TRACE(SD_LOG_IO_PARTITION, un,
	    "sd_mapblockaddr_iodone: entry: buf:0x%p\n", bp);

	if (bp->b_iodone == (int (*)(struct buf *)) sd_mapblockaddr_iodone) {
		/*
		 * We have an "overrun" buf to deal with...
		 */
		struct sd_xbuf	*xp;
		struct buf	*obp;	/* ptr to the original buf */

		xp = SD_GET_XBUF(bp);
		ASSERT(xp != NULL);

		/* Retrieve the pointer to the original buf */
		obp = (struct buf *)xp->xb_private;
		ASSERT(obp != NULL);

		/* Propagate residual and error state back to the original. */
		obp->b_resid = obp->b_bcount - (bp->b_bcount - bp->b_resid);
		bioerror(obp, bp->b_error);

		sd_bioclone_free(bp);

		/*
		 * Get back the original buf.
		 * Note that since the restoration of xb_blkno below
		 * was removed, the sd_xbuf is not needed.
		 */
		bp = obp;
		/*
		 * xp = SD_GET_XBUF(bp);
		 * ASSERT(xp != NULL);
		 */
	}

	/*
	 * Convert sd->xb_blkno back to a minor-device relative value.
	 * Note: this has been commented out, as it is not needed in the
	 * current implementation of the driver (ie, since this function
	 * is at the top of the layering chains, so the info will be
	 * discarded) and it is in the "hot" IO path.
	 *
	 * partition = getminor(bp->b_edev) & SDPART_MASK;
	 * xp->xb_blkno -= un->un_offset[partition];
	 */

	SD_NEXT_IODONE(index, un, bp);

	SD_TRACE(SD_LOG_IO_PARTITION, un,
	    "sd_mapblockaddr_iodone: exit: buf:0x%p\n", bp);
}


/*
 * Function: sd_mapblocksize_iostart
 *
 * Description: Convert between system block size (un->un_sys_blocksize)
 *		and target block size (un->un_tgt_blocksize).
 *
 * Context: Can sleep to allocate resources.
 *
 * Assumptions: A higher layer has already performed any partition validation,
 *		and converted the xp->xb_blkno to an absolute value relative
 *		to the start of the device.
 *
 *		It is also assumed that the higher layer has implemented
 *		an "overrun" mechanism for the case where the request would
 *		read/write beyond the end of a partition.
 *		In this case we assume (and ASSERT) that bp->b_resid == 0.
 *
 * Note: The implementation for this routine assumes the target
 *	block size remains constant between allocation and transport.
 */

static void
sd_mapblocksize_iostart(int index, struct sd_lun *un, struct buf *bp)
{
	struct sd_mapblocksize_info	*bsp;
	struct sd_xbuf			*xp;
	offset_t first_byte;
	daddr_t	start_block, end_block;
	daddr_t	request_bytes;
	ushort_t is_aligned = FALSE;

	ASSERT(un != NULL);
	ASSERT(bp != NULL);
	ASSERT(!mutex_owned(SD_MUTEX(un)));
	ASSERT(bp->b_resid == 0);

	SD_TRACE(SD_LOG_IO_RMMEDIA, un,
	    "sd_mapblocksize_iostart: entry: buf:0x%p\n", bp);

	/*
	 * For a non-writable CD, a write request is an error
	 */
	if (ISCD(un) && ((bp->b_flags & B_READ) == 0) &&
	    (un->un_f_mmc_writable_media == FALSE)) {
		bioerror(bp, EIO);
		bp->b_resid = bp->b_bcount;
		SD_BEGIN_IODONE(index, un, bp);
		return;
	}

	/*
	 * We do not need a shadow buf if the device is using
	 * un->un_sys_blocksize as its block size or if bcount == 0.
	 * In this case there is no layer-private data block allocated.
	 */
	if ((un->un_tgt_blocksize == un->un_sys_blocksize) ||
	    (bp->b_bcount == 0)) {
		goto done;
	}

#if defined(__i386) || defined(__amd64)
	/* We do not support non-block-aligned transfers for ROD devices */
	ASSERT(!ISROD(un));
#endif

	xp = SD_GET_XBUF(bp);
	ASSERT(xp != NULL);

	SD_INFO(SD_LOG_IO_RMMEDIA, un, "sd_mapblocksize_iostart: "
	    "tgt_blocksize:0x%x sys_blocksize: 0x%x\n",
	    un->un_tgt_blocksize, un->un_sys_blocksize);
	SD_INFO(SD_LOG_IO_RMMEDIA, un, "sd_mapblocksize_iostart: "
	    "request start block:0x%x\n", xp->xb_blkno);
	SD_INFO(SD_LOG_IO_RMMEDIA, un, "sd_mapblocksize_iostart: "
	    "request len:0x%x\n", bp->b_bcount);

	/*
	 * Allocate the layer-private data area for the mapblocksize layer.
	 * Layers are allowed to use the xp_private member of the sd_xbuf
	 * struct to store the pointer to their layer-private data block, but
	 * each layer also has the responsibility of restoring the prior
	 * contents of xb_private before returning the buf/xbuf to the
	 * higher layer that sent it.
	 *
	 * Here we save the prior contents of xp->xb_private into the
	 * bsp->mbs_oprivate field of our layer-private data area. This value
	 * is restored by sd_mapblocksize_iodone() just prior to freeing up
	 * the layer-private area and returning the buf/xbuf to the layer
	 * that sent it.
	 *
	 * Note that here we use kmem_zalloc for the allocation as there are
	 * parts of the mapblocksize code that expect certain fields to be
	 * zero unless explicitly set to a required value.
	 */
	bsp = kmem_zalloc(sizeof (struct sd_mapblocksize_info), KM_SLEEP);
	bsp->mbs_oprivate = xp->xb_private;
	xp->xb_private = bsp;

	/*
	 * This treats the data on the disk (target) as an array of bytes.
	 * first_byte is the byte offset, from the beginning of the device,
	 * to the location of the request. This is converted from a
	 * un->un_sys_blocksize block address to a byte offset, and then back
	 * to a block address based upon a un->un_tgt_blocksize block size.
	 *
	 * xp->xb_blkno should be absolute upon entry into this function,
	 * but, but it is based upon partitions that use the "system"
	 * block size. It must be adjusted to reflect the block size of
	 * the target.
	 *
	 * Note that end_block is actually the block that follows the last
	 * block of the request, but that's what is needed for the computation.
	 */
	first_byte  = SD_SYSBLOCKS2BYTES(un, (offset_t)xp->xb_blkno);
	start_block = xp->xb_blkno = first_byte / un->un_tgt_blocksize;
	/* Round up so a partial trailing target block is still covered. */
	end_block   = (first_byte + bp->b_bcount + un->un_tgt_blocksize - 1) /
	    un->un_tgt_blocksize;

	/* request_bytes is rounded up to a multiple of the target block size */
	request_bytes = (end_block - start_block) * un->un_tgt_blocksize;

	/*
	 * See if the starting address of the request and the request
	 * length are aligned on a un->un_tgt_blocksize boundary. If aligned
	 * then we do not need to allocate a shadow buf to handle the request.
	 */
	if (((first_byte % un->un_tgt_blocksize) == 0) &&
	    ((bp->b_bcount % un->un_tgt_blocksize) == 0)) {
		is_aligned = TRUE;
	}

	if ((bp->b_flags & B_READ) == 0) {
		/*
		 * Lock the range for a write operation. An aligned request is
		 * considered a simple write; otherwise the request must be a
		 * read-modify-write.
		 */
		bsp->mbs_wmp = sd_range_lock(un, start_block, end_block - 1,
		    (is_aligned == TRUE) ? SD_WTYPE_SIMPLE : SD_WTYPE_RMW);
	}

	/*
	 * Alloc a shadow buf if the request is not aligned. Also, this is
	 * where the READ command is generated for a read-modify-write. (The
	 * write phase is deferred until after the read completes.)
	 */
	if (is_aligned == FALSE) {

		struct sd_mapblocksize_info	*shadow_bsp;
		struct sd_xbuf	*shadow_xp;
		struct buf	*shadow_bp;

		/*
		 * Allocate the shadow buf and it associated xbuf. Note that
		 * after this call the xb_blkno value in both the original
		 * buf's sd_xbuf _and_ the shadow buf's sd_xbuf will be the
		 * same: absolute relative to the start of the device, and
		 * adjusted for the target block size. The b_blkno in the
		 * shadow buf will also be set to this value. We should never
		 * change b_blkno in the original bp however.
		 *
		 * Note also that the shadow buf will always need to be a
		 * READ command, regardless of whether the incoming command
		 * is a READ or a WRITE.
		 */
		shadow_bp = sd_shadow_buf_alloc(bp, request_bytes, B_READ,
		    xp->xb_blkno,
		    (int (*)(struct buf *)) sd_mapblocksize_iodone);

		shadow_xp = SD_GET_XBUF(shadow_bp);

		/*
		 * Allocate the layer-private data for the shadow buf.
		 * (No need to preserve xb_private in the shadow xbuf.)
		 */
		shadow_xp->xb_private = shadow_bsp =
		    kmem_zalloc(sizeof (struct sd_mapblocksize_info), KM_SLEEP);

		/*
		 * bsp->mbs_copy_offset is used later by sd_mapblocksize_iodone
		 * to figure out where the start of the user data is (based upon
		 * the system block size) in the data returned by the READ
		 * command (which will be based upon the target blocksize). Note
		 * that this is only really used if the request is unaligned.
		 */
		bsp->mbs_copy_offset = (ssize_t)(first_byte -
		    ((offset_t)xp->xb_blkno * un->un_tgt_blocksize));
		ASSERT((bsp->mbs_copy_offset >= 0) &&
		    (bsp->mbs_copy_offset < un->un_tgt_blocksize));

		shadow_bsp->mbs_copy_offset = bsp->mbs_copy_offset;

		shadow_bsp->mbs_layer_index = bsp->mbs_layer_index = index;

		/* Transfer the wmap (if any) to the shadow buf */
		shadow_bsp->mbs_wmp = bsp->mbs_wmp;
		bsp->mbs_wmp = NULL;

		/*
		 * The shadow buf goes on from here in place of the
		 * original buf.
		 */
		shadow_bsp->mbs_orig_bp = bp;
		bp = shadow_bp;
	}

	SD_INFO(SD_LOG_IO_RMMEDIA, un,
	    "sd_mapblocksize_iostart: tgt start block:0x%x\n", xp->xb_blkno);
	SD_INFO(SD_LOG_IO_RMMEDIA, un,
	    "sd_mapblocksize_iostart: tgt request len:0x%x\n",
	    request_bytes);
	SD_INFO(SD_LOG_IO_RMMEDIA, un,
	    "sd_mapblocksize_iostart: shadow buf:0x%x\n", bp);

done:
	SD_NEXT_IOSTART(index, un, bp);

	SD_TRACE(SD_LOG_IO_RMMEDIA, un,
	    "sd_mapblocksize_iostart: exit: buf:0x%p\n", bp);
}


/*
 * Function:	sd_mapblocksize_iodone
 *
 * Description: Completion side processing for block-size mapping.
 *
 * Context:	May be called under interrupt context
 */

static void
sd_mapblocksize_iodone(int index, struct sd_lun *un, struct buf *bp)
{
	struct sd_mapblocksize_info	*bsp;
	struct sd_xbuf	*xp;
	struct sd_xbuf	*orig_xp;	/* sd_xbuf for the original buf */
	struct buf	*orig_bp;	/* ptr to the original buf */
	offset_t	shadow_end;
	offset_t	request_end;
	offset_t	shadow_start;
	ssize_t		copy_offset;
	size_t		copy_length;
	size_t		shortfall;
	uint_t		is_write;	/* TRUE if this bp is a WRITE */
	uint_t		has_wmap;	/* TRUE is this bp has a wmap */

	ASSERT(un != NULL);
	ASSERT(bp != NULL);

	SD_TRACE(SD_LOG_IO_RMMEDIA, un,
	    "sd_mapblocksize_iodone: entry: buf:0x%p\n", bp);

	/*
	 * There is no shadow buf or layer-private data if the target is
	 * using un->un_sys_blocksize as its block size or if bcount == 0.
	 */
	if ((un->un_tgt_blocksize == un->un_sys_blocksize) ||
	    (bp->b_bcount == 0)) {
		goto exit;
	}

	xp = SD_GET_XBUF(bp);
	ASSERT(xp != NULL);

	/* Retrieve the pointer to the layer-private data area from the xbuf. */
	bsp = xp->xb_private;

	is_write = ((bp->b_flags & B_READ) == 0) ? TRUE : FALSE;
	has_wmap = (bsp->mbs_wmp != NULL) ? TRUE : FALSE;

	if (is_write) {
		/*
		 * For a WRITE request we must free up the block range that
		 * we have locked up.  This holds regardless of whether this is
		 * an aligned write request or a read-modify-write request.
		 */
		sd_range_unlock(un, bsp->mbs_wmp);
		bsp->mbs_wmp = NULL;
	}

	/*
	 * A b_iodone not pointing at this routine means there was no shadow
	 * buf allocated on the iostart side (the request was aligned).
	 */
	if ((bp->b_iodone != (int(*)(struct buf *))sd_mapblocksize_iodone)) {
		/*
		 * An aligned read or write command will have no shadow buf;
		 * there is not much else to do with it.
		 */
		goto done;
	}

	orig_bp = bsp->mbs_orig_bp;
	ASSERT(orig_bp != NULL);
	orig_xp = SD_GET_XBUF(orig_bp);
	ASSERT(orig_xp != NULL);
	ASSERT(!mutex_owned(SD_MUTEX(un)));

	if (!is_write && has_wmap) {
		/*
		 * A READ with a wmap means this is the READ phase of a
		 * read-modify-write. If an error occurred on the READ then
		 * we do not proceed with the WRITE phase or copy any data.
		 * Just release the write maps and return with an error.
		 */
		if ((bp->b_resid != 0) || (bp->b_error != 0)) {
			orig_bp->b_resid = orig_bp->b_bcount;
			bioerror(orig_bp, bp->b_error);
			sd_range_unlock(un, bsp->mbs_wmp);
			goto freebuf_done;
		}
	}

	/*
	 * Here is where we set up to copy the data from the shadow buf
	 * into the space associated with the original buf.
	 *
	 * To deal with the conversion between block sizes, these
	 * computations treat the data as an array of bytes, with the
	 * first byte (byte 0) corresponding to the first byte in the
	 * first block on the disk.
	 */

	/*
	 * shadow_start and shadow_len indicate the location and size of
	 * the data returned with the shadow IO request.
	 */
	shadow_start = SD_TGTBLOCKS2BYTES(un, (offset_t)xp->xb_blkno);
	shadow_end   = shadow_start + bp->b_bcount - bp->b_resid;

	/*
	 * copy_offset gives the offset (in bytes) from the start of the first
	 * block of the READ request to the beginning of the data. We retrieve
	 * this value from xb_pktp in the ORIGINAL xbuf, as it has been saved
	 * there by sd_mapblockize_iostart(). copy_length gives the amount of
	 * data to be copied (in bytes).
	 */
	copy_offset = bsp->mbs_copy_offset;
	ASSERT((copy_offset >= 0) && (copy_offset < un->un_tgt_blocksize));
	copy_length = orig_bp->b_bcount;
	request_end = shadow_start + copy_offset + orig_bp->b_bcount;

	/*
	 * Set up the resid and error fields of orig_bp as appropriate.
	 */
	if (shadow_end >= request_end) {
		/* We got all the requested data; set resid to zero */
		orig_bp->b_resid = 0;
	} else {
		/*
		 * We failed to get enough data to fully satisfy the original
		 * request. Just copy back whatever data we got and set
		 * up the residual and error code as required.
		 *
		 * 'shortfall' is the amount by which the data received with the
		 * shadow buf has "fallen short" of the requested amount.
		 */
		shortfall = (size_t)(request_end - shadow_end);

		if (shortfall > orig_bp->b_bcount) {
			/*
			 * We did not get enough data to even partially
			 * fulfill the original request. The residual is
			 * equal to the amount requested.
			 */
			orig_bp->b_resid = orig_bp->b_bcount;
		} else {
			/*
			 * We did not get all the data that we requested
			 * from the device, but we will try to return what
			 * portion we did get.
			 */
			orig_bp->b_resid = shortfall;
		}
		ASSERT(copy_length >= orig_bp->b_resid);
		copy_length -= orig_bp->b_resid;
	}

	/* Propagate the error code from the shadow buf to the original buf */
	bioerror(orig_bp, bp->b_error);

	if (is_write) {
		goto freebuf_done;	/* No data copying for a WRITE */
	}

	if (has_wmap) {
		/*
		 * This is a READ command from the READ phase of a
		 * read-modify-write request. We have to copy the data given
		 * by the user OVER the data returned by the READ command,
		 * then convert the command from a READ to a WRITE and send
		 * it back to the target.
		 */
		bcopy(orig_bp->b_un.b_addr, bp->b_un.b_addr + copy_offset,
		    copy_length);

		bp->b_flags &= ~((int)B_READ);	/* Convert to a WRITE */

		/*
		 * Dispatch the WRITE command to the taskq thread, which
		 * will in turn send the command to the target. When the
		 * WRITE command completes, we (sd_mapblocksize_iodone())
		 * will get called again as part of the iodone chain
		 * processing for it. Note that we will still be dealing
		 * with the shadow buf at that point.
		 */
		if (taskq_dispatch(sd_wmr_tq, sd_read_modify_write_task, bp,
		    KM_NOSLEEP) != 0) {
			/*
			 * Dispatch was successful so we are done. Return
			 * without going any higher up the iodone chain. Do
			 * not free up any layer-private data until after the
			 * WRITE completes.
			 */
			return;
		}

		/*
		 * Dispatch of the WRITE command failed; set up the error
		 * condition and send this IO back up the iodone chain.
		 */
		bioerror(orig_bp, EIO);
		orig_bp->b_resid = orig_bp->b_bcount;

	} else {
		/*
		 * This is a regular READ request (ie, not a RMW). Copy the
		 * data from the shadow buf into the original buf. The
		 * copy_offset compensates for any "misalignment" between the
		 * shadow buf (with its un->un_tgt_blocksize blocks) and the
		 * original buf (with its un->un_sys_blocksize blocks).
		 */
		bcopy(bp->b_un.b_addr + copy_offset, orig_bp->b_un.b_addr,
		    copy_length);
	}

freebuf_done:

	/*
	 * At this point we still have both the shadow buf AND the original
	 * buf to deal with, as well as the layer-private data area in each.
	 * Local variables are as follows:
	 *
	 * bp -- points to shadow buf
	 * xp -- points to xbuf of shadow buf
	 * bsp -- points to layer-private data area of shadow buf
	 * orig_bp -- points to original buf
	 *
	 * First free the shadow buf and its associated xbuf, then free the
	 * layer-private data area from the shadow buf. There is no need to
	 * restore xb_private in the shadow xbuf.
	 */
	sd_shadow_buf_free(bp);
	kmem_free(bsp, sizeof (struct sd_mapblocksize_info));

	/*
	 * Now update the local variables to point to the original buf, xbuf,
	 * and layer-private area.
	 */
	bp = orig_bp;
	xp = SD_GET_XBUF(bp);
	ASSERT(xp != NULL);
	ASSERT(xp == orig_xp);
	bsp = xp->xb_private;
	ASSERT(bsp != NULL);

done:
	/*
	 * Restore xb_private to whatever it was set to by the next higher
	 * layer in the chain, then free the layer-private data area.
	 */
	xp->xb_private = bsp->mbs_oprivate;
	kmem_free(bsp, sizeof (struct sd_mapblocksize_info));

exit:
	SD_TRACE(SD_LOG_IO_RMMEDIA, SD_GET_UN(bp),
	    "sd_mapblocksize_iodone: calling SD_NEXT_IODONE: buf:0x%p\n", bp);

	SD_NEXT_IODONE(index, un, bp);
}


/*
 * Function:	sd_checksum_iostart
 *
 * Description: A stub function for a layer that's currently not used.
 *		For now just a placeholder.
 *
 * Context:	Kernel thread context
 */

static void
sd_checksum_iostart(int index, struct sd_lun *un, struct buf *bp)
{
	ASSERT(un != NULL);
	ASSERT(bp != NULL);
	ASSERT(!mutex_owned(SD_MUTEX(un)));
	/* Pass the buf straight through to the next layer in the chain. */
	SD_NEXT_IOSTART(index, un, bp);
}


/*
 * Function:	sd_checksum_iodone
 *
 * Description: A stub function for a layer that's currently not used.
 *		For now just a placeholder.
 *
 * Context:	May be called under interrupt context
 */

static void
sd_checksum_iodone(int index, struct sd_lun *un, struct buf *bp)
{
	ASSERT(un != NULL);
	ASSERT(bp != NULL);
	ASSERT(!mutex_owned(SD_MUTEX(un)));
	/* Pass the buf straight through to the next layer in the chain. */
	SD_NEXT_IODONE(index, un, bp);
}


/*
 * Function:	sd_checksum_uscsi_iostart
 *
 * Description: A stub function for a layer that's currently not used.
 *		For now just a placeholder.
 *
 * Context:	Kernel thread context
 */

static void
sd_checksum_uscsi_iostart(int index, struct sd_lun *un, struct buf *bp)
{
	ASSERT(un != NULL);
	ASSERT(bp != NULL);
	ASSERT(!mutex_owned(SD_MUTEX(un)));
	/* Pass the buf straight through to the next layer in the chain. */
	SD_NEXT_IOSTART(index, un, bp);
}


/*
 * Function:	sd_checksum_uscsi_iodone
 *
 * Description: A stub function for a layer that's currently not used.
 *		For now just a placeholder.
 *
 * Context:	May be called under interrupt context
 */

static void
sd_checksum_uscsi_iodone(int index, struct sd_lun *un, struct buf *bp)
{
	ASSERT(un != NULL);
	ASSERT(bp != NULL);
	ASSERT(!mutex_owned(SD_MUTEX(un)));
	/* Pass the buf straight through to the next layer in the chain. */
	SD_NEXT_IODONE(index, un, bp);
}


/*
 * Function:	sd_pm_iostart
 *
 * Description: iostart-side routine for Power mangement.
 *		Ensures the device is powered up (via sd_pm_entry()) before
 *		the IO proceeds down the chain; fails the IO with EIO if the
 *		device cannot be brought to the required power state.
 *
 * Context:	Kernel thread context
 */

static void
sd_pm_iostart(int index, struct sd_lun *un, struct buf *bp)
{
	ASSERT(un != NULL);
	ASSERT(bp != NULL);
	ASSERT(!mutex_owned(SD_MUTEX(un)));
	ASSERT(!mutex_owned(&un->un_pm_mutex));

	SD_TRACE(SD_LOG_IO_PM, un, "sd_pm_iostart: entry\n");

	if (sd_pm_entry(un) != DDI_SUCCESS) {
		/*
		 * Set up to return the failed buf back up the 'iodone'
		 * side of the calling chain.
		 */
		bioerror(bp, EIO);
		bp->b_resid = bp->b_bcount;

		SD_BEGIN_IODONE(index, un, bp);

		SD_TRACE(SD_LOG_IO_PM, un, "sd_pm_iostart: exit\n");
		return;
	}

	SD_NEXT_IOSTART(index, un, bp);

	SD_TRACE(SD_LOG_IO_PM, un, "sd_pm_iostart: exit\n");
}


/*
 * Function:	sd_pm_iodone
 *
 * Description: iodone-side routine for power mangement.
 *		Balances the sd_pm_entry() done on the iostart side by
 *		calling sd_pm_exit() when power management is enabled.
 *
 * Context:	may be called from interrupt context
 */

static void
sd_pm_iodone(int index, struct sd_lun *un, struct buf *bp)
{
	ASSERT(un != NULL);
	ASSERT(bp != NULL);
	ASSERT(!mutex_owned(&un->un_pm_mutex));

	SD_TRACE(SD_LOG_IO_PM, un, "sd_pm_iodone: entry\n");

	/*
	 * After attach the following flag is only read, so don't
	 * take the penalty of acquiring a mutex for it.
	 */
	if (un->un_f_pm_is_enabled == TRUE) {
		sd_pm_exit(un);
	}

	SD_NEXT_IODONE(index, un, bp);

	SD_TRACE(SD_LOG_IO_PM, un, "sd_pm_iodone: exit\n");
}


/*
 * Function:	sd_core_iostart
 *
 * Description: Primary driver function for enqueuing buf(9S) structs from
 *		the system and initiating IO to the target device
 *
 * Context:	Kernel thread context. Can sleep.
 *
 * Assumptions: - The given xp->xb_blkno is absolute
 *		  (ie, relative to the start of the device).
 *		- The IO is to be done using the native blocksize of
 *		  the device, as specified in un->un_tgt_blocksize.
 */
/* ARGSUSED */
static void
sd_core_iostart(int index, struct sd_lun *un, struct buf *bp)
{
	struct sd_xbuf *xp;

	ASSERT(un != NULL);
	ASSERT(bp != NULL);
	ASSERT(!mutex_owned(SD_MUTEX(un)));
	ASSERT(bp->b_resid == 0);

	SD_TRACE(SD_LOG_IO_CORE, un, "sd_core_iostart: entry: bp:0x%p\n", bp);

	xp = SD_GET_XBUF(bp);
	ASSERT(xp != NULL);

	mutex_enter(SD_MUTEX(un));

	/*
	 * If we are currently in the failfast state, fail any new IO
	 * that has B_FAILFAST set, then return.
	 */
	if ((bp->b_flags & B_FAILFAST) &&
	    (un->un_failfast_state == SD_FAILFAST_ACTIVE)) {
		mutex_exit(SD_MUTEX(un));
		bioerror(bp, EIO);
		bp->b_resid = bp->b_bcount;
		SD_BEGIN_IODONE(index, un, bp);
		return;
	}

	if (SD_IS_DIRECT_PRIORITY(xp)) {
		/*
		 * Priority command -- transport it immediately.
		 *
		 * Note: We may want to assert that USCSI_DIAGNOSE is set,
		 * because all direct priority commands should be associated
		 * with error recovery actions which we don't want to retry.
		 */
		sd_start_cmds(un, bp);
	} else {
		/*
		 * Normal command -- add it to the wait queue, then start
		 * transporting commands from the wait queue.
		 */
		sd_add_buf_to_waitq(un, bp);
		SD_UPDATE_KSTATS(un, kstat_waitq_enter, bp);
		sd_start_cmds(un, NULL);
	}

	mutex_exit(SD_MUTEX(un));

	SD_TRACE(SD_LOG_IO_CORE, un, "sd_core_iostart: exit: bp:0x%p\n", bp);
}


/*
 * Function:	sd_init_cdb_limits
 *
 * Description: This is to handle scsi_pkt initialization differences
 *		between the driver platforms.
 *
 *		Legacy behaviors:
 *
 *		If the block number or the sector count exceeds the
 *		capabilities of a Group 0 command, shift over to a
 *		Group 1 command. We don't blindly use Group 1
 *		commands because a) some drives (CDC Wren IVs) get a
 *		bit confused, and b) there is probably a fair amount
 *		of speed difference for a target to receive and decode
 *		a 10 byte command instead of a 6 byte command.
 *
 *		The xfer time difference of 6 vs 10 byte CDBs is
 *		still significant so this code is still worthwhile.
 *		10 byte CDBs are very inefficient with the fas HBA driver
 *		and older disks. Each CDB byte took 1 usec with some
 *		popular disks.
 *
 * Context:	Must be called at attach time
 */

static void
sd_init_cdb_limits(struct sd_lun *un)
{
	int hba_cdb_limit;

	/*
	 * Use CDB_GROUP1 commands for most devices except for
	 * parallel SCSI fixed drives in which case we get better
	 * performance using CDB_GROUP0 commands (where applicable).
	 */
	un->un_mincdb = SD_CDB_GROUP1;
#if !defined(__fibre)
	if (!un->un_f_is_fibre && !un->un_f_cfg_is_atapi && !ISROD(un) &&
	    !un->un_f_has_removable_media) {
		un->un_mincdb = SD_CDB_GROUP0;
	}
#endif

	/*
	 * Try to read the max-cdb-length supported by HBA.
	 *
	 * NOTE(review): the ladder below assumes the numeric ordering
	 * CDB_GROUP0 < CDB_GROUP1 < CDB_GROUP5 < CDB_GROUP4 (6-, 10-,
	 * 12- and 16-byte CDBs respectively) -- verify against the
	 * definitions in the scsi headers.
	 */
	un->un_max_hba_cdb = scsi_ifgetcap(SD_ADDRESS(un), "max-cdb-length", 1);
	if (0 >= un->un_max_hba_cdb) {
		/* Capability not reported; assume the largest CDB group. */
		un->un_max_hba_cdb = CDB_GROUP4;
		hba_cdb_limit = SD_CDB_GROUP4;
	} else if (0 < un->un_max_hba_cdb &&
	    un->un_max_hba_cdb < CDB_GROUP1) {
		hba_cdb_limit = SD_CDB_GROUP0;
	} else if (CDB_GROUP1 <= un->un_max_hba_cdb &&
	    un->un_max_hba_cdb < CDB_GROUP5) {
		hba_cdb_limit = SD_CDB_GROUP1;
	} else if (CDB_GROUP5 <= un->un_max_hba_cdb &&
	    un->un_max_hba_cdb < CDB_GROUP4) {
		hba_cdb_limit = SD_CDB_GROUP5;
	} else {
		hba_cdb_limit = SD_CDB_GROUP4;
	}

	/*
	 * Use CDB_GROUP5 commands for removable devices. Use CDB_GROUP4
	 * commands for fixed disks unless we are building for a 32 bit
	 * kernel.
	 */
#ifdef _LP64
	un->un_maxcdb = (un->un_f_has_removable_media) ? SD_CDB_GROUP5 :
	    min(hba_cdb_limit, SD_CDB_GROUP4);
#else
	un->un_maxcdb = (un->un_f_has_removable_media) ? SD_CDB_GROUP5 :
	    min(hba_cdb_limit, SD_CDB_GROUP1);
#endif

	un->un_status_len = (int)((un->un_f_arq_enabled == TRUE)
	    ? sizeof (struct scsi_arq_status) : 1);
	un->un_cmd_timeout = (ushort_t)sd_io_time;
	un->un_uscsi_timeout = ((ISCD(un)) ? 2 : 1) * un->un_cmd_timeout;
}


/*
 * Function:	sd_initpkt_for_buf
 *
 * Description: Allocate and initialize for transport a scsi_pkt struct,
 *		based upon the info specified in the given buf struct.
 *
 *		Assumes the xb_blkno in the request is absolute (ie,
 *		relative to the start of the device (NOT partition!).
 *		Also assumes that the request is using the native block
 *		size of the device (as returned by the READ CAPACITY
 *		command).
 *
 * Return Code: SD_PKT_ALLOC_SUCCESS
 *		SD_PKT_ALLOC_FAILURE
 *		SD_PKT_ALLOC_FAILURE_NO_DMA
 *		SD_PKT_ALLOC_FAILURE_CDB_TOO_SMALL
 *
 * Context:	Kernel thread and may be called from software interrupt context
 *		as part of a sdrunout callback.
 *		This function may not block or
 *		call routines that block
 */

static int
sd_initpkt_for_buf(struct buf *bp, struct scsi_pkt **pktpp)
{
	struct sd_xbuf	*xp;
	struct scsi_pkt *pktp = NULL;
	struct sd_lun	*un;
	size_t		blockcount;
	daddr_t		startblock;
	int		rval;
	int		cmd_flags;

	ASSERT(bp != NULL);
	ASSERT(pktpp != NULL);
	xp = SD_GET_XBUF(bp);
	ASSERT(xp != NULL);
	un = SD_GET_UN(bp);
	ASSERT(un != NULL);
	ASSERT(mutex_owned(SD_MUTEX(un)));
	ASSERT(bp->b_resid == 0);

	SD_TRACE(SD_LOG_IO_CORE, un,
	    "sd_initpkt_for_buf: entry: buf:0x%p\n", bp);

	/*
	 * Entered with SD_MUTEX held (ASSERTed above); it is dropped
	 * across packet allocation and reacquired before every return.
	 */
	mutex_exit(SD_MUTEX(un));

#if defined(__i386) || defined(__amd64)	/* DMAFREE for x86 only */
	if (xp->xb_pkt_flags & SD_XB_DMA_FREED) {
		/*
		 * Already have a scsi_pkt -- just need DMA resources.
		 * We must recompute the CDB in case the mapping returns
		 * a nonzero pkt_resid.
		 * Note: if this is a portion of a PKT_DMA_PARTIAL transfer
		 * that is being retried, the unmap/remap of the DMA resouces
		 * will result in the entire transfer starting over again
		 * from the very first block.
		 */
		ASSERT(xp->xb_pktp != NULL);
		pktp = xp->xb_pktp;
	} else {
		pktp = NULL;
	}
#endif /* __i386 || __amd64 */

	startblock = xp->xb_blkno;	/* Absolute block num. */
	blockcount = SD_BYTES2TGTBLOCKS(un, bp->b_bcount);

	cmd_flags = un->un_pkt_flags | (xp->xb_pkt_flags & SD_XB_INITPKT_MASK);

	/*
	 * sd_setup_rw_pkt will determine the appropriate CDB group to use,
	 * call scsi_init_pkt, and build the CDB.
	 */
	rval = sd_setup_rw_pkt(un, &pktp, bp,
	    cmd_flags, sdrunout, (caddr_t)un,
	    startblock, blockcount);

	if (rval == 0) {
		/*
		 * Success.
		 *
		 * If partial DMA is being used and required for this transfer.
		 * set it up here.
		 */
		if ((un->un_pkt_flags & PKT_DMA_PARTIAL) != 0 &&
		    (pktp->pkt_resid != 0)) {

			/*
			 * Save the CDB length and pkt_resid for the
			 * next xfer
			 */
			xp->xb_dma_resid = pktp->pkt_resid;

			/* rezero resid */
			pktp->pkt_resid = 0;

		} else {
			xp->xb_dma_resid = 0;
		}

		pktp->pkt_flags = un->un_tagflags;
		pktp->pkt_time  = un->un_cmd_timeout;
		pktp->pkt_comp  = sdintr;

		pktp->pkt_private = bp;
		*pktpp = pktp;

		SD_TRACE(SD_LOG_IO_CORE, un,
		    "sd_initpkt_for_buf: exit: buf:0x%p\n", bp);

#if defined(__i386) || defined(__amd64)	/* DMAFREE for x86 only */
		xp->xb_pkt_flags &= ~SD_XB_DMA_FREED;
#endif

		mutex_enter(SD_MUTEX(un));
		return (SD_PKT_ALLOC_SUCCESS);

	}

	/*
	 * SD_PKT_ALLOC_FAILURE is the only expected failure code
	 * from sd_setup_rw_pkt.
	 */
	ASSERT(rval == SD_PKT_ALLOC_FAILURE);

	if (rval == SD_PKT_ALLOC_FAILURE) {
		*pktpp = NULL;
		/*
		 * Set the driver state to RWAIT to indicate the driver
		 * is waiting on resource allocations. The driver will not
		 * suspend, pm_suspend, or detatch while the state is RWAIT.
		 */
		mutex_enter(SD_MUTEX(un));
		New_state(un, SD_STATE_RWAIT);

		SD_ERROR(SD_LOG_IO_CORE, un,
		    "sd_initpkt_for_buf: No pktp. exit bp:0x%p\n", bp);

		if ((bp->b_flags & B_ERROR) != 0) {
			return (SD_PKT_ALLOC_FAILURE_NO_DMA);
		}
		return (SD_PKT_ALLOC_FAILURE);
	} else {
		/*
		 * PKT_ALLOC_FAILURE_CDB_TOO_SMALL
		 *
		 * This should never happen.  Maybe someone messed with the
		 * kernel's minphys?
		 */
		scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
		    "Request rejected: too large for CDB: "
		    "lba:0x%08lx  len:0x%08lx\n", startblock, blockcount);
		SD_ERROR(SD_LOG_IO_CORE, un,
		    "sd_initpkt_for_buf: No cp. exit bp:0x%p\n", bp);
		mutex_enter(SD_MUTEX(un));
		return (SD_PKT_ALLOC_FAILURE_CDB_TOO_SMALL);

	}
}


/*
 * Function:	sd_destroypkt_for_buf
 *
 * Description: Free the scsi_pkt(9S) for the given bp (buf IO processing).
 *
 * Context:	Kernel thread or interrupt context
 */

static void
sd_destroypkt_for_buf(struct buf *bp)
{
	ASSERT(bp != NULL);
	ASSERT(SD_GET_UN(bp) != NULL);

	SD_TRACE(SD_LOG_IO_CORE, SD_GET_UN(bp),
	    "sd_destroypkt_for_buf: entry: buf:0x%p\n", bp);

	ASSERT(SD_GET_PKTP(bp) != NULL);
	scsi_destroy_pkt(SD_GET_PKTP(bp));

	SD_TRACE(SD_LOG_IO_CORE, SD_GET_UN(bp),
	    "sd_destroypkt_for_buf: exit: buf:0x%p\n", bp);
}

/*
 * Function:	sd_setup_rw_pkt
 *
 * Description: Determines appropriate CDB group for the requested LBA
 *		and transfer length, calls scsi_init_pkt, and builds
 *		the CDB.  Do not use for partial DMA transfers except
 *		for the initial transfer since the CDB size must
 *		remain constant.
 *
 * Context:	Kernel thread and may be called from software interrupt
 *		context as part of a sdrunout callback. This function may not
 *		block or call routines that block
 */


int
sd_setup_rw_pkt(struct sd_lun *un,
    struct scsi_pkt **pktpp, struct buf *bp, int flags,
    int (*callback)(caddr_t), caddr_t callback_arg,
    diskaddr_t lba, uint32_t blockcount)
{
	struct scsi_pkt *return_pktp;
	union scsi_cdb *cdbp;
	struct sd_cdbinfo *cp = NULL;
	int i;

	/*
	 * See which size CDB to use, based upon the request.
	 * sd_cdbtab is scanned from the smallest permitted CDB group
	 * (un_mincdb) upward, so the first entry whose LBA/length limits
	 * cover the request is the smallest usable CDB.
	 */
	for (i = un->un_mincdb; i <= un->un_maxcdb; i++) {

		/*
		 * Check lba and block count against sd_cdbtab limits.
		 * In the partial DMA case, we have to use the same size
		 * CDB for all the transfers.  Check lba + blockcount
		 * against the max LBA so we know that segment of the
		 * transfer can use the CDB we select.
		 */
		if ((lba + blockcount - 1 <= sd_cdbtab[i].sc_maxlba) &&
		    (blockcount <= sd_cdbtab[i].sc_maxlen)) {

			/*
			 * The command will fit into the CDB type
			 * specified by sd_cdbtab[i].
			 */
			cp = sd_cdbtab + i;

			/*
			 * Call scsi_init_pkt so we can fill in the
			 * CDB.
			 */
			return_pktp = scsi_init_pkt(SD_ADDRESS(un), *pktpp,
			    bp, cp->sc_grpcode, un->un_status_len, 0,
			    flags, callback, callback_arg);

			if (return_pktp != NULL) {

				/*
				 * Return new value of pkt
				 */
				*pktpp = return_pktp;

				/*
				 * To be safe, zero the CDB insuring there is
				 * no leftover data from a previous command.
				 */
				bzero(return_pktp->pkt_cdbp, cp->sc_grpcode);

				/*
				 * Handle partial DMA mapping
				 */
				if (return_pktp->pkt_resid != 0) {

					/*
					 * Not going to xfer as many blocks as
					 * originally expected
					 */
					blockcount -=
					    SD_BYTES2TGTBLOCKS(un,
					    return_pktp->pkt_resid);
				}

				cdbp = (union scsi_cdb *)return_pktp->pkt_cdbp;

				/*
				 * Set command byte based on the CDB
				 * type we matched.
				 */
				cdbp->scc_cmd = cp->sc_grpmask |
				    ((bp->b_flags & B_READ) ?
				    SCMD_READ : SCMD_WRITE);

				SD_FILL_SCSI1_LUN(un, return_pktp);

				/*
				 * Fill in LBA and length
				 */
				ASSERT((cp->sc_grpcode == CDB_GROUP1) ||
				    (cp->sc_grpcode == CDB_GROUP4) ||
				    (cp->sc_grpcode == CDB_GROUP0) ||
				    (cp->sc_grpcode == CDB_GROUP5));

				if (cp->sc_grpcode == CDB_GROUP1) {
					FORMG1ADDR(cdbp, lba);
					FORMG1COUNT(cdbp, blockcount);
					return (0);
				} else if (cp->sc_grpcode == CDB_GROUP4) {
					FORMG4LONGADDR(cdbp, lba);
					FORMG4COUNT(cdbp, blockcount);
					return (0);
				} else if (cp->sc_grpcode == CDB_GROUP0) {
					FORMG0ADDR(cdbp, lba);
					FORMG0COUNT(cdbp, blockcount);
					return (0);
				} else if (cp->sc_grpcode == CDB_GROUP5) {
					FORMG5ADDR(cdbp, lba);
					FORMG5COUNT(cdbp, blockcount);
					return (0);
				}

				/*
				 * It should be impossible to not match one
				 * of the CDB types above, so we should never
				 * reach this point.  Set the CDB command byte
				 * to test-unit-ready to avoid writing
				 * to somewhere we don't intend.
				 */
				cdbp->scc_cmd = SCMD_TEST_UNIT_READY;
				return (SD_PKT_ALLOC_FAILURE_CDB_TOO_SMALL);
			} else {
				/*
				 * Couldn't get scsi_pkt
				 */
				return (SD_PKT_ALLOC_FAILURE);
			}
		}
	}

	/*
	 * None of the available CDB types were suitable.  This really
	 * should never happen: on a 64 bit system we support
	 * READ16/WRITE16 which will hold an entire 64 bit disk address
	 * and on a 32 bit system we will refuse to bind to a device
	 * larger than 2TB so addresses will never be larger than 32 bits.
	 */
	return (SD_PKT_ALLOC_FAILURE_CDB_TOO_SMALL);
}

/*
 * Function:	sd_setup_next_rw_pkt
 *
 * Description:	Setup packet for partial DMA transfers, except for the
 *		initial transfer.  sd_setup_rw_pkt should be used for
 *		the initial transfer.
 *
 * Context:	Kernel thread and may be called from interrupt context.
 */

int
sd_setup_next_rw_pkt(struct sd_lun *un,
    struct scsi_pkt *pktp, struct buf *bp,
    diskaddr_t lba, uint32_t blockcount)
{
	uchar_t com;
	union scsi_cdb *cdbp;
	uchar_t cdb_group_id;

	ASSERT(pktp != NULL);
	ASSERT(pktp->pkt_cdbp != NULL);

	cdbp = (union scsi_cdb *)pktp->pkt_cdbp;
	com = cdbp->scc_cmd;
	cdb_group_id = CDB_GROUPID(com);

	ASSERT((cdb_group_id == CDB_GROUPID_0) ||
	    (cdb_group_id == CDB_GROUPID_1) ||
	    (cdb_group_id == CDB_GROUPID_4) ||
	    (cdb_group_id == CDB_GROUPID_5));

	/*
	 * Move pkt to the next portion of the xfer.
	 * func is NULL_FUNC so we do not have to release
	 * the disk mutex here.
	 */
	if (scsi_init_pkt(SD_ADDRESS(un), pktp, bp, 0, 0, 0, 0,
	    NULL_FUNC, NULL) == pktp) {
		/* Success. Handle partial DMA */
		if (pktp->pkt_resid != 0) {
			blockcount -=
			    SD_BYTES2TGTBLOCKS(un, pktp->pkt_resid);
		}

		cdbp->scc_cmd = com;
		SD_FILL_SCSI1_LUN(un, pktp);
		if (cdb_group_id == CDB_GROUPID_1) {
			FORMG1ADDR(cdbp, lba);
			FORMG1COUNT(cdbp, blockcount);
			return (0);
		} else if (cdb_group_id == CDB_GROUPID_4) {
			FORMG4LONGADDR(cdbp, lba);
			FORMG4COUNT(cdbp, blockcount);
			return (0);
		} else if (cdb_group_id == CDB_GROUPID_0) {
			FORMG0ADDR(cdbp, lba);
			FORMG0COUNT(cdbp, blockcount);
			return (0);
		} else if (cdb_group_id == CDB_GROUPID_5) {
			FORMG5ADDR(cdbp, lba);
			FORMG5COUNT(cdbp, blockcount);
			return (0);
		}

		/* Unreachable */
		return (SD_PKT_ALLOC_FAILURE_CDB_TOO_SMALL);
	}

	/*
	 * Error setting up next portion of cmd transfer.
	 * Something is definitely very wrong and this
	 * should not happen.
	 */
	return (SD_PKT_ALLOC_FAILURE);
}

/*
 * Function:	sd_initpkt_for_uscsi
 *
 * Description: Allocate and initialize for transport a scsi_pkt struct,
 *		based upon the info specified in the given uscsi_cmd struct.
 *
 * Return Code: SD_PKT_ALLOC_SUCCESS
 *		SD_PKT_ALLOC_FAILURE
 *		SD_PKT_ALLOC_FAILURE_NO_DMA
 *		SD_PKT_ALLOC_FAILURE_CDB_TOO_SMALL
 *
 * Context:	Kernel thread and may be called from software interrupt context
 *		as part of a sdrunout callback. This function may not block or
 *		call routines that block
 */

static int
sd_initpkt_for_uscsi(struct buf *bp, struct scsi_pkt **pktpp)
{
	struct uscsi_cmd *uscmd;
	struct sd_xbuf	*xp;
	struct scsi_pkt	*pktp;
	struct sd_lun	*un;
	uint32_t	flags = 0;

	ASSERT(bp != NULL);
	ASSERT(pktpp != NULL);
	xp = SD_GET_XBUF(bp);
	ASSERT(xp != NULL);
	un = SD_GET_UN(bp);
	ASSERT(un != NULL);
	ASSERT(mutex_owned(SD_MUTEX(un)));

	/* The pointer to the uscsi_cmd struct is expected in xb_pktinfo */
	uscmd = (struct uscsi_cmd *)xp->xb_pktinfo;
	ASSERT(uscmd != NULL);

	SD_TRACE(SD_LOG_IO_CORE, un,
	    "sd_initpkt_for_uscsi: entry: buf:0x%p\n", bp);

	/*
	 * Allocate the scsi_pkt for the command.
	 * Note: If PKT_DMA_PARTIAL flag is set, scsi_vhci binds a path
	 *	 during scsi_init_pkt time and will continue to use the
	 *	 same path as long as the same scsi_pkt is used without
	 *	 intervening scsi_dma_free(). Since uscsi command does
	 *	 not call scsi_dmafree() before retry failed command, it
	 *	 is necessary to make sure PKT_DMA_PARTIAL flag is NOT
	 *	 set such that scsi_vhci can use other available path for
	 *	 retry. Besides, uscsi command does not allow DMA breakup,
	 *	 so there is no need to set PKT_DMA_PARTIAL flag.
	 */
	if (uscmd->uscsi_rqlen > SENSE_LENGTH) {
		pktp = scsi_init_pkt(SD_ADDRESS(un), NULL,
		    ((bp->b_bcount != 0) ? bp : NULL), uscmd->uscsi_cdblen,
		    ((int)(uscmd->uscsi_rqlen) + sizeof (struct scsi_arq_status)
		    - sizeof (struct scsi_extended_sense)), 0,
		    (un->un_pkt_flags & ~PKT_DMA_PARTIAL) | PKT_XARQ,
		    sdrunout, (caddr_t)un);
	} else {
		pktp = scsi_init_pkt(SD_ADDRESS(un), NULL,
		    ((bp->b_bcount != 0) ? bp : NULL), uscmd->uscsi_cdblen,
		    sizeof (struct scsi_arq_status), 0,
		    (un->un_pkt_flags & ~PKT_DMA_PARTIAL),
		    sdrunout, (caddr_t)un);
	}

	if (pktp == NULL) {
		*pktpp = NULL;
		/*
		 * Set the driver state to RWAIT to indicate the driver
		 * is waiting on resource allocations. The driver will not
		 * suspend, pm_suspend, or detach while the state is RWAIT.
		 */
		New_state(un, SD_STATE_RWAIT);

		SD_ERROR(SD_LOG_IO_CORE, un,
		    "sd_initpkt_for_uscsi: No pktp. exit bp:0x%p\n", bp);

		if ((bp->b_flags & B_ERROR) != 0) {
			return (SD_PKT_ALLOC_FAILURE_NO_DMA);
		}
		return (SD_PKT_ALLOC_FAILURE);
	}

	/*
	 * We do not do DMA breakup for USCSI commands, so return failure
	 * here if all the needed DMA resources were not allocated.
	 */
	if ((un->un_pkt_flags & PKT_DMA_PARTIAL) &&
	    (bp->b_bcount != 0) && (pktp->pkt_resid != 0)) {
		scsi_destroy_pkt(pktp);
		SD_ERROR(SD_LOG_IO_CORE, un, "sd_initpkt_for_uscsi: "
		    "No partial DMA for USCSI. exit: buf:0x%p\n", bp);
		return (SD_PKT_ALLOC_FAILURE_PKT_TOO_SMALL);
	}

	/* Init the cdb from the given uscsi struct */
	(void) scsi_setup_cdb((union scsi_cdb *)pktp->pkt_cdbp,
	    uscmd->uscsi_cdb[0], 0, 0, 0);

	SD_FILL_SCSI1_LUN(un, pktp);

	/*
	 * Set up the optional USCSI flags. See the uscsi (7I) man page
	 * for listing of the supported flags.
	 */

	if (uscmd->uscsi_flags & USCSI_SILENT) {
		flags |= FLAG_SILENT;
	}

	if (uscmd->uscsi_flags & USCSI_DIAGNOSE) {
		flags |= FLAG_DIAGNOSE;
	}

	if (uscmd->uscsi_flags & USCSI_ISOLATE) {
		flags |= FLAG_ISOLATE;
	}

	if (un->un_f_is_fibre == FALSE) {
		if (uscmd->uscsi_flags & USCSI_RENEGOT) {
			flags |= FLAG_RENEGOTIATE_WIDE_SYNC;
		}
	}

	/*
	 * Set the pkt flags here so we save time later.
	 * Note: These flags are NOT in the uscsi man page!!!
	 */
	if (uscmd->uscsi_flags & USCSI_HEAD) {
		flags |= FLAG_HEAD;
	}

	if (uscmd->uscsi_flags & USCSI_NOINTR) {
		flags |= FLAG_NOINTR;
	}

	/*
	 * For tagged queueing, things get a bit complicated.
	 * Check first for head of queue and last for ordered queue.
	 * If neither head nor order, use the default driver tag flags.
	 */
	if ((uscmd->uscsi_flags & USCSI_NOTAG) == 0) {
		if (uscmd->uscsi_flags & USCSI_HTAG) {
			flags |= FLAG_HTAG;
		} else if (uscmd->uscsi_flags & USCSI_OTAG) {
			flags |= FLAG_OTAG;
		} else {
			flags |= un->un_tagflags & FLAG_TAGMASK;
		}
	}

	if (uscmd->uscsi_flags & USCSI_NODISCON) {
		flags = (flags & ~FLAG_TAGMASK) | FLAG_NODISCON;
	}

	pktp->pkt_flags = flags;

	/* Transfer uscsi information to scsi_pkt */
	(void) scsi_uscsi_pktinit(uscmd, pktp);

	/* Copy the caller's CDB into the pkt... */
	bcopy(uscmd->uscsi_cdb, pktp->pkt_cdbp, uscmd->uscsi_cdblen);

	if (uscmd->uscsi_timeout == 0) {
		pktp->pkt_time = un->un_uscsi_timeout;
	} else {
		pktp->pkt_time = uscmd->uscsi_timeout;
	}

	/* need it later to identify USCSI request in sdintr */
	xp->xb_pkt_flags |= SD_XB_USCSICMD;

	xp->xb_sense_resid = uscmd->uscsi_rqresid;

	pktp->pkt_private = bp;
	pktp->pkt_comp = sdintr;
	*pktpp = pktp;

	SD_TRACE(SD_LOG_IO_CORE, un,
	    "sd_initpkt_for_uscsi: exit: buf:0x%p\n", bp);

	return (SD_PKT_ALLOC_SUCCESS);
}


/*
 * Function:	sd_destroypkt_for_uscsi
 *
 * Description: Free the scsi_pkt(9S) struct for the given bp, for uscsi
 *		IOs. Also saves relevant info into the associated uscsi_cmd
 *		struct.
 *
 * Context:	May be called under interrupt context
 */

static void
sd_destroypkt_for_uscsi(struct buf *bp)
{
	struct uscsi_cmd *uscmd;
	struct sd_xbuf	*xp;
	struct scsi_pkt	*pktp;
	struct sd_lun	*un;
	struct sd_uscsi_info *suip;

	ASSERT(bp != NULL);
	xp = SD_GET_XBUF(bp);
	ASSERT(xp != NULL);
	un = SD_GET_UN(bp);
	ASSERT(un != NULL);
	ASSERT(!mutex_owned(SD_MUTEX(un)));
	pktp = SD_GET_PKTP(bp);
	ASSERT(pktp != NULL);

	SD_TRACE(SD_LOG_IO_CORE, un,
	    "sd_destroypkt_for_uscsi: entry: buf:0x%p\n", bp);

	/* The pointer to the uscsi_cmd struct is expected in xb_pktinfo */
	uscmd = (struct uscsi_cmd *)xp->xb_pktinfo;
	ASSERT(uscmd != NULL);

	/* Save the status and the residual into the uscsi_cmd struct */
	uscmd->uscsi_status = ((*(pktp)->pkt_scbp) & STATUS_MASK);
	uscmd->uscsi_resid = bp->b_resid;

	/* Transfer scsi_pkt information to uscsi */
	(void) scsi_uscsi_pktfini(pktp, uscmd);

	/*
	 * If enabled, copy any saved sense data
	 * into the area specified by the uscsi command.
	 */
	if (((uscmd->uscsi_flags & USCSI_RQENABLE) != 0) &&
	    (uscmd->uscsi_rqlen != 0) && (uscmd->uscsi_rqbuf != NULL)) {
		/*
		 * Note: uscmd->uscsi_rqbuf should always point to a buffer
		 * at least SENSE_LENGTH bytes in size (see sd_send_scsi_cmd())
		 */
		uscmd->uscsi_rqstatus = xp->xb_sense_status;
		uscmd->uscsi_rqresid = xp->xb_sense_resid;
		if (uscmd->uscsi_rqlen > SENSE_LENGTH) {
			bcopy(xp->xb_sense_data, uscmd->uscsi_rqbuf,
			    MAX_SENSE_LENGTH);
		} else {
			bcopy(xp->xb_sense_data, uscmd->uscsi_rqbuf,
			    SENSE_LENGTH);
		}
	}
	/*
	 * The following assignments are for SCSI FMA.
	 */
	ASSERT(xp->xb_private != NULL);
	suip = (struct sd_uscsi_info *)xp->xb_private;
	suip->ui_pkt_reason = pktp->pkt_reason;
	suip->ui_pkt_state = pktp->pkt_state;
	suip->ui_pkt_statistics = pktp->pkt_statistics;
	suip->ui_lba = (uint64_t)SD_GET_BLKNO(bp);

	/* We are done with the scsi_pkt; free it now */
	ASSERT(SD_GET_PKTP(bp) != NULL);
	scsi_destroy_pkt(SD_GET_PKTP(bp));

	SD_TRACE(SD_LOG_IO_CORE, un,
	    "sd_destroypkt_for_uscsi: exit: buf:0x%p\n", bp);
}


/*
 * Function:	sd_bioclone_alloc
 *
 * Description: Allocate a buf(9S) and init it as per the given buf
 *		and the various arguments. The associated sd_xbuf
 *		struct is (nearly) duplicated. The struct buf *bp
 *		argument is saved in new_xp->xb_private.
 *
 * Arguments:	bp - ptr to the buf(9S) to be "shadowed"
 *		datalen - size of data area for the shadow bp
 *		blkno - starting LBA
 *		func - function pointer for b_iodone in the shadow buf. (May
 *			be NULL if none.)
 *
 * Return Code: Pointer to the allocated buf(9S) struct
 *
 * Context:	Can sleep.
 */

static struct buf *
sd_bioclone_alloc(struct buf *bp, size_t datalen,
    daddr_t blkno, int (*func)(struct buf *))
{
	struct sd_lun	*un;
	struct sd_xbuf	*xp;
	struct sd_xbuf	*new_xp;
	struct buf	*new_bp;

	ASSERT(bp != NULL);
	xp = SD_GET_XBUF(bp);
	ASSERT(xp != NULL);
	un = SD_GET_UN(bp);
	ASSERT(un != NULL);
	ASSERT(!mutex_owned(SD_MUTEX(un)));

	new_bp = bioclone(bp, 0, datalen, SD_GET_DEV(un), blkno, func,
	    NULL, KM_SLEEP);

	new_bp->b_lblkno = blkno;

	/*
	 * Allocate an xbuf for the shadow bp and copy the contents of the
	 * original xbuf into it.
	 */
	new_xp = kmem_alloc(sizeof (struct sd_xbuf), KM_SLEEP);
	bcopy(xp, new_xp, sizeof (struct sd_xbuf));

	/*
	 * The given bp is automatically saved in the xb_private member
	 * of the new xbuf. Callers are allowed to depend on this.
	 */
	new_xp->xb_private = bp;

	new_bp->b_private = new_xp;

	return (new_bp);
}

/*
 * Function:	sd_shadow_buf_alloc
 *
 * Description: Allocate a buf(9S) and init it as per the given buf
 *		and the various arguments. The associated sd_xbuf
 *		struct is (nearly) duplicated. The struct buf *bp
 *		argument is saved in new_xp->xb_private.
 *
 * Arguments:	bp - ptr to the buf(9S) to be "shadowed"
 *		datalen - size of data area for the shadow bp
 *		bflags - B_READ or B_WRITE (pseudo flag)
 *		blkno - starting LBA
 *		func - function pointer for b_iodone in the shadow buf. (May
 *			be NULL if none.)
 *
 * Return Code: Pointer to the allocated buf(9S) struct
 *
 * Context:	Can sleep.
 */

static struct buf *
sd_shadow_buf_alloc(struct buf *bp, size_t datalen, uint_t bflags,
    daddr_t blkno, int (*func)(struct buf *))
{
	struct sd_lun	*un;
	struct sd_xbuf	*xp;
	struct sd_xbuf	*new_xp;
	struct buf	*new_bp;

	ASSERT(bp != NULL);
	xp = SD_GET_XBUF(bp);
	ASSERT(xp != NULL);
	un = SD_GET_UN(bp);
	ASSERT(un != NULL);
	ASSERT(!mutex_owned(SD_MUTEX(un)));

	if (bp->b_flags & (B_PAGEIO | B_PHYS)) {
		bp_mapin(bp);
	}

	bflags &= (B_READ | B_WRITE);
#if defined(__i386) || defined(__amd64)
	new_bp = getrbuf(KM_SLEEP);
	new_bp->b_un.b_addr = kmem_zalloc(datalen, KM_SLEEP);
	new_bp->b_bcount = datalen;
	new_bp->b_flags = bflags |
	    (bp->b_flags & ~(B_PAGEIO | B_PHYS | B_REMAPPED | B_SHADOW));
#else
	new_bp = scsi_alloc_consistent_buf(SD_ADDRESS(un), NULL,
	    datalen, bflags, SLEEP_FUNC, NULL);
#endif
	new_bp->av_forw = NULL;
	new_bp->av_back = NULL;
	new_bp->b_dev = bp->b_dev;
	new_bp->b_blkno = blkno;
	new_bp->b_iodone = func;
	new_bp->b_edev = bp->b_edev;
	new_bp->b_resid = 0;

	/* We need to preserve the B_FAILFAST flag */
	if (bp->b_flags & B_FAILFAST) {
		new_bp->b_flags |= B_FAILFAST;
	}

	/*
	 * Allocate an xbuf for the shadow bp and copy the contents of the
	 * original xbuf into it.
	 */
	new_xp = kmem_alloc(sizeof (struct sd_xbuf), KM_SLEEP);
	bcopy(xp, new_xp, sizeof (struct sd_xbuf));

	/* Need later to copy data between the shadow buf & original buf! */
	new_xp->xb_pkt_flags |= PKT_CONSISTENT;

	/*
	 * The given bp is automatically saved in the xb_private member
	 * of the new xbuf. Callers are allowed to depend on this.
	 */
	new_xp->xb_private = bp;

	new_bp->b_private = new_xp;

	return (new_bp);
}

/*
 * Function:	sd_bioclone_free
 *
 * Description: Deallocate a buf(9S) that was used for 'shadow' IO operations
 *		in the larger than partition operation.
 *
 * Context:	May be called under interrupt context
 */

static void
sd_bioclone_free(struct buf *bp)
{
	struct sd_xbuf	*xp;

	ASSERT(bp != NULL);
	xp = SD_GET_XBUF(bp);
	ASSERT(xp != NULL);

	/*
	 * Call bp_mapout() before freeing the buf, in case a lower
	 * layer or HBA had done a bp_mapin(). we must do this here
	 * as we are the "originator" of the shadow buf.
	 */
	bp_mapout(bp);

	/*
	 * Null out b_iodone before freeing the bp, to ensure that the driver
	 * never gets confused by a stale value in this field. (Just a little
	 * extra defensiveness here.)
	 */
	bp->b_iodone = NULL;

	freerbuf(bp);

	kmem_free(xp, sizeof (struct sd_xbuf));
}

/*
 * Function:	sd_shadow_buf_free
 *
 * Description: Deallocate a buf(9S) that was used for 'shadow' IO operations.
 *
 * Context:	May be called under interrupt context
 */

static void
sd_shadow_buf_free(struct buf *bp)
{
	struct sd_xbuf	*xp;

	ASSERT(bp != NULL);
	xp = SD_GET_XBUF(bp);
	ASSERT(xp != NULL);

#if defined(__sparc)
	/*
	 * Call bp_mapout() before freeing the buf, in case a lower
	 * layer or HBA had done a bp_mapin(). we must do this here
	 * as we are the "originator" of the shadow buf.
	 */
	bp_mapout(bp);
#endif

	/*
	 * Null out b_iodone before freeing the bp, to ensure that the driver
	 * never gets confused by a stale value in this field. (Just a little
	 * extra defensiveness here.)
	 */
	bp->b_iodone = NULL;

#if defined(__i386) || defined(__amd64)
	kmem_free(bp->b_un.b_addr, bp->b_bcount);
	freerbuf(bp);
#else
	scsi_free_consistent_buf(bp);
#endif

	kmem_free(xp, sizeof (struct sd_xbuf));
}


/*
 * Function:	sd_print_transport_rejected_message
 *
 * Description: This implements the ludicrously complex rules for printing
 *		a "transport rejected" message. This is to address the
 *		specific problem of having a flood of this error message
 *		produced when a failover occurs.
 *
 * Context:	Any.
 */

static void
sd_print_transport_rejected_message(struct sd_lun *un, struct sd_xbuf *xp,
    int code)
{
	ASSERT(un != NULL);
	ASSERT(mutex_owned(SD_MUTEX(un)));
	ASSERT(xp != NULL);

	/*
	 * Print the "transport rejected" message under the following
	 * conditions:
	 *
	 * - Whenever the SD_LOGMASK_DIAG bit of sd_level_mask is set
	 * - The error code from scsi_transport() is NOT a TRAN_FATAL_ERROR.
	 * - If the error code IS a TRAN_FATAL_ERROR, then the message is
	 *   printed the FIRST time a TRAN_FATAL_ERROR is returned from
	 *   scsi_transport(9F) (which indicates that the target might have
	 *   gone off-line). This uses the un->un_tran_fatal_count
	 *   count, which is incremented whenever a TRAN_FATAL_ERROR is
	 *   received, and reset to zero whenever a TRAN_ACCEPT is returned
	 *   from scsi_transport().
	 *
	 * The FLAG_SILENT in the scsi_pkt must be CLEARED in ALL of
	 * the preceding cases in order for the message to be printed.
	 */
	if ((xp->xb_pktp->pkt_flags & FLAG_SILENT) == 0) {
		if ((sd_level_mask & SD_LOGMASK_DIAG) ||
		    (code != TRAN_FATAL_ERROR) ||
		    (un->un_tran_fatal_count == 1)) {
			switch (code) {
			case TRAN_BADPKT:
				scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
				    "transport rejected bad packet\n");
				break;
			case TRAN_FATAL_ERROR:
				scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
				    "transport rejected fatal error\n");
				break;
			default:
				scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
				    "transport rejected (%d)\n", code);
				break;
			}
		}
	}
}


/*
 * Function:	sd_add_buf_to_waitq
 *
 * Description: Add the given buf(9S) struct to the wait queue for the
 *		instance. If sorting is enabled, then the buf is added
 *		to the queue via an elevator sort algorithm (a la
 *		disksort(9F)). The SD_GET_BLKNO(bp) is used as the sort key.
 *		If sorting is not enabled, then the buf is just added
 *		to the end of the wait queue.
 *
 * Return Code: void
 *
 * Context:	Does not sleep/block, therefore technically can be called
 *		from any context. However if sorting is enabled then the
 *		execution time is indeterminate, and may take long if
 *		the wait queue grows large.
 */

static void
sd_add_buf_to_waitq(struct sd_lun *un, struct buf *bp)
{
	struct buf *ap;

	ASSERT(bp != NULL);
	ASSERT(un != NULL);
	ASSERT(mutex_owned(SD_MUTEX(un)));

	/* If the queue is empty, add the buf as the only entry & return. */
	if (un->un_waitq_headp == NULL) {
		ASSERT(un->un_waitq_tailp == NULL);
		un->un_waitq_headp = un->un_waitq_tailp = bp;
		bp->av_forw = NULL;
		return;
	}

	ASSERT(un->un_waitq_tailp != NULL);

	/*
	 * If sorting is disabled, just add the buf to the tail end of
	 * the wait queue and return.
	 */
	if (un->un_f_disksort_disabled) {
		un->un_waitq_tailp->av_forw = bp;
		un->un_waitq_tailp = bp;
		bp->av_forw = NULL;
		return;
	}

	/*
	 * Sort thru the list of requests currently on the wait queue
	 * and add the new buf request at the appropriate position.
	 *
	 * The un->un_waitq_headp is an activity chain pointer on which
	 * we keep two queues, sorted in ascending SD_GET_BLKNO() order. The
	 * first queue holds those requests which are positioned after
	 * the current SD_GET_BLKNO() (in the first request); the second holds
	 * requests which came in after their SD_GET_BLKNO() number was passed.
	 * Thus we implement a one way scan, retracting after reaching
	 * the end of the drive to the first request on the second
	 * queue, at which time it becomes the first queue.
	 * A one-way scan is natural because of the way UNIX read-ahead
	 * blocks are allocated.
	 *
	 * If we lie after the first request, then we must locate the
	 * second request list and add ourselves to it.
	 */
	ap = un->un_waitq_headp;
	if (SD_GET_BLKNO(bp) < SD_GET_BLKNO(ap)) {
		while (ap->av_forw != NULL) {
			/*
			 * Look for an "inversion" in the (normally
			 * ascending) block numbers. This indicates
			 * the start of the second request list.
			 */
			if (SD_GET_BLKNO(ap->av_forw) < SD_GET_BLKNO(ap)) {
				/*
				 * Search the second request list for the
				 * first request at a larger block number.
				 * We go before that; however if there is
				 * no such request, we go at the end.
				 */
				do {
					if (SD_GET_BLKNO(bp) <
					    SD_GET_BLKNO(ap->av_forw)) {
						goto insert;
					}
					ap = ap->av_forw;
				} while (ap->av_forw != NULL);
				goto insert;	/* after last */
			}
			ap = ap->av_forw;
		}

		/*
		 * No inversions... we will go after the last, and
		 * be the first request in the second request list.
		 */
		goto insert;
	}

	/*
	 * Request is at/after the current request...
	 * sort in the first request list.
	 */
	while (ap->av_forw != NULL) {
		/*
		 * We want to go after the current request (1) if
		 * there is an inversion after it (i.e. it is the end
		 * of the first request list), or (2) if the next
		 * request is a larger block no. than our request.
		 */
		if ((SD_GET_BLKNO(ap->av_forw) < SD_GET_BLKNO(ap)) ||
		    (SD_GET_BLKNO(bp) < SD_GET_BLKNO(ap->av_forw))) {
			goto insert;
		}
		ap = ap->av_forw;
	}

	/*
	 * Neither a second list nor a larger request, therefore
	 * we go at the end of the first list (which is the same
	 * as the end of the whole shebang).
	 */
insert:
	bp->av_forw = ap->av_forw;
	ap->av_forw = bp;

	/*
	 * If we inserted onto the tail end of the waitq, make sure the
	 * tail pointer is updated.
	 */
	if (ap == un->un_waitq_tailp) {
		un->un_waitq_tailp = bp;
	}
}


/*
 * Function:	sd_start_cmds
 *
 * Description: Remove and transport cmds from the driver queues.
 *
 * Arguments:	un - pointer to the unit (soft state) struct for the target.
 *
 *		immed_bp - ptr to a buf to be transported immediately. Only
 *		the immed_bp is transported; bufs on the waitq are not
 *		processed and the un_retry_bp is not checked. If immed_bp is
 *		NULL, then normal queue processing is performed.
 *
 * Context:	May be called from kernel thread context, interrupt context,
 *		or runout callback context. This function may not block or
 *		call routines that block.
13984 */ 13985 13986 static void 13987 sd_start_cmds(struct sd_lun *un, struct buf *immed_bp) 13988 { 13989 struct sd_xbuf *xp; 13990 struct buf *bp; 13991 void (*statp)(kstat_io_t *); 13992 #if defined(__i386) || defined(__amd64) /* DMAFREE for x86 only */ 13993 void (*saved_statp)(kstat_io_t *); 13994 #endif 13995 int rval; 13996 struct sd_fm_internal *sfip = NULL; 13997 13998 ASSERT(un != NULL); 13999 ASSERT(mutex_owned(SD_MUTEX(un))); 14000 ASSERT(un->un_ncmds_in_transport >= 0); 14001 ASSERT(un->un_throttle >= 0); 14002 14003 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sd_start_cmds: entry\n"); 14004 14005 do { 14006 #if defined(__i386) || defined(__amd64) /* DMAFREE for x86 only */ 14007 saved_statp = NULL; 14008 #endif 14009 14010 /* 14011 * If we are syncing or dumping, fail the command to 14012 * avoid recursively calling back into scsi_transport(). 14013 * The dump I/O itself uses a separate code path so this 14014 * only prevents non-dump I/O from being sent while dumping. 14015 * File system sync takes place before dumping begins. 14016 * During panic, filesystem I/O is allowed provided 14017 * un_in_callback is <= 1. This is to prevent recursion 14018 * such as sd_start_cmds -> scsi_transport -> sdintr -> 14019 * sd_start_cmds and so on. See panic.c for more information 14020 * about the states the system can be in during panic. 14021 */ 14022 if ((un->un_state == SD_STATE_DUMPING) || 14023 (ddi_in_panic() && (un->un_in_callback > 1))) { 14024 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14025 "sd_start_cmds: panicking\n"); 14026 goto exit; 14027 } 14028 14029 if ((bp = immed_bp) != NULL) { 14030 /* 14031 * We have a bp that must be transported immediately. 14032 * It's OK to transport the immed_bp here without doing 14033 * the throttle limit check because the immed_bp is 14034 * always used in a retry/recovery case. 
This means 14035 * that we know we are not at the throttle limit by 14036 * virtue of the fact that to get here we must have 14037 * already gotten a command back via sdintr(). This also 14038 * relies on (1) the command on un_retry_bp preventing 14039 * further commands from the waitq from being issued; 14040 * and (2) the code in sd_retry_command checking the 14041 * throttle limit before issuing a delayed or immediate 14042 * retry. This holds even if the throttle limit is 14043 * currently ratcheted down from its maximum value. 14044 */ 14045 statp = kstat_runq_enter; 14046 if (bp == un->un_retry_bp) { 14047 ASSERT((un->un_retry_statp == NULL) || 14048 (un->un_retry_statp == kstat_waitq_enter) || 14049 (un->un_retry_statp == 14050 kstat_runq_back_to_waitq)); 14051 /* 14052 * If the waitq kstat was incremented when 14053 * sd_set_retry_bp() queued this bp for a retry, 14054 * then we must set up statp so that the waitq 14055 * count will get decremented correctly below. 14056 * Also we must clear un->un_retry_statp to 14057 * ensure that we do not act on a stale value 14058 * in this field. 
14059 */ 14060 if ((un->un_retry_statp == kstat_waitq_enter) || 14061 (un->un_retry_statp == 14062 kstat_runq_back_to_waitq)) { 14063 statp = kstat_waitq_to_runq; 14064 } 14065 #if defined(__i386) || defined(__amd64) /* DMAFREE for x86 only */ 14066 saved_statp = un->un_retry_statp; 14067 #endif 14068 un->un_retry_statp = NULL; 14069 14070 SD_TRACE(SD_LOG_IO | SD_LOG_ERROR, un, 14071 "sd_start_cmds: un:0x%p: GOT retry_bp:0x%p " 14072 "un_throttle:%d un_ncmds_in_transport:%d\n", 14073 un, un->un_retry_bp, un->un_throttle, 14074 un->un_ncmds_in_transport); 14075 } else { 14076 SD_TRACE(SD_LOG_IO_CORE, un, "sd_start_cmds: " 14077 "processing priority bp:0x%p\n", bp); 14078 } 14079 14080 } else if ((bp = un->un_waitq_headp) != NULL) { 14081 /* 14082 * A command on the waitq is ready to go, but do not 14083 * send it if: 14084 * 14085 * (1) the throttle limit has been reached, or 14086 * (2) a retry is pending, or 14087 * (3) a START_STOP_UNIT callback pending, or 14088 * (4) a callback for a SD_PATH_DIRECT_PRIORITY 14089 * command is pending. 14090 * 14091 * For all of these conditions, IO processing will 14092 * restart after the condition is cleared. 14093 */ 14094 if (un->un_ncmds_in_transport >= un->un_throttle) { 14095 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14096 "sd_start_cmds: exiting, " 14097 "throttle limit reached!\n"); 14098 goto exit; 14099 } 14100 if (un->un_retry_bp != NULL) { 14101 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14102 "sd_start_cmds: exiting, retry pending!\n"); 14103 goto exit; 14104 } 14105 if (un->un_startstop_timeid != NULL) { 14106 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14107 "sd_start_cmds: exiting, " 14108 "START_STOP pending!\n"); 14109 goto exit; 14110 } 14111 if (un->un_direct_priority_timeid != NULL) { 14112 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14113 "sd_start_cmds: exiting, " 14114 "SD_PATH_DIRECT_PRIORITY cmd. 
pending!\n"); 14115 goto exit; 14116 } 14117 14118 /* Dequeue the command */ 14119 un->un_waitq_headp = bp->av_forw; 14120 if (un->un_waitq_headp == NULL) { 14121 un->un_waitq_tailp = NULL; 14122 } 14123 bp->av_forw = NULL; 14124 statp = kstat_waitq_to_runq; 14125 SD_TRACE(SD_LOG_IO_CORE, un, 14126 "sd_start_cmds: processing waitq bp:0x%p\n", bp); 14127 14128 } else { 14129 /* No work to do so bail out now */ 14130 SD_TRACE(SD_LOG_IO_CORE, un, 14131 "sd_start_cmds: no more work, exiting!\n"); 14132 goto exit; 14133 } 14134 14135 /* 14136 * Reset the state to normal. This is the mechanism by which 14137 * the state transitions from either SD_STATE_RWAIT or 14138 * SD_STATE_OFFLINE to SD_STATE_NORMAL. 14139 * If state is SD_STATE_PM_CHANGING then this command is 14140 * part of the device power control and the state must 14141 * not be put back to normal. Doing so would would 14142 * allow new commands to proceed when they shouldn't, 14143 * the device may be going off. 14144 */ 14145 if ((un->un_state != SD_STATE_SUSPENDED) && 14146 (un->un_state != SD_STATE_PM_CHANGING)) { 14147 New_state(un, SD_STATE_NORMAL); 14148 } 14149 14150 xp = SD_GET_XBUF(bp); 14151 ASSERT(xp != NULL); 14152 14153 #if defined(__i386) || defined(__amd64) /* DMAFREE for x86 only */ 14154 /* 14155 * Allocate the scsi_pkt if we need one, or attach DMA 14156 * resources if we have a scsi_pkt that needs them. The 14157 * latter should only occur for commands that are being 14158 * retried. 14159 */ 14160 if ((xp->xb_pktp == NULL) || 14161 ((xp->xb_pkt_flags & SD_XB_DMA_FREED) != 0)) { 14162 #else 14163 if (xp->xb_pktp == NULL) { 14164 #endif 14165 /* 14166 * There is no scsi_pkt allocated for this buf. Call 14167 * the initpkt function to allocate & init one. 14168 * 14169 * The scsi_init_pkt runout callback functionality is 14170 * implemented as follows: 14171 * 14172 * 1) The initpkt function always calls 14173 * scsi_init_pkt(9F) with sdrunout specified as the 14174 * callback routine. 
14175 * 2) A successful packet allocation is initialized and 14176 * the I/O is transported. 14177 * 3) The I/O associated with an allocation resource 14178 * failure is left on its queue to be retried via 14179 * runout or the next I/O. 14180 * 4) The I/O associated with a DMA error is removed 14181 * from the queue and failed with EIO. Processing of 14182 * the transport queues is also halted to be 14183 * restarted via runout or the next I/O. 14184 * 5) The I/O associated with a CDB size or packet 14185 * size error is removed from the queue and failed 14186 * with EIO. Processing of the transport queues is 14187 * continued. 14188 * 14189 * Note: there is no interface for canceling a runout 14190 * callback. To prevent the driver from detaching or 14191 * suspending while a runout is pending the driver 14192 * state is set to SD_STATE_RWAIT 14193 * 14194 * Note: using the scsi_init_pkt callback facility can 14195 * result in an I/O request persisting at the head of 14196 * the list which cannot be satisfied even after 14197 * multiple retries. In the future the driver may 14198 * implement some kind of maximum runout count before 14199 * failing an I/O. 14200 * 14201 * Note: the use of funcp below may seem superfluous, 14202 * but it helps warlock figure out the correct 14203 * initpkt function calls (see [s]sd.wlcmd). 14204 */ 14205 struct scsi_pkt *pktp; 14206 int (*funcp)(struct buf *bp, struct scsi_pkt **pktp); 14207 14208 ASSERT(bp != un->un_rqs_bp); 14209 14210 funcp = sd_initpkt_map[xp->xb_chain_iostart]; 14211 switch ((*funcp)(bp, &pktp)) { 14212 case SD_PKT_ALLOC_SUCCESS: 14213 xp->xb_pktp = pktp; 14214 SD_TRACE(SD_LOG_IO_CORE, un, 14215 "sd_start_cmd: SD_PKT_ALLOC_SUCCESS 0x%p\n", 14216 pktp); 14217 goto got_pkt; 14218 14219 case SD_PKT_ALLOC_FAILURE: 14220 /* 14221 * Temporary (hopefully) resource depletion. 14222 * Since retries and RQS commands always have a 14223 * scsi_pkt allocated, these cases should never 14224 * get here. 
So the only cases this needs to 14225 * handle is a bp from the waitq (which we put 14226 * back onto the waitq for sdrunout), or a bp 14227 * sent as an immed_bp (which we just fail). 14228 */ 14229 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14230 "sd_start_cmds: SD_PKT_ALLOC_FAILURE\n"); 14231 14232 #if defined(__i386) || defined(__amd64) /* DMAFREE for x86 only */ 14233 14234 if (bp == immed_bp) { 14235 /* 14236 * If SD_XB_DMA_FREED is clear, then 14237 * this is a failure to allocate a 14238 * scsi_pkt, and we must fail the 14239 * command. 14240 */ 14241 if ((xp->xb_pkt_flags & 14242 SD_XB_DMA_FREED) == 0) { 14243 break; 14244 } 14245 14246 /* 14247 * If this immediate command is NOT our 14248 * un_retry_bp, then we must fail it. 14249 */ 14250 if (bp != un->un_retry_bp) { 14251 break; 14252 } 14253 14254 /* 14255 * We get here if this cmd is our 14256 * un_retry_bp that was DMAFREED, but 14257 * scsi_init_pkt() failed to reallocate 14258 * DMA resources when we attempted to 14259 * retry it. This can happen when an 14260 * mpxio failover is in progress, but 14261 * we don't want to just fail the 14262 * command in this case. 14263 * 14264 * Use timeout(9F) to restart it after 14265 * a 100ms delay. We don't want to 14266 * let sdrunout() restart it, because 14267 * sdrunout() is just supposed to start 14268 * commands that are sitting on the 14269 * wait queue. The un_retry_bp stays 14270 * set until the command completes, but 14271 * sdrunout can be called many times 14272 * before that happens. Since sdrunout 14273 * cannot tell if the un_retry_bp is 14274 * already in the transport, it could 14275 * end up calling scsi_transport() for 14276 * the un_retry_bp multiple times. 14277 * 14278 * Also: don't schedule the callback 14279 * if some other callback is already 14280 * pending. 14281 */ 14282 if (un->un_retry_statp == NULL) { 14283 /* 14284 * restore the kstat pointer to 14285 * keep kstat counts coherent 14286 * when we do retry the command. 
14287 */ 14288 un->un_retry_statp = 14289 saved_statp; 14290 } 14291 14292 if ((un->un_startstop_timeid == NULL) && 14293 (un->un_retry_timeid == NULL) && 14294 (un->un_direct_priority_timeid == 14295 NULL)) { 14296 14297 un->un_retry_timeid = 14298 timeout( 14299 sd_start_retry_command, 14300 un, SD_RESTART_TIMEOUT); 14301 } 14302 goto exit; 14303 } 14304 14305 #else 14306 if (bp == immed_bp) { 14307 break; /* Just fail the command */ 14308 } 14309 #endif 14310 14311 /* Add the buf back to the head of the waitq */ 14312 bp->av_forw = un->un_waitq_headp; 14313 un->un_waitq_headp = bp; 14314 if (un->un_waitq_tailp == NULL) { 14315 un->un_waitq_tailp = bp; 14316 } 14317 goto exit; 14318 14319 case SD_PKT_ALLOC_FAILURE_NO_DMA: 14320 /* 14321 * HBA DMA resource failure. Fail the command 14322 * and continue processing of the queues. 14323 */ 14324 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14325 "sd_start_cmds: " 14326 "SD_PKT_ALLOC_FAILURE_NO_DMA\n"); 14327 break; 14328 14329 case SD_PKT_ALLOC_FAILURE_PKT_TOO_SMALL: 14330 /* 14331 * Note:x86: Partial DMA mapping not supported 14332 * for USCSI commands, and all the needed DMA 14333 * resources were not allocated. 14334 */ 14335 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14336 "sd_start_cmds: " 14337 "SD_PKT_ALLOC_FAILURE_PKT_TOO_SMALL\n"); 14338 break; 14339 14340 case SD_PKT_ALLOC_FAILURE_CDB_TOO_SMALL: 14341 /* 14342 * Note:x86: Request cannot fit into CDB based 14343 * on lba and len. 14344 */ 14345 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14346 "sd_start_cmds: " 14347 "SD_PKT_ALLOC_FAILURE_CDB_TOO_SMALL\n"); 14348 break; 14349 14350 default: 14351 /* Should NEVER get here! */ 14352 panic("scsi_initpkt error"); 14353 /*NOTREACHED*/ 14354 } 14355 14356 /* 14357 * Fatal error in allocating a scsi_pkt for this buf. 14358 * Update kstats & return the buf with an error code. 14359 * We must use sd_return_failed_command_no_restart() to 14360 * avoid a recursive call back into sd_start_cmds(). 
14361 * However this also means that we must keep processing 14362 * the waitq here in order to avoid stalling. 14363 */ 14364 if (statp == kstat_waitq_to_runq) { 14365 SD_UPDATE_KSTATS(un, kstat_waitq_exit, bp); 14366 } 14367 sd_return_failed_command_no_restart(un, bp, EIO); 14368 if (bp == immed_bp) { 14369 /* immed_bp is gone by now, so clear this */ 14370 immed_bp = NULL; 14371 } 14372 continue; 14373 } 14374 got_pkt: 14375 if (bp == immed_bp) { 14376 /* goto the head of the class.... */ 14377 xp->xb_pktp->pkt_flags |= FLAG_HEAD; 14378 } 14379 14380 un->un_ncmds_in_transport++; 14381 SD_UPDATE_KSTATS(un, statp, bp); 14382 14383 /* 14384 * Call scsi_transport() to send the command to the target. 14385 * According to SCSA architecture, we must drop the mutex here 14386 * before calling scsi_transport() in order to avoid deadlock. 14387 * Note that the scsi_pkt's completion routine can be executed 14388 * (from interrupt context) even before the call to 14389 * scsi_transport() returns. 14390 */ 14391 SD_TRACE(SD_LOG_IO_CORE, un, 14392 "sd_start_cmds: calling scsi_transport()\n"); 14393 DTRACE_PROBE1(scsi__transport__dispatch, struct buf *, bp); 14394 14395 mutex_exit(SD_MUTEX(un)); 14396 rval = scsi_transport(xp->xb_pktp); 14397 mutex_enter(SD_MUTEX(un)); 14398 14399 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14400 "sd_start_cmds: scsi_transport() returned %d\n", rval); 14401 14402 switch (rval) { 14403 case TRAN_ACCEPT: 14404 /* Clear this with every pkt accepted by the HBA */ 14405 un->un_tran_fatal_count = 0; 14406 break; /* Success; try the next cmd (if any) */ 14407 14408 case TRAN_BUSY: 14409 un->un_ncmds_in_transport--; 14410 ASSERT(un->un_ncmds_in_transport >= 0); 14411 14412 /* 14413 * Don't retry request sense, the sense data 14414 * is lost when another request is sent. 14415 * Free up the rqs buf and retry 14416 * the original failed cmd. Update kstat. 
14417 */ 14418 if (bp == un->un_rqs_bp) { 14419 SD_UPDATE_KSTATS(un, kstat_runq_exit, bp); 14420 bp = sd_mark_rqs_idle(un, xp); 14421 sd_retry_command(un, bp, SD_RETRIES_STANDARD, 14422 NULL, NULL, EIO, un->un_busy_timeout / 500, 14423 kstat_waitq_enter); 14424 goto exit; 14425 } 14426 14427 #if defined(__i386) || defined(__amd64) /* DMAFREE for x86 only */ 14428 /* 14429 * Free the DMA resources for the scsi_pkt. This will 14430 * allow mpxio to select another path the next time 14431 * we call scsi_transport() with this scsi_pkt. 14432 * See sdintr() for the rationalization behind this. 14433 */ 14434 if ((un->un_f_is_fibre == TRUE) && 14435 ((xp->xb_pkt_flags & SD_XB_USCSICMD) == 0) && 14436 ((xp->xb_pktp->pkt_flags & FLAG_SENSING) == 0)) { 14437 scsi_dmafree(xp->xb_pktp); 14438 xp->xb_pkt_flags |= SD_XB_DMA_FREED; 14439 } 14440 #endif 14441 14442 if (SD_IS_DIRECT_PRIORITY(SD_GET_XBUF(bp))) { 14443 /* 14444 * Commands that are SD_PATH_DIRECT_PRIORITY 14445 * are for error recovery situations. These do 14446 * not use the normal command waitq, so if they 14447 * get a TRAN_BUSY we cannot put them back onto 14448 * the waitq for later retry. One possible 14449 * problem is that there could already be some 14450 * other command on un_retry_bp that is waiting 14451 * for this one to complete, so we would be 14452 * deadlocked if we put this command back onto 14453 * the waitq for later retry (since un_retry_bp 14454 * must complete before the driver gets back to 14455 * commands on the waitq). 14456 * 14457 * To avoid deadlock we must schedule a callback 14458 * that will restart this command after a set 14459 * interval. This should keep retrying for as 14460 * long as the underlying transport keeps 14461 * returning TRAN_BUSY (just like for other 14462 * commands). Use the same timeout interval as 14463 * for the ordinary TRAN_BUSY retry. 
14464 */ 14465 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14466 "sd_start_cmds: scsi_transport() returned " 14467 "TRAN_BUSY for DIRECT_PRIORITY cmd!\n"); 14468 14469 SD_UPDATE_KSTATS(un, kstat_runq_exit, bp); 14470 un->un_direct_priority_timeid = 14471 timeout(sd_start_direct_priority_command, 14472 bp, un->un_busy_timeout / 500); 14473 14474 goto exit; 14475 } 14476 14477 /* 14478 * For TRAN_BUSY, we want to reduce the throttle value, 14479 * unless we are retrying a command. 14480 */ 14481 if (bp != un->un_retry_bp) { 14482 sd_reduce_throttle(un, SD_THROTTLE_TRAN_BUSY); 14483 } 14484 14485 /* 14486 * Set up the bp to be tried again 10 ms later. 14487 * Note:x86: Is there a timeout value in the sd_lun 14488 * for this condition? 14489 */ 14490 sd_set_retry_bp(un, bp, un->un_busy_timeout / 500, 14491 kstat_runq_back_to_waitq); 14492 goto exit; 14493 14494 case TRAN_FATAL_ERROR: 14495 un->un_tran_fatal_count++; 14496 /* FALLTHRU */ 14497 14498 case TRAN_BADPKT: 14499 default: 14500 un->un_ncmds_in_transport--; 14501 ASSERT(un->un_ncmds_in_transport >= 0); 14502 14503 /* 14504 * If this is our REQUEST SENSE command with a 14505 * transport error, we must get back the pointers 14506 * to the original buf, and mark the REQUEST 14507 * SENSE command as "available". 14508 */ 14509 if (bp == un->un_rqs_bp) { 14510 bp = sd_mark_rqs_idle(un, xp); 14511 xp = SD_GET_XBUF(bp); 14512 } else { 14513 /* 14514 * Legacy behavior: do not update transport 14515 * error count for request sense commands. 14516 */ 14517 SD_UPDATE_ERRSTATS(un, sd_transerrs); 14518 } 14519 14520 SD_UPDATE_KSTATS(un, kstat_runq_exit, bp); 14521 sd_print_transport_rejected_message(un, xp, rval); 14522 14523 /* 14524 * This command will be terminated by SD driver due 14525 * to a fatal transport error. We should post 14526 * ereport.io.scsi.cmd.disk.tran with driver-assessment 14527 * of "fail" for any command to indicate this 14528 * situation. 
14529 */ 14530 if (xp->xb_ena > 0) { 14531 ASSERT(un->un_fm_private != NULL); 14532 sfip = un->un_fm_private; 14533 sfip->fm_ssc.ssc_flags |= SSC_FLAGS_TRAN_ABORT; 14534 sd_ssc_extract_info(&sfip->fm_ssc, un, 14535 xp->xb_pktp, bp, xp); 14536 sd_ssc_post(&sfip->fm_ssc, SD_FM_DRV_FATAL); 14537 } 14538 14539 /* 14540 * We must use sd_return_failed_command_no_restart() to 14541 * avoid a recursive call back into sd_start_cmds(). 14542 * However this also means that we must keep processing 14543 * the waitq here in order to avoid stalling. 14544 */ 14545 sd_return_failed_command_no_restart(un, bp, EIO); 14546 14547 /* 14548 * Notify any threads waiting in sd_ddi_suspend() that 14549 * a command completion has occurred. 14550 */ 14551 if (un->un_state == SD_STATE_SUSPENDED) { 14552 cv_broadcast(&un->un_disk_busy_cv); 14553 } 14554 14555 if (bp == immed_bp) { 14556 /* immed_bp is gone by now, so clear this */ 14557 immed_bp = NULL; 14558 } 14559 break; 14560 } 14561 14562 } while (immed_bp == NULL); 14563 14564 exit: 14565 ASSERT(mutex_owned(SD_MUTEX(un))); 14566 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sd_start_cmds: exit\n"); 14567 } 14568 14569 14570 /* 14571 * Function: sd_return_command 14572 * 14573 * Description: Returns a command to its originator (with or without an 14574 * error). Also starts commands waiting to be transported 14575 * to the target. 
 *
 * Context: May be called from interrupt, kernel, or timeout context
 */

static void
sd_return_command(struct sd_lun *un, struct buf *bp)
{
	struct sd_xbuf *xp;
	struct scsi_pkt *pktp;
	struct sd_fm_internal *sfip;

	ASSERT(bp != NULL);
	ASSERT(un != NULL);
	ASSERT(mutex_owned(SD_MUTEX(un)));
	ASSERT(bp != un->un_rqs_bp);
	xp = SD_GET_XBUF(bp);
	ASSERT(xp != NULL);

	pktp = SD_GET_PKTP(bp);
	sfip = (struct sd_fm_internal *)un->un_fm_private;
	ASSERT(sfip != NULL);

	SD_TRACE(SD_LOG_IO_CORE, un, "sd_return_command: entry\n");

	/*
	 * Note: check for the "sdrestart failed" case: a partial-DMA
	 * transfer completed without error (geterror == 0, pkt_resid == 0)
	 * but DMA residue remains (xb_dma_resid != 0), so the next portion
	 * of the transfer must be set up and re-issued rather than
	 * completing the buf now. (USCSI commands are excluded.)
	 */
	if ((un->un_partial_dma_supported == 1) &&
	    ((xp->xb_pkt_flags & SD_XB_USCSICMD) != SD_XB_USCSICMD) &&
	    (geterror(bp) == 0) && (xp->xb_dma_resid != 0) &&
	    (xp->xb_pktp->pkt_resid == 0)) {

		if (sd_setup_next_xfer(un, bp, pktp, xp) != 0) {
			/*
			 * Successfully set up next portion of cmd
			 * transfer, try sending it
			 */
			sd_retry_command(un, bp, SD_RETRIES_NOCHECK,
			    NULL, NULL, 0, (clock_t)0, NULL);
			sd_start_cmds(un, NULL);
			return;	/* Note:x86: need a return here? */
		}
	}

	/*
	 * If this is the failfast bp, clear it from un_failfast_bp. This
	 * can happen if upon being re-tried the failfast bp either
	 * succeeded or encountered another error (possibly even a different
	 * error than the one that precipitated the failfast state, but in
	 * that case it would have had to exhaust retries as well). Regardless,
	 * this should not occur whenever the instance is in the active
	 * failfast state.
	 */
	if (bp == un->un_failfast_bp) {
		ASSERT(un->un_failfast_state == SD_FAILFAST_INACTIVE);
		un->un_failfast_bp = NULL;
	}

	/*
	 * Clear the failfast state upon successful completion of ANY cmd.
	 */
	if (bp->b_error == 0) {
		un->un_failfast_state = SD_FAILFAST_INACTIVE;
		/*
		 * If this is a successful command, but used to be retried,
		 * we will take it as a recovered command and post an
		 * ereport with driver-assessment of "recovered".
		 */
		if (xp->xb_ena > 0) {
			sd_ssc_extract_info(&sfip->fm_ssc, un, pktp, bp, xp);
			sd_ssc_post(&sfip->fm_ssc, SD_FM_DRV_RECOVERY);
		}
	} else {
		/*
		 * If this is a failed non-USCSI command we will post an
		 * ereport with driver-assessment set accordingly ("fail" or
		 * "fatal").
		 */
		if (!(xp->xb_pkt_flags & SD_XB_USCSICMD)) {
			sd_ssc_extract_info(&sfip->fm_ssc, un, pktp, bp, xp);
			sd_ssc_post(&sfip->fm_ssc, SD_FM_DRV_FATAL);
		}
	}

	/*
	 * This is used if the command was retried one or more times. Show that
	 * we are done with it, and allow processing of the waitq to resume.
	 */
	if (bp == un->un_retry_bp) {
		SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
		    "sd_return_command: un:0x%p: "
		    "RETURNING retry_bp:0x%p\n", un, un->un_retry_bp);
		un->un_retry_bp = NULL;
		un->un_retry_statp = NULL;
	}

	SD_UPDATE_RDWR_STATS(un, bp);
	SD_UPDATE_PARTITION_STATS(un, bp);

	switch (un->un_state) {
	case SD_STATE_SUSPENDED:
		/*
		 * Notify any threads waiting in sd_ddi_suspend() that
		 * a command completion has occurred.
		 */
		cv_broadcast(&un->un_disk_busy_cv);
		break;
	default:
		sd_start_cmds(un, NULL);
		break;
	}

	/* Return this command up the iodone chain to its originator. */
	/*
	 * Note: SD_MUTEX is dropped across the destroypkt and iodone
	 * callbacks and re-acquired before returning, so callers still
	 * hold the mutex on return but must not rely on state being
	 * unchanged across this call.
	 */
	mutex_exit(SD_MUTEX(un));

	(*(sd_destroypkt_map[xp->xb_chain_iodone]))(bp);
	xp->xb_pktp = NULL;

	SD_BEGIN_IODONE(xp->xb_chain_iodone, un, bp);

	ASSERT(!mutex_owned(SD_MUTEX(un)));
	mutex_enter(SD_MUTEX(un));

	SD_TRACE(SD_LOG_IO_CORE, un, "sd_return_command: exit\n");
}


/*
 * Function: sd_return_failed_command
 *
 * Description: Command completion when an error occurred. Sets the
 *		given errcode in the buf (b_resid is left untouched in
 *		case of a partial transfer) and completes it via
 *		sd_return_command().
 *
 * Context: May be called from interrupt context
 */

static void
sd_return_failed_command(struct sd_lun *un, struct buf *bp, int errcode)
{
	ASSERT(bp != NULL);
	ASSERT(un != NULL);
	ASSERT(mutex_owned(SD_MUTEX(un)));

	SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
	    "sd_return_failed_command: entry\n");

	/*
	 * b_resid could already be nonzero due to a partial data
	 * transfer, so do not change it here.
	 */
	SD_BIOERROR(bp, errcode);

	sd_return_command(un, bp);
	SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
	    "sd_return_failed_command: exit\n");
}


/*
 * Function: sd_return_failed_command_no_restart
 *
 * Description: Same as sd_return_failed_command, but ensures that no
 *		call back into sd_start_cmds will be issued.
 *
 * Context: May be called from interrupt context
 */

static void
sd_return_failed_command_no_restart(struct sd_lun *un, struct buf *bp,
    int errcode)
{
	struct sd_xbuf *xp;

	ASSERT(bp != NULL);
	ASSERT(un != NULL);
	ASSERT(mutex_owned(SD_MUTEX(un)));
	xp = SD_GET_XBUF(bp);
	ASSERT(xp != NULL);
	ASSERT(errcode != 0);

	SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
	    "sd_return_failed_command_no_restart: entry\n");

	/*
	 * b_resid could already be nonzero due to a partial data
	 * transfer, so do not change it here.
	 */
	SD_BIOERROR(bp, errcode);

	/*
	 * If this is the failfast bp, clear it. This can happen if the
	 * failfast bp encountered a fatal error when we attempted to
	 * re-try it (such as a scsi_transport(9F) failure). However
	 * we should NOT be in an active failfast state if the failfast
	 * bp is not NULL.
	 */
	if (bp == un->un_failfast_bp) {
		ASSERT(un->un_failfast_state == SD_FAILFAST_INACTIVE);
		un->un_failfast_bp = NULL;
	}

	if (bp == un->un_retry_bp) {
		/*
		 * This command was retried one or more times. Show that we are
		 * done with it, and allow processing of the waitq to resume.
		 */
		SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
		    "sd_return_failed_command_no_restart: "
		    " un:0x%p: RETURNING retry_bp:0x%p\n", un, un->un_retry_bp);
		un->un_retry_bp = NULL;
		un->un_retry_statp = NULL;
	}

	SD_UPDATE_RDWR_STATS(un, bp);
	SD_UPDATE_PARTITION_STATS(un, bp);

	/*
	 * Unlike sd_return_command(), no call into sd_start_cmds() is
	 * made here; the caller is responsible for resuming waitq
	 * processing. The mutex is dropped across the destroypkt and
	 * iodone callbacks and re-acquired before returning.
	 */
	mutex_exit(SD_MUTEX(un));

	/*
	 * A scsi_pkt may not exist for this buf (e.g. pkt allocation
	 * failed in sd_start_cmds()), hence the NULL check before
	 * destroying it.
	 */
	if (xp->xb_pktp != NULL) {
		(*(sd_destroypkt_map[xp->xb_chain_iodone]))(bp);
		xp->xb_pktp = NULL;
	}

	SD_BEGIN_IODONE(xp->xb_chain_iodone, un, bp);

	mutex_enter(SD_MUTEX(un));

	SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
	    "sd_return_failed_command_no_restart: exit\n");
}


/*
 * Function: sd_retry_command
 *
 * Description: queue up a command for retry, or (optionally) fail it
 *		if retry counts are exhausted.
 *
 * Arguments: un - Pointer to the sd_lun struct for the target.
 *
 *		bp - Pointer to the buf for the command to be retried.
 *
 *		retry_check_flag - Flag to see which (if any) of the retry
 *		    counts should be decremented/checked. If the indicated
 *		    retry count is exhausted, then the command will not be
 *		    retried; it will be failed instead. This should use a
 *		    value equal to one of the following:
 *
 *			SD_RETRIES_NOCHECK
 *			SD_RETRIES_STANDARD
 *			SD_RETRIES_VICTIM
 *
 *		    Optionally may be bitwise-OR'ed with SD_RETRIES_ISOLATE
 *		    if the check should be made to see if FLAG_ISOLATE is set
 *		    in the pkt. If FLAG_ISOLATE is set, then the command is
 *		    not retried, it is simply failed.
 *
 *		user_funcp - Ptr to function to call before dispatching the
 *		   command. May be NULL if no action needs to be performed.
 *		   (Primarily intended for printing messages.)
 *
 *		user_arg - Optional argument to be passed along to
 *		    the user_funcp call.
 *
 *		failure_code - errno return code to set in the bp if the
 *		    command is going to be failed.
 *
 *		retry_delay - Retry delay interval in (clock_t) units. May
 *		    be zero which indicates that the retry should be retried
 *		    immediately (ie, without an intervening delay).
 *
 *		statp - Ptr to kstat function to be updated if the command
 *		    is queued for a delayed retry. May be NULL if no kstat
 *		    update is desired.
 *
 * Context: May be called from interrupt context.
 */

static void
sd_retry_command(struct sd_lun *un, struct buf *bp, int retry_check_flag,
    void (*user_funcp)(struct sd_lun *un, struct buf *bp, void *argp, int
    code), void *user_arg, int failure_code, clock_t retry_delay,
    void (*statp)(kstat_io_t *))
{
	struct sd_xbuf *xp;
	struct scsi_pkt *pktp;
	struct sd_fm_internal *sfip;

	ASSERT(un != NULL);
	ASSERT(mutex_owned(SD_MUTEX(un)));
	ASSERT(bp != NULL);
	xp = SD_GET_XBUF(bp);
	ASSERT(xp != NULL);
	pktp = SD_GET_PKTP(bp);
	ASSERT(pktp != NULL);

	sfip = (struct sd_fm_internal *)un->un_fm_private;
	ASSERT(sfip != NULL);

	SD_TRACE(SD_LOG_IO | SD_LOG_ERROR, un,
	    "sd_retry_command: entry: bp:0x%p xp:0x%p\n", bp, xp);

	/*
	 * If we are syncing or dumping, fail the command to avoid
	 * recursively calling back into scsi_transport().
	 */
	if (ddi_in_panic()) {
		goto fail_command_no_log;
	}

	/*
	 * We should never be retrying a command with FLAG_DIAGNOSE set, so
	 * log an error and fail the command.
	 */
	if ((pktp->pkt_flags & FLAG_DIAGNOSE) != 0) {
		scsi_log(SD_DEVINFO(un), sd_label, CE_NOTE,
		    "ERROR, retrying FLAG_DIAGNOSE command.\n");
		sd_dump_memory(un, SD_LOG_IO, "CDB",
		    (uchar_t *)pktp->pkt_cdbp, CDB_SIZE, SD_LOG_HEX);
		sd_dump_memory(un, SD_LOG_IO, "Sense Data",
		    (uchar_t *)xp->xb_sense_data, SENSE_LENGTH, SD_LOG_HEX);
		goto fail_command;
	}

	/*
	 * If we are suspended, then put the command onto head of the
	 * wait queue since we don't want to start more commands, and
	 * clear the un_retry_bp. Next time when we are resumed, the
	 * command in the wait queue will be handled.
	 */
	switch (un->un_state) {
	case SD_STATE_SUSPENDED:
	case SD_STATE_DUMPING:
		bp->av_forw = un->un_waitq_headp;
		un->un_waitq_headp = bp;
		if (un->un_waitq_tailp == NULL) {
			un->un_waitq_tailp = bp;
		}
		if (bp == un->un_retry_bp) {
			un->un_retry_bp = NULL;
			un->un_retry_statp = NULL;
		}
		SD_UPDATE_KSTATS(un, kstat_waitq_enter, bp);
		SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sd_retry_command: "
		    "exiting; cmd bp:0x%p requeued for SUSPEND/DUMP\n", bp);
		return;
	default:
		break;
	}

	/*
	 * If the caller wants us to check FLAG_ISOLATE, then see if that
	 * is set; if it is then we do not want to retry the command.
	 * Normally, FLAG_ISOLATE is only used with USCSI cmds.
	 */
	if ((retry_check_flag & SD_RETRIES_ISOLATE) != 0) {
		if ((pktp->pkt_flags & FLAG_ISOLATE) != 0) {
			goto fail_command;
		}
	}


	/*
	 * If SD_RETRIES_FAILFAST is set, it indicates that either a
	 * command timeout or a selection timeout has occurred. This means
	 * that we were unable to establish any kind of communication with
	 * the target, and subsequent retries and/or commands are likely
	 * to encounter similar results and take a long time to complete.
	 *
	 * If this is a failfast error condition, we need to update the
	 * failfast state, even if this bp does not have B_FAILFAST set.
	 */
	if (retry_check_flag & SD_RETRIES_FAILFAST) {
		if (un->un_failfast_state == SD_FAILFAST_ACTIVE) {
			ASSERT(un->un_failfast_bp == NULL);
			/*
			 * If we are already in the active failfast state, and
			 * another failfast error condition has been detected,
			 * then fail this command if it has B_FAILFAST set.
			 * If B_FAILFAST is clear, then maintain the legacy
			 * behavior of retrying heroically, even though this
			 * will take a lot more time to fail the command.
			 */
			if (bp->b_flags & B_FAILFAST) {
				goto fail_command;
			}
		} else {
			/*
			 * We're not in the active failfast state, but we
			 * have a failfast error condition, so we must begin
			 * transition to the next state. We do this regardless
			 * of whether or not this bp has B_FAILFAST set.
			 */
			if (un->un_failfast_bp == NULL) {
				/*
				 * This is the first bp to meet a failfast
				 * condition so save it on un_failfast_bp &
				 * do normal retry processing. Do not enter
				 * active failfast state yet. This marks
				 * entry into the "failfast pending" state.
				 */
				un->un_failfast_bp = bp;

			} else if (un->un_failfast_bp == bp) {
				/*
				 * This is the second time *this* bp has
				 * encountered a failfast error condition,
				 * so enter active failfast state & flush
				 * queues as appropriate.
				 */
				un->un_failfast_state = SD_FAILFAST_ACTIVE;
				un->un_failfast_bp = NULL;
				sd_failfast_flushq(un);

				/*
				 * Fail this bp now if B_FAILFAST set;
				 * otherwise continue with retries. (It would
				 * be pretty ironic if this bp succeeded on a
				 * subsequent retry after we just flushed all
				 * the queues).
				 */
				if (bp->b_flags & B_FAILFAST) {
					goto fail_command;
				}

#if !defined(lint) && !defined(__lint)
			} else {
				/*
				 * If neither of the preceding conditionals
				 * was true, it means that there is some
				 * *other* bp that has met an initial failfast
				 * condition and is currently either being
				 * retried or is waiting to be retried. In
				 * that case we should perform normal retry
				 * processing on *this* bp, since there is a
				 * chance that the current failfast condition
				 * is transient and recoverable. If that does
				 * not turn out to be the case, then retries
				 * will be cleared when the wait queue is
				 * flushed anyway.
				 */
#endif
			}
		}
	} else {
		/*
		 * SD_RETRIES_FAILFAST is clear, which indicates that we
		 * likely were able to at least establish some level of
		 * communication with the target and subsequent commands
		 * and/or retries are likely to get through to the target,
		 * In this case we want to be aggressive about clearing
		 * the failfast state. Note that this does not affect
		 * the "failfast pending" condition.
		 */
		un->un_failfast_state = SD_FAILFAST_INACTIVE;
	}


	/*
	 * Check the specified retry count to see if we can still do
	 * any retries with this pkt before we should fail it.
	 */
	switch (retry_check_flag & SD_RETRIES_MASK) {
	case SD_RETRIES_VICTIM:
		/*
		 * Check the victim retry count. If exhausted, then fall
		 * thru & check against the standard retry count.
		 */
		if (xp->xb_victim_retry_count < un->un_victim_retry_count) {
			/* Increment count & proceed with the retry */
			xp->xb_victim_retry_count++;
			break;
		}
		/* Victim retries exhausted, fall back to std. retries... */
		/* FALLTHRU */

	case SD_RETRIES_STANDARD:
		if (xp->xb_retry_count >= un->un_retry_count) {
			/* Retries exhausted, fail the command */
			SD_TRACE(SD_LOG_IO_CORE, un,
			    "sd_retry_command: retries exhausted!\n");
			/*
			 * update b_resid for failed SCMD_READ & SCMD_WRITE
			 * commands with nonzero pkt_resid.
			 */
			if ((pktp->pkt_reason == CMD_CMPLT) &&
			    (SD_GET_PKT_STATUS(pktp) == STATUS_GOOD) &&
			    (pktp->pkt_resid != 0)) {
				uchar_t op = SD_GET_PKT_OPCODE(pktp) & 0x1F;
				if ((op == SCMD_READ) || (op == SCMD_WRITE)) {
					SD_UPDATE_B_RESID(bp, pktp);
				}
			}
			goto fail_command;
		}
		xp->xb_retry_count++;
		SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
		    "sd_retry_command: retry count:%d\n", xp->xb_retry_count);
		break;

	case SD_RETRIES_UA:
		if (xp->xb_ua_retry_count >= sd_ua_retry_count) {
			/* Retries exhausted, fail the command */
			scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
			    "Unit Attention retries exhausted. "
			    "Check the target.\n");
			goto fail_command;
		}
		xp->xb_ua_retry_count++;
		SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
		    "sd_retry_command: retry count:%d\n",
		    xp->xb_ua_retry_count);
		break;

	case SD_RETRIES_BUSY:
		if (xp->xb_retry_count >= un->un_busy_retry_count) {
			/* Retries exhausted, fail the command */
			SD_TRACE(SD_LOG_IO_CORE, un,
			    "sd_retry_command: retries exhausted!\n");
			goto fail_command;
		}
		xp->xb_retry_count++;
		SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
		    "sd_retry_command: retry count:%d\n", xp->xb_retry_count);
		break;

	case SD_RETRIES_NOCHECK:
	default:
		/* No retry count to check. Just proceed with the retry */
		break;
	}

	/* The retry goes to the head of the queue ahead of other cmds. */
	xp->xb_pktp->pkt_flags |= FLAG_HEAD;

	/*
	 * If this is a non-USCSI command being retried
	 * during execution last time, we should post an ereport with
	 * driver-assessment of the value "retry".
	 * For partial DMA, request sense and STATUS_QFULL, there are no
	 * hardware errors, we bypass ereport posting.
	 */
	if (failure_code != 0) {
		if (!(xp->xb_pkt_flags & SD_XB_USCSICMD)) {
			sd_ssc_extract_info(&sfip->fm_ssc, un, pktp, bp, xp);
			sd_ssc_post(&sfip->fm_ssc, SD_FM_DRV_RETRY);
		}
	}

	/*
	 * If we were given a zero timeout, we must attempt to retry the
	 * command immediately (ie, without a delay).
	 */
	if (retry_delay == 0) {
		/*
		 * Check some limiting conditions to see if we can actually
		 * do the immediate retry. If we cannot, then we must
		 * fall back to queueing up a delayed retry.
		 */
		if (un->un_ncmds_in_transport >= un->un_throttle) {
			/*
			 * We are at the throttle limit for the target,
			 * fall back to delayed retry.
			 */
			retry_delay = un->un_busy_timeout;
			statp = kstat_waitq_enter;
			SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
			    "sd_retry_command: immed. retry hit "
			    "throttle!\n");
		} else {
			/*
			 * We're clear to proceed with the immediate retry.
			 * First call the user-provided function (if any)
			 */
			if (user_funcp != NULL) {
				(*user_funcp)(un, bp, user_arg,
				    SD_IMMEDIATE_RETRY_ISSUED);
#ifdef __lock_lint
				sd_print_incomplete_msg(un, bp, user_arg,
				    SD_IMMEDIATE_RETRY_ISSUED);
				sd_print_cmd_incomplete_msg(un, bp, user_arg,
				    SD_IMMEDIATE_RETRY_ISSUED);
				sd_print_sense_failed_msg(un, bp, user_arg,
				    SD_IMMEDIATE_RETRY_ISSUED);
#endif
			}

			SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
			    "sd_retry_command: issuing immediate retry\n");

			/*
			 * Call sd_start_cmds() to transport the command to
			 * the target.
			 */
			sd_start_cmds(un, bp);

			SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
			    "sd_retry_command exit\n");
			return;
		}
	}

	/*
	 * Set up to retry the command after a delay.
	 * First call the user-provided function (if any)
	 */
	if (user_funcp != NULL) {
		(*user_funcp)(un, bp, user_arg, SD_DELAYED_RETRY_ISSUED);
	}

	sd_set_retry_bp(un, bp, retry_delay, statp);

	SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sd_retry_command: exit\n");
	return;

fail_command:

	if (user_funcp != NULL) {
		(*user_funcp)(un, bp, user_arg, SD_NO_RETRY_ISSUED);
	}

fail_command_no_log:

	SD_INFO(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
	    "sd_retry_command: returning failed command\n");

	sd_return_failed_command(un, bp, failure_code);

	SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sd_retry_command: exit\n");
}


/*
 * Function: sd_set_retry_bp
 *
 * Description: Set up the given bp for retry.
 *
 * Arguments: un - ptr to associated softstate
 *		bp - ptr to buf(9S) for the command
 *		retry_delay - time interval before issuing retry (may be 0)
 *		statp - optional pointer to kstat function
 *
 * Context: May be called under interrupt context
 */

static void
sd_set_retry_bp(struct sd_lun *un, struct buf *bp, clock_t retry_delay,
    void (*statp)(kstat_io_t *))
{
	ASSERT(un != NULL);
	ASSERT(mutex_owned(SD_MUTEX(un)));
	ASSERT(bp != NULL);

	SD_TRACE(SD_LOG_IO | SD_LOG_ERROR, un,
	    "sd_set_retry_bp: entry: un:0x%p bp:0x%p\n", un, bp);

	/*
	 * Indicate that the command is being retried. This will not allow any
	 * other commands on the wait queue to be transported to the target
	 * until this command has been completed (success or failure). The
	 * "retry command" is not transported to the target until the given
	 * time delay expires, unless the user specified a 0 retry_delay.
	 *
	 * Note: the timeout(9F) callback routine is what actually calls
	 * sd_start_cmds() to transport the command, with the exception of a
	 * zero retry_delay. The only current implementor of a zero retry delay
	 * is the case where a START_STOP_UNIT is sent to spin-up a device.
	 */
	if (un->un_retry_bp == NULL) {
		/* No retry currently pending; this bp becomes the retry bp */
		ASSERT(un->un_retry_statp == NULL);
		un->un_retry_bp = bp;

		/*
		 * If the user has not specified a delay the command should
		 * be queued and no timeout should be scheduled.
		 */
		if (retry_delay == 0) {
			/*
			 * Save the kstat pointer that will be used in the
			 * call to SD_UPDATE_KSTATS() below, so that
			 * sd_start_cmds() can correctly decrement the waitq
			 * count when it is time to transport this command.
			 */
			un->un_retry_statp = statp;
			goto done;
		}
	}

	if (un->un_retry_bp == bp) {
		/*
		 * Save the kstat pointer that will be used in the call to
		 * SD_UPDATE_KSTATS() below, so that sd_start_cmds() can
		 * correctly decrement the waitq count when it is time to
		 * transport this command.
		 */
		un->un_retry_statp = statp;

		/*
		 * Schedule a timeout if:
		 *   1) The user has specified a delay.
		 *   2) There is not a START_STOP_UNIT callback pending.
		 *
		 * If no delay has been specified, then it is up to the caller
		 * to ensure that IO processing continues without stalling.
		 * Effectively, this means that the caller will issue the
		 * required call to sd_start_cmds(). The START_STOP_UNIT
		 * callback does this after the START STOP UNIT command has
		 * completed. In either of these cases we should not schedule
		 * a timeout callback here.  Also don't schedule the timeout if
		 * an SD_PATH_DIRECT_PRIORITY command is waiting to restart.
		 */
		if ((retry_delay != 0) && (un->un_startstop_timeid == NULL) &&
		    (un->un_direct_priority_timeid == NULL)) {
			un->un_retry_timeid =
			    timeout(sd_start_retry_command, un, retry_delay);
			SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
			    "sd_set_retry_bp: setting timeout: un: 0x%p"
			    " bp:0x%p un_retry_timeid:0x%p\n",
			    un, bp, un->un_retry_timeid);
		}
	} else {
		/*
		 * We only get in here if there is already another command
		 * waiting to be retried.  In this case, we just put the
		 * given command onto the wait queue, so it can be transported
		 * after the current retry command has completed.
		 *
		 * Also we have to make sure that if the command at the head
		 * of the wait queue is the un_failfast_bp, that we do not
		 * put ahead of it any other commands that are to be retried.
		 */
		if ((un->un_failfast_bp != NULL) &&
		    (un->un_failfast_bp == un->un_waitq_headp)) {
			/*
			 * Enqueue this command AFTER the first command on
			 * the wait queue (which is also un_failfast_bp).
			 */
			bp->av_forw = un->un_waitq_headp->av_forw;
			un->un_waitq_headp->av_forw = bp;
			if (un->un_waitq_headp == un->un_waitq_tailp) {
				un->un_waitq_tailp = bp;
			}
		} else {
			/* Enqueue this command at the head of the waitq. */
			bp->av_forw = un->un_waitq_headp;
			un->un_waitq_headp = bp;
			if (un->un_waitq_tailp == NULL) {
				un->un_waitq_tailp = bp;
			}
		}

		if (statp == NULL) {
			statp = kstat_waitq_enter;
		}
		SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
		    "sd_set_retry_bp: un:0x%p already delayed retry\n", un);
	}

done:
	/* Common exit: update kstats only if a kstat routine was supplied */
	if (statp != NULL) {
		SD_UPDATE_KSTATS(un, statp, bp);
	}

	SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
	    "sd_set_retry_bp: exit un:0x%p\n", un);
}


/*
 * Function: sd_start_retry_command
 *
 * Description: Start the command that has been waiting on the target's
 *		retry queue.  Called from timeout(9F) context after the
 *		retry delay interval has expired.
 *
 * Arguments: arg - pointer to associated softstate for the device.
 *
 * Context: timeout(9F) thread context.  May not sleep.
 */

static void
sd_start_retry_command(void *arg)
{
	struct sd_lun *un = arg;

	ASSERT(un != NULL);
	ASSERT(!mutex_owned(SD_MUTEX(un)));

	SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
	    "sd_start_retry_command: entry\n");

	mutex_enter(SD_MUTEX(un));

	/* The timeout has fired, so clear the pending timeout id */
	un->un_retry_timeid = NULL;

	if (un->un_retry_bp != NULL) {
		SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
		    "sd_start_retry_command: un:0x%p STARTING bp:0x%p\n",
		    un, un->un_retry_bp);
		sd_start_cmds(un, un->un_retry_bp);
	}

	mutex_exit(SD_MUTEX(un));

	SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
	    "sd_start_retry_command: exit\n");
}


/*
 * Function: sd_start_direct_priority_command
 *
 * Description: Used to re-start an SD_PATH_DIRECT_PRIORITY command that had
 *		received TRAN_BUSY when we called scsi_transport() to send it
 *		to the underlying HBA.
 *		This function is called from timeout(9F)
 *		context after the delay interval has expired.
 *
 * Arguments: arg - pointer to associated buf(9S) to be restarted.
 *
 * Context: timeout(9F) thread context.  May not sleep.
 */

static void
sd_start_direct_priority_command(void *arg)
{
	struct buf	*priority_bp = arg;
	struct sd_lun	*un;

	ASSERT(priority_bp != NULL);
	un = SD_GET_UN(priority_bp);
	ASSERT(un != NULL);
	ASSERT(!mutex_owned(SD_MUTEX(un)));

	SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
	    "sd_start_direct_priority_command: entry\n");

	mutex_enter(SD_MUTEX(un));
	/* The timeout has fired, so clear the pending timeout id */
	un->un_direct_priority_timeid = NULL;
	sd_start_cmds(un, priority_bp);
	mutex_exit(SD_MUTEX(un));

	SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
	    "sd_start_direct_priority_command: exit\n");
}


/*
 * Function: sd_send_request_sense_command
 *
 * Description: Sends a REQUEST SENSE command to the target
 *
 * Context: May be called from interrupt context.
 */

static void
sd_send_request_sense_command(struct sd_lun *un, struct buf *bp,
    struct scsi_pkt *pktp)
{
	ASSERT(bp != NULL);
	ASSERT(un != NULL);
	ASSERT(mutex_owned(SD_MUTEX(un)));

	SD_TRACE(SD_LOG_IO | SD_LOG_ERROR, un, "sd_send_request_sense_command: "
	    "entry: buf:0x%p\n", bp);

	/*
	 * If we are syncing or dumping, then fail the command to avoid a
	 * recursive callback into scsi_transport(). Also fail the command
	 * if we are suspended (legacy behavior).
	 */
	if (ddi_in_panic() || (un->un_state == SD_STATE_SUSPENDED) ||
	    (un->un_state == SD_STATE_DUMPING)) {
		sd_return_failed_command(un, bp, EIO);
		SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
		    "sd_send_request_sense_command: syncing/dumping, exit\n");
		return;
	}

	/*
	 * Retry the failed command and don't issue the request sense if:
	 *    1) the sense buf is busy
	 *    2) we have 1 or more outstanding commands on the target
	 *    (the sense data will be cleared or invalidated any way)
	 *
	 * Note: There could be an issue with not checking a retry limit here,
	 * the problem is determining which retry limit to check.
	 */
	if ((un->un_sense_isbusy != 0) || (un->un_ncmds_in_transport > 0)) {
		/* Don't retry if the command is flagged as non-retryable */
		if ((pktp->pkt_flags & FLAG_DIAGNOSE) == 0) {
			sd_retry_command(un, bp, SD_RETRIES_NOCHECK,
			    NULL, NULL, 0, un->un_busy_timeout,
			    kstat_waitq_enter);
			SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
			    "sd_send_request_sense_command: "
			    "at full throttle, retrying exit\n");
		} else {
			sd_return_failed_command(un, bp, EIO);
			SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
			    "sd_send_request_sense_command: "
			    "at full throttle, non-retryable exit\n");
		}
		return;
	}

	/* Mark the RQS resources busy and transport the REQUEST SENSE pkt */
	sd_mark_rqs_busy(un, bp);
	sd_start_cmds(un, un->un_rqs_bp);

	SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
	    "sd_send_request_sense_command: exit\n");
}


/*
 * Function: sd_mark_rqs_busy
 *
 * Description: Indicate that the request sense bp for this instance is
 *		in use.
 *
 * Context: May be called under interrupt context
 */

static void
sd_mark_rqs_busy(struct sd_lun *un, struct buf *bp)
{
	struct sd_xbuf	*sense_xp;

	ASSERT(un != NULL);
	ASSERT(bp != NULL);
	ASSERT(mutex_owned(SD_MUTEX(un)));
	ASSERT(un->un_sense_isbusy == 0);

	SD_TRACE(SD_LOG_IO_CORE, un, "sd_mark_rqs_busy: entry: "
	    "buf:0x%p xp:0x%p un:0x%p\n", bp, SD_GET_XBUF(bp), un);

	sense_xp = SD_GET_XBUF(un->un_rqs_bp);
	ASSERT(sense_xp != NULL);

	SD_INFO(SD_LOG_IO, un,
	    "sd_mark_rqs_busy: entry: sense_xp:0x%p\n", sense_xp);

	ASSERT(sense_xp->xb_pktp != NULL);
	ASSERT((sense_xp->xb_pktp->pkt_flags & (FLAG_SENSING | FLAG_HEAD))
	    == (FLAG_SENSING | FLAG_HEAD));

	un->un_sense_isbusy = 1;
	un->un_rqs_bp->b_resid = 0;
	sense_xp->xb_pktp->pkt_resid = 0;
	sense_xp->xb_pktp->pkt_reason = 0;

	/* So we can get back the bp at interrupt time! */
	sense_xp->xb_sense_bp = bp;

	/* Clear any stale sense data in the RQS buffer */
	bzero(un->un_rqs_bp->b_un.b_addr, SENSE_LENGTH);

	/*
	 * Mark this buf as awaiting sense data. (This is already set in
	 * the pkt_flags for the RQS packet.)
	 */
	((SD_GET_XBUF(bp))->xb_pktp)->pkt_flags |= FLAG_SENSING;

	/* Request sense down same path */
	if (scsi_pkt_allocated_correctly((SD_GET_XBUF(bp))->xb_pktp) &&
	    ((SD_GET_XBUF(bp))->xb_pktp)->pkt_path_instance)
		sense_xp->xb_pktp->pkt_path_instance =
		    ((SD_GET_XBUF(bp))->xb_pktp)->pkt_path_instance;

	/* Reset the retry accounting for the fresh REQUEST SENSE command */
	sense_xp->xb_retry_count = 0;
	sense_xp->xb_victim_retry_count = 0;
	sense_xp->xb_ua_retry_count = 0;
	sense_xp->xb_nr_retry_count = 0;
	sense_xp->xb_dma_resid = 0;

	/* Clean up the fields for auto-request sense */
	sense_xp->xb_sense_status = 0;
	sense_xp->xb_sense_state = 0;
	sense_xp->xb_sense_resid = 0;
	bzero(sense_xp->xb_sense_data, sizeof (sense_xp->xb_sense_data));

	SD_TRACE(SD_LOG_IO_CORE, un, "sd_mark_rqs_busy: exit\n");
}


/*
 * Function: sd_mark_rqs_idle
 *
 * Description: SD_MUTEX must be held continuously through this routine
 *		to prevent reuse of the rqs struct before the caller can
 *		complete its processing.
 *
 * Return Code: Pointer to the RQS buf
 *
 * Context: May be called under interrupt context
 */

static struct buf *
sd_mark_rqs_idle(struct sd_lun *un, struct sd_xbuf *sense_xp)
{
	struct buf *bp;
	ASSERT(un != NULL);
	ASSERT(sense_xp != NULL);
	ASSERT(mutex_owned(SD_MUTEX(un)));
	ASSERT(un->un_sense_isbusy != 0);

	un->un_sense_isbusy = 0;
	bp = sense_xp->xb_sense_bp;
	sense_xp->xb_sense_bp = NULL;

	/* This pkt is no longer interested in getting sense data */
	((SD_GET_XBUF(bp))->xb_pktp)->pkt_flags &= ~FLAG_SENSING;

	return (bp);
}



/*
 * Function: sd_alloc_rqs
 *
 * Description: Set up the unit to receive auto request sense data
 *
 * Return Code: DDI_SUCCESS or DDI_FAILURE
 *
 * Context: Called under attach(9E) context
 */

static int
sd_alloc_rqs(struct scsi_device *devp, struct sd_lun *un)
{
	struct sd_xbuf *xp;

	ASSERT(un != NULL);
	ASSERT(!mutex_owned(SD_MUTEX(un)));
	ASSERT(un->un_rqs_bp == NULL);
	ASSERT(un->un_rqs_pktp == NULL);

	/*
	 * First allocate the required buf and scsi_pkt structs, then set up
	 * the CDB in the scsi_pkt for a REQUEST SENSE command.
	 */
	un->un_rqs_bp = scsi_alloc_consistent_buf(&devp->sd_address, NULL,
	    MAX_SENSE_LENGTH, B_READ, SLEEP_FUNC, NULL);
	if (un->un_rqs_bp == NULL) {
		return (DDI_FAILURE);
	}

	un->un_rqs_pktp = scsi_init_pkt(&devp->sd_address, NULL, un->un_rqs_bp,
	    CDB_GROUP0, 1, 0, PKT_CONSISTENT, SLEEP_FUNC, NULL);

	if (un->un_rqs_pktp == NULL) {
		/* Release the buf allocated above before failing */
		sd_free_rqs(un);
		return (DDI_FAILURE);
	}

	/* Set up the CDB in the scsi_pkt for a REQUEST SENSE command. */
	(void) scsi_setup_cdb((union scsi_cdb *)un->un_rqs_pktp->pkt_cdbp,
	    SCMD_REQUEST_SENSE, 0, MAX_SENSE_LENGTH, 0);

	SD_FILL_SCSI1_LUN(un, un->un_rqs_pktp);

	/* Set up the other needed members in the ARQ scsi_pkt. */
	un->un_rqs_pktp->pkt_comp = sdintr;
	un->un_rqs_pktp->pkt_time = sd_io_time;
	un->un_rqs_pktp->pkt_flags |=
	    (FLAG_SENSING | FLAG_HEAD);	/* (1222170) */

	/*
	 * Allocate & init the sd_xbuf struct for the RQS command. Do not
	 * provide any intpkt, destroypkt routines as we take care of
	 * scsi_pkt allocation/freeing here and in sd_free_rqs().
	 */
	xp = kmem_alloc(sizeof (struct sd_xbuf), KM_SLEEP);
	sd_xbuf_init(un, un->un_rqs_bp, xp, SD_CHAIN_NULL, NULL);
	xp->xb_pktp = un->un_rqs_pktp;
	SD_INFO(SD_LOG_ATTACH_DETACH, un,
	    "sd_alloc_rqs: un 0x%p, rqs xp 0x%p, pkt 0x%p, buf 0x%p\n",
	    un, xp, un->un_rqs_pktp, un->un_rqs_bp);

	/*
	 * Save the pointer to the request sense private bp so it can
	 * be retrieved in sdintr.
	 */
	un->un_rqs_pktp->pkt_private = un->un_rqs_bp;
	ASSERT(un->un_rqs_bp->b_private == xp);

	/*
	 * See if the HBA supports auto-request sense for the specified
	 * target/lun. If it does, then try to enable it (if not already
	 * enabled).
	 *
	 * Note: For some HBAs (ifp & sf), scsi_ifsetcap will always return
	 * failure, while for other HBAs (pln) scsi_ifsetcap will always
	 * return success.  However, in both of these cases ARQ is always
	 * enabled and scsi_ifgetcap will always return true. The best approach
	 * is to issue the scsi_ifgetcap() first, then try the scsi_ifsetcap().
	 *
	 * The 3rd case is the HBA (adp) always return enabled on
	 * scsi_ifgetcap even when it's not enabled, the best approach
	 * is issue a scsi_ifsetcap then a scsi_ifgetcap
	 * Note: this case is to circumvent the Adaptec bug. (x86 only)
	 */

	if (un->un_f_is_fibre == TRUE) {
		un->un_f_arq_enabled = TRUE;
	} else {
#if defined(__i386) || defined(__amd64)
		/*
		 * Circumvent the Adaptec bug, remove this code when
		 * the bug is fixed
		 */
		(void) scsi_ifsetcap(SD_ADDRESS(un), "auto-rqsense", 1, 1);
#endif
		switch (scsi_ifgetcap(SD_ADDRESS(un), "auto-rqsense", 1)) {
		case 0:
			SD_INFO(SD_LOG_ATTACH_DETACH, un,
			    "sd_alloc_rqs: HBA supports ARQ\n");
			/*
			 * ARQ is supported by this HBA but currently is not
			 * enabled. Attempt to enable it and if successful then
			 * mark this instance as ARQ enabled.
			 */
			if (scsi_ifsetcap(SD_ADDRESS(un), "auto-rqsense", 1, 1)
			    == 1) {
				/* Successfully enabled ARQ in the HBA */
				SD_INFO(SD_LOG_ATTACH_DETACH, un,
				    "sd_alloc_rqs: ARQ enabled\n");
				un->un_f_arq_enabled = TRUE;
			} else {
				/* Could not enable ARQ in the HBA */
				SD_INFO(SD_LOG_ATTACH_DETACH, un,
				    "sd_alloc_rqs: failed ARQ enable\n");
				un->un_f_arq_enabled = FALSE;
			}
			break;
		case 1:
			/*
			 * ARQ is supported by this HBA and is already enabled.
			 * Just mark ARQ as enabled for this instance.
			 */
			SD_INFO(SD_LOG_ATTACH_DETACH, un,
			    "sd_alloc_rqs: ARQ already enabled\n");
			un->un_f_arq_enabled = TRUE;
			break;
		default:
			/*
			 * ARQ is not supported by this HBA; disable it for this
			 * instance.
			 */
			SD_INFO(SD_LOG_ATTACH_DETACH, un,
			    "sd_alloc_rqs: HBA does not support ARQ\n");
			un->un_f_arq_enabled = FALSE;
			break;
		}
	}

	return (DDI_SUCCESS);
}


/*
 * Function: sd_free_rqs
 *
 * Description: Cleanup for the pre-instance RQS command.
 *
 * Context: Kernel thread context
 */

static void
sd_free_rqs(struct sd_lun *un)
{
	ASSERT(un != NULL);

	SD_TRACE(SD_LOG_IO_CORE, un, "sd_free_rqs: entry\n");

	/*
	 * If consistent memory is bound to a scsi_pkt, the pkt
	 * has to be destroyed *before* freeing the consistent memory.
	 * Don't change the sequence of this operations.
	 * scsi_destroy_pkt() might access memory, which isn't allowed,
	 * after it was freed in scsi_free_consistent_buf().
	 */
	if (un->un_rqs_pktp != NULL) {
		scsi_destroy_pkt(un->un_rqs_pktp);
		un->un_rqs_pktp = NULL;
	}

	if (un->un_rqs_bp != NULL) {
		struct sd_xbuf *xp = SD_GET_XBUF(un->un_rqs_bp);
		/* Free the sd_xbuf allocated in sd_alloc_rqs() first */
		if (xp != NULL) {
			kmem_free(xp, sizeof (struct sd_xbuf));
		}
		scsi_free_consistent_buf(un->un_rqs_bp);
		un->un_rqs_bp = NULL;
	}
	SD_TRACE(SD_LOG_IO_CORE, un, "sd_free_rqs: exit\n");
}



/*
 * Function: sd_reduce_throttle
 *
 * Description: Reduces the maximum # of outstanding commands on a
 *		target to the current number of outstanding commands.
 *		Queues a timeout(9F) callback to restore the limit
 *		after a specified interval has elapsed.
 *		Typically used when we get a TRAN_BUSY return code
 *		back from scsi_transport().
 *
 * Arguments: un - ptr to the sd_lun softstate struct
 *	      throttle_type: SD_THROTTLE_TRAN_BUSY or SD_THROTTLE_QFULL
 *
 * Context: May be called from interrupt context
 */

static void
sd_reduce_throttle(struct sd_lun *un, int throttle_type)
{
	ASSERT(un != NULL);
	ASSERT(mutex_owned(SD_MUTEX(un)));
	ASSERT(un->un_ncmds_in_transport >= 0);

	SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sd_reduce_throttle: "
	    "entry: un:0x%p un_throttle:%d un_ncmds_in_transport:%d\n",
	    un, un->un_throttle, un->un_ncmds_in_transport);

	if (un->un_throttle > 1) {
		if (un->un_f_use_adaptive_throttle == TRUE) {
			switch (throttle_type) {
			case SD_THROTTLE_TRAN_BUSY:
				/*
				 * Remember the pre-TRAN_BUSY throttle so
				 * sd_restore_throttle() can revert to it.
				 */
				if (un->un_busy_throttle == 0) {
					un->un_busy_throttle = un->un_throttle;
				}
				break;
			case SD_THROTTLE_QFULL:
				un->un_busy_throttle = 0;
				break;
			default:
				ASSERT(FALSE);
			}

			if (un->un_ncmds_in_transport > 0) {
				un->un_throttle = un->un_ncmds_in_transport;
			}

		} else {
			if (un->un_ncmds_in_transport == 0) {
				un->un_throttle = 1;
			} else {
				un->un_throttle = un->un_ncmds_in_transport;
			}
		}
	}

	/* Reschedule the timeout if none is currently active */
	if (un->un_reset_throttle_timeid == NULL) {
		un->un_reset_throttle_timeid = timeout(sd_restore_throttle,
		    un, SD_THROTTLE_RESET_INTERVAL);
		SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
		    "sd_reduce_throttle: timeout scheduled!\n");
	}

	SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sd_reduce_throttle: "
	    "exit: un:0x%p un_throttle:%d\n", un, un->un_throttle);
}



/*
 * Function: sd_restore_throttle
 *
 * Description: Callback function for timeout(9F).  Resets the current
 *		value of un->un_throttle to its default.
 *
 * Arguments: arg - pointer to associated softstate for the device.
 *
 * Context: May be called from interrupt context
 */

static void
sd_restore_throttle(void *arg)
{
	struct sd_lun *un = arg;

	ASSERT(un != NULL);
	ASSERT(!mutex_owned(SD_MUTEX(un)));

	mutex_enter(SD_MUTEX(un));

	SD_TRACE(SD_LOG_IO | SD_LOG_ERROR, un, "sd_restore_throttle: "
	    "entry: un:0x%p un_throttle:%d\n", un, un->un_throttle);

	/* The timeout has fired, so clear the pending timeout id */
	un->un_reset_throttle_timeid = NULL;

	if (un->un_f_use_adaptive_throttle == TRUE) {
		/*
		 * If un_busy_throttle is nonzero, then it contains the
		 * value that un_throttle was when we got a TRAN_BUSY back
		 * from scsi_transport(). We want to revert back to this
		 * value.
		 *
		 * In the QFULL case, the throttle limit will incrementally
		 * increase until it reaches max throttle.
		 */
		if (un->un_busy_throttle > 0) {
			un->un_throttle = un->un_busy_throttle;
			un->un_busy_throttle = 0;
		} else {
			/*
			 * increase throttle by 10% open gate slowly, schedule
			 * another restore if saved throttle has not been
			 * reached
			 */
			short throttle;
			if (sd_qfull_throttle_enable) {
				throttle = un->un_throttle +
				    max((un->un_throttle / 10), 1);
				un->un_throttle =
				    (throttle < un->un_saved_throttle) ?
				    throttle : un->un_saved_throttle;
				if (un->un_throttle < un->un_saved_throttle) {
					un->un_reset_throttle_timeid =
					    timeout(sd_restore_throttle,
					    un,
					    SD_QFULL_THROTTLE_RESET_INTERVAL);
				}
			}
		}

		/*
		 * If un_throttle has fallen below the low-water mark, we
		 * restore the maximum value here (and allow it to ratchet
		 * down again if necessary).
		 */
		if (un->un_throttle < un->un_min_throttle) {
			un->un_throttle = un->un_saved_throttle;
		}
	} else {
		SD_TRACE(SD_LOG_IO | SD_LOG_ERROR, un, "sd_restore_throttle: "
		    "restoring limit from 0x%x to 0x%x\n",
		    un->un_throttle, un->un_saved_throttle);
		un->un_throttle = un->un_saved_throttle;
	}

	SD_TRACE(SD_LOG_IO | SD_LOG_ERROR, un,
	    "sd_restore_throttle: calling sd_start_cmds!\n");

	/* Kick the wait queue now that the throttle has been raised */
	sd_start_cmds(un, NULL);

	SD_TRACE(SD_LOG_IO | SD_LOG_ERROR, un,
	    "sd_restore_throttle: exit: un:0x%p un_throttle:%d\n",
	    un, un->un_throttle);

	mutex_exit(SD_MUTEX(un));

	SD_TRACE(SD_LOG_IO | SD_LOG_ERROR, un, "sd_restore_throttle: exit\n");
}

/*
 * Function: sdrunout
 *
 * Description: Callback routine for scsi_init_pkt when a resource allocation
 *		fails.
 *
 * Arguments: arg - a pointer to the sd_lun unit struct for the particular
 *	      soft state instance.
 *
 * Return Code: The scsi_init_pkt routine allows for the callback function to
 *		return a 0 indicating the callback should be rescheduled or a 1
 *		indicating not to reschedule. This routine always returns 1
 *		because the driver always provides a callback function to
 *		scsi_init_pkt. This results in a callback always being scheduled
 *		(via the scsi_init_pkt callback implementation) if a resource
 *		failure occurs.
 *
 * Context: This callback function may not block or call routines that block
 *
 * Note: Using the scsi_init_pkt callback facility can result in an I/O
 *	 request persisting at the head of the list which cannot be
 *	 satisfied even after multiple retries. In the future the driver
 *	 may implement some type of maximum runout count before failing
 *	 an I/O.
 */

static int
sdrunout(caddr_t arg)
{
	struct sd_lun	*un = (struct sd_lun *)arg;

	ASSERT(un != NULL);
	ASSERT(!mutex_owned(SD_MUTEX(un)));

	SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sdrunout: entry\n");

	/* Resources may now be available; restart the wait queue */
	mutex_enter(SD_MUTEX(un));
	sd_start_cmds(un, NULL);
	mutex_exit(SD_MUTEX(un));
	/*
	 * This callback routine always returns 1 (i.e. do not reschedule)
	 * because we always specify sdrunout as the callback handler for
	 * scsi_init_pkt inside the call to sd_start_cmds.
	 */
	SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sdrunout: exit\n");
	return (1);
}


/*
 * Function: sdintr
 *
 * Description: Completion callback routine for scsi_pkt(9S) structs
 *		sent to the HBA driver via scsi_transport(9F).
 *
 * Context: Interrupt context
 */

static void
sdintr(struct scsi_pkt *pktp)
{
	struct buf	*bp;
	struct sd_xbuf	*xp;
	struct sd_lun	*un;
	size_t		actual_len;
	sd_ssc_t	*sscp;

	ASSERT(pktp != NULL);
	/* pkt_private was set to the buf(9S) when the pkt was initialized */
	bp = (struct buf *)pktp->pkt_private;
	ASSERT(bp != NULL);
	xp = SD_GET_XBUF(bp);
	ASSERT(xp != NULL);
	ASSERT(xp->xb_pktp != NULL);
	un = SD_GET_UN(bp);
	ASSERT(un != NULL);
	ASSERT(!mutex_owned(SD_MUTEX(un)));

#ifdef SD_FAULT_INJECTION

	SD_INFO(SD_LOG_IOERR, un, "sdintr: sdintr calling Fault injection\n");
	/* SD FaultInjection */
	sd_faultinjection(pktp);

#endif /* SD_FAULT_INJECTION */

	SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sdintr: entry: buf:0x%p,"
	    " xp:0x%p, un:0x%p\n", bp, xp, un);

	mutex_enter(SD_MUTEX(un));

	ASSERT(un->un_fm_private != NULL);
	sscp = &((struct sd_fm_internal *)(un->un_fm_private))->fm_ssc;
	ASSERT(sscp != NULL);

	/* Reduce the count of the #commands currently in
transport */ 16027 un->un_ncmds_in_transport--; 16028 ASSERT(un->un_ncmds_in_transport >= 0); 16029 16030 /* Increment counter to indicate that the callback routine is active */ 16031 un->un_in_callback++; 16032 16033 SD_UPDATE_KSTATS(un, kstat_runq_exit, bp); 16034 16035 #ifdef SDDEBUG 16036 if (bp == un->un_retry_bp) { 16037 SD_TRACE(SD_LOG_IO | SD_LOG_ERROR, un, "sdintr: " 16038 "un:0x%p: GOT retry_bp:0x%p un_ncmds_in_transport:%d\n", 16039 un, un->un_retry_bp, un->un_ncmds_in_transport); 16040 } 16041 #endif 16042 16043 /* 16044 * If pkt_reason is CMD_DEV_GONE, fail the command, and update the media 16045 * state if needed. 16046 */ 16047 if (pktp->pkt_reason == CMD_DEV_GONE) { 16048 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 16049 "Command failed to complete...Device is gone\n"); 16050 if (un->un_mediastate != DKIO_DEV_GONE) { 16051 un->un_mediastate = DKIO_DEV_GONE; 16052 cv_broadcast(&un->un_state_cv); 16053 } 16054 sd_return_failed_command(un, bp, EIO); 16055 goto exit; 16056 } 16057 16058 if (pktp->pkt_state & STATE_XARQ_DONE) { 16059 SD_TRACE(SD_LOG_COMMON, un, 16060 "sdintr: extra sense data received. pkt=%p\n", pktp); 16061 } 16062 16063 /* 16064 * First see if the pkt has auto-request sense data with it.... 16065 * Look at the packet state first so we don't take a performance 16066 * hit looking at the arq enabled flag unless absolutely necessary. 16067 */ 16068 if ((pktp->pkt_state & STATE_ARQ_DONE) && 16069 (un->un_f_arq_enabled == TRUE)) { 16070 /* 16071 * The HBA did an auto request sense for this command so check 16072 * for FLAG_DIAGNOSE. If set this indicates a uscsi or internal 16073 * driver command that should not be retried. 16074 */ 16075 if ((pktp->pkt_flags & FLAG_DIAGNOSE) != 0) { 16076 /* 16077 * Save the relevant sense info into the xp for the 16078 * original cmd. 
16079 */ 16080 struct scsi_arq_status *asp; 16081 asp = (struct scsi_arq_status *)(pktp->pkt_scbp); 16082 xp->xb_sense_status = 16083 *((uchar_t *)(&(asp->sts_rqpkt_status))); 16084 xp->xb_sense_state = asp->sts_rqpkt_state; 16085 xp->xb_sense_resid = asp->sts_rqpkt_resid; 16086 if (pktp->pkt_state & STATE_XARQ_DONE) { 16087 actual_len = MAX_SENSE_LENGTH - 16088 xp->xb_sense_resid; 16089 bcopy(&asp->sts_sensedata, xp->xb_sense_data, 16090 MAX_SENSE_LENGTH); 16091 } else { 16092 if (xp->xb_sense_resid > SENSE_LENGTH) { 16093 actual_len = MAX_SENSE_LENGTH - 16094 xp->xb_sense_resid; 16095 } else { 16096 actual_len = SENSE_LENGTH - 16097 xp->xb_sense_resid; 16098 } 16099 if (xp->xb_pkt_flags & SD_XB_USCSICMD) { 16100 if ((((struct uscsi_cmd *) 16101 (xp->xb_pktinfo))->uscsi_rqlen) > 16102 actual_len) { 16103 xp->xb_sense_resid = 16104 (((struct uscsi_cmd *) 16105 (xp->xb_pktinfo))-> 16106 uscsi_rqlen) - actual_len; 16107 } else { 16108 xp->xb_sense_resid = 0; 16109 } 16110 } 16111 bcopy(&asp->sts_sensedata, xp->xb_sense_data, 16112 SENSE_LENGTH); 16113 } 16114 16115 /* fail the command */ 16116 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 16117 "sdintr: arq done and FLAG_DIAGNOSE set\n"); 16118 sd_return_failed_command(un, bp, EIO); 16119 goto exit; 16120 } 16121 16122 #if (defined(__i386) || defined(__amd64)) /* DMAFREE for x86 only */ 16123 /* 16124 * We want to either retry or fail this command, so free 16125 * the DMA resources here. If we retry the command then 16126 * the DMA resources will be reallocated in sd_start_cmds(). 16127 * Note that when PKT_DMA_PARTIAL is used, this reallocation 16128 * causes the *entire* transfer to start over again from the 16129 * beginning of the request, even for PARTIAL chunks that 16130 * have already transferred successfully. 
16131 */ 16132 if ((un->un_f_is_fibre == TRUE) && 16133 ((xp->xb_pkt_flags & SD_XB_USCSICMD) == 0) && 16134 ((pktp->pkt_flags & FLAG_SENSING) == 0)) { 16135 scsi_dmafree(pktp); 16136 xp->xb_pkt_flags |= SD_XB_DMA_FREED; 16137 } 16138 #endif 16139 16140 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 16141 "sdintr: arq done, sd_handle_auto_request_sense\n"); 16142 16143 sd_handle_auto_request_sense(un, bp, xp, pktp); 16144 goto exit; 16145 } 16146 16147 /* Next see if this is the REQUEST SENSE pkt for the instance */ 16148 if (pktp->pkt_flags & FLAG_SENSING) { 16149 /* This pktp is from the unit's REQUEST_SENSE command */ 16150 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 16151 "sdintr: sd_handle_request_sense\n"); 16152 sd_handle_request_sense(un, bp, xp, pktp); 16153 goto exit; 16154 } 16155 16156 /* 16157 * Check to see if the command successfully completed as requested; 16158 * this is the most common case (and also the hot performance path). 16159 * 16160 * Requirements for successful completion are: 16161 * pkt_reason is CMD_CMPLT and packet status is status good. 16162 * In addition: 16163 * - A residual of zero indicates successful completion no matter what 16164 * the command is. 16165 * - If the residual is not zero and the command is not a read or 16166 * write, then it's still defined as successful completion. In other 16167 * words, if the command is a read or write the residual must be 16168 * zero for successful completion. 16169 * - If the residual is not zero and the command is a read or 16170 * write, and it's a USCSICMD, then it's still defined as 16171 * successful completion. 16172 */ 16173 if ((pktp->pkt_reason == CMD_CMPLT) && 16174 (SD_GET_PKT_STATUS(pktp) == STATUS_GOOD)) { 16175 16176 /* 16177 * Since this command is returned with a good status, we 16178 * can reset the count for Sonoma failover. 
16179 */ 16180 un->un_sonoma_failure_count = 0; 16181 16182 /* 16183 * Return all USCSI commands on good status 16184 */ 16185 if (pktp->pkt_resid == 0) { 16186 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 16187 "sdintr: returning command for resid == 0\n"); 16188 } else if (((SD_GET_PKT_OPCODE(pktp) & 0x1F) != SCMD_READ) && 16189 ((SD_GET_PKT_OPCODE(pktp) & 0x1F) != SCMD_WRITE)) { 16190 SD_UPDATE_B_RESID(bp, pktp); 16191 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 16192 "sdintr: returning command for resid != 0\n"); 16193 } else if (xp->xb_pkt_flags & SD_XB_USCSICMD) { 16194 SD_UPDATE_B_RESID(bp, pktp); 16195 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 16196 "sdintr: returning uscsi command\n"); 16197 } else { 16198 goto not_successful; 16199 } 16200 sd_return_command(un, bp); 16201 16202 /* 16203 * Decrement counter to indicate that the callback routine 16204 * is done. 16205 */ 16206 un->un_in_callback--; 16207 ASSERT(un->un_in_callback >= 0); 16208 mutex_exit(SD_MUTEX(un)); 16209 16210 return; 16211 } 16212 16213 not_successful: 16214 16215 #if (defined(__i386) || defined(__amd64)) /* DMAFREE for x86 only */ 16216 /* 16217 * The following is based upon knowledge of the underlying transport 16218 * and its use of DMA resources. This code should be removed when 16219 * PKT_DMA_PARTIAL support is taken out of the disk driver in favor 16220 * of the new PKT_CMD_BREAKUP protocol. See also sd_initpkt_for_buf() 16221 * and sd_start_cmds(). 16222 * 16223 * Free any DMA resources associated with this command if there 16224 * is a chance it could be retried or enqueued for later retry. 16225 * If we keep the DMA binding then mpxio cannot reissue the 16226 * command on another path whenever a path failure occurs. 16227 * 16228 * Note that when PKT_DMA_PARTIAL is used, free/reallocation 16229 * causes the *entire* transfer to start over again from the 16230 * beginning of the request, even for PARTIAL chunks that 16231 * have already transferred successfully. 
16232 * 16233 * This is only done for non-uscsi commands (and also skipped for the 16234 * driver's internal RQS command). Also just do this for Fibre Channel 16235 * devices as these are the only ones that support mpxio. 16236 */ 16237 if ((un->un_f_is_fibre == TRUE) && 16238 ((xp->xb_pkt_flags & SD_XB_USCSICMD) == 0) && 16239 ((pktp->pkt_flags & FLAG_SENSING) == 0)) { 16240 scsi_dmafree(pktp); 16241 xp->xb_pkt_flags |= SD_XB_DMA_FREED; 16242 } 16243 #endif 16244 16245 /* 16246 * The command did not successfully complete as requested so check 16247 * for FLAG_DIAGNOSE. If set this indicates a uscsi or internal 16248 * driver command that should not be retried so just return. If 16249 * FLAG_DIAGNOSE is not set the error will be processed below. 16250 */ 16251 if ((pktp->pkt_flags & FLAG_DIAGNOSE) != 0) { 16252 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 16253 "sdintr: FLAG_DIAGNOSE: sd_return_failed_command\n"); 16254 /* 16255 * Issue a request sense if a check condition caused the error 16256 * (we handle the auto request sense case above), otherwise 16257 * just fail the command. 16258 */ 16259 if ((pktp->pkt_reason == CMD_CMPLT) && 16260 (SD_GET_PKT_STATUS(pktp) == STATUS_CHECK)) { 16261 sd_send_request_sense_command(un, bp, pktp); 16262 } else { 16263 sd_return_failed_command(un, bp, EIO); 16264 } 16265 goto exit; 16266 } 16267 16268 /* 16269 * The command did not successfully complete as requested so process 16270 * the error, retry, and/or attempt recovery. 
16271 */ 16272 switch (pktp->pkt_reason) { 16273 case CMD_CMPLT: 16274 switch (SD_GET_PKT_STATUS(pktp)) { 16275 case STATUS_GOOD: 16276 /* 16277 * The command completed successfully with a non-zero 16278 * residual 16279 */ 16280 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 16281 "sdintr: STATUS_GOOD \n"); 16282 sd_pkt_status_good(un, bp, xp, pktp); 16283 break; 16284 16285 case STATUS_CHECK: 16286 case STATUS_TERMINATED: 16287 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 16288 "sdintr: STATUS_TERMINATED | STATUS_CHECK\n"); 16289 sd_pkt_status_check_condition(un, bp, xp, pktp); 16290 break; 16291 16292 case STATUS_BUSY: 16293 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 16294 "sdintr: STATUS_BUSY\n"); 16295 sd_pkt_status_busy(un, bp, xp, pktp); 16296 break; 16297 16298 case STATUS_RESERVATION_CONFLICT: 16299 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 16300 "sdintr: STATUS_RESERVATION_CONFLICT\n"); 16301 sd_pkt_status_reservation_conflict(un, bp, xp, pktp); 16302 break; 16303 16304 case STATUS_QFULL: 16305 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 16306 "sdintr: STATUS_QFULL\n"); 16307 sd_pkt_status_qfull(un, bp, xp, pktp); 16308 break; 16309 16310 case STATUS_MET: 16311 case STATUS_INTERMEDIATE: 16312 case STATUS_SCSI2: 16313 case STATUS_INTERMEDIATE_MET: 16314 case STATUS_ACA_ACTIVE: 16315 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 16316 "Unexpected SCSI status received: 0x%x\n", 16317 SD_GET_PKT_STATUS(pktp)); 16318 /* 16319 * Mark the ssc_flags when detected invalid status 16320 * code for non-USCSI command. 
16321 */ 16322 if (!(xp->xb_pkt_flags & SD_XB_USCSICMD)) { 16323 sd_ssc_set_info(sscp, SSC_FLAGS_INVALID_STATUS, 16324 "stat-code"); 16325 } 16326 sd_return_failed_command(un, bp, EIO); 16327 break; 16328 16329 default: 16330 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 16331 "Invalid SCSI status received: 0x%x\n", 16332 SD_GET_PKT_STATUS(pktp)); 16333 if (!(xp->xb_pkt_flags & SD_XB_USCSICMD)) { 16334 sd_ssc_set_info(sscp, SSC_FLAGS_INVALID_STATUS, 16335 "stat-code"); 16336 } 16337 sd_return_failed_command(un, bp, EIO); 16338 break; 16339 16340 } 16341 break; 16342 16343 case CMD_INCOMPLETE: 16344 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 16345 "sdintr: CMD_INCOMPLETE\n"); 16346 sd_pkt_reason_cmd_incomplete(un, bp, xp, pktp); 16347 break; 16348 case CMD_TRAN_ERR: 16349 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 16350 "sdintr: CMD_TRAN_ERR\n"); 16351 sd_pkt_reason_cmd_tran_err(un, bp, xp, pktp); 16352 break; 16353 case CMD_RESET: 16354 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 16355 "sdintr: CMD_RESET \n"); 16356 sd_pkt_reason_cmd_reset(un, bp, xp, pktp); 16357 break; 16358 case CMD_ABORTED: 16359 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 16360 "sdintr: CMD_ABORTED \n"); 16361 sd_pkt_reason_cmd_aborted(un, bp, xp, pktp); 16362 break; 16363 case CMD_TIMEOUT: 16364 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 16365 "sdintr: CMD_TIMEOUT\n"); 16366 sd_pkt_reason_cmd_timeout(un, bp, xp, pktp); 16367 break; 16368 case CMD_UNX_BUS_FREE: 16369 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 16370 "sdintr: CMD_UNX_BUS_FREE \n"); 16371 sd_pkt_reason_cmd_unx_bus_free(un, bp, xp, pktp); 16372 break; 16373 case CMD_TAG_REJECT: 16374 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 16375 "sdintr: CMD_TAG_REJECT\n"); 16376 sd_pkt_reason_cmd_tag_reject(un, bp, xp, pktp); 16377 break; 16378 default: 16379 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 16380 "sdintr: default\n"); 16381 /* 16382 * Mark the ssc_flags for detecting invliad pkt_reason. 
16383 */ 16384 if (!(xp->xb_pkt_flags & SD_XB_USCSICMD)) { 16385 sd_ssc_set_info(sscp, SSC_FLAGS_INVALID_PKT_REASON, 16386 "pkt-reason"); 16387 } 16388 sd_pkt_reason_default(un, bp, xp, pktp); 16389 break; 16390 } 16391 16392 exit: 16393 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sdintr: exit\n"); 16394 16395 /* Decrement counter to indicate that the callback routine is done. */ 16396 un->un_in_callback--; 16397 ASSERT(un->un_in_callback >= 0); 16398 16399 /* 16400 * At this point, the pkt has been dispatched, ie, it is either 16401 * being re-tried or has been returned to its caller and should 16402 * not be referenced. 16403 */ 16404 16405 mutex_exit(SD_MUTEX(un)); 16406 } 16407 16408 16409 /* 16410 * Function: sd_print_incomplete_msg 16411 * 16412 * Description: Prints the error message for a CMD_INCOMPLETE error. 16413 * 16414 * Arguments: un - ptr to associated softstate for the device. 16415 * bp - ptr to the buf(9S) for the command. 16416 * arg - message string ptr 16417 * code - SD_DELAYED_RETRY_ISSUED, SD_IMMEDIATE_RETRY_ISSUED, 16418 * or SD_NO_RETRY_ISSUED. 
16419 * 16420 * Context: May be called under interrupt context 16421 */ 16422 16423 static void 16424 sd_print_incomplete_msg(struct sd_lun *un, struct buf *bp, void *arg, int code) 16425 { 16426 struct scsi_pkt *pktp; 16427 char *msgp; 16428 char *cmdp = arg; 16429 16430 ASSERT(un != NULL); 16431 ASSERT(mutex_owned(SD_MUTEX(un))); 16432 ASSERT(bp != NULL); 16433 ASSERT(arg != NULL); 16434 pktp = SD_GET_PKTP(bp); 16435 ASSERT(pktp != NULL); 16436 16437 switch (code) { 16438 case SD_DELAYED_RETRY_ISSUED: 16439 case SD_IMMEDIATE_RETRY_ISSUED: 16440 msgp = "retrying"; 16441 break; 16442 case SD_NO_RETRY_ISSUED: 16443 default: 16444 msgp = "giving up"; 16445 break; 16446 } 16447 16448 if ((pktp->pkt_flags & FLAG_SILENT) == 0) { 16449 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 16450 "incomplete %s- %s\n", cmdp, msgp); 16451 } 16452 } 16453 16454 16455 16456 /* 16457 * Function: sd_pkt_status_good 16458 * 16459 * Description: Processing for a STATUS_GOOD code in pkt_status. 16460 * 16461 * Context: May be called under interrupt context 16462 */ 16463 16464 static void 16465 sd_pkt_status_good(struct sd_lun *un, struct buf *bp, 16466 struct sd_xbuf *xp, struct scsi_pkt *pktp) 16467 { 16468 char *cmdp; 16469 16470 ASSERT(un != NULL); 16471 ASSERT(mutex_owned(SD_MUTEX(un))); 16472 ASSERT(bp != NULL); 16473 ASSERT(xp != NULL); 16474 ASSERT(pktp != NULL); 16475 ASSERT(pktp->pkt_reason == CMD_CMPLT); 16476 ASSERT(SD_GET_PKT_STATUS(pktp) == STATUS_GOOD); 16477 ASSERT(pktp->pkt_resid != 0); 16478 16479 SD_TRACE(SD_LOG_IO_CORE, un, "sd_pkt_status_good: entry\n"); 16480 16481 SD_UPDATE_ERRSTATS(un, sd_harderrs); 16482 switch (SD_GET_PKT_OPCODE(pktp) & 0x1F) { 16483 case SCMD_READ: 16484 cmdp = "read"; 16485 break; 16486 case SCMD_WRITE: 16487 cmdp = "write"; 16488 break; 16489 default: 16490 SD_UPDATE_B_RESID(bp, pktp); 16491 sd_return_command(un, bp); 16492 SD_TRACE(SD_LOG_IO_CORE, un, "sd_pkt_status_good: exit\n"); 16493 return; 16494 } 16495 16496 /* 16497 * See if we can 
retry the read/write, preferrably immediately. 16498 * If retries are exhaused, then sd_retry_command() will update 16499 * the b_resid count. 16500 */ 16501 sd_retry_command(un, bp, SD_RETRIES_STANDARD, sd_print_incomplete_msg, 16502 cmdp, EIO, (clock_t)0, NULL); 16503 16504 SD_TRACE(SD_LOG_IO_CORE, un, "sd_pkt_status_good: exit\n"); 16505 } 16506 16507 16508 16509 16510 16511 /* 16512 * Function: sd_handle_request_sense 16513 * 16514 * Description: Processing for non-auto Request Sense command. 16515 * 16516 * Arguments: un - ptr to associated softstate 16517 * sense_bp - ptr to buf(9S) for the RQS command 16518 * sense_xp - ptr to the sd_xbuf for the RQS command 16519 * sense_pktp - ptr to the scsi_pkt(9S) for the RQS command 16520 * 16521 * Context: May be called under interrupt context 16522 */ 16523 16524 static void 16525 sd_handle_request_sense(struct sd_lun *un, struct buf *sense_bp, 16526 struct sd_xbuf *sense_xp, struct scsi_pkt *sense_pktp) 16527 { 16528 struct buf *cmd_bp; /* buf for the original command */ 16529 struct sd_xbuf *cmd_xp; /* sd_xbuf for the original command */ 16530 struct scsi_pkt *cmd_pktp; /* pkt for the original command */ 16531 size_t actual_len; /* actual sense data length */ 16532 16533 ASSERT(un != NULL); 16534 ASSERT(mutex_owned(SD_MUTEX(un))); 16535 ASSERT(sense_bp != NULL); 16536 ASSERT(sense_xp != NULL); 16537 ASSERT(sense_pktp != NULL); 16538 16539 /* 16540 * Note the sense_bp, sense_xp, and sense_pktp here are for the 16541 * RQS command and not the original command. 
16542 */ 16543 ASSERT(sense_pktp == un->un_rqs_pktp); 16544 ASSERT(sense_bp == un->un_rqs_bp); 16545 ASSERT((sense_pktp->pkt_flags & (FLAG_SENSING | FLAG_HEAD)) == 16546 (FLAG_SENSING | FLAG_HEAD)); 16547 ASSERT((((SD_GET_XBUF(sense_xp->xb_sense_bp))->xb_pktp->pkt_flags) & 16548 FLAG_SENSING) == FLAG_SENSING); 16549 16550 /* These are the bp, xp, and pktp for the original command */ 16551 cmd_bp = sense_xp->xb_sense_bp; 16552 cmd_xp = SD_GET_XBUF(cmd_bp); 16553 cmd_pktp = SD_GET_PKTP(cmd_bp); 16554 16555 if (sense_pktp->pkt_reason != CMD_CMPLT) { 16556 /* 16557 * The REQUEST SENSE command failed. Release the REQUEST 16558 * SENSE command for re-use, get back the bp for the original 16559 * command, and attempt to re-try the original command if 16560 * FLAG_DIAGNOSE is not set in the original packet. 16561 */ 16562 SD_UPDATE_ERRSTATS(un, sd_harderrs); 16563 if ((cmd_pktp->pkt_flags & FLAG_DIAGNOSE) == 0) { 16564 cmd_bp = sd_mark_rqs_idle(un, sense_xp); 16565 sd_retry_command(un, cmd_bp, SD_RETRIES_STANDARD, 16566 NULL, NULL, EIO, (clock_t)0, NULL); 16567 return; 16568 } 16569 } 16570 16571 /* 16572 * Save the relevant sense info into the xp for the original cmd. 
16573 * 16574 * Note: if the request sense failed the state info will be zero 16575 * as set in sd_mark_rqs_busy() 16576 */ 16577 cmd_xp->xb_sense_status = *(sense_pktp->pkt_scbp); 16578 cmd_xp->xb_sense_state = sense_pktp->pkt_state; 16579 actual_len = MAX_SENSE_LENGTH - sense_pktp->pkt_resid; 16580 if ((cmd_xp->xb_pkt_flags & SD_XB_USCSICMD) && 16581 (((struct uscsi_cmd *)cmd_xp->xb_pktinfo)->uscsi_rqlen > 16582 SENSE_LENGTH)) { 16583 bcopy(sense_bp->b_un.b_addr, cmd_xp->xb_sense_data, 16584 MAX_SENSE_LENGTH); 16585 cmd_xp->xb_sense_resid = sense_pktp->pkt_resid; 16586 } else { 16587 bcopy(sense_bp->b_un.b_addr, cmd_xp->xb_sense_data, 16588 SENSE_LENGTH); 16589 if (actual_len < SENSE_LENGTH) { 16590 cmd_xp->xb_sense_resid = SENSE_LENGTH - actual_len; 16591 } else { 16592 cmd_xp->xb_sense_resid = 0; 16593 } 16594 } 16595 16596 /* 16597 * Free up the RQS command.... 16598 * NOTE: 16599 * Must do this BEFORE calling sd_validate_sense_data! 16600 * sd_validate_sense_data may return the original command in 16601 * which case the pkt will be freed and the flags can no 16602 * longer be touched. 16603 * SD_MUTEX is held through this process until the command 16604 * is dispatched based upon the sense data, so there are 16605 * no race conditions. 16606 */ 16607 (void) sd_mark_rqs_idle(un, sense_xp); 16608 16609 /* 16610 * For a retryable command see if we have valid sense data, if so then 16611 * turn it over to sd_decode_sense() to figure out the right course of 16612 * action. Just fail a non-retryable command. 
16613 */ 16614 if ((cmd_pktp->pkt_flags & FLAG_DIAGNOSE) == 0) { 16615 if (sd_validate_sense_data(un, cmd_bp, cmd_xp, actual_len) == 16616 SD_SENSE_DATA_IS_VALID) { 16617 sd_decode_sense(un, cmd_bp, cmd_xp, cmd_pktp); 16618 } 16619 } else { 16620 SD_DUMP_MEMORY(un, SD_LOG_IO_CORE, "Failed CDB", 16621 (uchar_t *)cmd_pktp->pkt_cdbp, CDB_SIZE, SD_LOG_HEX); 16622 SD_DUMP_MEMORY(un, SD_LOG_IO_CORE, "Sense Data", 16623 (uchar_t *)cmd_xp->xb_sense_data, SENSE_LENGTH, SD_LOG_HEX); 16624 sd_return_failed_command(un, cmd_bp, EIO); 16625 } 16626 } 16627 16628 16629 16630 16631 /* 16632 * Function: sd_handle_auto_request_sense 16633 * 16634 * Description: Processing for auto-request sense information. 16635 * 16636 * Arguments: un - ptr to associated softstate 16637 * bp - ptr to buf(9S) for the command 16638 * xp - ptr to the sd_xbuf for the command 16639 * pktp - ptr to the scsi_pkt(9S) for the command 16640 * 16641 * Context: May be called under interrupt context 16642 */ 16643 16644 static void 16645 sd_handle_auto_request_sense(struct sd_lun *un, struct buf *bp, 16646 struct sd_xbuf *xp, struct scsi_pkt *pktp) 16647 { 16648 struct scsi_arq_status *asp; 16649 size_t actual_len; 16650 16651 ASSERT(un != NULL); 16652 ASSERT(mutex_owned(SD_MUTEX(un))); 16653 ASSERT(bp != NULL); 16654 ASSERT(xp != NULL); 16655 ASSERT(pktp != NULL); 16656 ASSERT(pktp != un->un_rqs_pktp); 16657 ASSERT(bp != un->un_rqs_bp); 16658 16659 /* 16660 * For auto-request sense, we get a scsi_arq_status back from 16661 * the HBA, with the sense data in the sts_sensedata member. 16662 * The pkt_scbp of the packet points to this scsi_arq_status. 16663 */ 16664 asp = (struct scsi_arq_status *)(pktp->pkt_scbp); 16665 16666 if (asp->sts_rqpkt_reason != CMD_CMPLT) { 16667 /* 16668 * The auto REQUEST SENSE failed; see if we can re-try 16669 * the original command. 
16670 */ 16671 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 16672 "auto request sense failed (reason=%s)\n", 16673 scsi_rname(asp->sts_rqpkt_reason)); 16674 16675 sd_reset_target(un, pktp); 16676 16677 sd_retry_command(un, bp, SD_RETRIES_STANDARD, 16678 NULL, NULL, EIO, (clock_t)0, NULL); 16679 return; 16680 } 16681 16682 /* Save the relevant sense info into the xp for the original cmd. */ 16683 xp->xb_sense_status = *((uchar_t *)(&(asp->sts_rqpkt_status))); 16684 xp->xb_sense_state = asp->sts_rqpkt_state; 16685 xp->xb_sense_resid = asp->sts_rqpkt_resid; 16686 if (xp->xb_sense_state & STATE_XARQ_DONE) { 16687 actual_len = MAX_SENSE_LENGTH - xp->xb_sense_resid; 16688 bcopy(&asp->sts_sensedata, xp->xb_sense_data, 16689 MAX_SENSE_LENGTH); 16690 } else { 16691 if (xp->xb_sense_resid > SENSE_LENGTH) { 16692 actual_len = MAX_SENSE_LENGTH - xp->xb_sense_resid; 16693 } else { 16694 actual_len = SENSE_LENGTH - xp->xb_sense_resid; 16695 } 16696 if (xp->xb_pkt_flags & SD_XB_USCSICMD) { 16697 if ((((struct uscsi_cmd *) 16698 (xp->xb_pktinfo))->uscsi_rqlen) > actual_len) { 16699 xp->xb_sense_resid = (((struct uscsi_cmd *) 16700 (xp->xb_pktinfo))->uscsi_rqlen) - 16701 actual_len; 16702 } else { 16703 xp->xb_sense_resid = 0; 16704 } 16705 } 16706 bcopy(&asp->sts_sensedata, xp->xb_sense_data, SENSE_LENGTH); 16707 } 16708 16709 /* 16710 * See if we have valid sense data, if so then turn it over to 16711 * sd_decode_sense() to figure out the right course of action. 16712 */ 16713 if (sd_validate_sense_data(un, bp, xp, actual_len) == 16714 SD_SENSE_DATA_IS_VALID) { 16715 sd_decode_sense(un, bp, xp, pktp); 16716 } 16717 } 16718 16719 16720 /* 16721 * Function: sd_print_sense_failed_msg 16722 * 16723 * Description: Print log message when RQS has failed. 
16724 * 16725 * Arguments: un - ptr to associated softstate 16726 * bp - ptr to buf(9S) for the command 16727 * arg - generic message string ptr 16728 * code - SD_IMMEDIATE_RETRY_ISSUED, SD_DELAYED_RETRY_ISSUED, 16729 * or SD_NO_RETRY_ISSUED 16730 * 16731 * Context: May be called from interrupt context 16732 */ 16733 16734 static void 16735 sd_print_sense_failed_msg(struct sd_lun *un, struct buf *bp, void *arg, 16736 int code) 16737 { 16738 char *msgp = arg; 16739 16740 ASSERT(un != NULL); 16741 ASSERT(mutex_owned(SD_MUTEX(un))); 16742 ASSERT(bp != NULL); 16743 16744 if ((code == SD_NO_RETRY_ISSUED) && (msgp != NULL)) { 16745 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, msgp); 16746 } 16747 } 16748 16749 16750 /* 16751 * Function: sd_validate_sense_data 16752 * 16753 * Description: Check the given sense data for validity. 16754 * If the sense data is not valid, the command will 16755 * be either failed or retried! 16756 * 16757 * Return Code: SD_SENSE_DATA_IS_INVALID 16758 * SD_SENSE_DATA_IS_VALID 16759 * 16760 * Context: May be called from interrupt context 16761 */ 16762 16763 static int 16764 sd_validate_sense_data(struct sd_lun *un, struct buf *bp, struct sd_xbuf *xp, 16765 size_t actual_len) 16766 { 16767 struct scsi_extended_sense *esp; 16768 struct scsi_pkt *pktp; 16769 char *msgp = NULL; 16770 sd_ssc_t *sscp; 16771 16772 ASSERT(un != NULL); 16773 ASSERT(mutex_owned(SD_MUTEX(un))); 16774 ASSERT(bp != NULL); 16775 ASSERT(bp != un->un_rqs_bp); 16776 ASSERT(xp != NULL); 16777 ASSERT(un->un_fm_private != NULL); 16778 16779 pktp = SD_GET_PKTP(bp); 16780 ASSERT(pktp != NULL); 16781 16782 sscp = &((struct sd_fm_internal *)(un->un_fm_private))->fm_ssc; 16783 ASSERT(sscp != NULL); 16784 16785 /* 16786 * Check the status of the RQS command (auto or manual). 
16787 */ 16788 switch (xp->xb_sense_status & STATUS_MASK) { 16789 case STATUS_GOOD: 16790 break; 16791 16792 case STATUS_RESERVATION_CONFLICT: 16793 sd_pkt_status_reservation_conflict(un, bp, xp, pktp); 16794 return (SD_SENSE_DATA_IS_INVALID); 16795 16796 case STATUS_BUSY: 16797 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 16798 "Busy Status on REQUEST SENSE\n"); 16799 sd_retry_command(un, bp, SD_RETRIES_BUSY, NULL, 16800 NULL, EIO, un->un_busy_timeout / 500, kstat_waitq_enter); 16801 return (SD_SENSE_DATA_IS_INVALID); 16802 16803 case STATUS_QFULL: 16804 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 16805 "QFULL Status on REQUEST SENSE\n"); 16806 sd_retry_command(un, bp, SD_RETRIES_STANDARD, NULL, 16807 NULL, EIO, un->un_busy_timeout / 500, kstat_waitq_enter); 16808 return (SD_SENSE_DATA_IS_INVALID); 16809 16810 case STATUS_CHECK: 16811 case STATUS_TERMINATED: 16812 msgp = "Check Condition on REQUEST SENSE\n"; 16813 goto sense_failed; 16814 16815 default: 16816 msgp = "Not STATUS_GOOD on REQUEST_SENSE\n"; 16817 goto sense_failed; 16818 } 16819 16820 /* 16821 * See if we got the minimum required amount of sense data. 16822 * Note: We are assuming the returned sense data is SENSE_LENGTH bytes 16823 * or less. 
16824 */ 16825 if (((xp->xb_sense_state & STATE_XFERRED_DATA) == 0) || 16826 (actual_len == 0)) { 16827 msgp = "Request Sense couldn't get sense data\n"; 16828 goto sense_failed; 16829 } 16830 16831 if (actual_len < SUN_MIN_SENSE_LENGTH) { 16832 msgp = "Not enough sense information\n"; 16833 /* Mark the ssc_flags for detecting invalid sense data */ 16834 if (!(xp->xb_pkt_flags & SD_XB_USCSICMD)) { 16835 sd_ssc_set_info(sscp, SSC_FLAGS_INVALID_SENSE, 16836 "sense-data"); 16837 } 16838 goto sense_failed; 16839 } 16840 16841 /* 16842 * We require the extended sense data 16843 */ 16844 esp = (struct scsi_extended_sense *)xp->xb_sense_data; 16845 if (esp->es_class != CLASS_EXTENDED_SENSE) { 16846 if ((pktp->pkt_flags & FLAG_SILENT) == 0) { 16847 static char tmp[8]; 16848 static char buf[148]; 16849 char *p = (char *)(xp->xb_sense_data); 16850 int i; 16851 16852 mutex_enter(&sd_sense_mutex); 16853 (void) strcpy(buf, "undecodable sense information:"); 16854 for (i = 0; i < actual_len; i++) { 16855 (void) sprintf(tmp, " 0x%x", *(p++)&0xff); 16856 (void) strcpy(&buf[strlen(buf)], tmp); 16857 } 16858 i = strlen(buf); 16859 (void) strcpy(&buf[i], "-(assumed fatal)\n"); 16860 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, buf); 16861 mutex_exit(&sd_sense_mutex); 16862 } 16863 16864 /* Mark the ssc_flags for detecting invalid sense data */ 16865 if (!(xp->xb_pkt_flags & SD_XB_USCSICMD)) { 16866 sd_ssc_set_info(sscp, SSC_FLAGS_INVALID_SENSE, 16867 "sense-data"); 16868 } 16869 16870 /* Note: Legacy behavior, fail the command with no retry */ 16871 sd_return_failed_command(un, bp, EIO); 16872 return (SD_SENSE_DATA_IS_INVALID); 16873 } 16874 16875 /* 16876 * Check that es_code is valid (es_class concatenated with es_code 16877 * make up the "response code" field. es_class will always be 7, so 16878 * make sure es_code is 0, 1, 2, 3 or 0xf. es_code will indicate the 16879 * format. 
16880 */ 16881 if ((esp->es_code != CODE_FMT_FIXED_CURRENT) && 16882 (esp->es_code != CODE_FMT_FIXED_DEFERRED) && 16883 (esp->es_code != CODE_FMT_DESCR_CURRENT) && 16884 (esp->es_code != CODE_FMT_DESCR_DEFERRED) && 16885 (esp->es_code != CODE_FMT_VENDOR_SPECIFIC)) { 16886 /* Mark the ssc_flags for detecting invalid sense data */ 16887 if (!(xp->xb_pkt_flags & SD_XB_USCSICMD)) { 16888 sd_ssc_set_info(sscp, SSC_FLAGS_INVALID_SENSE, 16889 "sense-data"); 16890 } 16891 goto sense_failed; 16892 } 16893 16894 return (SD_SENSE_DATA_IS_VALID); 16895 16896 sense_failed: 16897 /* 16898 * If the request sense failed (for whatever reason), attempt 16899 * to retry the original command. 16900 */ 16901 #if defined(__i386) || defined(__amd64) 16902 /* 16903 * SD_RETRY_DELAY is conditionally compile (#if fibre) in 16904 * sddef.h for Sparc platform, and x86 uses 1 binary 16905 * for both SCSI/FC. 16906 * The SD_RETRY_DELAY value need to be adjusted here 16907 * when SD_RETRY_DELAY change in sddef.h 16908 */ 16909 sd_retry_command(un, bp, SD_RETRIES_STANDARD, 16910 sd_print_sense_failed_msg, msgp, EIO, 16911 un->un_f_is_fibre?drv_usectohz(100000):(clock_t)0, NULL); 16912 #else 16913 sd_retry_command(un, bp, SD_RETRIES_STANDARD, 16914 sd_print_sense_failed_msg, msgp, EIO, SD_RETRY_DELAY, NULL); 16915 #endif 16916 16917 return (SD_SENSE_DATA_IS_INVALID); 16918 } 16919 16920 /* 16921 * Function: sd_decode_sense 16922 * 16923 * Description: Take recovery action(s) when SCSI Sense Data is received. 16924 * 16925 * Context: Interrupt context. 
16926 */ 16927 16928 static void 16929 sd_decode_sense(struct sd_lun *un, struct buf *bp, struct sd_xbuf *xp, 16930 struct scsi_pkt *pktp) 16931 { 16932 uint8_t sense_key; 16933 16934 ASSERT(un != NULL); 16935 ASSERT(mutex_owned(SD_MUTEX(un))); 16936 ASSERT(bp != NULL); 16937 ASSERT(bp != un->un_rqs_bp); 16938 ASSERT(xp != NULL); 16939 ASSERT(pktp != NULL); 16940 16941 sense_key = scsi_sense_key(xp->xb_sense_data); 16942 16943 switch (sense_key) { 16944 case KEY_NO_SENSE: 16945 sd_sense_key_no_sense(un, bp, xp, pktp); 16946 break; 16947 case KEY_RECOVERABLE_ERROR: 16948 sd_sense_key_recoverable_error(un, xp->xb_sense_data, 16949 bp, xp, pktp); 16950 break; 16951 case KEY_NOT_READY: 16952 sd_sense_key_not_ready(un, xp->xb_sense_data, 16953 bp, xp, pktp); 16954 break; 16955 case KEY_MEDIUM_ERROR: 16956 case KEY_HARDWARE_ERROR: 16957 sd_sense_key_medium_or_hardware_error(un, 16958 xp->xb_sense_data, bp, xp, pktp); 16959 break; 16960 case KEY_ILLEGAL_REQUEST: 16961 sd_sense_key_illegal_request(un, bp, xp, pktp); 16962 break; 16963 case KEY_UNIT_ATTENTION: 16964 sd_sense_key_unit_attention(un, xp->xb_sense_data, 16965 bp, xp, pktp); 16966 break; 16967 case KEY_WRITE_PROTECT: 16968 case KEY_VOLUME_OVERFLOW: 16969 case KEY_MISCOMPARE: 16970 sd_sense_key_fail_command(un, bp, xp, pktp); 16971 break; 16972 case KEY_BLANK_CHECK: 16973 sd_sense_key_blank_check(un, bp, xp, pktp); 16974 break; 16975 case KEY_ABORTED_COMMAND: 16976 sd_sense_key_aborted_command(un, bp, xp, pktp); 16977 break; 16978 case KEY_VENDOR_UNIQUE: 16979 case KEY_COPY_ABORTED: 16980 case KEY_EQUAL: 16981 case KEY_RESERVED: 16982 default: 16983 sd_sense_key_default(un, xp->xb_sense_data, 16984 bp, xp, pktp); 16985 break; 16986 } 16987 } 16988 16989 16990 /* 16991 * Function: sd_dump_memory 16992 * 16993 * Description: Debug logging routine to print the contents of a user provided 16994 * buffer. The output of the buffer is broken up into 256 byte 16995 * segments due to a size constraint of the scsi_log. 
16996 * implementation. 16997 * 16998 * Arguments: un - ptr to softstate 16999 * comp - component mask 17000 * title - "title" string to preceed data when printed 17001 * data - ptr to data block to be printed 17002 * len - size of data block to be printed 17003 * fmt - SD_LOG_HEX (use 0x%02x format) or SD_LOG_CHAR (use %c) 17004 * 17005 * Context: May be called from interrupt context 17006 */ 17007 17008 #define SD_DUMP_MEMORY_BUF_SIZE 256 17009 17010 static char *sd_dump_format_string[] = { 17011 " 0x%02x", 17012 " %c" 17013 }; 17014 17015 static void 17016 sd_dump_memory(struct sd_lun *un, uint_t comp, char *title, uchar_t *data, 17017 int len, int fmt) 17018 { 17019 int i, j; 17020 int avail_count; 17021 int start_offset; 17022 int end_offset; 17023 size_t entry_len; 17024 char *bufp; 17025 char *local_buf; 17026 char *format_string; 17027 17028 ASSERT((fmt == SD_LOG_HEX) || (fmt == SD_LOG_CHAR)); 17029 17030 /* 17031 * In the debug version of the driver, this function is called from a 17032 * number of places which are NOPs in the release driver. 17033 * The debug driver therefore has additional methods of filtering 17034 * debug output. 17035 */ 17036 #ifdef SDDEBUG 17037 /* 17038 * In the debug version of the driver we can reduce the amount of debug 17039 * messages by setting sd_error_level to something other than 17040 * SCSI_ERR_ALL and clearing bits in sd_level_mask and 17041 * sd_component_mask. 
17042 */ 17043 if (((sd_level_mask & (SD_LOGMASK_DUMP_MEM | SD_LOGMASK_DIAG)) == 0) || 17044 (sd_error_level != SCSI_ERR_ALL)) { 17045 return; 17046 } 17047 if (((sd_component_mask & comp) == 0) || 17048 (sd_error_level != SCSI_ERR_ALL)) { 17049 return; 17050 } 17051 #else 17052 if (sd_error_level != SCSI_ERR_ALL) { 17053 return; 17054 } 17055 #endif 17056 17057 local_buf = kmem_zalloc(SD_DUMP_MEMORY_BUF_SIZE, KM_SLEEP); 17058 bufp = local_buf; 17059 /* 17060 * Available length is the length of local_buf[], minus the 17061 * length of the title string, minus one for the ":", minus 17062 * one for the newline, minus one for the NULL terminator. 17063 * This gives the #bytes available for holding the printed 17064 * values from the given data buffer. 17065 */ 17066 if (fmt == SD_LOG_HEX) { 17067 format_string = sd_dump_format_string[0]; 17068 } else /* SD_LOG_CHAR */ { 17069 format_string = sd_dump_format_string[1]; 17070 } 17071 /* 17072 * Available count is the number of elements from the given 17073 * data buffer that we can fit into the available length. 17074 * This is based upon the size of the format string used. 17075 * Make one entry and find it's size. 
17076 */ 17077 (void) sprintf(bufp, format_string, data[0]); 17078 entry_len = strlen(bufp); 17079 avail_count = (SD_DUMP_MEMORY_BUF_SIZE - strlen(title) - 3) / entry_len; 17080 17081 j = 0; 17082 while (j < len) { 17083 bufp = local_buf; 17084 bzero(bufp, SD_DUMP_MEMORY_BUF_SIZE); 17085 start_offset = j; 17086 17087 end_offset = start_offset + avail_count; 17088 17089 (void) sprintf(bufp, "%s:", title); 17090 bufp += strlen(bufp); 17091 for (i = start_offset; ((i < end_offset) && (j < len)); 17092 i++, j++) { 17093 (void) sprintf(bufp, format_string, data[i]); 17094 bufp += entry_len; 17095 } 17096 (void) sprintf(bufp, "\n"); 17097 17098 scsi_log(SD_DEVINFO(un), sd_label, CE_NOTE, "%s", local_buf); 17099 } 17100 kmem_free(local_buf, SD_DUMP_MEMORY_BUF_SIZE); 17101 } 17102 17103 /* 17104 * Function: sd_print_sense_msg 17105 * 17106 * Description: Log a message based upon the given sense data. 17107 * 17108 * Arguments: un - ptr to associated softstate 17109 * bp - ptr to buf(9S) for the command 17110 * arg - ptr to associate sd_sense_info struct 17111 * code - SD_IMMEDIATE_RETRY_ISSUED, SD_DELAYED_RETRY_ISSUED, 17112 * or SD_NO_RETRY_ISSUED 17113 * 17114 * Context: May be called from interrupt context 17115 */ 17116 17117 static void 17118 sd_print_sense_msg(struct sd_lun *un, struct buf *bp, void *arg, int code) 17119 { 17120 struct sd_xbuf *xp; 17121 struct scsi_pkt *pktp; 17122 uint8_t *sensep; 17123 daddr_t request_blkno; 17124 diskaddr_t err_blkno; 17125 int severity; 17126 int pfa_flag; 17127 extern struct scsi_key_strings scsi_cmds[]; 17128 17129 ASSERT(un != NULL); 17130 ASSERT(mutex_owned(SD_MUTEX(un))); 17131 ASSERT(bp != NULL); 17132 xp = SD_GET_XBUF(bp); 17133 ASSERT(xp != NULL); 17134 pktp = SD_GET_PKTP(bp); 17135 ASSERT(pktp != NULL); 17136 ASSERT(arg != NULL); 17137 17138 severity = ((struct sd_sense_info *)(arg))->ssi_severity; 17139 pfa_flag = ((struct sd_sense_info *)(arg))->ssi_pfa_flag; 17140 17141 if ((code == SD_DELAYED_RETRY_ISSUED) || 17142 
(code == SD_IMMEDIATE_RETRY_ISSUED)) { 17143 severity = SCSI_ERR_RETRYABLE; 17144 } 17145 17146 /* Use absolute block number for the request block number */ 17147 request_blkno = xp->xb_blkno; 17148 17149 /* 17150 * Now try to get the error block number from the sense data 17151 */ 17152 sensep = xp->xb_sense_data; 17153 17154 if (scsi_sense_info_uint64(sensep, SENSE_LENGTH, 17155 (uint64_t *)&err_blkno)) { 17156 /* 17157 * We retrieved the error block number from the information 17158 * portion of the sense data. 17159 * 17160 * For USCSI commands we are better off using the error 17161 * block no. as the requested block no. (This is the best 17162 * we can estimate.) 17163 */ 17164 if ((SD_IS_BUFIO(xp) == FALSE) && 17165 ((pktp->pkt_flags & FLAG_SILENT) == 0)) { 17166 request_blkno = err_blkno; 17167 } 17168 } else { 17169 /* 17170 * Without the es_valid bit set (for fixed format) or an 17171 * information descriptor (for descriptor format) we cannot 17172 * be certain of the error blkno, so just use the 17173 * request_blkno. 17174 */ 17175 err_blkno = (diskaddr_t)request_blkno; 17176 } 17177 17178 /* 17179 * The following will log the buffer contents for the release driver 17180 * if the SD_LOGMASK_DIAG bit of sd_level_mask is set, or the error 17181 * level is set to verbose. 
17182 */ 17183 sd_dump_memory(un, SD_LOG_IO, "Failed CDB", 17184 (uchar_t *)pktp->pkt_cdbp, CDB_SIZE, SD_LOG_HEX); 17185 sd_dump_memory(un, SD_LOG_IO, "Sense Data", 17186 (uchar_t *)sensep, SENSE_LENGTH, SD_LOG_HEX); 17187 17188 if (pfa_flag == FALSE) { 17189 /* This is normally only set for USCSI */ 17190 if ((pktp->pkt_flags & FLAG_SILENT) != 0) { 17191 return; 17192 } 17193 17194 if ((SD_IS_BUFIO(xp) == TRUE) && 17195 (((sd_level_mask & SD_LOGMASK_DIAG) == 0) && 17196 (severity < sd_error_level))) { 17197 return; 17198 } 17199 } 17200 /* 17201 * Check for Sonoma Failover and keep a count of how many failed I/O's 17202 */ 17203 if ((SD_IS_LSI(un)) && 17204 (scsi_sense_key(sensep) == KEY_ILLEGAL_REQUEST) && 17205 (scsi_sense_asc(sensep) == 0x94) && 17206 (scsi_sense_ascq(sensep) == 0x01)) { 17207 un->un_sonoma_failure_count++; 17208 if (un->un_sonoma_failure_count > 1) { 17209 return; 17210 } 17211 } 17212 17213 scsi_vu_errmsg(SD_SCSI_DEVP(un), pktp, sd_label, severity, 17214 request_blkno, err_blkno, scsi_cmds, 17215 (struct scsi_extended_sense *)sensep, 17216 un->un_additional_codes, NULL); 17217 } 17218 17219 /* 17220 * Function: sd_sense_key_no_sense 17221 * 17222 * Description: Recovery action when sense data was not received. 
17223 * 17224 * Context: May be called from interrupt context 17225 */ 17226 17227 static void 17228 sd_sense_key_no_sense(struct sd_lun *un, struct buf *bp, 17229 struct sd_xbuf *xp, struct scsi_pkt *pktp) 17230 { 17231 struct sd_sense_info si; 17232 17233 ASSERT(un != NULL); 17234 ASSERT(mutex_owned(SD_MUTEX(un))); 17235 ASSERT(bp != NULL); 17236 ASSERT(xp != NULL); 17237 ASSERT(pktp != NULL); 17238 17239 si.ssi_severity = SCSI_ERR_FATAL; 17240 si.ssi_pfa_flag = FALSE; 17241 17242 SD_UPDATE_ERRSTATS(un, sd_softerrs); 17243 17244 sd_retry_command(un, bp, SD_RETRIES_STANDARD, sd_print_sense_msg, 17245 &si, EIO, (clock_t)0, NULL); 17246 } 17247 17248 17249 /* 17250 * Function: sd_sense_key_recoverable_error 17251 * 17252 * Description: Recovery actions for a SCSI "Recovered Error" sense key. 17253 * 17254 * Context: May be called from interrupt context 17255 */ 17256 17257 static void 17258 sd_sense_key_recoverable_error(struct sd_lun *un, 17259 uint8_t *sense_datap, 17260 struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp) 17261 { 17262 struct sd_sense_info si; 17263 uint8_t asc = scsi_sense_asc(sense_datap); 17264 17265 ASSERT(un != NULL); 17266 ASSERT(mutex_owned(SD_MUTEX(un))); 17267 ASSERT(bp != NULL); 17268 ASSERT(xp != NULL); 17269 ASSERT(pktp != NULL); 17270 17271 /* 17272 * 0x5D: FAILURE PREDICTION THRESHOLD EXCEEDED 17273 */ 17274 if ((asc == 0x5D) && (sd_report_pfa != 0)) { 17275 SD_UPDATE_ERRSTATS(un, sd_rq_pfa_err); 17276 si.ssi_severity = SCSI_ERR_INFO; 17277 si.ssi_pfa_flag = TRUE; 17278 } else { 17279 SD_UPDATE_ERRSTATS(un, sd_softerrs); 17280 SD_UPDATE_ERRSTATS(un, sd_rq_recov_err); 17281 si.ssi_severity = SCSI_ERR_RECOVERED; 17282 si.ssi_pfa_flag = FALSE; 17283 } 17284 17285 if (pktp->pkt_resid == 0) { 17286 sd_print_sense_msg(un, bp, &si, SD_NO_RETRY_ISSUED); 17287 sd_return_command(un, bp); 17288 return; 17289 } 17290 17291 sd_retry_command(un, bp, SD_RETRIES_STANDARD, sd_print_sense_msg, 17292 &si, EIO, (clock_t)0, NULL); 17293 } 17294 



/*
 * Function: sd_sense_key_not_ready
 *
 * Description: Recovery actions for a SCSI "Not Ready" sense key.
 *
 * Context: May be called from interrupt context
 */

static void
sd_sense_key_not_ready(struct sd_lun *un,
	uint8_t *sense_datap,
	struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp)
{
	struct sd_sense_info	si;
	uint8_t asc = scsi_sense_asc(sense_datap);
	uint8_t ascq = scsi_sense_ascq(sense_datap);

	ASSERT(un != NULL);
	ASSERT(mutex_owned(SD_MUTEX(un)));
	ASSERT(bp != NULL);
	ASSERT(xp != NULL);
	ASSERT(pktp != NULL);

	si.ssi_severity = SCSI_ERR_FATAL;
	si.ssi_pfa_flag = FALSE;

	/*
	 * Update error stats after first NOT READY error. Disks may have
	 * been powered down and may need to be restarted.  For CDROMs,
	 * report NOT READY errors only if media is present.
	 */
	if ((ISCD(un) && (asc == 0x3A)) ||
	    (xp->xb_nr_retry_count > 0)) {
		SD_UPDATE_ERRSTATS(un, sd_harderrs);
		SD_UPDATE_ERRSTATS(un, sd_rq_ntrdy_err);
	}

	/*
	 * Just fail if the "not ready" retry limit has been reached.
	 */
	if (xp->xb_nr_retry_count >= un->un_notready_retry_count) {
		/* Special check for error message printing for removables. */
		if (un->un_f_has_removable_media && (asc == 0x04) &&
		    (ascq >= 0x04)) {
			si.ssi_severity = SCSI_ERR_ALL;
		}
		goto fail_command;
	}

	/*
	 * Check the ASC and ASCQ in the sense data as needed, to determine
	 * what to do.
	 */
	switch (asc) {
	case 0x04:	/* LOGICAL UNIT NOT READY */
		/*
		 * disk drives that don't spin up result in a very long delay
		 * in format without warning messages. We will log a message
		 * if the error level is set to verbose.
		 */
		if (sd_error_level < SCSI_ERR_RETRYABLE) {
			scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
			    "logical unit not ready, resetting disk\n");
		}

		/*
		 * There are different requirements for CDROMs and disks for
		 * the number of retries.  If a CD-ROM is giving this, it is
		 * probably reading TOC and is in the process of getting
		 * ready, so we should keep on trying for a long time to make
		 * sure that all types of media are taken in account (for
		 * some media the drive takes a long time to read TOC).  For
		 * disks we do not want to retry this too many times as this
		 * can cause a long hang in format when the drive refuses to
		 * spin up (a very common failure).
		 */
		switch (ascq) {
		case 0x00:  /* LUN NOT READY, CAUSE NOT REPORTABLE */
			/*
			 * Disk drives frequently refuse to spin up which
			 * results in a very long hang in format without
			 * warning messages.
			 *
			 * Note: This code preserves the legacy behavior of
			 * comparing xb_nr_retry_count against zero for fibre
			 * channel targets instead of comparing against the
			 * un_reset_retry_count value.  The reason for this
			 * discrepancy has been so utterly lost beneath the
			 * Sands of Time that even Indiana Jones could not
			 * find it.
			 */
			if (un->un_f_is_fibre == TRUE) {
				if (((sd_level_mask & SD_LOGMASK_DIAG) ||
				    (xp->xb_nr_retry_count > 0)) &&
				    (un->un_startstop_timeid == NULL)) {
					scsi_log(SD_DEVINFO(un), sd_label,
					    CE_WARN, "logical unit not ready, "
					    "resetting disk\n");
					sd_reset_target(un, pktp);
				}
			} else {
				if (((sd_level_mask & SD_LOGMASK_DIAG) ||
				    (xp->xb_nr_retry_count >
				    un->un_reset_retry_count)) &&
				    (un->un_startstop_timeid == NULL)) {
					scsi_log(SD_DEVINFO(un), sd_label,
					    CE_WARN, "logical unit not ready, "
					    "resetting disk\n");
					sd_reset_target(un, pktp);
				}
			}
			break;

		case 0x01:  /* LUN IS IN PROCESS OF BECOMING READY */
			/*
			 * If the target is in the process of becoming
			 * ready, just proceed with the retry. This can
			 * happen with CD-ROMs that take a long time to
			 * read TOC after a power cycle or reset.
			 */
			goto do_retry;

		case 0x02:  /* LUN NOT READY, INITITIALIZING CMD REQUIRED */
			break;

		case 0x03:  /* LUN NOT READY, MANUAL INTERVENTION REQUIRED */
			/*
			 * Retries cannot help here so just fail right away.
			 */
			goto fail_command;

		case 0x88:
			/*
			 * Vendor-unique code for T3/T4: it indicates a
			 * path problem in a mutipathed config, but as far as
			 * the target driver is concerned it equates to a fatal
			 * error, so we should just fail the command right away
			 * (without printing anything to the console). If this
			 * is not a T3/T4, fall thru to the default recovery
			 * action.
			 * T3/T4 is FC only, don't need to check is_fibre
			 */
			if (SD_IS_T3(un) || SD_IS_T4(un)) {
				sd_return_failed_command(un, bp, EIO);
				return;
			}
			/* FALLTHRU */

		case 0x04:  /* LUN NOT READY, FORMAT IN PROGRESS */
		case 0x05:  /* LUN NOT READY, REBUILD IN PROGRESS */
		case 0x06:  /* LUN NOT READY, RECALCULATION IN PROGRESS */
		case 0x07:  /* LUN NOT READY, OPERATION IN PROGRESS */
		case 0x08:  /* LUN NOT READY, LONG WRITE IN PROGRESS */
		default:    /* Possible future codes in SCSI spec? */
			/*
			 * For removable-media devices, do not retry if
			 * ASCQ > 2 as these result mostly from USCSI commands
			 * on MMC devices issued to check status of an
			 * operation initiated in immediate mode.  Also for
			 * ASCQ >= 4 do not print console messages as these
			 * mainly represent a user-initiated operation
			 * instead of a system failure.
			 */
			if (un->un_f_has_removable_media) {
				si.ssi_severity = SCSI_ERR_ALL;
				goto fail_command;
			}
			break;
		}

		/*
		 * As part of our recovery attempt for the NOT READY
		 * condition, we issue a START STOP UNIT command. However
		 * we want to wait for a short delay before attempting this
		 * as there may still be more commands coming back from the
		 * target with the check condition. To do this we use
		 * timeout(9F) to call sd_start_stop_unit_callback() after
		 * the delay interval expires. (sd_start_stop_unit_callback()
		 * dispatches sd_start_stop_unit_task(), which will issue
		 * the actual START STOP UNIT command. The delay interval
		 * is one-half of the delay that we will use to retry the
		 * command that generated the NOT READY condition.
		 *
		 * Note that we could just dispatch sd_start_stop_unit_task()
		 * from here and allow it to sleep for the delay interval,
		 * but then we would be tying up the taskq thread
		 * uncesessarily for the duration of the delay.
		 *
		 * Do not issue the START STOP UNIT if the current command
		 * is already a START STOP UNIT.
		 */
		if (pktp->pkt_cdbp[0] == SCMD_START_STOP) {
			break;
		}

		/*
		 * Do not schedule the timeout if one is already pending.
		 */
		if (un->un_startstop_timeid != NULL) {
			SD_INFO(SD_LOG_ERROR, un,
			    "sd_sense_key_not_ready: restart already issued to"
			    " %s%d\n", ddi_driver_name(SD_DEVINFO(un)),
			    ddi_get_instance(SD_DEVINFO(un)));
			break;
		}

		/*
		 * Schedule the START STOP UNIT command, then queue the command
		 * for a retry.
		 *
		 * Note: A timeout is not scheduled for this retry because we
		 * want the retry to be serial with the START_STOP_UNIT. The
		 * retry will be started when the START_STOP_UNIT is completed
		 * in sd_start_stop_unit_task.
		 */
		un->un_startstop_timeid = timeout(sd_start_stop_unit_callback,
		    un, un->un_busy_timeout / 2);
		xp->xb_nr_retry_count++;
		sd_set_retry_bp(un, bp, 0, kstat_waitq_enter);
		return;

	case 0x05:	/* LOGICAL UNIT DOES NOT RESPOND TO SELECTION */
		if (sd_error_level < SCSI_ERR_RETRYABLE) {
			scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
			    "unit does not respond to selection\n");
		}
		break;

	case 0x3A:	/* MEDIUM NOT PRESENT */
		if (sd_error_level >= SCSI_ERR_FATAL) {
			scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
			    "Caddy not inserted in drive\n");
		}

		sr_ejected(un);
		un->un_mediastate = DKIO_EJECTED;
		/* The state has changed, inform the media watch routines */
		cv_broadcast(&un->un_state_cv);
		/* Just fail if no media is present in the drive. */
		goto fail_command;

	default:
		if (sd_error_level < SCSI_ERR_RETRYABLE) {
			scsi_log(SD_DEVINFO(un), sd_label, CE_NOTE,
			    "Unit not Ready. Additional sense code 0x%x\n",
			    asc);
		}
		break;
	}

do_retry:

	/*
	 * Retry the command, as some targets may report NOT READY for
	 * several seconds after being reset.
	 */
	xp->xb_nr_retry_count++;
	si.ssi_severity = SCSI_ERR_RETRYABLE;
	sd_retry_command(un, bp, SD_RETRIES_NOCHECK, sd_print_sense_msg,
	    &si, EIO, un->un_busy_timeout, NULL);

	return;

fail_command:
	sd_print_sense_msg(un, bp, &si, SD_NO_RETRY_ISSUED);
	sd_return_failed_command(un, bp, EIO);
}



/*
 * Function: sd_sense_key_medium_or_hardware_error
 *
 * Description: Recovery actions for a SCSI "Medium Error" or "Hardware Error"
 *		sense key.
 *
 * Context: May be called from interrupt context
 */

static void
sd_sense_key_medium_or_hardware_error(struct sd_lun *un,
	uint8_t *sense_datap,
	struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp)
{
	struct sd_sense_info	si;
	uint8_t sense_key = scsi_sense_key(sense_datap);
	uint8_t asc = scsi_sense_asc(sense_datap);

	ASSERT(un != NULL);
	ASSERT(mutex_owned(SD_MUTEX(un)));
	ASSERT(bp != NULL);
	ASSERT(xp != NULL);
	ASSERT(pktp != NULL);

	si.ssi_severity = SCSI_ERR_FATAL;
	si.ssi_pfa_flag = FALSE;

	if (sense_key == KEY_MEDIUM_ERROR) {
		SD_UPDATE_ERRSTATS(un, sd_rq_media_err);
	}

	SD_UPDATE_ERRSTATS(un, sd_harderrs);

	if ((un->un_reset_retry_count != 0) &&
	    (xp->xb_retry_count == un->un_reset_retry_count)) {
		/* Drop the softstate mutex across the (blocking) reset. */
		mutex_exit(SD_MUTEX(un));
		/* Do NOT do a RESET_ALL here: too intrusive. (4112858) */
		if (un->un_f_allow_bus_device_reset == TRUE) {

			boolean_t try_resetting_target = B_TRUE;

			/*
			 * We need to be able to handle specific ASC when we are
			 * handling a KEY_HARDWARE_ERROR. In particular
			 * taking the default action of resetting the target may
			 * not be the appropriate way to attempt recovery.
			 * Resetting a target because of a single LUN failure
			 * victimizes all LUNs on that target.
			 *
			 * This is true for the LSI arrays, if an LSI
			 * array controller returns an ASC of 0x84 (LUN Dead) we
			 * should trust it.
			 */

			if (sense_key == KEY_HARDWARE_ERROR) {
				switch (asc) {
				case 0x84:
					if (SD_IS_LSI(un)) {
						try_resetting_target = B_FALSE;
					}
					break;
				default:
					break;
				}
			}

			if (try_resetting_target == B_TRUE) {
				int reset_retval = 0;
				/* Prefer a LUN reset; fall back to target. */
				if (un->un_f_lun_reset_enabled == TRUE) {
					SD_TRACE(SD_LOG_IO_CORE, un,
					    "sd_sense_key_medium_or_hardware_"
					    "error: issuing RESET_LUN\n");
					reset_retval =
					    scsi_reset(SD_ADDRESS(un),
					    RESET_LUN);
				}
				if (reset_retval == 0) {
					SD_TRACE(SD_LOG_IO_CORE, un,
					    "sd_sense_key_medium_or_hardware_"
					    "error: issuing RESET_TARGET\n");
					(void) scsi_reset(SD_ADDRESS(un),
					    RESET_TARGET);
				}
			}
		}
		mutex_enter(SD_MUTEX(un));
	}

	/*
	 * This really ought to be a fatal error, but we will retry anyway
	 * as some drives report this as a spurious error.
	 */
	sd_retry_command(un, bp, SD_RETRIES_STANDARD, sd_print_sense_msg,
	    &si, EIO, (clock_t)0, NULL);
}



/*
 * Function: sd_sense_key_illegal_request
 *
 * Description: Recovery actions for a SCSI "Illegal Request" sense key.
 *
 * Context: May be called from interrupt context
 */

static void
sd_sense_key_illegal_request(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp)
{
	struct sd_sense_info	si;

	ASSERT(un != NULL);
	ASSERT(mutex_owned(SD_MUTEX(un)));
	ASSERT(bp != NULL);
	ASSERT(xp != NULL);
	ASSERT(pktp != NULL);

	SD_UPDATE_ERRSTATS(un, sd_rq_illrq_err);

	si.ssi_severity = SCSI_ERR_INFO;
	si.ssi_pfa_flag = FALSE;

	/* Pointless to retry if the target thinks it's an illegal request */
	sd_print_sense_msg(un, bp, &si, SD_NO_RETRY_ISSUED);
	sd_return_failed_command(un, bp, EIO);
}




/*
 * Function: sd_sense_key_unit_attention
 *
 * Description: Recovery actions for a SCSI "Unit Attention" sense key.
 *
 * Context: May be called from interrupt context
 */

static void
sd_sense_key_unit_attention(struct sd_lun *un,
	uint8_t *sense_datap,
	struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp)
{
	/*
	 * For UNIT ATTENTION we allow retries for one minute. Devices
	 * like Sonoma can return UNIT ATTENTION close to a minute
	 * under certain conditions.
	 */
	int	retry_check_flag = SD_RETRIES_UA;
	boolean_t	kstat_updated = B_FALSE;
	struct	sd_sense_info	si;
	uint8_t asc = scsi_sense_asc(sense_datap);
	uint8_t ascq = scsi_sense_ascq(sense_datap);

	ASSERT(un != NULL);
	ASSERT(mutex_owned(SD_MUTEX(un)));
	ASSERT(bp != NULL);
	ASSERT(xp != NULL);
	ASSERT(pktp != NULL);

	si.ssi_severity = SCSI_ERR_INFO;
	si.ssi_pfa_flag = FALSE;


	switch (asc) {
	case 0x5D:  /* FAILURE PREDICTION THRESHOLD EXCEEDED */
		if (sd_report_pfa != 0) {
			SD_UPDATE_ERRSTATS(un, sd_rq_pfa_err);
			si.ssi_pfa_flag = TRUE;
			retry_check_flag = SD_RETRIES_STANDARD;
			goto do_retry;
		}

		break;

	case 0x29:  /* POWER ON, RESET, OR BUS DEVICE RESET OCCURRED */
		/* A reset may have silently dropped our reservation. */
		if ((un->un_resvd_status & SD_RESERVE) == SD_RESERVE) {
			un->un_resvd_status |=
			    (SD_LOST_RESERVE | SD_WANT_RESERVE);
		}
#ifdef _LP64
		if (un->un_blockcount + 1 > SD_GROUP1_MAX_ADDRESS) {
			if (taskq_dispatch(sd_tq, sd_reenable_dsense_task,
			    un, KM_NOSLEEP) == 0) {
				/*
				 * If we can't dispatch the task we'll just
				 * live without descriptor sense.  We can
				 * try again on the next "unit attention"
				 */
				SD_ERROR(SD_LOG_ERROR, un,
				    "sd_sense_key_unit_attention: "
				    "Could not dispatch "
				    "sd_reenable_dsense_task\n");
			}
		}
#endif	/* _LP64 */
		/* FALLTHRU */

	case 0x28: /* NOT READY TO READY CHANGE, MEDIUM MAY HAVE CHANGED */
		if (!un->un_f_has_removable_media) {
			break;
		}

		/*
		 * When we get a unit attention from a removable-media device,
		 * it may be in a state that will take a long time to recover
		 * (e.g., from a reset).  Since we are executing in interrupt
		 * context here, we cannot wait around for the device to come
		 * back. So hand this command off to sd_media_change_task()
		 * for deferred processing under taskq thread context. (Note
		 * that the command still may be failed if a problem is
		 * encountered at a later time.)
		 */
		if (taskq_dispatch(sd_tq, sd_media_change_task, pktp,
		    KM_NOSLEEP) == 0) {
			/*
			 * Cannot dispatch the request so fail the command.
			 */
			SD_UPDATE_ERRSTATS(un, sd_harderrs);
			SD_UPDATE_ERRSTATS(un, sd_rq_nodev_err);
			si.ssi_severity = SCSI_ERR_FATAL;
			sd_print_sense_msg(un, bp, &si, SD_NO_RETRY_ISSUED);
			sd_return_failed_command(un, bp, EIO);
		}

		/*
		 * If failed to dispatch sd_media_change_task(), we already
		 * updated kstat. If succeed to dispatch sd_media_change_task(),
		 * we should update kstat later if it encounters an error. So,
		 * we update kstat_updated flag here.
		 */
		kstat_updated = B_TRUE;

		/*
		 * Either the command has been successfully dispatched to a
		 * task Q for retrying, or the dispatch failed. In either case
		 * do NOT retry again by calling sd_retry_command. This sets up
		 * two retries of the same command and when one completes and
		 * frees the resources the other will access freed memory,
		 * a bad thing.
		 */
		return;

	default:
		break;
	}

	/*
	 * ASC  ASCQ
	 *  2A   09	Capacity data has changed
	 *  2A   01	Mode parameters changed
	 *  3F   0E	Reported luns data has changed
	 * Arrays that support logical unit expansion should report
	 * capacity changes(2Ah/09). Mode parameters changed and
	 * reported luns data has changed are the approximation.
	 */
	if (((asc == 0x2a) && (ascq == 0x09)) ||
	    ((asc == 0x2a) && (ascq == 0x01)) ||
	    ((asc == 0x3f) && (ascq == 0x0e))) {
		if (taskq_dispatch(sd_tq, sd_target_change_task, un,
		    KM_NOSLEEP) == 0) {
			SD_ERROR(SD_LOG_ERROR, un,
			    "sd_sense_key_unit_attention: "
			    "Could not dispatch sd_target_change_task\n");
		}
	}

	/*
	 * Update kstat if we haven't done that.
	 */
	if (!kstat_updated) {
		SD_UPDATE_ERRSTATS(un, sd_harderrs);
		SD_UPDATE_ERRSTATS(un, sd_rq_nodev_err);
	}

do_retry:
	sd_retry_command(un, bp, retry_check_flag, sd_print_sense_msg, &si,
	    EIO, SD_UA_RETRY_DELAY, NULL);
}



/*
 * Function: sd_sense_key_fail_command
 *
 * Description: Use to fail a command when we don't like the sense key that
 *		was returned.
 *
 * Context: May be called from interrupt context
 */

static void
sd_sense_key_fail_command(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp)
{
	struct sd_sense_info	si;

	ASSERT(un != NULL);
	ASSERT(mutex_owned(SD_MUTEX(un)));
	ASSERT(bp != NULL);
	ASSERT(xp != NULL);
	ASSERT(pktp != NULL);

	si.ssi_severity = SCSI_ERR_FATAL;
	si.ssi_pfa_flag = FALSE;

	/* Log the sense message and fail the command without retrying. */
	sd_print_sense_msg(un, bp, &si, SD_NO_RETRY_ISSUED);
	sd_return_failed_command(un, bp, EIO);
}



/*
 * Function: sd_sense_key_blank_check
 *
 * Description: Recovery actions for a SCSI "Blank Check" sense key.
 *		Has no monetary connotation.
 *
 * Context: May be called from interrupt context
 */

static void
sd_sense_key_blank_check(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp)
{
	struct sd_sense_info	si;

	ASSERT(un != NULL);
	ASSERT(mutex_owned(SD_MUTEX(un)));
	ASSERT(bp != NULL);
	ASSERT(xp != NULL);
	ASSERT(pktp != NULL);

	/*
	 * Blank check is not fatal for removable devices, therefore
	 * it does not require a console message.
	 */
	si.ssi_severity = (un->un_f_has_removable_media) ? SCSI_ERR_ALL :
	    SCSI_ERR_FATAL;
	si.ssi_pfa_flag = FALSE;

	/* Fail the command without retrying; log per severity above. */
	sd_print_sense_msg(un, bp, &si, SD_NO_RETRY_ISSUED);
	sd_return_failed_command(un, bp, EIO);
}




/*
 * Function: sd_sense_key_aborted_command
 *
 * Description: Recovery actions for a SCSI "Aborted Command" sense key.
 *
 * Context: May be called from interrupt context
 */

static void
sd_sense_key_aborted_command(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp)
{
	struct sd_sense_info	si;

	ASSERT(un != NULL);
	ASSERT(mutex_owned(SD_MUTEX(un)));
	ASSERT(bp != NULL);
	ASSERT(xp != NULL);
	ASSERT(pktp != NULL);

	si.ssi_severity = SCSI_ERR_FATAL;
	si.ssi_pfa_flag = FALSE;

	SD_UPDATE_ERRSTATS(un, sd_harderrs);

	/*
	 * This really ought to be a fatal error, but we will retry anyway
	 * as some drives report this as a spurious error.
	 */
	sd_retry_command(un, bp, SD_RETRIES_STANDARD, sd_print_sense_msg,
	    &si, EIO, drv_usectohz(100000), NULL);
}



/*
 * Function: sd_sense_key_default
 *
 * Description: Default recovery action for several SCSI sense keys (basically
 *		attempts a retry).
 *
 * Context: May be called from interrupt context
 */

static void
sd_sense_key_default(struct sd_lun *un,
	uint8_t *sense_datap,
	struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp)
{
	struct sd_sense_info	si;
	uint8_t sense_key = scsi_sense_key(sense_datap);

	ASSERT(un != NULL);
	ASSERT(mutex_owned(SD_MUTEX(un)));
	ASSERT(bp != NULL);
	ASSERT(xp != NULL);
	ASSERT(pktp != NULL);

	SD_UPDATE_ERRSTATS(un, sd_harderrs);

	/*
	 * Undecoded sense key.	Attempt retries and hope that will fix
	 * the problem.  Otherwise, we're dead.
	 */
	if ((pktp->pkt_flags & FLAG_SILENT) == 0) {
		scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
		    "Unhandled Sense Key '%s'\n", sense_keys[sense_key]);
	}

	si.ssi_severity = SCSI_ERR_FATAL;
	si.ssi_pfa_flag = FALSE;

	sd_retry_command(un, bp, SD_RETRIES_STANDARD, sd_print_sense_msg,
	    &si, EIO, (clock_t)0, NULL);
}



/*
 * Function: sd_print_retry_msg
 *
 * Description: Print a message indicating the retry action being taken.
 *
 * Arguments: un - ptr to associated softstate
 *	bp - ptr to buf(9S) for the command
 *	arg - not used.
 *	flag - SD_IMMEDIATE_RETRY_ISSUED, SD_DELAYED_RETRY_ISSUED,
 *	or SD_NO_RETRY_ISSUED
 *
 * Context: May be called from interrupt context
 */
/* ARGSUSED */
static void
sd_print_retry_msg(struct sd_lun *un, struct buf *bp, void *arg, int flag)
{
	struct sd_xbuf	*xp;
	struct scsi_pkt *pktp;
	char *reasonp;
	char *msgp;

	ASSERT(un != NULL);
	ASSERT(mutex_owned(SD_MUTEX(un)));
	ASSERT(bp != NULL);
	pktp = SD_GET_PKTP(bp);
	ASSERT(pktp != NULL);
	xp = SD_GET_XBUF(bp);
	ASSERT(xp != NULL);

	/* Stay quiet while suspended, powered down, or FLAG_SILENT is set. */
	ASSERT(!mutex_owned(&un->un_pm_mutex));
	mutex_enter(&un->un_pm_mutex);
	if ((un->un_state == SD_STATE_SUSPENDED) ||
	    (SD_DEVICE_IS_IN_LOW_POWER(un)) ||
	    (pktp->pkt_flags & FLAG_SILENT)) {
		mutex_exit(&un->un_pm_mutex);
		goto update_pkt_reason;
	}
	mutex_exit(&un->un_pm_mutex);

	/*
	 * Suppress messages if they are all the same pkt_reason; with
	 * TQ, many (up to 256) are returned with the same pkt_reason.
	 * If we are in panic, then suppress the retry messages.
	 */
	switch (flag) {
	case SD_NO_RETRY_ISSUED:
		msgp = "giving up";
		break;
	case SD_IMMEDIATE_RETRY_ISSUED:
	case SD_DELAYED_RETRY_ISSUED:
		if (ddi_in_panic() || (un->un_state == SD_STATE_OFFLINE) ||
		    ((pktp->pkt_reason == un->un_last_pkt_reason) &&
		    (sd_error_level != SCSI_ERR_ALL))) {
			return;
		}
		msgp = "retrying command";
		break;
	default:
		goto update_pkt_reason;
	}

	reasonp = (((pktp->pkt_statistics & STAT_PERR) != 0) ? "parity error" :
	    scsi_rname(pktp->pkt_reason));

	scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
	    "SCSI transport failed: reason '%s': %s\n", reasonp, msgp);

update_pkt_reason:
	/*
	 * Update un->un_last_pkt_reason with the value in pktp->pkt_reason.
	 * This is to prevent multiple console messages for the same failure
	 * condition.  Note that un->un_last_pkt_reason is NOT restored if &
	 * when the command is retried successfully because there still may be
	 * more commands coming back with the same value of pktp->pkt_reason.
	 */
	if ((pktp->pkt_reason != CMD_CMPLT) || (xp->xb_retry_count == 0)) {
		un->un_last_pkt_reason = pktp->pkt_reason;
	}
}


/*
 * Function: sd_print_cmd_incomplete_msg
 *
 * Description: Message logging fn. for a SCSA "CMD_INCOMPLETE" pkt_reason.
 *
 * Arguments: un - ptr to associated softstate
 *	bp - ptr to buf(9S) for the command
 *	arg - passed to sd_print_retry_msg()
 *	code - SD_IMMEDIATE_RETRY_ISSUED, SD_DELAYED_RETRY_ISSUED,
 *	or SD_NO_RETRY_ISSUED
 *
 * Context: May be called from interrupt context
 */

static void
sd_print_cmd_incomplete_msg(struct sd_lun *un, struct buf *bp, void *arg,
    int code)
{
	dev_info_t	*dip;

	ASSERT(un != NULL);
	ASSERT(mutex_owned(SD_MUTEX(un)));
	ASSERT(bp != NULL);

	switch (code) {
	case SD_NO_RETRY_ISSUED:
		/* Command was failed. Someone turned off this target? */
		if (un->un_state != SD_STATE_OFFLINE) {
			/*
			 * Suppress message if we are detaching and
			 * device has been disconnected
			 * Note that DEVI_IS_DEVICE_REMOVED is a consolidation
			 * private interface and not part of the DDI
			 */
			dip = un->un_sd->sd_dev;
			if (!(DEVI_IS_DETACHING(dip) &&
			    DEVI_IS_DEVICE_REMOVED(dip))) {
				scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
				    "disk not responding to selection\n");
			}
			New_state(un, SD_STATE_OFFLINE);
		}
		break;

	case SD_DELAYED_RETRY_ISSUED:
	case SD_IMMEDIATE_RETRY_ISSUED:
	default:
		/* Command was successfully queued for retry */
		sd_print_retry_msg(un, bp, arg, code);
		break;
	}
}


/*
 * Function: sd_pkt_reason_cmd_incomplete
 *
 * Description: Recovery actions for a SCSA "CMD_INCOMPLETE" pkt_reason.
 *
 * Context: May be called from interrupt context
 */

static void
sd_pkt_reason_cmd_incomplete(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp)
{
	int flag = SD_RETRIES_STANDARD | SD_RETRIES_ISOLATE;

	ASSERT(un != NULL);
	ASSERT(mutex_owned(SD_MUTEX(un)));
	ASSERT(bp != NULL);
	ASSERT(xp != NULL);
	ASSERT(pktp != NULL);

	/* Do not do a reset if selection did not complete */
	/* Note: Should this not just check the bit? */
	if (pktp->pkt_state != STATE_GOT_BUS) {
		SD_UPDATE_ERRSTATS(un, sd_transerrs);
		sd_reset_target(un, pktp);
	}

	/*
	 * If the target was not successfully selected, then set
	 * SD_RETRIES_FAILFAST to indicate that we lost communication
	 * with the target, and further retries and/or commands are
	 * likely to take a long time.
18163 */ 18164 if ((pktp->pkt_state & STATE_GOT_TARGET) == 0) { 18165 flag |= SD_RETRIES_FAILFAST; 18166 } 18167 18168 SD_UPDATE_RESERVATION_STATUS(un, pktp); 18169 18170 sd_retry_command(un, bp, flag, 18171 sd_print_cmd_incomplete_msg, NULL, EIO, SD_RESTART_TIMEOUT, NULL); 18172 } 18173 18174 18175 18176 /* 18177 * Function: sd_pkt_reason_cmd_tran_err 18178 * 18179 * Description: Recovery actions for a SCSA "CMD_TRAN_ERR" pkt_reason. 18180 * 18181 * Context: May be called from interrupt context 18182 */ 18183 18184 static void 18185 sd_pkt_reason_cmd_tran_err(struct sd_lun *un, struct buf *bp, 18186 struct sd_xbuf *xp, struct scsi_pkt *pktp) 18187 { 18188 ASSERT(un != NULL); 18189 ASSERT(mutex_owned(SD_MUTEX(un))); 18190 ASSERT(bp != NULL); 18191 ASSERT(xp != NULL); 18192 ASSERT(pktp != NULL); 18193 18194 /* 18195 * Do not reset if we got a parity error, or if 18196 * selection did not complete. 18197 */ 18198 SD_UPDATE_ERRSTATS(un, sd_harderrs); 18199 /* Note: Should this not just check the bit for pkt_state? */ 18200 if (((pktp->pkt_statistics & STAT_PERR) == 0) && 18201 (pktp->pkt_state != STATE_GOT_BUS)) { 18202 SD_UPDATE_ERRSTATS(un, sd_transerrs); 18203 sd_reset_target(un, pktp); 18204 } 18205 18206 SD_UPDATE_RESERVATION_STATUS(un, pktp); 18207 18208 sd_retry_command(un, bp, (SD_RETRIES_STANDARD | SD_RETRIES_ISOLATE), 18209 sd_print_retry_msg, NULL, EIO, SD_RESTART_TIMEOUT, NULL); 18210 } 18211 18212 18213 18214 /* 18215 * Function: sd_pkt_reason_cmd_reset 18216 * 18217 * Description: Recovery actions for a SCSA "CMD_RESET" pkt_reason. 
 *
 *     Context: May be called from interrupt context
 */

static void
sd_pkt_reason_cmd_reset(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp)
{
	ASSERT(un != NULL);
	ASSERT(mutex_owned(SD_MUTEX(un)));
	ASSERT(bp != NULL);
	ASSERT(xp != NULL);
	ASSERT(pktp != NULL);

	/* The target may still be running the command, so try to reset. */
	SD_UPDATE_ERRSTATS(un, sd_transerrs);
	sd_reset_target(un, pktp);

	SD_UPDATE_RESERVATION_STATUS(un, pktp);

	/*
	 * If pkt_reason is CMD_RESET chances are that this pkt got
	 * reset because another target on this bus caused it. The target
	 * that caused it should get CMD_TIMEOUT with pkt_statistics
	 * of STAT_TIMEOUT/STAT_DEV_RESET.
	 * Retry as a "victim" of another device's reset.
	 */

	sd_retry_command(un, bp, (SD_RETRIES_VICTIM | SD_RETRIES_ISOLATE),
	    sd_print_retry_msg, NULL, EIO, SD_RESTART_TIMEOUT, NULL);
}




/*
 * Function: sd_pkt_reason_cmd_aborted
 *
 * Description: Recovery actions for a SCSA "CMD_ABORTED" pkt_reason.
 *
 *     Context: May be called from interrupt context
 */

static void
sd_pkt_reason_cmd_aborted(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp)
{
	ASSERT(un != NULL);
	ASSERT(mutex_owned(SD_MUTEX(un)));
	ASSERT(bp != NULL);
	ASSERT(xp != NULL);
	ASSERT(pktp != NULL);

	/* The target may still be running the command, so try to reset. */
	SD_UPDATE_ERRSTATS(un, sd_transerrs);
	sd_reset_target(un, pktp);

	SD_UPDATE_RESERVATION_STATUS(un, pktp);

	/*
	 * If pkt_reason is CMD_ABORTED chances are that this pkt got
	 * aborted because another target on this bus caused it. The target
	 * that caused it should get CMD_TIMEOUT with pkt_statistics
	 * of STAT_TIMEOUT/STAT_DEV_RESET.
	 */

	sd_retry_command(un, bp, (SD_RETRIES_VICTIM | SD_RETRIES_ISOLATE),
	    sd_print_retry_msg, NULL, EIO, SD_RESTART_TIMEOUT, NULL);
}



/*
 * Function: sd_pkt_reason_cmd_timeout
 *
 * Description: Recovery actions for a SCSA "CMD_TIMEOUT" pkt_reason.
 *
 *     Context: May be called from interrupt context
 */

static void
sd_pkt_reason_cmd_timeout(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp)
{
	ASSERT(un != NULL);
	ASSERT(mutex_owned(SD_MUTEX(un)));
	ASSERT(bp != NULL);
	ASSERT(xp != NULL);
	ASSERT(pktp != NULL);


	SD_UPDATE_ERRSTATS(un, sd_transerrs);
	sd_reset_target(un, pktp);

	SD_UPDATE_RESERVATION_STATUS(un, pktp);

	/*
	 * A command timeout indicates that we could not establish
	 * communication with the target, so set SD_RETRIES_FAILFAST
	 * as further retries/commands are likely to take a long time.
	 */
	sd_retry_command(un, bp,
	    (SD_RETRIES_STANDARD | SD_RETRIES_ISOLATE | SD_RETRIES_FAILFAST),
	    sd_print_retry_msg, NULL, EIO, SD_RESTART_TIMEOUT, NULL);
}



/*
 * Function: sd_pkt_reason_cmd_unx_bus_free
 *
 * Description: Recovery actions for a SCSA "CMD_UNX_BUS_FREE" pkt_reason.
 *		Retries the command; the retry message is suppressed when
 *		the bus free was accompanied by a parity error (STAT_PERR).
 *
 *     Context: May be called from interrupt context
 */

static void
sd_pkt_reason_cmd_unx_bus_free(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp)
{
	void (*funcp)(struct sd_lun *un, struct buf *bp, void *arg, int code);

	ASSERT(un != NULL);
	ASSERT(mutex_owned(SD_MUTEX(un)));
	ASSERT(bp != NULL);
	ASSERT(xp != NULL);
	ASSERT(pktp != NULL);

	SD_UPDATE_ERRSTATS(un, sd_harderrs);
	SD_UPDATE_RESERVATION_STATUS(un, pktp);

	/* Only print the retry message for non-parity-error bus frees. */
	funcp = ((pktp->pkt_statistics & STAT_PERR) == 0) ?
	    sd_print_retry_msg : NULL;

	sd_retry_command(un, bp, (SD_RETRIES_STANDARD | SD_RETRIES_ISOLATE),
	    funcp, NULL, EIO, SD_RESTART_TIMEOUT, NULL);
}


/*
 * Function: sd_pkt_reason_cmd_tag_reject
 *
 * Description: Recovery actions for a SCSA "CMD_TAG_REJECT" pkt_reason.
 *		The target rejected a tagged command: disable tagged
 *		queueing for this unit, clamp the throttle, and retry.
 *
 *     Context: May be called from interrupt context
 */

static void
sd_pkt_reason_cmd_tag_reject(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp)
{
	ASSERT(un != NULL);
	ASSERT(mutex_owned(SD_MUTEX(un)));
	ASSERT(bp != NULL);
	ASSERT(xp != NULL);
	ASSERT(pktp != NULL);

	SD_UPDATE_ERRSTATS(un, sd_harderrs);
	pktp->pkt_flags = 0;
	un->un_tagflags = 0;
	if (un->un_f_opt_queueing == TRUE) {
		un->un_throttle = min(un->un_throttle, 3);
	} else {
		un->un_throttle = 1;
	}
	/* Drop the softstate mutex across the call into the HBA driver. */
	mutex_exit(SD_MUTEX(un));
	(void) scsi_ifsetcap(SD_ADDRESS(un), "tagged-qing", 0, 1);
	mutex_enter(SD_MUTEX(un));

	SD_UPDATE_RESERVATION_STATUS(un, pktp);

	/* Legacy behavior not to check retry counts here. */
	sd_retry_command(un, bp, (SD_RETRIES_NOCHECK | SD_RETRIES_ISOLATE),
	    sd_print_retry_msg, NULL, EIO, SD_RESTART_TIMEOUT, NULL);
}


/*
 * Function: sd_pkt_reason_default
 *
 * Description: Default recovery actions for SCSA pkt_reason values that
 *		do not have more explicit recovery actions.
 *
 *     Context: May be called from interrupt context
 */

static void
sd_pkt_reason_default(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp)
{
	ASSERT(un != NULL);
	ASSERT(mutex_owned(SD_MUTEX(un)));
	ASSERT(bp != NULL);
	ASSERT(xp != NULL);
	ASSERT(pktp != NULL);

	SD_UPDATE_ERRSTATS(un, sd_transerrs);
	sd_reset_target(un, pktp);

	SD_UPDATE_RESERVATION_STATUS(un, pktp);

	sd_retry_command(un, bp, (SD_RETRIES_STANDARD | SD_RETRIES_ISOLATE),
	    sd_print_retry_msg, NULL, EIO, SD_RESTART_TIMEOUT, NULL);
}



/*
 * Function: sd_pkt_status_check_condition
 *
 * Description: Recovery actions for a "STATUS_CHECK" SCSI command status.
 *
 *     Context: May be called from interrupt context
 */

static void
sd_pkt_status_check_condition(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp)
{
	ASSERT(un != NULL);
	ASSERT(mutex_owned(SD_MUTEX(un)));
	ASSERT(bp != NULL);
	ASSERT(xp != NULL);
	ASSERT(pktp != NULL);

	SD_TRACE(SD_LOG_IO, un, "sd_pkt_status_check_condition: "
	    "entry: buf:0x%p xp:0x%p\n", bp, xp);

	/*
	 * If ARQ is NOT enabled, then issue a REQUEST SENSE command (the
	 * command will be retried after the request sense). Otherwise, retry
	 * the command. Note: we are issuing the request sense even though the
	 * retry limit may have been reached for the failed command.
	 */
	if (un->un_f_arq_enabled == FALSE) {
		SD_INFO(SD_LOG_IO_CORE, un, "sd_pkt_status_check_condition: "
		    "no ARQ, sending request sense command\n");
		sd_send_request_sense_command(un, bp, pktp);
	} else {
		SD_INFO(SD_LOG_IO_CORE, un, "sd_pkt_status_check_condition: "
		    "ARQ,retrying request sense command\n");
#if defined(__i386) || defined(__amd64)
		/*
		 * The SD_RETRY_DELAY value needs to be adjusted here
		 * when SD_RETRY_DELAY changes in sddef.h
		 */
		sd_retry_command(un, bp, SD_RETRIES_STANDARD, NULL, NULL, EIO,
		    un->un_f_is_fibre?drv_usectohz(100000):(clock_t)0,
		    NULL);
#else
		sd_retry_command(un, bp, SD_RETRIES_STANDARD, NULL, NULL,
		    EIO, SD_RETRY_DELAY, NULL);
#endif
	}

	SD_TRACE(SD_LOG_IO_CORE, un, "sd_pkt_status_check_condition: exit\n");
}


/*
 * Function: sd_pkt_status_busy
 *
 * Description: Recovery actions for a "STATUS_BUSY" SCSI command status.
 *		Retries (with a busy-timeout delay) up to un_busy_retry_count,
 *		escalating to a LUN/target/bus reset at the reset threshold.
 *
 *     Context: May be called from interrupt context
 */

static void
sd_pkt_status_busy(struct sd_lun *un, struct buf *bp, struct sd_xbuf *xp,
    struct scsi_pkt *pktp)
{
	ASSERT(un != NULL);
	ASSERT(mutex_owned(SD_MUTEX(un)));
	ASSERT(bp != NULL);
	ASSERT(xp != NULL);
	ASSERT(pktp != NULL);

	SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
	    "sd_pkt_status_busy: entry\n");

	/* If retries are exhausted, just fail the command. */
	if (xp->xb_retry_count >= un->un_busy_retry_count) {
		scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
		    "device busy too long\n");
		sd_return_failed_command(un, bp, EIO);
		SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
		    "sd_pkt_status_busy: exit\n");
		return;
	}
	xp->xb_retry_count++;

	/*
	 * Try to reset the target. However, we do not want to perform
	 * more than one reset if the device continues to fail. The reset
	 * will be performed when the retry count reaches the reset
	 * threshold. This threshold should be set such that at least
	 * one retry is issued before the reset is performed.
	 */
	if (xp->xb_retry_count ==
	    ((un->un_reset_retry_count < 2) ? 2 : un->un_reset_retry_count)) {
		int rval = 0;
		mutex_exit(SD_MUTEX(un));
		if (un->un_f_allow_bus_device_reset == TRUE) {
			/*
			 * First try to reset the LUN; if we cannot then
			 * try to reset the target.
			 */
			if (un->un_f_lun_reset_enabled == TRUE) {
				SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
				    "sd_pkt_status_busy: RESET_LUN\n");
				rval = scsi_reset(SD_ADDRESS(un), RESET_LUN);
			}
			if (rval == 0) {
				SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
				    "sd_pkt_status_busy: RESET_TARGET\n");
				rval = scsi_reset(SD_ADDRESS(un), RESET_TARGET);
			}
		}
		if (rval == 0) {
			/*
			 * If the RESET_LUN and/or RESET_TARGET failed,
			 * try RESET_ALL
			 */
			SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
			    "sd_pkt_status_busy: RESET_ALL\n");
			rval = scsi_reset(SD_ADDRESS(un), RESET_ALL);
		}
		mutex_enter(SD_MUTEX(un));
		if (rval == 0) {
			/*
			 * The RESET_LUN, RESET_TARGET, and/or RESET_ALL failed.
			 * At this point we give up & fail the command.
			 */
			sd_return_failed_command(un, bp, EIO);
			SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
			    "sd_pkt_status_busy: exit (failed cmd)\n");
			return;
		}
	}

	/*
	 * Retry the command. Be sure to specify SD_RETRIES_NOCHECK as
	 * we have already checked the retry counts above.
	 */
	sd_retry_command(un, bp, SD_RETRIES_NOCHECK, NULL, NULL,
	    EIO, un->un_busy_timeout, NULL);

	SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
	    "sd_pkt_status_busy: exit\n");
}


/*
 * Function: sd_pkt_status_reservation_conflict
 *
 * Description: Recovery actions for a "STATUS_RESERVATION_CONFLICT" SCSI
 *		command status.
 *
 *     Context: May be called from interrupt context
 */

static void
sd_pkt_status_reservation_conflict(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp)
{
	ASSERT(un != NULL);
	ASSERT(mutex_owned(SD_MUTEX(un)));
	ASSERT(bp != NULL);
	ASSERT(xp != NULL);
	ASSERT(pktp != NULL);

	/*
	 * If the command was PERSISTENT_RESERVATION_[IN|OUT] then reservation
	 * conflict could be due to various reasons like incorrect keys, not
	 * registered or not reserved etc. So, we return EACCES to the caller.
	 */
	if (un->un_reservation_type == SD_SCSI3_RESERVATION) {
		int cmd = SD_GET_PKT_OPCODE(pktp);
		if ((cmd == SCMD_PERSISTENT_RESERVE_IN) ||
		    (cmd == SCMD_PERSISTENT_RESERVE_OUT)) {
			sd_return_failed_command(un, bp, EACCES);
			return;
		}
	}

	un->un_resvd_status |= SD_RESERVATION_CONFLICT;

	if ((un->un_resvd_status & SD_FAILFAST) != 0) {
		if (sd_failfast_enable != 0) {
			/* By definition, we must panic here.... */
			sd_panic_for_res_conflict(un);
			/*NOTREACHED*/
		}
		SD_ERROR(SD_LOG_IO, un,
		    "sd_handle_resv_conflict: Disk Reserved\n");
		sd_return_failed_command(un, bp, EACCES);
		return;
	}

	/*
	 * 1147670: retry only if sd_retry_on_reservation_conflict
	 * property is set (default is 1). Retries will not succeed
	 * on a disk reserved by another initiator. HA systems
	 * may reset this via sd.conf to avoid these retries.
	 *
	 * Note: The legacy return code for this failure is EIO, however EACCES
	 * seems more appropriate for a reservation conflict.
	 */
	if (sd_retry_on_reservation_conflict == 0) {
		SD_ERROR(SD_LOG_IO, un,
		    "sd_handle_resv_conflict: Device Reserved\n");
		sd_return_failed_command(un, bp, EIO);
		return;
	}

	/*
	 * Retry the command if we can.
	 *
	 * Note: The legacy return code for this failure is EIO, however EACCES
	 * seems more appropriate for a reservation conflict.
	 */
	sd_retry_command(un, bp, SD_RETRIES_STANDARD, NULL, NULL, EIO,
	    (clock_t)2, NULL);
}



/*
 * Function: sd_pkt_status_qfull
 *
 * Description: Handle a QUEUE FULL condition from the target. This can
 *		occur if the HBA does not handle the queue full condition.
 *		(Basically this means third-party HBAs as Sun HBAs will
 *		handle the queue full condition.) Note that if there are
 *		some commands already in the transport, then the queue full
 *		has occurred because the queue for this nexus is actually
 *		full. If there are no commands in the transport, then the
 *		queue full is resulting from some other initiator or lun
 *		consuming all the resources at the target.
 *
 *     Context: May be called from interrupt context
 */

static void
sd_pkt_status_qfull(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp)
{
	ASSERT(un != NULL);
	ASSERT(mutex_owned(SD_MUTEX(un)));
	ASSERT(bp != NULL);
	ASSERT(xp != NULL);
	ASSERT(pktp != NULL);

	SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
	    "sd_pkt_status_qfull: entry\n");

	/*
	 * Just lower the QFULL throttle and retry the command. Note that
	 * we do not limit the number of retries here.
	 */
	sd_reduce_throttle(un, SD_THROTTLE_QFULL);
	sd_retry_command(un, bp, SD_RETRIES_NOCHECK, NULL, NULL, 0,
	    SD_RESTART_TIMEOUT, NULL);

	SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
	    "sd_pkt_status_qfull: exit\n");
}


/*
 * Function: sd_reset_target
 *
 * Description: Issue a scsi_reset(9F), with either RESET_LUN,
 *		RESET_TARGET, or RESET_ALL.  Escalates from the narrowest
 *		allowed reset scope to RESET_ALL if the narrower resets
 *		fail.  Skipped entirely if the transport already reset the
 *		device (per pkt_statistics).
 *
 *     Context: May be called under interrupt context.
 */

static void
sd_reset_target(struct sd_lun *un, struct scsi_pkt *pktp)
{
	int rval = 0;

	ASSERT(un != NULL);
	ASSERT(mutex_owned(SD_MUTEX(un)));
	ASSERT(pktp != NULL);

	SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sd_reset_target: entry\n");

	/*
	 * No need to reset if the transport layer has already done so.
	 */
	if ((pktp->pkt_statistics &
	    (STAT_BUS_RESET | STAT_DEV_RESET | STAT_ABORTED)) != 0) {
		SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
		    "sd_reset_target: no reset\n");
		return;
	}

	/* Drop the softstate mutex across the calls into the HBA driver. */
	mutex_exit(SD_MUTEX(un));

	if (un->un_f_allow_bus_device_reset == TRUE) {
		if (un->un_f_lun_reset_enabled == TRUE) {
			SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
			    "sd_reset_target: RESET_LUN\n");
			rval = scsi_reset(SD_ADDRESS(un), RESET_LUN);
		}
		if (rval == 0) {
			SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
			    "sd_reset_target: RESET_TARGET\n");
			rval = scsi_reset(SD_ADDRESS(un), RESET_TARGET);
		}
	}

	if (rval == 0) {
		SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
		    "sd_reset_target: RESET_ALL\n");
		(void) scsi_reset(SD_ADDRESS(un), RESET_ALL);
	}

	mutex_enter(SD_MUTEX(un));

	SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sd_reset_target: exit\n");
}

/*
 * Function: sd_target_change_task
 *
 * Description: Handle dynamic target change.  Re-reads the device capacity
 *		and, if it has grown, updates the block info and logs a
 *		LUN-expansion sysevent when the EFI label is smaller than
 *		the new capacity.
 *
 *     Context: Executes in a taskq() thread context
 */
static void
sd_target_change_task(void *arg)
{
	struct sd_lun		*un = arg;
	uint64_t		capacity;
	diskaddr_t		label_cap;
	uint_t			lbasize;
	sd_ssc_t		*ssc;

	ASSERT(un != NULL);
	ASSERT(!mutex_owned(SD_MUTEX(un)));

	if ((un->un_f_blockcount_is_valid == FALSE) ||
	    (un->un_f_tgt_blocksize_is_valid == FALSE)) {
		return;
	}

	ssc = sd_ssc_init(un);

	if (sd_send_scsi_READ_CAPACITY(ssc, &capacity,
	    &lbasize, SD_PATH_DIRECT) != 0) {
		SD_ERROR(SD_LOG_ERROR, un,
		    "sd_target_change_task: fail to read capacity\n");
		sd_ssc_assessment(ssc, SD_FMT_IGNORE);
		goto task_exit;
	}

	mutex_enter(SD_MUTEX(un));
	/* Only act on capacity growth; shrink or no change is ignored. */
	if (capacity <= un->un_blockcount) {
		mutex_exit(SD_MUTEX(un));
		goto task_exit;
	}

	sd_update_block_info(un, lbasize, capacity);
	mutex_exit(SD_MUTEX(un));

	/*
	 * If lun is EFI labeled and lun capacity is greater than the
	 * capacity contained in the label, log a sys event.
	 */
	if (cmlb_efi_label_capacity(un->un_cmlbhandle, &label_cap,
	    (void*)SD_PATH_DIRECT) == 0) {
		mutex_enter(SD_MUTEX(un));
		if (un->un_f_blockcount_is_valid &&
		    un->un_blockcount > label_cap) {
			mutex_exit(SD_MUTEX(un));
			sd_log_lun_expansion_event(un, KM_SLEEP);
		} else {
			mutex_exit(SD_MUTEX(un));
		}
	}

task_exit:
	sd_ssc_fini(ssc);
}

/*
 * Function: sd_log_lun_expansion_event
 *
 * Description: Log lun expansion sys event
 *
 *     Context: Never called from interrupt context
 */
static void
sd_log_lun_expansion_event(struct sd_lun *un, int km_flag)
{
	int err;
	char			*path;
	nvlist_t		*dle_attr_list;

	/* Allocate and build sysevent attribute list */
	err = nvlist_alloc(&dle_attr_list, NV_UNIQUE_NAME_TYPE, km_flag);
	if (err != 0) {
		SD_ERROR(SD_LOG_ERROR, un,
		    "sd_log_lun_expansion_event: fail to allocate space\n");
		return;
	}

	path = kmem_alloc(MAXPATHLEN, km_flag);
	if (path == NULL) {
		nvlist_free(dle_attr_list);
		SD_ERROR(SD_LOG_ERROR, un,
		    "sd_log_lun_expansion_event: fail to allocate space\n");
		return;
	}
	/*
	 * Add path attribute to identify the lun.
	 * We are using minor node 'a' as the sysevent attribute.
	 */
	(void) snprintf(path, MAXPATHLEN, "/devices");
	(void) ddi_pathname(SD_DEVINFO(un), path + strlen(path));
	(void) snprintf(path + strlen(path), MAXPATHLEN - strlen(path),
	    ":a");

	err = nvlist_add_string(dle_attr_list, DEV_PHYS_PATH, path);
	if (err != 0) {
		nvlist_free(dle_attr_list);
		kmem_free(path, MAXPATHLEN);
		SD_ERROR(SD_LOG_ERROR, un,
		    "sd_log_lun_expansion_event: fail to add attribute\n");
		return;
	}

	/* Log dynamic lun expansion sysevent */
	err = ddi_log_sysevent(SD_DEVINFO(un), SUNW_VENDOR, EC_DEV_STATUS,
	    ESC_DEV_DLE, dle_attr_list, NULL, km_flag);
	if (err != DDI_SUCCESS) {
		SD_ERROR(SD_LOG_ERROR, un,
		    "sd_log_lun_expansion_event: fail to log sysevent\n");
	}

	nvlist_free(dle_attr_list);
	kmem_free(path, MAXPATHLEN);
}

/*
 * Function: sd_media_change_task
 *
 * Description: Recovery action for CDROM to become available.
 *
 *     Context: Executes in a taskq() thread context
 */

static void
sd_media_change_task(void *arg)
{
	struct scsi_pkt	*pktp = arg;
	struct sd_lun	*un;
	struct buf	*bp;
	struct sd_xbuf	*xp;
	int		err = 0;
	int		retry_count = 0;
	int		retry_limit = SD_UNIT_ATTENTION_RETRY/10;
	struct sd_sense_info	si;

	ASSERT(pktp != NULL);
	bp = (struct buf *)pktp->pkt_private;
	ASSERT(bp != NULL);
	xp = SD_GET_XBUF(bp);
	ASSERT(xp != NULL);
	un = SD_GET_UN(bp);
	ASSERT(un != NULL);
	ASSERT(!mutex_owned(SD_MUTEX(un)));
	ASSERT(un->un_f_monitor_media_state);

	si.ssi_severity = SCSI_ERR_INFO;
	si.ssi_pfa_flag = FALSE;

	/*
	 * When a reset is issued on a CDROM, it takes a long time to
	 * recover. First few attempts to read capacity and other things
	 * related to handling unit attention fail (with a ASC 0x4 and
	 * ASCQ 0x1). In that case we want to do enough retries and we want
	 * to limit the retries in other cases of genuine failures like
	 * no media in drive.
	 */
	while (retry_count++ < retry_limit) {
		if ((err = sd_handle_mchange(un)) == 0) {
			break;
		}
		if (err == EAGAIN) {
			/* Device is becoming ready: allow the full limit. */
			retry_limit = SD_UNIT_ATTENTION_RETRY;
		}
		/* Sleep for 0.5 sec. & try again */
		delay(drv_usectohz(500000));
	}

	/*
	 * Dispatch (retry or fail) the original command here,
	 * along with appropriate console messages....
	 *
	 * Must grab the mutex before calling sd_retry_command,
	 * sd_print_sense_msg and sd_return_failed_command.
	 */
	mutex_enter(SD_MUTEX(un));
	if (err != SD_CMD_SUCCESS) {
		SD_UPDATE_ERRSTATS(un, sd_harderrs);
		SD_UPDATE_ERRSTATS(un, sd_rq_nodev_err);
		si.ssi_severity = SCSI_ERR_FATAL;
		sd_print_sense_msg(un, bp, &si, SD_NO_RETRY_ISSUED);
		sd_return_failed_command(un, bp, EIO);
	} else {
		sd_retry_command(un, bp, SD_RETRIES_NOCHECK, sd_print_sense_msg,
		    &si, EIO, (clock_t)0, NULL);
	}
	mutex_exit(SD_MUTEX(un));
}



/*
 * Function: sd_handle_mchange
 *
 * Description: Perform geometry validation & other recovery when CDROM
 *		has been removed from drive.
 *
 * Return Code: 0 for success
 *		errno-type return code of either sd_send_scsi_DOORLOCK() or
 *		sd_send_scsi_READ_CAPACITY()
 *
 *     Context: Executes in a taskq() thread context
 */

static int
sd_handle_mchange(struct sd_lun *un)
{
	uint64_t	capacity;
	uint32_t	lbasize;
	int		rval;
	sd_ssc_t	*ssc;

	ASSERT(!mutex_owned(SD_MUTEX(un)));
	ASSERT(un->un_f_monitor_media_state);

	ssc = sd_ssc_init(un);
	rval = sd_send_scsi_READ_CAPACITY(ssc, &capacity, &lbasize,
	    SD_PATH_DIRECT_PRIORITY);

	if (rval != 0)
		goto failed;

	mutex_enter(SD_MUTEX(un));
	sd_update_block_info(un, lbasize, capacity);

	/* Refresh the capacity kstat with the newly read geometry. */
	if (un->un_errstats != NULL) {
		struct sd_errstats *stp =
		    (struct sd_errstats *)un->un_errstats->ks_data;
		stp->sd_capacity.value.ui64 = (uint64_t)
		    ((uint64_t)un->un_blockcount *
		    (uint64_t)un->un_tgt_blocksize);
	}

	/*
	 * Check if the media in the device is writable or not
	 */
	if (ISCD(un)) {
		sd_check_for_writable_cd(ssc, SD_PATH_DIRECT_PRIORITY);
	}

	/*
	 * Note: Maybe let the strategy/partitioning chain worry about getting
	 * valid geometry.
	 */
	mutex_exit(SD_MUTEX(un));
	cmlb_invalidate(un->un_cmlbhandle, (void *)SD_PATH_DIRECT_PRIORITY);


	if (cmlb_validate(un->un_cmlbhandle, 0,
	    (void *)SD_PATH_DIRECT_PRIORITY) != 0) {
		sd_ssc_fini(ssc);
		return (EIO);
	} else {
		if (un->un_f_pkstats_enabled) {
			sd_set_pstats(un);
			SD_TRACE(SD_LOG_IO_PARTITION, un,
			    "sd_handle_mchange: un:0x%p pstats created and "
			    "set\n", un);
		}
	}

	/*
	 * Try to lock the door
	 */
	rval = sd_send_scsi_DOORLOCK(ssc, SD_REMOVAL_PREVENT,
	    SD_PATH_DIRECT_PRIORITY);
failed:
	if (rval != 0)
		sd_ssc_assessment(ssc, SD_FMT_IGNORE);
	sd_ssc_fini(ssc);
	return (rval);
}


/*
 * Function: sd_send_scsi_DOORLOCK
 *
 * Description: Issue the scsi DOOR LOCK command
 *
 *   Arguments: ssc   - ssc contains pointer to driver soft state (unit)
 *		structure for this target.
 *		flag  - SD_REMOVAL_ALLOW
 *			SD_REMOVAL_PREVENT
 *		path_flag - SD_PATH_DIRECT to use the USCSI "direct" chain and
 *			the normal command waitq, or SD_PATH_DIRECT_PRIORITY
 *			to use the USCSI "direct" chain and bypass the normal
 *			command waitq. SD_PATH_DIRECT_PRIORITY is used when this
 *			command is issued as part of an error recovery action.
 *
 * Return Code: 0   - Success
 *		errno return code from sd_ssc_send()
 *
 *     Context: Can sleep.
 */

static int
sd_send_scsi_DOORLOCK(sd_ssc_t *ssc, int flag, int path_flag)
{
	struct scsi_extended_sense	sense_buf;
	union scsi_cdb		cdb;
	struct uscsi_cmd	ucmd_buf;
	int			status;
	struct sd_lun		*un;

	ASSERT(ssc != NULL);
	un = ssc->ssc_un;
	ASSERT(un != NULL);
	ASSERT(!mutex_owned(SD_MUTEX(un)));

	SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_DOORLOCK: entry: un:0x%p\n", un);

	/* already determined doorlock is not supported, fake success */
	if (un->un_f_doorlock_supported == FALSE) {
		return (0);
	}

	/*
	 * If we are ejecting and see an SD_REMOVAL_PREVENT
	 * ignore the command so we can complete the eject
	 * operation.
	 */
	if (flag == SD_REMOVAL_PREVENT) {
		mutex_enter(SD_MUTEX(un));
		if (un->un_f_ejecting == TRUE) {
			mutex_exit(SD_MUTEX(un));
			return (EAGAIN);
		}
		mutex_exit(SD_MUTEX(un));
	}

	bzero(&cdb, sizeof (cdb));
	bzero(&ucmd_buf, sizeof (ucmd_buf));

	/* PREVENT ALLOW MEDIUM REMOVAL; byte 4 carries the prevent flag. */
	cdb.scc_cmd = SCMD_DOORLOCK;
	cdb.cdb_opaque[4] = (uchar_t)flag;

	ucmd_buf.uscsi_cdb	= (char *)&cdb;
	ucmd_buf.uscsi_cdblen	= CDB_GROUP0;
	ucmd_buf.uscsi_bufaddr	= NULL;
	ucmd_buf.uscsi_buflen	= 0;
	ucmd_buf.uscsi_rqbuf	= (caddr_t)&sense_buf;
	ucmd_buf.uscsi_rqlen	= sizeof (sense_buf);
	ucmd_buf.uscsi_flags	= USCSI_RQENABLE | USCSI_SILENT;
	ucmd_buf.uscsi_timeout	= 15;

	SD_TRACE(SD_LOG_IO, un,
	    "sd_send_scsi_DOORLOCK: returning sd_ssc_send\n");

	status = sd_ssc_send(ssc, &ucmd_buf, FKIOCTL,
	    UIO_SYSSPACE, path_flag);

	if (status == 0)
		sd_ssc_assessment(ssc, SD_FMT_STANDARD);

	/*
	 * ILLEGAL REQUEST means the device does not implement DOOR LOCK:
	 * remember that and fake success from now on.
	 */
	if ((status == EIO) && (ucmd_buf.uscsi_status == STATUS_CHECK) &&
	    (ucmd_buf.uscsi_rqstatus == STATUS_GOOD) &&
	    (scsi_sense_key((uint8_t *)&sense_buf) == KEY_ILLEGAL_REQUEST)) {
		sd_ssc_assessment(ssc, SD_FMT_IGNORE);

		/* fake success and skip subsequent doorlock commands */
		un->un_f_doorlock_supported = FALSE;
		return (0);
	}

	return (status);
}

/*
 * Function: sd_send_scsi_READ_CAPACITY
 *
 * Description: This routine uses the scsi READ CAPACITY command to determine
 *		the device capacity in number of blocks and the device native
 *		block size. If this function returns a failure, then the
 *		values in *capp and *lbap are undefined.  If the capacity
 *		returned is 0xffffffff then the lun is too large for a
 *		normal READ CAPACITY command and the results of a
 *		READ CAPACITY 16 will be used instead.
 *
 *   Arguments: ssc   - ssc contains ptr to soft state struct for the target
 *		capp - ptr to unsigned 64-bit variable to receive the
 *			capacity value from the command.
 *		lbap - ptr to unsigned 32-bit variable to receive the
 *			block size value from the command
 *		path_flag - SD_PATH_DIRECT to use the USCSI "direct" chain and
 *			the normal command waitq, or SD_PATH_DIRECT_PRIORITY
 *			to use the USCSI "direct" chain and bypass the normal
 *			command waitq. SD_PATH_DIRECT_PRIORITY is used when this
 *			command is issued as part of an error recovery action.
 *
 * Return Code: 0   - Success
 *		EIO - IO error
 *		EACCES - Reservation conflict detected
 *		EAGAIN - Device is becoming ready
 *		errno return code from sd_ssc_send()
 *
 *     Context: Can sleep.  Blocks until command completes.
 */

#define	SD_CAPACITY_SIZE	sizeof (struct scsi_capacity)

static int
sd_send_scsi_READ_CAPACITY(sd_ssc_t *ssc, uint64_t *capp, uint32_t *lbap,
    int path_flag)
{
	struct scsi_extended_sense	sense_buf;
	struct uscsi_cmd	ucmd_buf;
	union scsi_cdb		cdb;
	uint32_t		*capacity_buf;
	uint64_t		capacity;
	uint32_t		lbasize;
	int			status;
	struct sd_lun		*un;

	ASSERT(ssc != NULL);

	un = ssc->ssc_un;
	ASSERT(un != NULL);
	ASSERT(!mutex_owned(SD_MUTEX(un)));
	ASSERT(capp != NULL);
	ASSERT(lbap != NULL);

	SD_TRACE(SD_LOG_IO, un,
	    "sd_send_scsi_READ_CAPACITY: entry: un:0x%p\n", un);

	/*
	 * First send a READ_CAPACITY command to the target.
	 * (This command is mandatory under SCSI-2.)
	 *
	 * Set up the CDB for the READ_CAPACITY command.  The Partial
	 * Medium Indicator bit is cleared.  The address field must be
	 * zero if the PMI bit is zero.
	 */
	bzero(&cdb, sizeof (cdb));
	bzero(&ucmd_buf, sizeof (ucmd_buf));

	capacity_buf = kmem_zalloc(SD_CAPACITY_SIZE, KM_SLEEP);

	cdb.scc_cmd = SCMD_READ_CAPACITY;

	ucmd_buf.uscsi_cdb	= (char *)&cdb;
	ucmd_buf.uscsi_cdblen	= CDB_GROUP1;
	ucmd_buf.uscsi_bufaddr	= (caddr_t)capacity_buf;
	ucmd_buf.uscsi_buflen	= SD_CAPACITY_SIZE;
	ucmd_buf.uscsi_rqbuf	= (caddr_t)&sense_buf;
	ucmd_buf.uscsi_rqlen	= sizeof (sense_buf);
	ucmd_buf.uscsi_flags	= USCSI_RQENABLE | USCSI_READ | USCSI_SILENT;
	ucmd_buf.uscsi_timeout	= 60;

	status = sd_ssc_send(ssc, &ucmd_buf, FKIOCTL,
	    UIO_SYSSPACE, path_flag);

	switch (status) {
	case 0:
		/* Return failure if we did not get valid capacity data. */
		if (ucmd_buf.uscsi_resid != 0) {
			sd_ssc_set_info(ssc, SSC_FLAGS_INVALID_DATA,
			    "sd_send_scsi_READ_CAPACITY received "
			    "invalid capacity data");
			kmem_free(capacity_buf, SD_CAPACITY_SIZE);
			return (EIO);
		}

		/*
		 * Read capacity and block size from the READ CAPACITY 10 data.
		 * This data may be adjusted later due to device specific
		 * issues.
		 *
		 * According to the SCSI spec, the READ CAPACITY 10
		 * command returns the following:
		 *
		 *  bytes 0-3: Maximum logical block address available.
		 *		(MSB in byte:0 & LSB in byte:3)
		 *
		 *  bytes 4-7: Block length in bytes
		 *		(MSB in byte:4 & LSB in byte:7)
		 *
		 */
		capacity = BE_32(capacity_buf[0]);
		lbasize = BE_32(capacity_buf[1]);

		/*
		 * Done with capacity_buf
		 */
		kmem_free(capacity_buf, SD_CAPACITY_SIZE);

		/*
		 * if the reported capacity is set to all 0xf's, then
		 * this disk is too large and requires SBC-2 commands.
		 * Reissue the request using READ CAPACITY 16.
		 */
		if (capacity == 0xffffffff) {
			sd_ssc_assessment(ssc, SD_FMT_IGNORE);
			status = sd_send_scsi_READ_CAPACITY_16(ssc, &capacity,
			    &lbasize, path_flag);
			if (status != 0) {
				return (status);
			}
		}
		break;	/* Success! */
	case EIO:
		switch (ucmd_buf.uscsi_status) {
		case STATUS_RESERVATION_CONFLICT:
			status = EACCES;
			break;
		case STATUS_CHECK:
			/*
			 * Check condition; look for ASC/ASCQ of 0x04/0x01
			 * (LOGICAL UNIT IS IN PROCESS OF BECOMING READY)
			 */
			if ((ucmd_buf.uscsi_rqstatus == STATUS_GOOD) &&
			    (scsi_sense_asc((uint8_t *)&sense_buf) == 0x04) &&
			    (scsi_sense_ascq((uint8_t *)&sense_buf) == 0x01)) {
				kmem_free(capacity_buf, SD_CAPACITY_SIZE);
				return (EAGAIN);
			}
			break;
		default:
			break;
		}
		/* FALLTHRU */
	default:
		kmem_free(capacity_buf, SD_CAPACITY_SIZE);
		return (status);
	}

	/*
	 * Some ATAPI CD-ROM drives report inaccurate LBA size values
	 * (2352 and 0 are common) so for these devices always force the value
	 * to 2048 as required by the ATAPI specs.
	 */
	if ((un->un_f_cfg_is_atapi == TRUE) && (ISCD(un))) {
		lbasize = 2048;
	}

	/*
	 * Get the maximum LBA value from the READ CAPACITY data.
	 * Here we assume that the Partial Medium Indicator (PMI) bit
	 * was cleared when issuing the command. This means that the LBA
	 * returned from the device is the LBA of the last logical block
	 * on the logical unit.  The actual logical block count will be
	 * this value plus one.
	 *
	 * Currently the capacity is saved in terms of un->un_sys_blocksize,
	 * so scale the capacity value to reflect this.
	 */
	capacity = (capacity + 1) * (lbasize / un->un_sys_blocksize);

	/*
	 * Copy the values from the READ CAPACITY command into the space
	 * provided by the caller.
	 */
	*capp = capacity;
	*lbap = lbasize;

	SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_READ_CAPACITY: "
	    "capacity:0x%llx lbasize:0x%x\n", capacity, lbasize);

	/*
	 * Both the lbasize and capacity from the device must be nonzero,
	 * otherwise we assume that the values are not valid and return
	 * failure to the caller. (4203735)
	 */
	if ((capacity == 0) || (lbasize == 0)) {
		sd_ssc_set_info(ssc, SSC_FLAGS_INVALID_DATA,
		    "sd_send_scsi_READ_CAPACITY received invalid value "
		    "capacity %llu lbasize %d", capacity, lbasize);
		return (EIO);
	}
	sd_ssc_assessment(ssc, SD_FMT_STANDARD);
	return (0);
}

/*
 * Function: sd_send_scsi_READ_CAPACITY_16
 *
 * Description: This routine uses the scsi READ CAPACITY 16 command to
 *		determine the device capacity in number of blocks and the
 *		device native block size.  If this function returns a failure,
 *		then the values in *capp and *lbap are undefined.
 *		This routine should always be called by
 *		sd_send_scsi_READ_CAPACITY which will apply any device
 *		specific adjustments to capacity and lbasize.
 *
 *   Arguments: ssc   - ssc contains ptr to soft state struct for the target
 *		capp - ptr to unsigned 64-bit variable to receive the
 *			capacity value from the command.
 *		lbap - ptr to unsigned 32-bit variable to receive the
 *			block size value from the command
 *		path_flag - SD_PATH_DIRECT to use the USCSI "direct" chain and
 *			the normal command waitq, or SD_PATH_DIRECT_PRIORITY
 *			to use the USCSI "direct" chain and bypass the normal
 *			command waitq. SD_PATH_DIRECT_PRIORITY is used when
 *			this command is issued as part of an error recovery
 *			action.
19342 * 19343 * Return Code: 0 - Success 19344 * EIO - IO error 19345 * EACCES - Reservation conflict detected 19346 * EAGAIN - Device is becoming ready 19347 * errno return code from sd_ssc_send() 19348 * 19349 * Context: Can sleep. Blocks until command completes. 19350 */ 19351 19352 #define SD_CAPACITY_16_SIZE sizeof (struct scsi_capacity_16) 19353 19354 static int 19355 sd_send_scsi_READ_CAPACITY_16(sd_ssc_t *ssc, uint64_t *capp, 19356 uint32_t *lbap, int path_flag) 19357 { 19358 struct scsi_extended_sense sense_buf; 19359 struct uscsi_cmd ucmd_buf; 19360 union scsi_cdb cdb; 19361 uint64_t *capacity16_buf; 19362 uint64_t capacity; 19363 uint32_t lbasize; 19364 int status; 19365 struct sd_lun *un; 19366 19367 ASSERT(ssc != NULL); 19368 19369 un = ssc->ssc_un; 19370 ASSERT(un != NULL); 19371 ASSERT(!mutex_owned(SD_MUTEX(un))); 19372 ASSERT(capp != NULL); 19373 ASSERT(lbap != NULL); 19374 19375 SD_TRACE(SD_LOG_IO, un, 19376 "sd_send_scsi_READ_CAPACITY: entry: un:0x%p\n", un); 19377 19378 /* 19379 * First send a READ_CAPACITY_16 command to the target. 19380 * 19381 * Set up the CDB for the READ_CAPACITY_16 command. The Partial 19382 * Medium Indicator bit is cleared. The address field must be 19383 * zero if the PMI bit is zero. 19384 */ 19385 bzero(&cdb, sizeof (cdb)); 19386 bzero(&ucmd_buf, sizeof (ucmd_buf)); 19387 19388 capacity16_buf = kmem_zalloc(SD_CAPACITY_16_SIZE, KM_SLEEP); 19389 19390 ucmd_buf.uscsi_cdb = (char *)&cdb; 19391 ucmd_buf.uscsi_cdblen = CDB_GROUP4; 19392 ucmd_buf.uscsi_bufaddr = (caddr_t)capacity16_buf; 19393 ucmd_buf.uscsi_buflen = SD_CAPACITY_16_SIZE; 19394 ucmd_buf.uscsi_rqbuf = (caddr_t)&sense_buf; 19395 ucmd_buf.uscsi_rqlen = sizeof (sense_buf); 19396 ucmd_buf.uscsi_flags = USCSI_RQENABLE | USCSI_READ | USCSI_SILENT; 19397 ucmd_buf.uscsi_timeout = 60; 19398 19399 /* 19400 * Read Capacity (16) is a Service Action In command. 
One 19401 * command byte (0x9E) is overloaded for multiple operations, 19402 * with the second CDB byte specifying the desired operation 19403 */ 19404 cdb.scc_cmd = SCMD_SVC_ACTION_IN_G4; 19405 cdb.cdb_opaque[1] = SSVC_ACTION_READ_CAPACITY_G4; 19406 19407 /* 19408 * Fill in allocation length field 19409 */ 19410 FORMG4COUNT(&cdb, ucmd_buf.uscsi_buflen); 19411 19412 status = sd_ssc_send(ssc, &ucmd_buf, FKIOCTL, 19413 UIO_SYSSPACE, path_flag); 19414 19415 switch (status) { 19416 case 0: 19417 /* Return failure if we did not get valid capacity data. */ 19418 if (ucmd_buf.uscsi_resid > 20) { 19419 sd_ssc_set_info(ssc, SSC_FLAGS_INVALID_DATA, 19420 "sd_send_scsi_READ_CAPACITY_16 received " 19421 "invalid capacity data"); 19422 kmem_free(capacity16_buf, SD_CAPACITY_16_SIZE); 19423 return (EIO); 19424 } 19425 19426 /* 19427 * Read capacity and block size from the READ CAPACITY 10 data. 19428 * This data may be adjusted later due to device specific 19429 * issues. 19430 * 19431 * According to the SCSI spec, the READ CAPACITY 10 19432 * command returns the following: 19433 * 19434 * bytes 0-7: Maximum logical block address available. 19435 * (MSB in byte:0 & LSB in byte:7) 19436 * 19437 * bytes 8-11: Block length in bytes 19438 * (MSB in byte:8 & LSB in byte:11) 19439 * 19440 */ 19441 capacity = BE_64(capacity16_buf[0]); 19442 lbasize = BE_32(*(uint32_t *)&capacity16_buf[1]); 19443 19444 /* 19445 * Done with capacity16_buf 19446 */ 19447 kmem_free(capacity16_buf, SD_CAPACITY_16_SIZE); 19448 19449 /* 19450 * if the reported capacity is set to all 0xf's, then 19451 * this disk is too large. This could only happen with 19452 * a device that supports LBAs larger than 64 bits which 19453 * are not defined by any current T10 standards. 19454 */ 19455 if (capacity == 0xffffffffffffffff) { 19456 sd_ssc_set_info(ssc, SSC_FLAGS_INVALID_DATA, 19457 "disk is too large"); 19458 return (EIO); 19459 } 19460 break; /* Success! 
*/ 19461 case EIO: 19462 switch (ucmd_buf.uscsi_status) { 19463 case STATUS_RESERVATION_CONFLICT: 19464 status = EACCES; 19465 break; 19466 case STATUS_CHECK: 19467 /* 19468 * Check condition; look for ASC/ASCQ of 0x04/0x01 19469 * (LOGICAL UNIT IS IN PROCESS OF BECOMING READY) 19470 */ 19471 if ((ucmd_buf.uscsi_rqstatus == STATUS_GOOD) && 19472 (scsi_sense_asc((uint8_t *)&sense_buf) == 0x04) && 19473 (scsi_sense_ascq((uint8_t *)&sense_buf) == 0x01)) { 19474 kmem_free(capacity16_buf, SD_CAPACITY_16_SIZE); 19475 return (EAGAIN); 19476 } 19477 break; 19478 default: 19479 break; 19480 } 19481 /* FALLTHRU */ 19482 default: 19483 kmem_free(capacity16_buf, SD_CAPACITY_16_SIZE); 19484 return (status); 19485 } 19486 19487 *capp = capacity; 19488 *lbap = lbasize; 19489 19490 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_READ_CAPACITY_16: " 19491 "capacity:0x%llx lbasize:0x%x\n", capacity, lbasize); 19492 19493 return (0); 19494 } 19495 19496 19497 /* 19498 * Function: sd_send_scsi_START_STOP_UNIT 19499 * 19500 * Description: Issue a scsi START STOP UNIT command to the target. 19501 * 19502 * Arguments: ssc - ssc contatins pointer to driver soft state (unit) 19503 * structure for this target. 19504 * flag - SD_TARGET_START 19505 * SD_TARGET_STOP 19506 * SD_TARGET_EJECT 19507 * path_flag - SD_PATH_DIRECT to use the USCSI "direct" chain and 19508 * the normal command waitq, or SD_PATH_DIRECT_PRIORITY 19509 * to use the USCSI "direct" chain and bypass the normal 19510 * command waitq. SD_PATH_DIRECT_PRIORITY is used when this 19511 * command is issued as part of an error recovery action. 19512 * 19513 * Return Code: 0 - Success 19514 * EIO - IO error 19515 * EACCES - Reservation conflict detected 19516 * ENXIO - Not Ready, medium not present 19517 * errno return code from sd_ssc_send() 19518 * 19519 * Context: Can sleep. 
*/

static int
sd_send_scsi_START_STOP_UNIT(sd_ssc_t *ssc, int flag, int path_flag)
{
	struct scsi_extended_sense	sense_buf;
	union scsi_cdb		cdb;
	struct uscsi_cmd	ucmd_buf;
	int			status;
	struct sd_lun		*un;

	ASSERT(ssc != NULL);
	un = ssc->ssc_un;
	ASSERT(un != NULL);
	ASSERT(!mutex_owned(SD_MUTEX(un)));

	SD_TRACE(SD_LOG_IO, un,
	    "sd_send_scsi_START_STOP_UNIT: entry: un:0x%p\n", un);

	/*
	 * Silently succeed for start/stop requests when the drive is
	 * flagged as not supporting START STOP UNIT (eject requests are
	 * still sent through).
	 */
	if (un->un_f_check_start_stop &&
	    ((flag == SD_TARGET_START) || (flag == SD_TARGET_STOP)) &&
	    (un->un_f_start_stop_supported != TRUE)) {
		return (0);
	}

	/*
	 * If we are performing an eject operation and
	 * we receive any command other than SD_TARGET_EJECT
	 * we should immediately return.
	 */
	if (flag != SD_TARGET_EJECT) {
		mutex_enter(SD_MUTEX(un));
		if (un->un_f_ejecting == TRUE) {
			mutex_exit(SD_MUTEX(un));
			return (EAGAIN);
		}
		mutex_exit(SD_MUTEX(un));
	}

	bzero(&cdb, sizeof (cdb));
	bzero(&ucmd_buf, sizeof (ucmd_buf));
	bzero(&sense_buf, sizeof (struct scsi_extended_sense));

	cdb.scc_cmd = SCMD_START_STOP;
	cdb.cdb_opaque[4] = (uchar_t)flag;	/* SD_TARGET_* goes in byte 4 */

	ucmd_buf.uscsi_cdb	= (char *)&cdb;
	ucmd_buf.uscsi_cdblen	= CDB_GROUP0;
	ucmd_buf.uscsi_bufaddr	= NULL;
	ucmd_buf.uscsi_buflen	= 0;
	ucmd_buf.uscsi_rqbuf	= (caddr_t)&sense_buf;
	ucmd_buf.uscsi_rqlen	= sizeof (struct scsi_extended_sense);
	ucmd_buf.uscsi_flags	= USCSI_RQENABLE | USCSI_SILENT;
	ucmd_buf.uscsi_timeout	= 200;

	status = sd_ssc_send(ssc, &ucmd_buf, FKIOCTL,
	    UIO_SYSSPACE, path_flag);

	switch (status) {
	case 0:
		sd_ssc_assessment(ssc, SD_FMT_STANDARD);
		break;	/* Success! */
	case EIO:
		switch (ucmd_buf.uscsi_status) {
		case STATUS_RESERVATION_CONFLICT:
			status = EACCES;
			break;
		case STATUS_CHECK:
			/* Map selected sense keys to specific errnos. */
			if (ucmd_buf.uscsi_rqstatus == STATUS_GOOD) {
				switch (scsi_sense_key(
				    (uint8_t *)&sense_buf)) {
				case KEY_ILLEGAL_REQUEST:
					status = ENOTSUP;
					break;
				case KEY_NOT_READY:
					/* ASC 0x3A: medium not present */
					if (scsi_sense_asc(
					    (uint8_t *)&sense_buf)
					    == 0x3A) {
						status = ENXIO;
					}
					break;
				default:
					break;
				}
			}
			break;
		default:
			break;
		}
		break;
	default:
		break;
	}

	SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_START_STOP_UNIT: exit\n");

	return (status);
}


/*
 * Function:	sd_start_stop_unit_callback
 *
 * Description: timeout(9F) callback to begin recovery process for a
 *		device that has spun down.
 *
 * Arguments:	arg - pointer to associated softstate struct.
 *
 * Context: Executes in a timeout(9F) thread context
 */

static void
sd_start_stop_unit_callback(void *arg)
{
	struct sd_lun	*un = arg;
	ASSERT(un != NULL);
	ASSERT(!mutex_owned(SD_MUTEX(un)));

	SD_TRACE(SD_LOG_IO, un, "sd_start_stop_unit_callback: entry\n");

	/*
	 * Hand the actual recovery work off to a taskq thread; a timeout
	 * callback must not block.  NOTE(review): the KM_NOSLEEP dispatch
	 * result is deliberately discarded — if dispatch fails the recovery
	 * attempt is dropped; presumably a later retry re-arms it — confirm.
	 */
	(void) taskq_dispatch(sd_tq, sd_start_stop_unit_task, un, KM_NOSLEEP);
}


/*
 * Function:	sd_start_stop_unit_task
 *
 * Description: Recovery procedure when a drive is spun down.
 *
 * Arguments:	arg - pointer to associated softstate struct.
*
 * Context: Executes in a taskq() thread context
 */

static void
sd_start_stop_unit_task(void *arg)
{
	struct sd_lun	*un = arg;
	sd_ssc_t	*ssc;
	int		rval;

	ASSERT(un != NULL);
	ASSERT(!mutex_owned(SD_MUTEX(un)));

	SD_TRACE(SD_LOG_IO, un, "sd_start_stop_unit_task: entry\n");

	/*
	 * Some unformatted drives report not ready error, no need to
	 * restart if format has been initiated.
	 */
	mutex_enter(SD_MUTEX(un));
	if (un->un_f_format_in_progress == TRUE) {
		mutex_exit(SD_MUTEX(un));
		return;
	}
	mutex_exit(SD_MUTEX(un));

	/*
	 * When a START STOP command is issued from here, it is part of a
	 * failure recovery operation and must be issued before any other
	 * commands, including any pending retries.  Thus it must be sent
	 * using SD_PATH_DIRECT_PRIORITY.  It doesn't matter if the spin up
	 * succeeds or not, we will start I/O after the attempt; that is
	 * why a failure is downgraded to SD_FMT_IGNORE below.
	 */
	ssc = sd_ssc_init(un);
	rval = sd_send_scsi_START_STOP_UNIT(ssc, SD_TARGET_START,
	    SD_PATH_DIRECT_PRIORITY);
	if (rval != 0)
		sd_ssc_assessment(ssc, SD_FMT_IGNORE);
	sd_ssc_fini(ssc);
	/*
	 * The above call blocks until the START_STOP_UNIT command completes.
	 * Now that it has completed, we must re-try the original IO that
	 * received the NOT READY condition in the first place.  There are
	 * three possible conditions here:
	 *
	 *  (1) The original IO is on un_retry_bp.
	 *  (2) The original IO is on the regular wait queue, and un_retry_bp
	 *	is NULL.
	 *  (3) The original IO is on the regular wait queue, and un_retry_bp
	 *	points to some other, unrelated bp.
	 *
	 * For each case, we must call sd_start_cmds() with un_retry_bp
	 * as the argument.  If un_retry_bp is NULL, this will initiate
	 * processing of the regular wait queue.  If un_retry_bp is not NULL,
	 * then this will process the bp on un_retry_bp.  That may or may not
	 * be the original IO, but that does not matter: the important thing
	 * is to keep the IO processing going at this point.
	 *
	 * Note: This is a very specific error recovery sequence associated
	 * with a drive that is not spun up.  We attempt a START_STOP_UNIT and
	 * serialize the I/O with completion of the spin-up.
	 */
	mutex_enter(SD_MUTEX(un));
	SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
	    "sd_start_stop_unit_task: un:0x%p starting bp:0x%p\n",
	    un, un->un_retry_bp);
	un->un_startstop_timeid = NULL;	/* Timeout is no longer pending */
	sd_start_cmds(un, un->un_retry_bp);
	mutex_exit(SD_MUTEX(un));

	SD_TRACE(SD_LOG_IO, un, "sd_start_stop_unit_task: exit\n");
}


/*
 * Function:	sd_send_scsi_INQUIRY
 *
 * Description: Issue the scsi INQUIRY command.
 *
 * Arguments:	ssc - ssc contains pointer to driver soft state (unit)
 *		      structure for this target.
 *		bufaddr - buffer that receives the INQUIRY data (zeroed first)
 *		buflen - size of bufaddr in bytes; used as the CDB
 *			allocation length
 *		evpd - value for CDB byte 1 (EVPD bit)
 *		page_code - value for CDB byte 2 (VPD page code)
 *		residp - optional; if non-NULL and the command succeeds,
 *			receives uscsi_resid (bytes NOT transferred)
 *
 * Return Code: 0 - Success
 *		errno return code from sd_ssc_send()
 *
 * Context: Can sleep.  Does not return until command is completed.
19742 */ 19743 19744 static int 19745 sd_send_scsi_INQUIRY(sd_ssc_t *ssc, uchar_t *bufaddr, size_t buflen, 19746 uchar_t evpd, uchar_t page_code, size_t *residp) 19747 { 19748 union scsi_cdb cdb; 19749 struct uscsi_cmd ucmd_buf; 19750 int status; 19751 struct sd_lun *un; 19752 19753 ASSERT(ssc != NULL); 19754 un = ssc->ssc_un; 19755 ASSERT(un != NULL); 19756 ASSERT(!mutex_owned(SD_MUTEX(un))); 19757 ASSERT(bufaddr != NULL); 19758 19759 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_INQUIRY: entry: un:0x%p\n", un); 19760 19761 bzero(&cdb, sizeof (cdb)); 19762 bzero(&ucmd_buf, sizeof (ucmd_buf)); 19763 bzero(bufaddr, buflen); 19764 19765 cdb.scc_cmd = SCMD_INQUIRY; 19766 cdb.cdb_opaque[1] = evpd; 19767 cdb.cdb_opaque[2] = page_code; 19768 FORMG0COUNT(&cdb, buflen); 19769 19770 ucmd_buf.uscsi_cdb = (char *)&cdb; 19771 ucmd_buf.uscsi_cdblen = CDB_GROUP0; 19772 ucmd_buf.uscsi_bufaddr = (caddr_t)bufaddr; 19773 ucmd_buf.uscsi_buflen = buflen; 19774 ucmd_buf.uscsi_rqbuf = NULL; 19775 ucmd_buf.uscsi_rqlen = 0; 19776 ucmd_buf.uscsi_flags = USCSI_READ | USCSI_SILENT; 19777 ucmd_buf.uscsi_timeout = 200; /* Excessive legacy value */ 19778 19779 status = sd_ssc_send(ssc, &ucmd_buf, FKIOCTL, 19780 UIO_SYSSPACE, SD_PATH_DIRECT); 19781 19782 /* 19783 * Only handle status == 0, the upper-level caller 19784 * will put different assessment based on the context. 19785 */ 19786 if (status == 0) 19787 sd_ssc_assessment(ssc, SD_FMT_STANDARD); 19788 19789 if ((status == 0) && (residp != NULL)) { 19790 *residp = ucmd_buf.uscsi_resid; 19791 } 19792 19793 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_INQUIRY: exit\n"); 19794 19795 return (status); 19796 } 19797 19798 19799 /* 19800 * Function: sd_send_scsi_TEST_UNIT_READY 19801 * 19802 * Description: Issue the scsi TEST UNIT READY command. 19803 * This routine can be told to set the flag USCSI_DIAGNOSE to 19804 * prevent retrying failed commands. 
*		Use this when the intent
 *		is either to check for device readiness, to clear a Unit
 *		Attention, or to clear any outstanding sense data.
 *		However under specific conditions the expected behavior
 *		is for retries to bring a device ready, so use the flag
 *		with caution.
 *
 * Arguments:	ssc - ssc contains pointer to driver soft state (unit)
 *		      structure for this target.
 *		flag:   SD_CHECK_FOR_MEDIA: return ENXIO if no media present
 *			SD_DONT_RETRY_TUR: include uscsi flag USCSI_DIAGNOSE.
 *			0: dont check for media present, do retries on cmd.
 *
 * Return Code: 0 - Success
 *		EIO - IO error
 *		EACCES - Reservation conflict detected
 *		ENXIO - Not Ready, medium not present
 *		errno return code from sd_ssc_send()
 *
 * Context: Can sleep.  Does not return until command is completed.
 */

static int
sd_send_scsi_TEST_UNIT_READY(sd_ssc_t *ssc, int flag)
{
	struct scsi_extended_sense	sense_buf;
	union scsi_cdb		cdb;
	struct uscsi_cmd	ucmd_buf;
	int			status;
	struct sd_lun		*un;

	ASSERT(ssc != NULL);
	un = ssc->ssc_un;
	ASSERT(un != NULL);
	ASSERT(!mutex_owned(SD_MUTEX(un)));

	SD_TRACE(SD_LOG_IO, un,
	    "sd_send_scsi_TEST_UNIT_READY: entry: un:0x%p\n", un);

	/*
	 * Some Seagate elite1 TQ devices get hung with disconnect/reconnect
	 * timeouts when they receive a TUR and the queue is not empty.  Check
	 * the configuration flag set during attach (indicating the drive has
	 * this firmware bug) and un_ncmds_in_transport before issuing the
	 * TUR.  If there are pending commands return success, this is a bit
	 * arbitrary but is ok for non-removables (i.e. the eliteI disks) and
	 * non-clustering configurations.
	 */
	if (un->un_f_cfg_tur_check == TRUE) {
		mutex_enter(SD_MUTEX(un));
		if (un->un_ncmds_in_transport != 0) {
			mutex_exit(SD_MUTEX(un));
			return (0);
		}
		mutex_exit(SD_MUTEX(un));
	}

	bzero(&cdb, sizeof (cdb));
	bzero(&ucmd_buf, sizeof (ucmd_buf));
	bzero(&sense_buf, sizeof (struct scsi_extended_sense));

	cdb.scc_cmd = SCMD_TEST_UNIT_READY;

	ucmd_buf.uscsi_cdb	= (char *)&cdb;
	ucmd_buf.uscsi_cdblen	= CDB_GROUP0;
	ucmd_buf.uscsi_bufaddr	= NULL;
	ucmd_buf.uscsi_buflen	= 0;
	ucmd_buf.uscsi_rqbuf	= (caddr_t)&sense_buf;
	ucmd_buf.uscsi_rqlen	= sizeof (struct scsi_extended_sense);
	ucmd_buf.uscsi_flags	= USCSI_RQENABLE | USCSI_SILENT;

	/* Use flag USCSI_DIAGNOSE to prevent retries if it fails. */
	if ((flag & SD_DONT_RETRY_TUR) != 0) {
		ucmd_buf.uscsi_flags |= USCSI_DIAGNOSE;
	}
	ucmd_buf.uscsi_timeout	= 60;

	/* SD_BYPASS_PM sends the TUR down the direct (PM-bypassing) chain. */
	status = sd_ssc_send(ssc, &ucmd_buf, FKIOCTL,
	    UIO_SYSSPACE, ((flag & SD_BYPASS_PM) ? SD_PATH_DIRECT :
	    SD_PATH_STANDARD));

	switch (status) {
	case 0:
		sd_ssc_assessment(ssc, SD_FMT_STANDARD);
		break;	/* Success! */
	case EIO:
		switch (ucmd_buf.uscsi_status) {
		case STATUS_RESERVATION_CONFLICT:
			status = EACCES;
			break;
		case STATUS_CHECK:
			if ((flag & SD_CHECK_FOR_MEDIA) == 0) {
				break;
			}
			/* NOT READY with ASC 0x3A means medium not present */
			if ((ucmd_buf.uscsi_rqstatus == STATUS_GOOD) &&
			    (scsi_sense_key((uint8_t *)&sense_buf) ==
			    KEY_NOT_READY) &&
			    (scsi_sense_asc((uint8_t *)&sense_buf) == 0x3A)) {
				status = ENXIO;
			}
			break;
		default:
			break;
		}
		break;
	default:
		break;
	}

	SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_TEST_UNIT_READY: exit\n");

	return (status);
}

/*
 * Function:	sd_send_scsi_PERSISTENT_RESERVE_IN
 *
 * Description: Issue the scsi PERSISTENT RESERVE IN command.
 *
 * Arguments:	ssc - ssc contains pointer to driver soft state (unit)
 *		      structure for this target.
 *
 * Return Code: 0 - Success
 *		EACCES
 *		ENOTSUP
 *		errno return code from sd_ssc_send()
 *
 * Context: Can sleep.  Does not return until command is completed.
*/

static int
sd_send_scsi_PERSISTENT_RESERVE_IN(sd_ssc_t *ssc, uchar_t usr_cmd,
	uint16_t data_len, uchar_t *data_bufp)
{
	struct scsi_extended_sense	sense_buf;
	union scsi_cdb		cdb;
	struct uscsi_cmd	ucmd_buf;
	int			status;
	int			no_caller_buf = FALSE;
	struct sd_lun		*un;

	ASSERT(ssc != NULL);
	un = ssc->ssc_un;
	ASSERT(un != NULL);
	ASSERT(!mutex_owned(SD_MUTEX(un)));
	ASSERT((usr_cmd == SD_READ_KEYS) || (usr_cmd == SD_READ_RESV));

	SD_TRACE(SD_LOG_IO, un,
	    "sd_send_scsi_PERSISTENT_RESERVE_IN: entry: un:0x%p\n", un);

	bzero(&cdb, sizeof (cdb));
	bzero(&ucmd_buf, sizeof (ucmd_buf));
	bzero(&sense_buf, sizeof (struct scsi_extended_sense));
	if (data_bufp == NULL) {
		/*
		 * Allocate a default buf if the caller did not give one.
		 * It is freed at the bottom of this routine (no_caller_buf).
		 */
		ASSERT(data_len == 0);
		data_len = MHIOC_RESV_KEY_SIZE;
		data_bufp = kmem_zalloc(MHIOC_RESV_KEY_SIZE, KM_SLEEP);
		no_caller_buf = TRUE;
	}

	/* usr_cmd (READ KEYS / READ RESERVATION) is the service action */
	cdb.scc_cmd = SCMD_PERSISTENT_RESERVE_IN;
	cdb.cdb_opaque[1] = usr_cmd;
	FORMG1COUNT(&cdb, data_len);

	ucmd_buf.uscsi_cdb	= (char *)&cdb;
	ucmd_buf.uscsi_cdblen	= CDB_GROUP1;
	ucmd_buf.uscsi_bufaddr	= (caddr_t)data_bufp;
	ucmd_buf.uscsi_buflen	= data_len;
	ucmd_buf.uscsi_rqbuf	= (caddr_t)&sense_buf;
	ucmd_buf.uscsi_rqlen	= sizeof (struct scsi_extended_sense);
	ucmd_buf.uscsi_flags	= USCSI_RQENABLE | USCSI_READ | USCSI_SILENT;
	ucmd_buf.uscsi_timeout	= 60;

	status = sd_ssc_send(ssc, &ucmd_buf, FKIOCTL,
	    UIO_SYSSPACE, SD_PATH_STANDARD);

	switch (status) {
	case 0:
		sd_ssc_assessment(ssc, SD_FMT_STANDARD);

		break;	/* Success! */
	case EIO:
		switch (ucmd_buf.uscsi_status) {
		case STATUS_RESERVATION_CONFLICT:
			status = EACCES;
			break;
		case STATUS_CHECK:
			/* ILLEGAL REQUEST: target does not support PR IN */
			if ((ucmd_buf.uscsi_rqstatus == STATUS_GOOD) &&
			    (scsi_sense_key((uint8_t *)&sense_buf) ==
			    KEY_ILLEGAL_REQUEST)) {
				status = ENOTSUP;
			}
			break;
		default:
			break;
		}
		break;
	default:
		break;
	}

	SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_PERSISTENT_RESERVE_IN: exit\n");

	if (no_caller_buf == TRUE) {
		kmem_free(data_bufp, data_len);
	}

	return (status);
}


/*
 * Function:	sd_send_scsi_PERSISTENT_RESERVE_OUT
 *
 * Description: This routine is the driver entry point for handling CD-ROM
 *		multi-host persistent reservation requests (MHIOCGRP_INKEYS,
 *		MHIOCGRP_INRESV) by sending the SCSI-3 PROUT commands to the
 *		device.
 *
 * Arguments:	ssc - ssc contains un - pointer to soft state struct
 *		      for the target.
 *		usr_cmd SCSI-3 reservation facility command (one of
 *			SD_SCSI3_REGISTER, SD_SCSI3_RESERVE, SD_SCSI3_RELEASE,
 *			SD_SCSI3_PREEMPTANDABORT)
 *		usr_bufp - user provided pointer register, reserve descriptor or
 *			preempt and abort structure (mhioc_register_t,
 *			mhioc_resv_desc_t, mhioc_preemptandabort_t)
 *
 * Return Code: 0 - Success
 *		EACCES
 *		ENOTSUP
 *		errno return code from sd_ssc_send()
 *
 * Context: Can sleep.  Does not return until command is completed.
*/

static int
sd_send_scsi_PERSISTENT_RESERVE_OUT(sd_ssc_t *ssc, uchar_t usr_cmd,
	uchar_t *usr_bufp)
{
	struct scsi_extended_sense	sense_buf;
	union scsi_cdb		cdb;
	struct uscsi_cmd	ucmd_buf;
	int			status;
	uchar_t			data_len = sizeof (sd_prout_t);
	sd_prout_t		*prp;
	struct sd_lun		*un;

	ASSERT(ssc != NULL);
	un = ssc->ssc_un;
	ASSERT(un != NULL);
	ASSERT(!mutex_owned(SD_MUTEX(un)));
	ASSERT(data_len == 24);	/* required by scsi spec */

	SD_TRACE(SD_LOG_IO, un,
	    "sd_send_scsi_PERSISTENT_RESERVE_OUT: entry: un:0x%p\n", un);

	if (usr_bufp == NULL) {
		return (EINVAL);
	}

	bzero(&cdb, sizeof (cdb));
	bzero(&ucmd_buf, sizeof (ucmd_buf));
	bzero(&sense_buf, sizeof (struct scsi_extended_sense));
	/* PROUT parameter list; zeroed so unset keys default to all-zero */
	prp = kmem_zalloc(data_len, KM_SLEEP);

	cdb.scc_cmd = SCMD_PERSISTENT_RESERVE_OUT;
	cdb.cdb_opaque[1] = usr_cmd;	/* service action */
	FORMG1COUNT(&cdb, data_len);

	ucmd_buf.uscsi_cdb	= (char *)&cdb;
	ucmd_buf.uscsi_cdblen	= CDB_GROUP1;
	ucmd_buf.uscsi_bufaddr	= (caddr_t)prp;
	ucmd_buf.uscsi_buflen	= data_len;
	ucmd_buf.uscsi_rqbuf	= (caddr_t)&sense_buf;
	ucmd_buf.uscsi_rqlen	= sizeof (struct scsi_extended_sense);
	ucmd_buf.uscsi_flags	= USCSI_RQENABLE | USCSI_WRITE | USCSI_SILENT;
	ucmd_buf.uscsi_timeout	= 60;

	/*
	 * Translate the ioctl-level request structure into the PROUT
	 * parameter list and CDB fields for the selected service action.
	 */
	switch (usr_cmd) {
	case SD_SCSI3_REGISTER: {
		mhioc_register_t *ptr = (mhioc_register_t *)usr_bufp;

		bcopy(ptr->oldkey.key, prp->res_key, MHIOC_RESV_KEY_SIZE);
		bcopy(ptr->newkey.key, prp->service_key,
		    MHIOC_RESV_KEY_SIZE);
		prp->aptpl = ptr->aptpl;
		break;
	}
	case SD_SCSI3_RESERVE:
	case SD_SCSI3_RELEASE: {
		mhioc_resv_desc_t *ptr = (mhioc_resv_desc_t *)usr_bufp;

		bcopy(ptr->key.key, prp->res_key, MHIOC_RESV_KEY_SIZE);
		prp->scope_address = BE_32(ptr->scope_specific_addr);
		cdb.cdb_opaque[2] = ptr->type;	/* scope/type byte */
		break;
	}
	case SD_SCSI3_PREEMPTANDABORT: {
		mhioc_preemptandabort_t *ptr =
		    (mhioc_preemptandabort_t *)usr_bufp;

		bcopy(ptr->resvdesc.key.key, prp->res_key, MHIOC_RESV_KEY_SIZE);
		bcopy(ptr->victim_key.key, prp->service_key,
		    MHIOC_RESV_KEY_SIZE);
		prp->scope_address = BE_32(ptr->resvdesc.scope_specific_addr);
		cdb.cdb_opaque[2] = ptr->resvdesc.type;
		/* Preempt-and-abort jumps ahead of queued commands */
		ucmd_buf.uscsi_flags |= USCSI_HEAD;
		break;
	}
	case SD_SCSI3_REGISTERANDIGNOREKEY:
	{
		mhioc_registerandignorekey_t *ptr;
		ptr = (mhioc_registerandignorekey_t *)usr_bufp;
		/* res_key stays zero: the existing key is ignored */
		bcopy(ptr->newkey.key,
		    prp->service_key, MHIOC_RESV_KEY_SIZE);
		prp->aptpl = ptr->aptpl;
		break;
	}
	default:
		ASSERT(FALSE);
		break;
	}

	status = sd_ssc_send(ssc, &ucmd_buf, FKIOCTL,
	    UIO_SYSSPACE, SD_PATH_STANDARD);

	switch (status) {
	case 0:
		sd_ssc_assessment(ssc, SD_FMT_STANDARD);
		break;	/* Success! */
	case EIO:
		switch (ucmd_buf.uscsi_status) {
		case STATUS_RESERVATION_CONFLICT:
			status = EACCES;
			break;
		case STATUS_CHECK:
			/* ILLEGAL REQUEST: target does not support PROUT */
			if ((ucmd_buf.uscsi_rqstatus == STATUS_GOOD) &&
			    (scsi_sense_key((uint8_t *)&sense_buf) ==
			    KEY_ILLEGAL_REQUEST)) {
				status = ENOTSUP;
			}
			break;
		default:
			break;
		}
		break;
	default:
		break;
	}

	kmem_free(prp, data_len);
	SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_PERSISTENT_RESERVE_OUT: exit\n");
	return (status);
}


/*
 * Function:	sd_send_scsi_SYNCHRONIZE_CACHE
 *
 * Description: Issues a scsi SYNCHRONIZE CACHE command to the target
 *
 * Arguments:	un - pointer to the target's soft state struct
 *		dkc - pointer to the callback structure
 *
 * Return Code: 0 - success
 *		errno-type error code
 *
 * Context: kernel thread context only.
 *
 *  _______________________________________________________________
 * | dkc_flag &   | dkc_callback | DKIOCFLUSHWRITECACHE            |
 * |FLUSH_VOLATILE|              | operation                       |
 * |______________|______________|_________________________________|
 * | 0            | NULL         | Synchronous flush on both       |
 * |              |              | volatile and non-volatile cache |
 * |______________|______________|_________________________________|
 * | 1            | NULL         | Synchronous flush on volatile   |
 * |              |              | cache; disk drivers may suppress|
 * |              |              | flush if disk table indicates   |
 * |              |              | non-volatile cache              |
 * |______________|______________|_________________________________|
 * | 0            | !NULL        | Asynchronous flush on both      |
 * |              |              | volatile and non-volatile cache;|
 * |______________|______________|_________________________________|
 * | 1            | !NULL        | Asynchronous flush on volatile  |
 * |              |              | cache; disk drivers may suppress|
 * |              |              | flush if disk table indicates   |
 * |              |              | non-volatile
cache		 |
 * |______________|______________|_________________________________|
 *
 */

static int
sd_send_scsi_SYNCHRONIZE_CACHE(struct sd_lun *un, struct dk_callback *dkc)
{
	struct sd_uscsi_info	*uip;
	struct uscsi_cmd	*uscmd;
	union scsi_cdb		*cdb;
	struct buf		*bp;
	int			rval = 0;
	int			is_async;

	SD_TRACE(SD_LOG_IO, un,
	    "sd_send_scsi_SYNCHRONIZE_CACHE: entry: un:0x%p\n", un);

	ASSERT(un != NULL);
	ASSERT(!mutex_owned(SD_MUTEX(un)));

	/*
	 * The flush is asynchronous only when the caller supplied a
	 * completion callback (see the table in the header comment above).
	 */
	if (dkc == NULL || dkc->dkc_callback == NULL) {
		is_async = FALSE;
	} else {
		is_async = TRUE;
	}

	mutex_enter(SD_MUTEX(un));
	/* check whether cache flush should be suppressed */
	if (un->un_f_suppress_cache_flush == TRUE) {
		mutex_exit(SD_MUTEX(un));
		/*
		 * suppress the cache flush if the device is told to do
		 * so by sd.conf or disk table
		 */
		SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_SYNCHRONIZE_CACHE: \
		    skip the cache flush since suppress_cache_flush is %d!\n",
		    un->un_f_suppress_cache_flush);

		if (is_async == TRUE) {
			/* invoke callback for asynchronous flush */
			(*dkc->dkc_callback)(dkc->dkc_cookie, 0);
		}
		/* rval is still 0: a suppressed flush reports success */
		return (rval);
	}
	mutex_exit(SD_MUTEX(un));

	/*
	 * check dkc_flag & FLUSH_VOLATILE so SYNC_NV bit can be
	 * set properly
	 */
	cdb = kmem_zalloc(CDB_GROUP1, KM_SLEEP);
	cdb->scc_cmd = SCMD_SYNCHRONIZE_CACHE;

	mutex_enter(SD_MUTEX(un));
	if (dkc != NULL && un->un_f_sync_nv_supported &&
	    (dkc->dkc_flag & FLUSH_VOLATILE)) {
		/*
		 * if the device supports SYNC_NV bit, turn on
		 * the SYNC_NV bit to only flush volatile cache
		 */
		cdb->cdb_un.tag |= SD_SYNC_NV_BIT;
	}
	mutex_exit(SD_MUTEX(un));

	/*
	 * First get some memory for the uscsi_cmd struct and cdb
	 * and initialize for SYNCHRONIZE_CACHE cmd.
	 */
	uscmd = kmem_zalloc(sizeof (struct uscsi_cmd), KM_SLEEP);
	uscmd->uscsi_cdblen = CDB_GROUP1;
	uscmd->uscsi_cdb = (caddr_t)cdb;
	uscmd->uscsi_bufaddr = NULL;
	uscmd->uscsi_buflen = 0;
	uscmd->uscsi_rqbuf = kmem_zalloc(SENSE_LENGTH, KM_SLEEP);
	uscmd->uscsi_rqlen = SENSE_LENGTH;
	uscmd->uscsi_rqresid = SENSE_LENGTH;
	uscmd->uscsi_flags = USCSI_RQENABLE | USCSI_SILENT;
	uscmd->uscsi_timeout = sd_io_time;

	/*
	 * Allocate an sd_uscsi_info struct and fill it with the info
	 * needed by sd_initpkt_for_uscsi().  Then put the pointer into
	 * b_private in the buf for sd_initpkt_for_uscsi().  Note that
	 * since we allocate the buf here in this function, we do not
	 * need to preserve the prior contents of b_private.
	 * The sd_uscsi_info struct is also used by sd_uscsi_strategy()
	 */
	uip = kmem_zalloc(sizeof (struct sd_uscsi_info), KM_SLEEP);
	uip->ui_flags = SD_PATH_DIRECT;
	uip->ui_cmdp = uscmd;

	bp = getrbuf(KM_SLEEP);
	bp->b_private = uip;

	/*
	 * Setup buffer to carry uscsi request.
	 */
	bp->b_flags = B_BUSY;
	bp->b_bcount = 0;
	bp->b_blkno = 0;

	if (is_async == TRUE) {
		/*
		 * For the async case the biodone callback owns cleanup of
		 * bp, uip, uscmd, and the cdb/rqbuf allocations; a copy of
		 * the caller's dk_callback is carried in uip for it.
		 */
		bp->b_iodone = sd_send_scsi_SYNCHRONIZE_CACHE_biodone;
		uip->ui_dkc = *dkc;
	}

	bp->b_edev = SD_GET_DEV(un);
	bp->b_dev = cmpdev(bp->b_edev);	/* maybe unnecessary? */

	/*
	 * Unset un_f_sync_cache_required flag
	 */
	mutex_enter(SD_MUTEX(un));
	un->un_f_sync_cache_required = FALSE;
	mutex_exit(SD_MUTEX(un));

	(void) sd_uscsi_strategy(bp);

	/*
	 * If synchronous request, wait for completion
	 * If async just return and let b_iodone callback
	 * cleanup.
	 * NOTE: On return, u_ncmds_in_driver will be decremented,
	 * but it was also incremented in sd_uscsi_strategy(), so
	 * we should be ok.
	 */
	if (is_async == FALSE) {
		(void) biowait(bp);
		/* The biodone routine also frees bp/uip/uscmd for us. */
		rval = sd_send_scsi_SYNCHRONIZE_CACHE_biodone(bp);
	}

	return (rval);
}


/*
 *    Function: sd_send_scsi_SYNCHRONIZE_CACHE_biodone
 *
 * Description: Completion handler for the SYNCHRONIZE CACHE command issued
 *		by sd_send_scsi_SYNCHRONIZE_CACHE(). Decodes the command
 *		status, updates the un_f_sync_* feature flags accordingly,
 *		invokes the caller's dk_callback (if one was supplied), and
 *		frees the buf, sd_uscsi_info, uscsi_cmd, cdb and sense
 *		buffer allocated by the issuing routine.
 *
 *   Arguments: bp - the buf carrying the completed uscsi request; its
 *		b_private holds the sd_uscsi_info.
 *
 * Return Code: 0       - success (reservation conflicts are ignored)
 *		ENOTSUP - device rejected the command as illegal
 *		errno-type error code otherwise
 */

static int
sd_send_scsi_SYNCHRONIZE_CACHE_biodone(struct buf *bp)
{
	struct sd_uscsi_info *uip;
	struct uscsi_cmd *uscmd;
	uint8_t *sense_buf;
	struct sd_lun *un;
	int status;
	union scsi_cdb *cdb;

	uip = (struct sd_uscsi_info *)(bp->b_private);
	ASSERT(uip != NULL);

	uscmd = uip->ui_cmdp;
	ASSERT(uscmd != NULL);

	sense_buf = (uint8_t *)uscmd->uscsi_rqbuf;
	ASSERT(sense_buf != NULL);

	un = ddi_get_soft_state(sd_state, SD_GET_INSTANCE_FROM_BUF(bp));
	ASSERT(un != NULL);

	cdb = (union scsi_cdb *)uscmd->uscsi_cdb;

	status = geterror(bp);
	switch (status) {
	case 0:
		break;	/* Success! */
	case EIO:
		switch (uscmd->uscsi_status) {
		case STATUS_RESERVATION_CONFLICT:
			/* Ignore reservation conflict */
			status = 0;
			goto done;

		case STATUS_CHECK:
			if ((uscmd->uscsi_rqstatus == STATUS_GOOD) &&
			    (scsi_sense_key(sense_buf) ==
			    KEY_ILLEGAL_REQUEST)) {
				/* Ignore Illegal Request error */
				if (cdb->cdb_un.tag&SD_SYNC_NV_BIT) {
					/*
					 * The SYNC_NV form was rejected:
					 * remember that this device does not
					 * support it so we fall back to full
					 * flushes from now on.
					 */
					mutex_enter(SD_MUTEX(un));
					un->un_f_sync_nv_supported = FALSE;
					mutex_exit(SD_MUTEX(un));
					status = 0;
					SD_TRACE(SD_LOG_IO, un,
					    "un_f_sync_nv_supported \
					    is set to false.\n");
					goto done;
				}

				mutex_enter(SD_MUTEX(un));
				un->un_f_sync_cache_supported = FALSE;
				mutex_exit(SD_MUTEX(un));
				SD_TRACE(SD_LOG_IO, un,
				    "sd_send_scsi_SYNCHRONIZE_CACHE_biodone: \
				    un_f_sync_cache_supported set to false \
				    with asc = %x, ascq = %x\n",
				    scsi_sense_asc(sense_buf),
				    scsi_sense_ascq(sense_buf));
				status = ENOTSUP;
				goto done;
			}
			break;
		default:
			break;
		}
		/* FALLTHRU */
	default:
		/*
		 * Turn on the un_f_sync_cache_required flag
		 * since the SYNC CACHE command failed
		 */
		mutex_enter(SD_MUTEX(un));
		un->un_f_sync_cache_required = TRUE;
		mutex_exit(SD_MUTEX(un));

		/*
		 * Don't log an error message if this device
		 * has removable media.
		 */
		if (!un->un_f_has_removable_media) {
			scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
			    "SYNCHRONIZE CACHE command failed (%d)\n", status);
		}
		break;
	}

done:
	/* ui_dkc.dkc_callback is non-NULL only for the async case */
	if (uip->ui_dkc.dkc_callback != NULL) {
		(*uip->ui_dkc.dkc_callback)(uip->ui_dkc.dkc_cookie, status);
	}

	ASSERT((bp->b_flags & B_REMAPPED) == 0);
	freerbuf(bp);
	kmem_free(uip, sizeof (struct sd_uscsi_info));
	kmem_free(uscmd->uscsi_rqbuf, SENSE_LENGTH);
	kmem_free(uscmd->uscsi_cdb, (size_t)uscmd->uscsi_cdblen);
	kmem_free(uscmd, sizeof (struct uscsi_cmd));

	return (status);
}


/*
 *    Function: sd_send_scsi_GET_CONFIGURATION
 *
 * Description: Issues the get configuration command to the device.
 *		Called from sd_check_for_writable_cd & sd_get_media_info
 *		caller needs to ensure that buflen = SD_PROFILE_HEADER_LEN
 *   Arguments: ssc      - handle containing the target's soft state
 *		ucmdbuf  - caller-supplied uscsi_cmd, initialized here
 *		rqbuf    - caller-supplied request-sense buffer
 *		rqbuflen - size of rqbuf
 *		bufaddr  - buffer to receive the configuration data
 *		buflen   - size of bufaddr (must be SD_PROFILE_HEADER_LEN)
 *		path_flag - SD_PATH_* chain selection for sd_ssc_send()
 *
 * Return Code: 0   - Success
 *		errno return code from sd_ssc_send()
 *
 *     Context: Can sleep. Does not return until command is completed.
20452 * 20453 */ 20454 20455 static int 20456 sd_send_scsi_GET_CONFIGURATION(sd_ssc_t *ssc, struct uscsi_cmd *ucmdbuf, 20457 uchar_t *rqbuf, uint_t rqbuflen, uchar_t *bufaddr, uint_t buflen, 20458 int path_flag) 20459 { 20460 char cdb[CDB_GROUP1]; 20461 int status; 20462 struct sd_lun *un; 20463 20464 ASSERT(ssc != NULL); 20465 un = ssc->ssc_un; 20466 ASSERT(un != NULL); 20467 ASSERT(!mutex_owned(SD_MUTEX(un))); 20468 ASSERT(bufaddr != NULL); 20469 ASSERT(ucmdbuf != NULL); 20470 ASSERT(rqbuf != NULL); 20471 20472 SD_TRACE(SD_LOG_IO, un, 20473 "sd_send_scsi_GET_CONFIGURATION: entry: un:0x%p\n", un); 20474 20475 bzero(cdb, sizeof (cdb)); 20476 bzero(ucmdbuf, sizeof (struct uscsi_cmd)); 20477 bzero(rqbuf, rqbuflen); 20478 bzero(bufaddr, buflen); 20479 20480 /* 20481 * Set up cdb field for the get configuration command. 20482 */ 20483 cdb[0] = SCMD_GET_CONFIGURATION; 20484 cdb[1] = 0x02; /* Requested Type */ 20485 cdb[8] = SD_PROFILE_HEADER_LEN; 20486 ucmdbuf->uscsi_cdb = cdb; 20487 ucmdbuf->uscsi_cdblen = CDB_GROUP1; 20488 ucmdbuf->uscsi_bufaddr = (caddr_t)bufaddr; 20489 ucmdbuf->uscsi_buflen = buflen; 20490 ucmdbuf->uscsi_timeout = sd_io_time; 20491 ucmdbuf->uscsi_rqbuf = (caddr_t)rqbuf; 20492 ucmdbuf->uscsi_rqlen = rqbuflen; 20493 ucmdbuf->uscsi_flags = USCSI_RQENABLE|USCSI_SILENT|USCSI_READ; 20494 20495 status = sd_ssc_send(ssc, ucmdbuf, FKIOCTL, 20496 UIO_SYSSPACE, path_flag); 20497 20498 switch (status) { 20499 case 0: 20500 sd_ssc_assessment(ssc, SD_FMT_STANDARD); 20501 break; /* Success! 
*/ 20502 case EIO: 20503 switch (ucmdbuf->uscsi_status) { 20504 case STATUS_RESERVATION_CONFLICT: 20505 status = EACCES; 20506 break; 20507 default: 20508 break; 20509 } 20510 break; 20511 default: 20512 break; 20513 } 20514 20515 if (status == 0) { 20516 SD_DUMP_MEMORY(un, SD_LOG_IO, 20517 "sd_send_scsi_GET_CONFIGURATION: data", 20518 (uchar_t *)bufaddr, SD_PROFILE_HEADER_LEN, SD_LOG_HEX); 20519 } 20520 20521 SD_TRACE(SD_LOG_IO, un, 20522 "sd_send_scsi_GET_CONFIGURATION: exit\n"); 20523 20524 return (status); 20525 } 20526 20527 /* 20528 * Function: sd_send_scsi_feature_GET_CONFIGURATION 20529 * 20530 * Description: Issues the get configuration command to the device to 20531 * retrieve a specific feature. Called from 20532 * sd_check_for_writable_cd & sd_set_mmc_caps. 20533 * Arguments: ssc 20534 * ucmdbuf 20535 * rqbuf 20536 * rqbuflen 20537 * bufaddr 20538 * buflen 20539 * feature 20540 * 20541 * Return Code: 0 - Success 20542 * errno return code from sd_ssc_send() 20543 * 20544 * Context: Can sleep. Does not return until command is completed. 20545 * 20546 */ 20547 static int 20548 sd_send_scsi_feature_GET_CONFIGURATION(sd_ssc_t *ssc, 20549 struct uscsi_cmd *ucmdbuf, uchar_t *rqbuf, uint_t rqbuflen, 20550 uchar_t *bufaddr, uint_t buflen, char feature, int path_flag) 20551 { 20552 char cdb[CDB_GROUP1]; 20553 int status; 20554 struct sd_lun *un; 20555 20556 ASSERT(ssc != NULL); 20557 un = ssc->ssc_un; 20558 ASSERT(un != NULL); 20559 ASSERT(!mutex_owned(SD_MUTEX(un))); 20560 ASSERT(bufaddr != NULL); 20561 ASSERT(ucmdbuf != NULL); 20562 ASSERT(rqbuf != NULL); 20563 20564 SD_TRACE(SD_LOG_IO, un, 20565 "sd_send_scsi_feature_GET_CONFIGURATION: entry: un:0x%p\n", un); 20566 20567 bzero(cdb, sizeof (cdb)); 20568 bzero(ucmdbuf, sizeof (struct uscsi_cmd)); 20569 bzero(rqbuf, rqbuflen); 20570 bzero(bufaddr, buflen); 20571 20572 /* 20573 * Set up cdb field for the get configuration command. 
20574 */ 20575 cdb[0] = SCMD_GET_CONFIGURATION; 20576 cdb[1] = 0x02; /* Requested Type */ 20577 cdb[3] = feature; 20578 cdb[8] = buflen; 20579 ucmdbuf->uscsi_cdb = cdb; 20580 ucmdbuf->uscsi_cdblen = CDB_GROUP1; 20581 ucmdbuf->uscsi_bufaddr = (caddr_t)bufaddr; 20582 ucmdbuf->uscsi_buflen = buflen; 20583 ucmdbuf->uscsi_timeout = sd_io_time; 20584 ucmdbuf->uscsi_rqbuf = (caddr_t)rqbuf; 20585 ucmdbuf->uscsi_rqlen = rqbuflen; 20586 ucmdbuf->uscsi_flags = USCSI_RQENABLE|USCSI_SILENT|USCSI_READ; 20587 20588 status = sd_ssc_send(ssc, ucmdbuf, FKIOCTL, 20589 UIO_SYSSPACE, path_flag); 20590 20591 switch (status) { 20592 case 0: 20593 20594 break; /* Success! */ 20595 case EIO: 20596 switch (ucmdbuf->uscsi_status) { 20597 case STATUS_RESERVATION_CONFLICT: 20598 status = EACCES; 20599 break; 20600 default: 20601 break; 20602 } 20603 break; 20604 default: 20605 break; 20606 } 20607 20608 if (status == 0) { 20609 SD_DUMP_MEMORY(un, SD_LOG_IO, 20610 "sd_send_scsi_feature_GET_CONFIGURATION: data", 20611 (uchar_t *)bufaddr, SD_PROFILE_HEADER_LEN, SD_LOG_HEX); 20612 } 20613 20614 SD_TRACE(SD_LOG_IO, un, 20615 "sd_send_scsi_feature_GET_CONFIGURATION: exit\n"); 20616 20617 return (status); 20618 } 20619 20620 20621 /* 20622 * Function: sd_send_scsi_MODE_SENSE 20623 * 20624 * Description: Utility function for issuing a scsi MODE SENSE command. 20625 * Note: This routine uses a consistent implementation for Group0, 20626 * Group1, and Group2 commands across all platforms. ATAPI devices 20627 * use Group 1 Read/Write commands and Group 2 Mode Sense/Select 20628 * 20629 * Arguments: ssc - ssc contains pointer to driver soft state (unit) 20630 * structure for this target. 20631 * cdbsize - size CDB to be used (CDB_GROUP0 (6 byte), or 20632 * CDB_GROUP[1|2] (10 byte). 20633 * bufaddr - buffer for page data retrieved from the target. 20634 * buflen - size of page to be retrieved. 20635 * page_code - page code of data to be retrieved from the target. 
 *		path_flag - SD_PATH_DIRECT to use the USCSI "direct" chain and
 *			the normal command waitq, or SD_PATH_DIRECT_PRIORITY
 *			to use the USCSI "direct" chain and bypass the normal
 *			command waitq.
 *
 * Return Code: 0   - Success
 *		errno return code from sd_ssc_send()
 *
 *     Context: Can sleep. Does not return until command is completed.
 */

static int
sd_send_scsi_MODE_SENSE(sd_ssc_t *ssc, int cdbsize, uchar_t *bufaddr,
	size_t buflen, uchar_t page_code, int path_flag)
{
	struct	scsi_extended_sense	sense_buf;
	union scsi_cdb		cdb;
	struct uscsi_cmd	ucmd_buf;
	int			status;
	int			headlen;
	struct sd_lun		*un;

	ASSERT(ssc != NULL);
	un = ssc->ssc_un;
	ASSERT(un != NULL);
	ASSERT(!mutex_owned(SD_MUTEX(un)));
	ASSERT(bufaddr != NULL);
	ASSERT((cdbsize == CDB_GROUP0) || (cdbsize == CDB_GROUP1) ||
	    (cdbsize == CDB_GROUP2));

	SD_TRACE(SD_LOG_IO, un,
	    "sd_send_scsi_MODE_SENSE: entry: un:0x%p\n", un);

	bzero(&cdb, sizeof (cdb));
	bzero(&ucmd_buf, sizeof (ucmd_buf));
	bzero(&sense_buf, sizeof (struct scsi_extended_sense));
	bzero(bufaddr, buflen);

	/*
	 * Group 0 uses the 6-byte MODE SENSE opcode; the 10-byte G1 opcode
	 * is used otherwise. The expected mode-header length differs
	 * between the two forms and is checked below.
	 */
	if (cdbsize == CDB_GROUP0) {
		cdb.scc_cmd = SCMD_MODE_SENSE;
		cdb.cdb_opaque[2] = page_code;
		FORMG0COUNT(&cdb, buflen);
		headlen = MODE_HEADER_LENGTH;
	} else {
		cdb.scc_cmd = SCMD_MODE_SENSE_G1;
		cdb.cdb_opaque[2] = page_code;
		FORMG1COUNT(&cdb, buflen);
		headlen = MODE_HEADER_LENGTH_GRP2;
	}

	ASSERT(headlen <= buflen);
	SD_FILL_SCSI1_LUN_CDB(un, &cdb);

	ucmd_buf.uscsi_cdb = (char *)&cdb;
	ucmd_buf.uscsi_cdblen = (uchar_t)cdbsize;
	ucmd_buf.uscsi_bufaddr = (caddr_t)bufaddr;
	ucmd_buf.uscsi_buflen = buflen;
	ucmd_buf.uscsi_rqbuf = (caddr_t)&sense_buf;
	ucmd_buf.uscsi_rqlen = sizeof (struct scsi_extended_sense);
	ucmd_buf.uscsi_flags = USCSI_RQENABLE | USCSI_READ | USCSI_SILENT;
	ucmd_buf.uscsi_timeout = 60;

	status = sd_ssc_send(ssc, &ucmd_buf, FKIOCTL,
	    UIO_SYSSPACE, path_flag);

	switch (status) {
	case 0:
		/*
		 * sr_check_wp() uses 0x3f page code and check the header of
		 * mode page to determine if target device is write-protected.
		 * But some USB devices return 0 bytes for 0x3f page code. For
		 * this case, make sure that mode page header is returned at
		 * least.
		 */
		if (buflen - ucmd_buf.uscsi_resid < headlen) {
			status = EIO;
			sd_ssc_set_info(ssc, SSC_FLAGS_INVALID_DATA,
			    "mode page header is not returned");
		}
		break;	/* Success! */
	case EIO:
		switch (ucmd_buf.uscsi_status) {
		case STATUS_RESERVATION_CONFLICT:
			status = EACCES;
			break;
		default:
			break;
		}
		break;
	default:
		break;
	}

	if (status == 0) {
		SD_DUMP_MEMORY(un, SD_LOG_IO, "sd_send_scsi_MODE_SENSE: data",
		    (uchar_t *)bufaddr, buflen, SD_LOG_HEX);
	}
	SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_MODE_SENSE: exit\n");

	return (status);
}


/*
 *    Function: sd_send_scsi_MODE_SELECT
 *
 * Description: Utility function for issuing a scsi MODE SELECT command.
 *		Note: This routine uses a consistent implementation for Group0,
 *		Group1, and Group2 commands across all platforms. ATAPI devices
 *		use Group 1 Read/Write commands and Group 2 Mode Sense/Select
 *
 *   Arguments: ssc - ssc contains pointer to driver soft state (unit)
 *                    structure for this target.
 *		cdbsize - size CDB to be used (CDB_GROUP0 (6 byte), or
 *			  CDB_GROUP[1|2] (10 byte).
 *		bufaddr - buffer for page data retrieved from the target.
 *		buflen - size of page to be retrieved.
 *		save_page - boolean to determine if SP bit should be set.
 *		path_flag - SD_PATH_DIRECT to use the USCSI "direct" chain and
 *			the normal command waitq, or SD_PATH_DIRECT_PRIORITY
 *			to use the USCSI "direct" chain and bypass the normal
 *			command waitq.
 *
 * Return Code: 0   - Success
 *		errno return code from sd_ssc_send()
 *
 *     Context: Can sleep. Does not return until command is completed.
 */

static int
sd_send_scsi_MODE_SELECT(sd_ssc_t *ssc, int cdbsize, uchar_t *bufaddr,
	size_t buflen, uchar_t save_page, int path_flag)
{
	struct	scsi_extended_sense	sense_buf;
	union scsi_cdb		cdb;
	struct uscsi_cmd	ucmd_buf;
	int			status;
	struct sd_lun		*un;

	ASSERT(ssc != NULL);
	un = ssc->ssc_un;
	ASSERT(un != NULL);
	ASSERT(!mutex_owned(SD_MUTEX(un)));
	ASSERT(bufaddr != NULL);
	ASSERT((cdbsize == CDB_GROUP0) || (cdbsize == CDB_GROUP1) ||
	    (cdbsize == CDB_GROUP2));

	SD_TRACE(SD_LOG_IO, un,
	    "sd_send_scsi_MODE_SELECT: entry: un:0x%p\n", un);

	bzero(&cdb, sizeof (cdb));
	bzero(&ucmd_buf, sizeof (ucmd_buf));
	bzero(&sense_buf, sizeof (struct scsi_extended_sense));

	/* Set the PF bit for many third party drives */
	cdb.cdb_opaque[1] = 0x10;

	/* Set the savepage(SP) bit if given */
	if (save_page == SD_SAVE_PAGE) {
		cdb.cdb_opaque[1] |= 0x01;
	}

	if (cdbsize == CDB_GROUP0) {
		cdb.scc_cmd = SCMD_MODE_SELECT;
		FORMG0COUNT(&cdb, buflen);
	} else {
		cdb.scc_cmd = SCMD_MODE_SELECT_G1;
		FORMG1COUNT(&cdb, buflen);
	}

	SD_FILL_SCSI1_LUN_CDB(un, &cdb);

	ucmd_buf.uscsi_cdb = (char *)&cdb;
	ucmd_buf.uscsi_cdblen = (uchar_t)cdbsize;
	ucmd_buf.uscsi_bufaddr = (caddr_t)bufaddr;
	ucmd_buf.uscsi_buflen = buflen;
	ucmd_buf.uscsi_rqbuf = (caddr_t)&sense_buf;
	ucmd_buf.uscsi_rqlen = sizeof (struct scsi_extended_sense);
	ucmd_buf.uscsi_flags = USCSI_RQENABLE | USCSI_WRITE | USCSI_SILENT;
	ucmd_buf.uscsi_timeout = 60;

	status = sd_ssc_send(ssc, &ucmd_buf, FKIOCTL,
	    UIO_SYSSPACE, path_flag);

	switch (status) {
	case 0:
		sd_ssc_assessment(ssc, SD_FMT_STANDARD);
		break;	/* Success! */
	case EIO:
		switch (ucmd_buf.uscsi_status) {
		case STATUS_RESERVATION_CONFLICT:
			status = EACCES;
			break;
		default:
			break;
		}
		break;
	default:
		break;
	}

	if (status == 0) {
		SD_DUMP_MEMORY(un, SD_LOG_IO, "sd_send_scsi_MODE_SELECT: data",
		    (uchar_t *)bufaddr, buflen, SD_LOG_HEX);
	}
	SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_MODE_SELECT: exit\n");

	return (status);
}


/*
 *    Function: sd_send_scsi_RDWR
 *
 * Description: Issue a scsi READ or WRITE command with the given parameters.
 *
 *   Arguments: ssc - ssc contains pointer to driver soft state (unit)
 *                    structure for this target.
 *		cmd:	 SCMD_READ or SCMD_WRITE
 *		bufaddr: Address of caller's buffer to receive the RDWR data
 *		buflen:  Length of caller's buffer receive the RDWR data.
 *		start_block: Block number for the start of the RDWR operation.
 *			 (Assumes target-native block size.)
 *		residp:  Pointer to variable to receive the residual of the
 *			 RDWR operation (may be NULL if no residual requested).
 *			 NOTE(review): residp does not appear in the current
 *			 function signature - comment looks stale; confirm.
 *		path_flag - SD_PATH_DIRECT to use the USCSI "direct" chain and
 *			the normal command waitq, or SD_PATH_DIRECT_PRIORITY
 *			to use the USCSI "direct" chain and bypass the normal
 *			command waitq.
 *
 * Return Code: 0   - Success
 *		errno return code from sd_ssc_send()
 *
 *     Context: Can sleep. Does not return until command is completed.
 */

static int
sd_send_scsi_RDWR(sd_ssc_t *ssc, uchar_t cmd, void *bufaddr,
	size_t buflen, daddr_t start_block, int path_flag)
{
	struct	scsi_extended_sense	sense_buf;
	union scsi_cdb		cdb;
	struct uscsi_cmd	ucmd_buf;
	uint32_t		block_count;
	int			status;
	int			cdbsize;
	uchar_t			flag;
	struct sd_lun		*un;

	ASSERT(ssc != NULL);
	un = ssc->ssc_un;
	ASSERT(un != NULL);
	ASSERT(!mutex_owned(SD_MUTEX(un)));
	ASSERT(bufaddr != NULL);
	ASSERT((cmd == SCMD_READ) || (cmd == SCMD_WRITE));

	SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_RDWR: entry: un:0x%p\n", un);

	/* Cannot convert buflen to a block count without a valid blocksize */
	if (un->un_f_tgt_blocksize_is_valid != TRUE) {
		return (EINVAL);
	}

	mutex_enter(SD_MUTEX(un));
	block_count = SD_BYTES2TGTBLOCKS(un, buflen);
	mutex_exit(SD_MUTEX(un));

	flag = (cmd == SCMD_READ) ? USCSI_READ : USCSI_WRITE;

	SD_INFO(SD_LOG_IO, un, "sd_send_scsi_RDWR: "
	    "bufaddr:0x%p buflen:0x%x start_block:0x%p block_count:0x%x\n",
	    bufaddr, buflen, start_block, block_count);

	bzero(&cdb, sizeof (cdb));
	bzero(&ucmd_buf, sizeof (ucmd_buf));
	bzero(&sense_buf, sizeof (struct scsi_extended_sense));

	/*
	 * Compute CDB size to use: 16-byte CDBs for LBAs beyond 32 bits,
	 * 10-byte when the LBA exceeds the 21-bit Group 0 range or the
	 * device is ATAPI, and 6-byte otherwise.
	 */
	if (start_block > 0xffffffff)
		cdbsize = CDB_GROUP4;
	else if ((start_block & 0xFFE00000) ||
	    (un->un_f_cfg_is_atapi == TRUE))
		cdbsize = CDB_GROUP1;
	else
		cdbsize = CDB_GROUP0;

	switch (cdbsize) {
	case CDB_GROUP0:	/* 6-byte CDBs */
		cdb.scc_cmd = cmd;
		FORMG0ADDR(&cdb, start_block);
		FORMG0COUNT(&cdb, block_count);
		break;
	case CDB_GROUP1:	/* 10-byte CDBs */
		cdb.scc_cmd = cmd | SCMD_GROUP1;
		FORMG1ADDR(&cdb, start_block);
		FORMG1COUNT(&cdb, block_count);
		break;
	case CDB_GROUP4:	/* 16-byte CDBs */
		cdb.scc_cmd = cmd | SCMD_GROUP4;
		FORMG4LONGADDR(&cdb, (uint64_t)start_block);
		FORMG4COUNT(&cdb, block_count);
		break;
	case CDB_GROUP5:	/* 12-byte CDBs (currently unsupported) */
	default:
		/* All others reserved */
		return (EINVAL);
	}

	/* Set LUN bit(s) in CDB if this is a SCSI-1 device */
	SD_FILL_SCSI1_LUN_CDB(un, &cdb);

	ucmd_buf.uscsi_cdb = (char *)&cdb;
	ucmd_buf.uscsi_cdblen = (uchar_t)cdbsize;
	ucmd_buf.uscsi_bufaddr = bufaddr;
	ucmd_buf.uscsi_buflen = buflen;
	ucmd_buf.uscsi_rqbuf = (caddr_t)&sense_buf;
	ucmd_buf.uscsi_rqlen = sizeof (struct scsi_extended_sense);
	ucmd_buf.uscsi_flags = flag | USCSI_RQENABLE | USCSI_SILENT;
	ucmd_buf.uscsi_timeout = 60;
	status = sd_ssc_send(ssc, &ucmd_buf, FKIOCTL,
	    UIO_SYSSPACE, path_flag);

	switch (status) {
	case 0:
		sd_ssc_assessment(ssc, SD_FMT_STANDARD);
		break;	/* Success! */
	case EIO:
		switch (ucmd_buf.uscsi_status) {
		case STATUS_RESERVATION_CONFLICT:
			status = EACCES;
			break;
		default:
			break;
		}
		break;
	default:
		break;
	}

	if (status == 0) {
		SD_DUMP_MEMORY(un, SD_LOG_IO, "sd_send_scsi_RDWR: data",
		    (uchar_t *)bufaddr, buflen, SD_LOG_HEX);
	}

	SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_RDWR: exit\n");

	return (status);
}


/*
 *    Function: sd_send_scsi_LOG_SENSE
 *
 * Description: Issue a scsi LOG_SENSE command with the given parameters.
 *
 *   Arguments: ssc - ssc contains pointer to driver soft state (unit)
 *                    structure for this target.
 *		bufaddr - buffer to receive the log page data
 *		buflen - size of bufaddr / CDB allocation length
 *		page_code - log page to retrieve
 *		page_control - page control field (placed in CDB byte 2
 *			alongside the page code)
 *		param_ptr - parameter pointer (CDB bytes 5-6)
 *		path_flag - SD_PATH_* chain selection for sd_ssc_send()
 *
 * Return Code: 0 - Success
 *		errno return code from sd_ssc_send()
 *
 *     Context: Can sleep. Does not return until command is completed.
 */

static int
sd_send_scsi_LOG_SENSE(sd_ssc_t *ssc, uchar_t *bufaddr, uint16_t buflen,
	uchar_t page_code, uchar_t page_control, uint16_t param_ptr,
	int path_flag)

{
	struct scsi_extended_sense	sense_buf;
	union scsi_cdb		cdb;
	struct uscsi_cmd	ucmd_buf;
	int			status;
	struct sd_lun		*un;

	ASSERT(ssc != NULL);
	un = ssc->ssc_un;
	ASSERT(un != NULL);
	ASSERT(!mutex_owned(SD_MUTEX(un)));

	SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_LOG_SENSE: entry: un:0x%p\n", un);

	bzero(&cdb, sizeof (cdb));
	bzero(&ucmd_buf, sizeof (ucmd_buf));
	bzero(&sense_buf, sizeof (struct scsi_extended_sense));

	cdb.scc_cmd = SCMD_LOG_SENSE_G1;
	cdb.cdb_opaque[2] = (page_control << 6) | page_code;
	cdb.cdb_opaque[5] = (uchar_t)((param_ptr & 0xFF00) >> 8);
	cdb.cdb_opaque[6] = (uchar_t)(param_ptr & 0x00FF);
	FORMG1COUNT(&cdb, buflen);

	ucmd_buf.uscsi_cdb = (char *)&cdb;
	ucmd_buf.uscsi_cdblen = CDB_GROUP1;
	ucmd_buf.uscsi_bufaddr = (caddr_t)bufaddr;
	ucmd_buf.uscsi_buflen = buflen;
	ucmd_buf.uscsi_rqbuf = (caddr_t)&sense_buf;
	ucmd_buf.uscsi_rqlen = sizeof (struct scsi_extended_sense);
	ucmd_buf.uscsi_flags = USCSI_RQENABLE | USCSI_READ | USCSI_SILENT;
	ucmd_buf.uscsi_timeout = 60;

	status = sd_ssc_send(ssc, &ucmd_buf, FKIOCTL,
	    UIO_SYSSPACE, path_flag);

	switch (status) {
	case 0:
		break;
	case EIO:
		switch (ucmd_buf.uscsi_status) {
		case STATUS_RESERVATION_CONFLICT:
			status = EACCES;
			break;
		case STATUS_CHECK:
			if ((ucmd_buf.uscsi_rqstatus == STATUS_GOOD) &&
			    (scsi_sense_key((uint8_t *)&sense_buf) ==
			    KEY_ILLEGAL_REQUEST) &&
			    (scsi_sense_asc((uint8_t *)&sense_buf) == 0x24)) {
				/*
				 * ASC 0x24: INVALID FIELD IN CDB
				 */
				switch (page_code) {
				case START_STOP_CYCLE_PAGE:
					/*
					 * The start stop cycle counter is
					 * implemented as page 0x31 in earlier
					 * generation disks. In new generation
					 * disks the start stop cycle counter is
					 * implemented as page 0xE. To properly
					 * handle this case if an attempt for
					 * log page 0xE is made and fails we
					 * will try again using page 0x31.
					 *
					 * Network storage BU committed to
					 * maintain the page 0x31 for this
					 * purpose and will not have any other
					 * page implemented with page code 0x31
					 * until all disks transition to the
					 * standard page.
					 */
					mutex_enter(SD_MUTEX(un));
					un->un_start_stop_cycle_page =
					    START_STOP_CYCLE_VU_PAGE;
					cdb.cdb_opaque[2] =
					    (char)(page_control << 6) |
					    un->un_start_stop_cycle_page;
					mutex_exit(SD_MUTEX(un));
					/*
					 * Discard the first attempt's FMA
					 * assessment before retrying with
					 * the vendor-unique page.
					 */
					sd_ssc_assessment(ssc, SD_FMT_IGNORE);
					status = sd_ssc_send(
					    ssc, &ucmd_buf, FKIOCTL,
					    UIO_SYSSPACE, path_flag);

					break;
				case TEMPERATURE_PAGE:
					status = ENOTTY;
					break;
				default:
					break;
				}
			}
			break;
		default:
			break;
		}
		break;
	default:
		break;
	}

	if (status == 0) {
		sd_ssc_assessment(ssc, SD_FMT_STANDARD);
		SD_DUMP_MEMORY(un, SD_LOG_IO, "sd_send_scsi_LOG_SENSE: data",
		    (uchar_t *)bufaddr, buflen, SD_LOG_HEX);
	}

	SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_LOG_SENSE: exit\n");

	return (status);
}


/*
 *    Function: sdioctl
 *
 * Description: Driver's ioctl(9e) entry point function.
21120 * 21121 * Arguments: dev - device number 21122 * cmd - ioctl operation to be performed 21123 * arg - user argument, contains data to be set or reference 21124 * parameter for get 21125 * flag - bit flag, indicating open settings, 32/64 bit type 21126 * cred_p - user credential pointer 21127 * rval_p - calling process return value (OPT) 21128 * 21129 * Return Code: EINVAL 21130 * ENOTTY 21131 * ENXIO 21132 * EIO 21133 * EFAULT 21134 * ENOTSUP 21135 * EPERM 21136 * 21137 * Context: Called from the device switch at normal priority. 21138 */ 21139 21140 static int 21141 sdioctl(dev_t dev, int cmd, intptr_t arg, int flag, cred_t *cred_p, int *rval_p) 21142 { 21143 struct sd_lun *un = NULL; 21144 int err = 0; 21145 int i = 0; 21146 cred_t *cr; 21147 int tmprval = EINVAL; 21148 int is_valid; 21149 sd_ssc_t *ssc; 21150 21151 /* 21152 * All device accesses go thru sdstrategy where we check on suspend 21153 * status 21154 */ 21155 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 21156 return (ENXIO); 21157 } 21158 21159 ASSERT(!mutex_owned(SD_MUTEX(un))); 21160 21161 /* Initialize sd_ssc_t for internal uscsi commands */ 21162 ssc = sd_ssc_init(un); 21163 21164 is_valid = SD_IS_VALID_LABEL(un); 21165 21166 /* 21167 * Moved this wait from sd_uscsi_strategy to here for 21168 * reasons of deadlock prevention. Internal driver commands, 21169 * specifically those to change a devices power level, result 21170 * in a call to sd_uscsi_strategy. 21171 */ 21172 mutex_enter(SD_MUTEX(un)); 21173 while ((un->un_state == SD_STATE_SUSPENDED) || 21174 (un->un_state == SD_STATE_PM_CHANGING)) { 21175 cv_wait(&un->un_suspend_cv, SD_MUTEX(un)); 21176 } 21177 /* 21178 * Twiddling the counter here protects commands from now 21179 * through to the top of sd_uscsi_strategy. Without the 21180 * counter inc. a power down, for example, could get in 21181 * after the above check for state is made and before 21182 * execution gets to the top of sd_uscsi_strategy. 
21183 * That would cause problems. 21184 */ 21185 un->un_ncmds_in_driver++; 21186 21187 if (!is_valid && 21188 (flag & (FNDELAY | FNONBLOCK))) { 21189 switch (cmd) { 21190 case DKIOCGGEOM: /* SD_PATH_DIRECT */ 21191 case DKIOCGVTOC: 21192 case DKIOCGEXTVTOC: 21193 case DKIOCGAPART: 21194 case DKIOCPARTINFO: 21195 case DKIOCEXTPARTINFO: 21196 case DKIOCSGEOM: 21197 case DKIOCSAPART: 21198 case DKIOCGETEFI: 21199 case DKIOCPARTITION: 21200 case DKIOCSVTOC: 21201 case DKIOCSEXTVTOC: 21202 case DKIOCSETEFI: 21203 case DKIOCGMBOOT: 21204 case DKIOCSMBOOT: 21205 case DKIOCG_PHYGEOM: 21206 case DKIOCG_VIRTGEOM: 21207 /* let cmlb handle it */ 21208 goto skip_ready_valid; 21209 21210 case CDROMPAUSE: 21211 case CDROMRESUME: 21212 case CDROMPLAYMSF: 21213 case CDROMPLAYTRKIND: 21214 case CDROMREADTOCHDR: 21215 case CDROMREADTOCENTRY: 21216 case CDROMSTOP: 21217 case CDROMSTART: 21218 case CDROMVOLCTRL: 21219 case CDROMSUBCHNL: 21220 case CDROMREADMODE2: 21221 case CDROMREADMODE1: 21222 case CDROMREADOFFSET: 21223 case CDROMSBLKMODE: 21224 case CDROMGBLKMODE: 21225 case CDROMGDRVSPEED: 21226 case CDROMSDRVSPEED: 21227 case CDROMCDDA: 21228 case CDROMCDXA: 21229 case CDROMSUBCODE: 21230 if (!ISCD(un)) { 21231 un->un_ncmds_in_driver--; 21232 ASSERT(un->un_ncmds_in_driver >= 0); 21233 mutex_exit(SD_MUTEX(un)); 21234 err = ENOTTY; 21235 goto done_without_assess; 21236 } 21237 break; 21238 case FDEJECT: 21239 case DKIOCEJECT: 21240 case CDROMEJECT: 21241 if (!un->un_f_eject_media_supported) { 21242 un->un_ncmds_in_driver--; 21243 ASSERT(un->un_ncmds_in_driver >= 0); 21244 mutex_exit(SD_MUTEX(un)); 21245 err = ENOTTY; 21246 goto done_without_assess; 21247 } 21248 break; 21249 case DKIOCFLUSHWRITECACHE: 21250 mutex_exit(SD_MUTEX(un)); 21251 err = sd_send_scsi_TEST_UNIT_READY(ssc, 0); 21252 if (err != 0) { 21253 mutex_enter(SD_MUTEX(un)); 21254 un->un_ncmds_in_driver--; 21255 ASSERT(un->un_ncmds_in_driver >= 0); 21256 mutex_exit(SD_MUTEX(un)); 21257 err = EIO; 21258 goto 
done_quick_assess; 21259 } 21260 mutex_enter(SD_MUTEX(un)); 21261 /* FALLTHROUGH */ 21262 case DKIOCREMOVABLE: 21263 case DKIOCHOTPLUGGABLE: 21264 case DKIOCINFO: 21265 case DKIOCGMEDIAINFO: 21266 case MHIOCENFAILFAST: 21267 case MHIOCSTATUS: 21268 case MHIOCTKOWN: 21269 case MHIOCRELEASE: 21270 case MHIOCGRP_INKEYS: 21271 case MHIOCGRP_INRESV: 21272 case MHIOCGRP_REGISTER: 21273 case MHIOCGRP_RESERVE: 21274 case MHIOCGRP_PREEMPTANDABORT: 21275 case MHIOCGRP_REGISTERANDIGNOREKEY: 21276 case CDROMCLOSETRAY: 21277 case USCSICMD: 21278 goto skip_ready_valid; 21279 default: 21280 break; 21281 } 21282 21283 mutex_exit(SD_MUTEX(un)); 21284 err = sd_ready_and_valid(ssc, SDPART(dev)); 21285 mutex_enter(SD_MUTEX(un)); 21286 21287 if (err != SD_READY_VALID) { 21288 switch (cmd) { 21289 case DKIOCSTATE: 21290 case CDROMGDRVSPEED: 21291 case CDROMSDRVSPEED: 21292 case FDEJECT: /* for eject command */ 21293 case DKIOCEJECT: 21294 case CDROMEJECT: 21295 case DKIOCREMOVABLE: 21296 case DKIOCHOTPLUGGABLE: 21297 break; 21298 default: 21299 if (un->un_f_has_removable_media) { 21300 err = ENXIO; 21301 } else { 21302 /* Do not map SD_RESERVED_BY_OTHERS to EIO */ 21303 if (err == SD_RESERVED_BY_OTHERS) { 21304 err = EACCES; 21305 } else { 21306 err = EIO; 21307 } 21308 } 21309 un->un_ncmds_in_driver--; 21310 ASSERT(un->un_ncmds_in_driver >= 0); 21311 mutex_exit(SD_MUTEX(un)); 21312 21313 goto done_without_assess; 21314 } 21315 } 21316 } 21317 21318 skip_ready_valid: 21319 mutex_exit(SD_MUTEX(un)); 21320 21321 switch (cmd) { 21322 case DKIOCINFO: 21323 SD_TRACE(SD_LOG_IOCTL, un, "DKIOCINFO\n"); 21324 err = sd_dkio_ctrl_info(dev, (caddr_t)arg, flag); 21325 break; 21326 21327 case DKIOCGMEDIAINFO: 21328 SD_TRACE(SD_LOG_IOCTL, un, "DKIOCGMEDIAINFO\n"); 21329 err = sd_get_media_info(dev, (caddr_t)arg, flag); 21330 break; 21331 21332 case DKIOCGGEOM: 21333 case DKIOCGVTOC: 21334 case DKIOCGEXTVTOC: 21335 case DKIOCGAPART: 21336 case DKIOCPARTINFO: 21337 case DKIOCEXTPARTINFO: 21338 case 
DKIOCSGEOM: 21339 case DKIOCSAPART: 21340 case DKIOCGETEFI: 21341 case DKIOCPARTITION: 21342 case DKIOCSVTOC: 21343 case DKIOCSEXTVTOC: 21344 case DKIOCSETEFI: 21345 case DKIOCGMBOOT: 21346 case DKIOCSMBOOT: 21347 case DKIOCG_PHYGEOM: 21348 case DKIOCG_VIRTGEOM: 21349 SD_TRACE(SD_LOG_IOCTL, un, "DKIOC %d\n", cmd); 21350 21351 /* TUR should spin up */ 21352 21353 if (un->un_f_has_removable_media) 21354 err = sd_send_scsi_TEST_UNIT_READY(ssc, 21355 SD_CHECK_FOR_MEDIA); 21356 21357 else 21358 err = sd_send_scsi_TEST_UNIT_READY(ssc, 0); 21359 21360 if (err != 0) 21361 goto done_with_assess; 21362 21363 err = cmlb_ioctl(un->un_cmlbhandle, dev, 21364 cmd, arg, flag, cred_p, rval_p, (void *)SD_PATH_DIRECT); 21365 21366 if ((err == 0) && 21367 ((cmd == DKIOCSETEFI) || 21368 (un->un_f_pkstats_enabled) && 21369 (cmd == DKIOCSAPART || cmd == DKIOCSVTOC))) { 21370 21371 tmprval = cmlb_validate(un->un_cmlbhandle, CMLB_SILENT, 21372 (void *)SD_PATH_DIRECT); 21373 if ((tmprval == 0) && un->un_f_pkstats_enabled) { 21374 sd_set_pstats(un); 21375 SD_TRACE(SD_LOG_IO_PARTITION, un, 21376 "sd_ioctl: un:0x%p pstats created and " 21377 "set\n", un); 21378 } 21379 } 21380 21381 if ((cmd == DKIOCSVTOC) || 21382 ((cmd == DKIOCSETEFI) && (tmprval == 0))) { 21383 21384 mutex_enter(SD_MUTEX(un)); 21385 if (un->un_f_devid_supported && 21386 (un->un_f_opt_fab_devid == TRUE)) { 21387 if (un->un_devid == NULL) { 21388 sd_register_devid(ssc, SD_DEVINFO(un), 21389 SD_TARGET_IS_UNRESERVED); 21390 } else { 21391 /* 21392 * The device id for this disk 21393 * has been fabricated. The 21394 * device id must be preserved 21395 * by writing it back out to 21396 * disk. 
21397 */ 21398 if (sd_write_deviceid(ssc) != 0) { 21399 ddi_devid_free(un->un_devid); 21400 un->un_devid = NULL; 21401 } 21402 } 21403 } 21404 mutex_exit(SD_MUTEX(un)); 21405 } 21406 21407 break; 21408 21409 case DKIOCLOCK: 21410 SD_TRACE(SD_LOG_IOCTL, un, "DKIOCLOCK\n"); 21411 err = sd_send_scsi_DOORLOCK(ssc, SD_REMOVAL_PREVENT, 21412 SD_PATH_STANDARD); 21413 goto done_with_assess; 21414 21415 case DKIOCUNLOCK: 21416 SD_TRACE(SD_LOG_IOCTL, un, "DKIOCUNLOCK\n"); 21417 err = sd_send_scsi_DOORLOCK(ssc, SD_REMOVAL_ALLOW, 21418 SD_PATH_STANDARD); 21419 goto done_with_assess; 21420 21421 case DKIOCSTATE: { 21422 enum dkio_state state; 21423 SD_TRACE(SD_LOG_IOCTL, un, "DKIOCSTATE\n"); 21424 21425 if (ddi_copyin((void *)arg, &state, sizeof (int), flag) != 0) { 21426 err = EFAULT; 21427 } else { 21428 err = sd_check_media(dev, state); 21429 if (err == 0) { 21430 if (ddi_copyout(&un->un_mediastate, (void *)arg, 21431 sizeof (int), flag) != 0) 21432 err = EFAULT; 21433 } 21434 } 21435 break; 21436 } 21437 21438 case DKIOCREMOVABLE: 21439 SD_TRACE(SD_LOG_IOCTL, un, "DKIOCREMOVABLE\n"); 21440 i = un->un_f_has_removable_media ? 1 : 0; 21441 if (ddi_copyout(&i, (void *)arg, sizeof (int), flag) != 0) { 21442 err = EFAULT; 21443 } else { 21444 err = 0; 21445 } 21446 break; 21447 21448 case DKIOCHOTPLUGGABLE: 21449 SD_TRACE(SD_LOG_IOCTL, un, "DKIOCHOTPLUGGABLE\n"); 21450 i = un->un_f_is_hotpluggable ? 
1 : 0; 21451 if (ddi_copyout(&i, (void *)arg, sizeof (int), flag) != 0) { 21452 err = EFAULT; 21453 } else { 21454 err = 0; 21455 } 21456 break; 21457 21458 case DKIOCGTEMPERATURE: 21459 SD_TRACE(SD_LOG_IOCTL, un, "DKIOCGTEMPERATURE\n"); 21460 err = sd_dkio_get_temp(dev, (caddr_t)arg, flag); 21461 break; 21462 21463 case MHIOCENFAILFAST: 21464 SD_TRACE(SD_LOG_IOCTL, un, "MHIOCENFAILFAST\n"); 21465 if ((err = drv_priv(cred_p)) == 0) { 21466 err = sd_mhdioc_failfast(dev, (caddr_t)arg, flag); 21467 } 21468 break; 21469 21470 case MHIOCTKOWN: 21471 SD_TRACE(SD_LOG_IOCTL, un, "MHIOCTKOWN\n"); 21472 if ((err = drv_priv(cred_p)) == 0) { 21473 err = sd_mhdioc_takeown(dev, (caddr_t)arg, flag); 21474 } 21475 break; 21476 21477 case MHIOCRELEASE: 21478 SD_TRACE(SD_LOG_IOCTL, un, "MHIOCRELEASE\n"); 21479 if ((err = drv_priv(cred_p)) == 0) { 21480 err = sd_mhdioc_release(dev); 21481 } 21482 break; 21483 21484 case MHIOCSTATUS: 21485 SD_TRACE(SD_LOG_IOCTL, un, "MHIOCSTATUS\n"); 21486 if ((err = drv_priv(cred_p)) == 0) { 21487 switch (sd_send_scsi_TEST_UNIT_READY(ssc, 0)) { 21488 case 0: 21489 err = 0; 21490 break; 21491 case EACCES: 21492 *rval_p = 1; 21493 err = 0; 21494 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 21495 break; 21496 default: 21497 err = EIO; 21498 goto done_with_assess; 21499 } 21500 } 21501 break; 21502 21503 case MHIOCQRESERVE: 21504 SD_TRACE(SD_LOG_IOCTL, un, "MHIOCQRESERVE\n"); 21505 if ((err = drv_priv(cred_p)) == 0) { 21506 err = sd_reserve_release(dev, SD_RESERVE); 21507 } 21508 break; 21509 21510 case MHIOCREREGISTERDEVID: 21511 SD_TRACE(SD_LOG_IOCTL, un, "MHIOCREREGISTERDEVID\n"); 21512 if (drv_priv(cred_p) == EPERM) { 21513 err = EPERM; 21514 } else if (!un->un_f_devid_supported) { 21515 err = ENOTTY; 21516 } else { 21517 err = sd_mhdioc_register_devid(dev); 21518 } 21519 break; 21520 21521 case MHIOCGRP_INKEYS: 21522 SD_TRACE(SD_LOG_IOCTL, un, "MHIOCGRP_INKEYS\n"); 21523 if (((err = drv_priv(cred_p)) != EPERM) && arg != NULL) { 21524 if 
(un->un_reservation_type == SD_SCSI2_RESERVATION) { 21525 err = ENOTSUP; 21526 } else { 21527 err = sd_mhdioc_inkeys(dev, (caddr_t)arg, 21528 flag); 21529 } 21530 } 21531 break; 21532 21533 case MHIOCGRP_INRESV: 21534 SD_TRACE(SD_LOG_IOCTL, un, "MHIOCGRP_INRESV\n"); 21535 if (((err = drv_priv(cred_p)) != EPERM) && arg != NULL) { 21536 if (un->un_reservation_type == SD_SCSI2_RESERVATION) { 21537 err = ENOTSUP; 21538 } else { 21539 err = sd_mhdioc_inresv(dev, (caddr_t)arg, flag); 21540 } 21541 } 21542 break; 21543 21544 case MHIOCGRP_REGISTER: 21545 SD_TRACE(SD_LOG_IOCTL, un, "MHIOCGRP_REGISTER\n"); 21546 if ((err = drv_priv(cred_p)) != EPERM) { 21547 if (un->un_reservation_type == SD_SCSI2_RESERVATION) { 21548 err = ENOTSUP; 21549 } else if (arg != NULL) { 21550 mhioc_register_t reg; 21551 if (ddi_copyin((void *)arg, ®, 21552 sizeof (mhioc_register_t), flag) != 0) { 21553 err = EFAULT; 21554 } else { 21555 err = 21556 sd_send_scsi_PERSISTENT_RESERVE_OUT( 21557 ssc, SD_SCSI3_REGISTER, 21558 (uchar_t *)®); 21559 if (err != 0) 21560 goto done_with_assess; 21561 } 21562 } 21563 } 21564 break; 21565 21566 case MHIOCGRP_RESERVE: 21567 SD_TRACE(SD_LOG_IOCTL, un, "MHIOCGRP_RESERVE\n"); 21568 if ((err = drv_priv(cred_p)) != EPERM) { 21569 if (un->un_reservation_type == SD_SCSI2_RESERVATION) { 21570 err = ENOTSUP; 21571 } else if (arg != NULL) { 21572 mhioc_resv_desc_t resv_desc; 21573 if (ddi_copyin((void *)arg, &resv_desc, 21574 sizeof (mhioc_resv_desc_t), flag) != 0) { 21575 err = EFAULT; 21576 } else { 21577 err = 21578 sd_send_scsi_PERSISTENT_RESERVE_OUT( 21579 ssc, SD_SCSI3_RESERVE, 21580 (uchar_t *)&resv_desc); 21581 if (err != 0) 21582 goto done_with_assess; 21583 } 21584 } 21585 } 21586 break; 21587 21588 case MHIOCGRP_PREEMPTANDABORT: 21589 SD_TRACE(SD_LOG_IOCTL, un, "MHIOCGRP_PREEMPTANDABORT\n"); 21590 if ((err = drv_priv(cred_p)) != EPERM) { 21591 if (un->un_reservation_type == SD_SCSI2_RESERVATION) { 21592 err = ENOTSUP; 21593 } else if (arg != NULL) { 21594 
mhioc_preemptandabort_t preempt_abort; 21595 if (ddi_copyin((void *)arg, &preempt_abort, 21596 sizeof (mhioc_preemptandabort_t), 21597 flag) != 0) { 21598 err = EFAULT; 21599 } else { 21600 err = 21601 sd_send_scsi_PERSISTENT_RESERVE_OUT( 21602 ssc, SD_SCSI3_PREEMPTANDABORT, 21603 (uchar_t *)&preempt_abort); 21604 if (err != 0) 21605 goto done_with_assess; 21606 } 21607 } 21608 } 21609 break; 21610 21611 case MHIOCGRP_REGISTERANDIGNOREKEY: 21612 SD_TRACE(SD_LOG_IOCTL, un, "MHIOCGRP_REGISTERANDIGNOREKEY\n"); 21613 if ((err = drv_priv(cred_p)) != EPERM) { 21614 if (un->un_reservation_type == SD_SCSI2_RESERVATION) { 21615 err = ENOTSUP; 21616 } else if (arg != NULL) { 21617 mhioc_registerandignorekey_t r_and_i; 21618 if (ddi_copyin((void *)arg, (void *)&r_and_i, 21619 sizeof (mhioc_registerandignorekey_t), 21620 flag) != 0) { 21621 err = EFAULT; 21622 } else { 21623 err = 21624 sd_send_scsi_PERSISTENT_RESERVE_OUT( 21625 ssc, SD_SCSI3_REGISTERANDIGNOREKEY, 21626 (uchar_t *)&r_and_i); 21627 if (err != 0) 21628 goto done_with_assess; 21629 } 21630 } 21631 } 21632 break; 21633 21634 case USCSICMD: 21635 SD_TRACE(SD_LOG_IOCTL, un, "USCSICMD\n"); 21636 cr = ddi_get_cred(); 21637 if ((drv_priv(cred_p) != 0) && (drv_priv(cr) != 0)) { 21638 err = EPERM; 21639 } else { 21640 enum uio_seg uioseg; 21641 21642 uioseg = (flag & FKIOCTL) ? 
UIO_SYSSPACE : 21643 UIO_USERSPACE; 21644 if (un->un_f_format_in_progress == TRUE) { 21645 err = EAGAIN; 21646 break; 21647 } 21648 21649 err = sd_ssc_send(ssc, 21650 (struct uscsi_cmd *)arg, 21651 flag, uioseg, SD_PATH_STANDARD); 21652 if (err != 0) 21653 goto done_with_assess; 21654 else 21655 sd_ssc_assessment(ssc, SD_FMT_STANDARD); 21656 } 21657 break; 21658 21659 case CDROMPAUSE: 21660 case CDROMRESUME: 21661 SD_TRACE(SD_LOG_IOCTL, un, "PAUSE-RESUME\n"); 21662 if (!ISCD(un)) { 21663 err = ENOTTY; 21664 } else { 21665 err = sr_pause_resume(dev, cmd); 21666 } 21667 break; 21668 21669 case CDROMPLAYMSF: 21670 SD_TRACE(SD_LOG_IOCTL, un, "CDROMPLAYMSF\n"); 21671 if (!ISCD(un)) { 21672 err = ENOTTY; 21673 } else { 21674 err = sr_play_msf(dev, (caddr_t)arg, flag); 21675 } 21676 break; 21677 21678 case CDROMPLAYTRKIND: 21679 SD_TRACE(SD_LOG_IOCTL, un, "CDROMPLAYTRKIND\n"); 21680 #if defined(__i386) || defined(__amd64) 21681 /* 21682 * not supported on ATAPI CD drives, use CDROMPLAYMSF instead 21683 */ 21684 if (!ISCD(un) || (un->un_f_cfg_is_atapi == TRUE)) { 21685 #else 21686 if (!ISCD(un)) { 21687 #endif 21688 err = ENOTTY; 21689 } else { 21690 err = sr_play_trkind(dev, (caddr_t)arg, flag); 21691 } 21692 break; 21693 21694 case CDROMREADTOCHDR: 21695 SD_TRACE(SD_LOG_IOCTL, un, "CDROMREADTOCHDR\n"); 21696 if (!ISCD(un)) { 21697 err = ENOTTY; 21698 } else { 21699 err = sr_read_tochdr(dev, (caddr_t)arg, flag); 21700 } 21701 break; 21702 21703 case CDROMREADTOCENTRY: 21704 SD_TRACE(SD_LOG_IOCTL, un, "CDROMREADTOCENTRY\n"); 21705 if (!ISCD(un)) { 21706 err = ENOTTY; 21707 } else { 21708 err = sr_read_tocentry(dev, (caddr_t)arg, flag); 21709 } 21710 break; 21711 21712 case CDROMSTOP: 21713 SD_TRACE(SD_LOG_IOCTL, un, "CDROMSTOP\n"); 21714 if (!ISCD(un)) { 21715 err = ENOTTY; 21716 } else { 21717 err = sd_send_scsi_START_STOP_UNIT(ssc, SD_TARGET_STOP, 21718 SD_PATH_STANDARD); 21719 goto done_with_assess; 21720 } 21721 break; 21722 21723 case CDROMSTART: 21724 
SD_TRACE(SD_LOG_IOCTL, un, "CDROMSTART\n"); 21725 if (!ISCD(un)) { 21726 err = ENOTTY; 21727 } else { 21728 err = sd_send_scsi_START_STOP_UNIT(ssc, SD_TARGET_START, 21729 SD_PATH_STANDARD); 21730 goto done_with_assess; 21731 } 21732 break; 21733 21734 case CDROMCLOSETRAY: 21735 SD_TRACE(SD_LOG_IOCTL, un, "CDROMCLOSETRAY\n"); 21736 if (!ISCD(un)) { 21737 err = ENOTTY; 21738 } else { 21739 err = sd_send_scsi_START_STOP_UNIT(ssc, SD_TARGET_CLOSE, 21740 SD_PATH_STANDARD); 21741 goto done_with_assess; 21742 } 21743 break; 21744 21745 case FDEJECT: /* for eject command */ 21746 case DKIOCEJECT: 21747 case CDROMEJECT: 21748 SD_TRACE(SD_LOG_IOCTL, un, "EJECT\n"); 21749 if (!un->un_f_eject_media_supported) { 21750 err = ENOTTY; 21751 } else { 21752 err = sr_eject(dev); 21753 } 21754 break; 21755 21756 case CDROMVOLCTRL: 21757 SD_TRACE(SD_LOG_IOCTL, un, "CDROMVOLCTRL\n"); 21758 if (!ISCD(un)) { 21759 err = ENOTTY; 21760 } else { 21761 err = sr_volume_ctrl(dev, (caddr_t)arg, flag); 21762 } 21763 break; 21764 21765 case CDROMSUBCHNL: 21766 SD_TRACE(SD_LOG_IOCTL, un, "CDROMSUBCHNL\n"); 21767 if (!ISCD(un)) { 21768 err = ENOTTY; 21769 } else { 21770 err = sr_read_subchannel(dev, (caddr_t)arg, flag); 21771 } 21772 break; 21773 21774 case CDROMREADMODE2: 21775 SD_TRACE(SD_LOG_IOCTL, un, "CDROMREADMODE2\n"); 21776 if (!ISCD(un)) { 21777 err = ENOTTY; 21778 } else if (un->un_f_cfg_is_atapi == TRUE) { 21779 /* 21780 * If the drive supports READ CD, use that instead of 21781 * switching the LBA size via a MODE SELECT 21782 * Block Descriptor 21783 */ 21784 err = sr_read_cd_mode2(dev, (caddr_t)arg, flag); 21785 } else { 21786 err = sr_read_mode2(dev, (caddr_t)arg, flag); 21787 } 21788 break; 21789 21790 case CDROMREADMODE1: 21791 SD_TRACE(SD_LOG_IOCTL, un, "CDROMREADMODE1\n"); 21792 if (!ISCD(un)) { 21793 err = ENOTTY; 21794 } else { 21795 err = sr_read_mode1(dev, (caddr_t)arg, flag); 21796 } 21797 break; 21798 21799 case CDROMREADOFFSET: 21800 SD_TRACE(SD_LOG_IOCTL, un, 
"CDROMREADOFFSET\n"); 21801 if (!ISCD(un)) { 21802 err = ENOTTY; 21803 } else { 21804 err = sr_read_sony_session_offset(dev, (caddr_t)arg, 21805 flag); 21806 } 21807 break; 21808 21809 case CDROMSBLKMODE: 21810 SD_TRACE(SD_LOG_IOCTL, un, "CDROMSBLKMODE\n"); 21811 /* 21812 * There is no means of changing block size in case of atapi 21813 * drives, thus return ENOTTY if drive type is atapi 21814 */ 21815 if (!ISCD(un) || (un->un_f_cfg_is_atapi == TRUE)) { 21816 err = ENOTTY; 21817 } else if (un->un_f_mmc_cap == TRUE) { 21818 21819 /* 21820 * MMC Devices do not support changing the 21821 * logical block size 21822 * 21823 * Note: EINVAL is being returned instead of ENOTTY to 21824 * maintain consistancy with the original mmc 21825 * driver update. 21826 */ 21827 err = EINVAL; 21828 } else { 21829 mutex_enter(SD_MUTEX(un)); 21830 if ((!(un->un_exclopen & (1<<SDPART(dev)))) || 21831 (un->un_ncmds_in_transport > 0)) { 21832 mutex_exit(SD_MUTEX(un)); 21833 err = EINVAL; 21834 } else { 21835 mutex_exit(SD_MUTEX(un)); 21836 err = sr_change_blkmode(dev, cmd, arg, flag); 21837 } 21838 } 21839 break; 21840 21841 case CDROMGBLKMODE: 21842 SD_TRACE(SD_LOG_IOCTL, un, "CDROMGBLKMODE\n"); 21843 if (!ISCD(un)) { 21844 err = ENOTTY; 21845 } else if ((un->un_f_cfg_is_atapi != FALSE) && 21846 (un->un_f_blockcount_is_valid != FALSE)) { 21847 /* 21848 * Drive is an ATAPI drive so return target block 21849 * size for ATAPI drives since we cannot change the 21850 * blocksize on ATAPI drives. Used primarily to detect 21851 * if an ATAPI cdrom is present. 21852 */ 21853 if (ddi_copyout(&un->un_tgt_blocksize, (void *)arg, 21854 sizeof (int), flag) != 0) { 21855 err = EFAULT; 21856 } else { 21857 err = 0; 21858 } 21859 21860 } else { 21861 /* 21862 * Drive supports changing block sizes via a Mode 21863 * Select. 
21864 */ 21865 err = sr_change_blkmode(dev, cmd, arg, flag); 21866 } 21867 break; 21868 21869 case CDROMGDRVSPEED: 21870 case CDROMSDRVSPEED: 21871 SD_TRACE(SD_LOG_IOCTL, un, "CDROMXDRVSPEED\n"); 21872 if (!ISCD(un)) { 21873 err = ENOTTY; 21874 } else if (un->un_f_mmc_cap == TRUE) { 21875 /* 21876 * Note: In the future the driver implementation 21877 * for getting and 21878 * setting cd speed should entail: 21879 * 1) If non-mmc try the Toshiba mode page 21880 * (sr_change_speed) 21881 * 2) If mmc but no support for Real Time Streaming try 21882 * the SET CD SPEED (0xBB) command 21883 * (sr_atapi_change_speed) 21884 * 3) If mmc and support for Real Time Streaming 21885 * try the GET PERFORMANCE and SET STREAMING 21886 * commands (not yet implemented, 4380808) 21887 */ 21888 /* 21889 * As per recent MMC spec, CD-ROM speed is variable 21890 * and changes with LBA. Since there is no such 21891 * things as drive speed now, fail this ioctl. 21892 * 21893 * Note: EINVAL is returned for consistancy of original 21894 * implementation which included support for getting 21895 * the drive speed of mmc devices but not setting 21896 * the drive speed. Thus EINVAL would be returned 21897 * if a set request was made for an mmc device. 21898 * We no longer support get or set speed for 21899 * mmc but need to remain consistent with regard 21900 * to the error code returned. 
21901 */ 21902 err = EINVAL; 21903 } else if (un->un_f_cfg_is_atapi == TRUE) { 21904 err = sr_atapi_change_speed(dev, cmd, arg, flag); 21905 } else { 21906 err = sr_change_speed(dev, cmd, arg, flag); 21907 } 21908 break; 21909 21910 case CDROMCDDA: 21911 SD_TRACE(SD_LOG_IOCTL, un, "CDROMCDDA\n"); 21912 if (!ISCD(un)) { 21913 err = ENOTTY; 21914 } else { 21915 err = sr_read_cdda(dev, (void *)arg, flag); 21916 } 21917 break; 21918 21919 case CDROMCDXA: 21920 SD_TRACE(SD_LOG_IOCTL, un, "CDROMCDXA\n"); 21921 if (!ISCD(un)) { 21922 err = ENOTTY; 21923 } else { 21924 err = sr_read_cdxa(dev, (caddr_t)arg, flag); 21925 } 21926 break; 21927 21928 case CDROMSUBCODE: 21929 SD_TRACE(SD_LOG_IOCTL, un, "CDROMSUBCODE\n"); 21930 if (!ISCD(un)) { 21931 err = ENOTTY; 21932 } else { 21933 err = sr_read_all_subcodes(dev, (caddr_t)arg, flag); 21934 } 21935 break; 21936 21937 21938 #ifdef SDDEBUG 21939 /* RESET/ABORTS testing ioctls */ 21940 case DKIOCRESET: { 21941 int reset_level; 21942 21943 if (ddi_copyin((void *)arg, &reset_level, sizeof (int), flag)) { 21944 err = EFAULT; 21945 } else { 21946 SD_INFO(SD_LOG_IOCTL, un, "sdioctl: DKIOCRESET: " 21947 "reset_level = 0x%lx\n", reset_level); 21948 if (scsi_reset(SD_ADDRESS(un), reset_level)) { 21949 err = 0; 21950 } else { 21951 err = EIO; 21952 } 21953 } 21954 break; 21955 } 21956 21957 case DKIOCABORT: 21958 SD_INFO(SD_LOG_IOCTL, un, "sdioctl: DKIOCABORT:\n"); 21959 if (scsi_abort(SD_ADDRESS(un), NULL)) { 21960 err = 0; 21961 } else { 21962 err = EIO; 21963 } 21964 break; 21965 #endif 21966 21967 #ifdef SD_FAULT_INJECTION 21968 /* SDIOC FaultInjection testing ioctls */ 21969 case SDIOCSTART: 21970 case SDIOCSTOP: 21971 case SDIOCINSERTPKT: 21972 case SDIOCINSERTXB: 21973 case SDIOCINSERTUN: 21974 case SDIOCINSERTARQ: 21975 case SDIOCPUSH: 21976 case SDIOCRETRIEVE: 21977 case SDIOCRUN: 21978 SD_INFO(SD_LOG_SDTEST, un, "sdioctl:" 21979 "SDIOC detected cmd:0x%X:\n", cmd); 21980 /* call error generator */ 21981 
sd_faultinjection_ioctl(cmd, arg, un); 21982 err = 0; 21983 break; 21984 21985 #endif /* SD_FAULT_INJECTION */ 21986 21987 case DKIOCFLUSHWRITECACHE: 21988 { 21989 struct dk_callback *dkc = (struct dk_callback *)arg; 21990 21991 mutex_enter(SD_MUTEX(un)); 21992 if (!un->un_f_sync_cache_supported || 21993 !un->un_f_write_cache_enabled) { 21994 err = un->un_f_sync_cache_supported ? 21995 0 : ENOTSUP; 21996 mutex_exit(SD_MUTEX(un)); 21997 if ((flag & FKIOCTL) && dkc != NULL && 21998 dkc->dkc_callback != NULL) { 21999 (*dkc->dkc_callback)(dkc->dkc_cookie, 22000 err); 22001 /* 22002 * Did callback and reported error. 22003 * Since we did a callback, ioctl 22004 * should return 0. 22005 */ 22006 err = 0; 22007 } 22008 break; 22009 } 22010 mutex_exit(SD_MUTEX(un)); 22011 22012 if ((flag & FKIOCTL) && dkc != NULL && 22013 dkc->dkc_callback != NULL) { 22014 /* async SYNC CACHE request */ 22015 err = sd_send_scsi_SYNCHRONIZE_CACHE(un, dkc); 22016 } else { 22017 /* synchronous SYNC CACHE request */ 22018 err = sd_send_scsi_SYNCHRONIZE_CACHE(un, NULL); 22019 } 22020 } 22021 break; 22022 22023 case DKIOCGETWCE: { 22024 22025 int wce; 22026 22027 if ((err = sd_get_write_cache_enabled(ssc, &wce)) != 0) { 22028 break; 22029 } 22030 22031 if (ddi_copyout(&wce, (void *)arg, sizeof (wce), flag)) { 22032 err = EFAULT; 22033 } 22034 break; 22035 } 22036 22037 case DKIOCSETWCE: { 22038 22039 int wce, sync_supported; 22040 22041 if (ddi_copyin((void *)arg, &wce, sizeof (wce), flag)) { 22042 err = EFAULT; 22043 break; 22044 } 22045 22046 /* 22047 * Synchronize multiple threads trying to enable 22048 * or disable the cache via the un_f_wcc_cv 22049 * condition variable. 22050 */ 22051 mutex_enter(SD_MUTEX(un)); 22052 22053 /* 22054 * Don't allow the cache to be enabled if the 22055 * config file has it disabled. 
22056 */ 22057 if (un->un_f_opt_disable_cache && wce) { 22058 mutex_exit(SD_MUTEX(un)); 22059 err = EINVAL; 22060 break; 22061 } 22062 22063 /* 22064 * Wait for write cache change in progress 22065 * bit to be clear before proceeding. 22066 */ 22067 while (un->un_f_wcc_inprog) 22068 cv_wait(&un->un_wcc_cv, SD_MUTEX(un)); 22069 22070 un->un_f_wcc_inprog = 1; 22071 22072 if (un->un_f_write_cache_enabled && wce == 0) { 22073 /* 22074 * Disable the write cache. Don't clear 22075 * un_f_write_cache_enabled until after 22076 * the mode select and flush are complete. 22077 */ 22078 sync_supported = un->un_f_sync_cache_supported; 22079 22080 /* 22081 * If cache flush is suppressed, we assume that the 22082 * controller firmware will take care of managing the 22083 * write cache for us: no need to explicitly 22084 * disable it. 22085 */ 22086 if (!un->un_f_suppress_cache_flush) { 22087 mutex_exit(SD_MUTEX(un)); 22088 if ((err = sd_cache_control(ssc, 22089 SD_CACHE_NOCHANGE, 22090 SD_CACHE_DISABLE)) == 0 && 22091 sync_supported) { 22092 err = sd_send_scsi_SYNCHRONIZE_CACHE(un, 22093 NULL); 22094 } 22095 } else { 22096 mutex_exit(SD_MUTEX(un)); 22097 } 22098 22099 mutex_enter(SD_MUTEX(un)); 22100 if (err == 0) { 22101 un->un_f_write_cache_enabled = 0; 22102 } 22103 22104 } else if (!un->un_f_write_cache_enabled && wce != 0) { 22105 /* 22106 * Set un_f_write_cache_enabled first, so there is 22107 * no window where the cache is enabled, but the 22108 * bit says it isn't. 22109 */ 22110 un->un_f_write_cache_enabled = 1; 22111 22112 /* 22113 * If cache flush is suppressed, we assume that the 22114 * controller firmware will take care of managing the 22115 * write cache for us: no need to explicitly 22116 * enable it. 
22117 */ 22118 if (!un->un_f_suppress_cache_flush) { 22119 mutex_exit(SD_MUTEX(un)); 22120 err = sd_cache_control(ssc, SD_CACHE_NOCHANGE, 22121 SD_CACHE_ENABLE); 22122 } else { 22123 mutex_exit(SD_MUTEX(un)); 22124 } 22125 22126 mutex_enter(SD_MUTEX(un)); 22127 22128 if (err) { 22129 un->un_f_write_cache_enabled = 0; 22130 } 22131 } 22132 22133 un->un_f_wcc_inprog = 0; 22134 cv_broadcast(&un->un_wcc_cv); 22135 mutex_exit(SD_MUTEX(un)); 22136 break; 22137 } 22138 22139 default: 22140 err = ENOTTY; 22141 break; 22142 } 22143 mutex_enter(SD_MUTEX(un)); 22144 un->un_ncmds_in_driver--; 22145 ASSERT(un->un_ncmds_in_driver >= 0); 22146 mutex_exit(SD_MUTEX(un)); 22147 22148 22149 done_without_assess: 22150 sd_ssc_fini(ssc); 22151 22152 SD_TRACE(SD_LOG_IOCTL, un, "sdioctl: exit: %d\n", err); 22153 return (err); 22154 22155 done_with_assess: 22156 mutex_enter(SD_MUTEX(un)); 22157 un->un_ncmds_in_driver--; 22158 ASSERT(un->un_ncmds_in_driver >= 0); 22159 mutex_exit(SD_MUTEX(un)); 22160 22161 done_quick_assess: 22162 if (err != 0) 22163 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 22164 /* Uninitialize sd_ssc_t pointer */ 22165 sd_ssc_fini(ssc); 22166 22167 SD_TRACE(SD_LOG_IOCTL, un, "sdioctl: exit: %d\n", err); 22168 return (err); 22169 } 22170 22171 22172 /* 22173 * Function: sd_dkio_ctrl_info 22174 * 22175 * Description: This routine is the driver entry point for handling controller 22176 * information ioctl requests (DKIOCINFO). 22177 * 22178 * Arguments: dev - the device number 22179 * arg - pointer to user provided dk_cinfo structure 22180 * specifying the controller type and attributes. 22181 * flag - this argument is a pass through to ddi_copyxxx() 22182 * directly from the mode argument of ioctl(). 
22183 * 22184 * Return Code: 0 22185 * EFAULT 22186 * ENXIO 22187 */ 22188 22189 static int 22190 sd_dkio_ctrl_info(dev_t dev, caddr_t arg, int flag) 22191 { 22192 struct sd_lun *un = NULL; 22193 struct dk_cinfo *info; 22194 dev_info_t *pdip; 22195 int lun, tgt; 22196 22197 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 22198 return (ENXIO); 22199 } 22200 22201 info = (struct dk_cinfo *) 22202 kmem_zalloc(sizeof (struct dk_cinfo), KM_SLEEP); 22203 22204 switch (un->un_ctype) { 22205 case CTYPE_CDROM: 22206 info->dki_ctype = DKC_CDROM; 22207 break; 22208 default: 22209 info->dki_ctype = DKC_SCSI_CCS; 22210 break; 22211 } 22212 pdip = ddi_get_parent(SD_DEVINFO(un)); 22213 info->dki_cnum = ddi_get_instance(pdip); 22214 if (strlen(ddi_get_name(pdip)) < DK_DEVLEN) { 22215 (void) strcpy(info->dki_cname, ddi_get_name(pdip)); 22216 } else { 22217 (void) strncpy(info->dki_cname, ddi_node_name(pdip), 22218 DK_DEVLEN - 1); 22219 } 22220 22221 lun = ddi_prop_get_int(DDI_DEV_T_ANY, SD_DEVINFO(un), 22222 DDI_PROP_DONTPASS, SCSI_ADDR_PROP_LUN, 0); 22223 tgt = ddi_prop_get_int(DDI_DEV_T_ANY, SD_DEVINFO(un), 22224 DDI_PROP_DONTPASS, SCSI_ADDR_PROP_TARGET, 0); 22225 22226 /* Unit Information */ 22227 info->dki_unit = ddi_get_instance(SD_DEVINFO(un)); 22228 info->dki_slave = ((tgt << 3) | lun); 22229 (void) strncpy(info->dki_dname, ddi_driver_name(SD_DEVINFO(un)), 22230 DK_DEVLEN - 1); 22231 info->dki_flags = DKI_FMTVOL; 22232 info->dki_partition = SDPART(dev); 22233 22234 /* Max Transfer size of this device in blocks */ 22235 info->dki_maxtransfer = un->un_max_xfer_size / un->un_sys_blocksize; 22236 info->dki_addr = 0; 22237 info->dki_space = 0; 22238 info->dki_prio = 0; 22239 info->dki_vec = 0; 22240 22241 if (ddi_copyout(info, arg, sizeof (struct dk_cinfo), flag) != 0) { 22242 kmem_free(info, sizeof (struct dk_cinfo)); 22243 return (EFAULT); 22244 } else { 22245 kmem_free(info, sizeof (struct dk_cinfo)); 22246 return (0); 22247 } 22248 } 22249 22250 22251 /* 22252 
 * Function: sd_get_media_info
 *
 * Description: This routine is the driver entry point for handling ioctl
 *		requests for the media type or command set profile used by the
 *		drive to operate on the media (DKIOCGMEDIAINFO).
 *
 * Arguments: dev	- the device number
 *		arg	- pointer to user provided dk_minfo structure
 *			  specifying the media type, logical block size and
 *			  drive capacity.
 *		flag	- this argument is a pass through to ddi_copyxxx()
 *			  directly from the mode argument of ioctl().
 *
 * Return Code: 0
 *		EACCES
 *		EFAULT
 *		ENXIO
 *		EIO
 */

static int
sd_get_media_info(dev_t dev, caddr_t arg, int flag)
{
	struct sd_lun		*un = NULL;
	struct uscsi_cmd	com;
	struct scsi_inquiry	*sinq;
	struct dk_minfo		media_info;
	u_longlong_t		media_capacity;
	uint64_t		capacity;
	uint_t			lbasize;
	uchar_t			*out_data;
	uchar_t			*rqbuf;
	int			rval = 0;
	int			rtn;
	sd_ssc_t		*ssc;
	if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL ||
	    (un->un_state == SD_STATE_OFFLINE)) {
		return (ENXIO);
	}

	SD_TRACE(SD_LOG_IOCTL_DKIO, un, "sd_get_media_info: entry\n");

	out_data = kmem_zalloc(SD_PROFILE_HEADER_LEN, KM_SLEEP);
	rqbuf = kmem_zalloc(SENSE_LENGTH, KM_SLEEP);

	/* Issue a TUR to determine if the drive is ready with media present */
	ssc = sd_ssc_init(un);
	rval = sd_send_scsi_TEST_UNIT_READY(ssc, SD_CHECK_FOR_MEDIA);
	if (rval == ENXIO) {
		goto done;
	} else if (rval != 0) {
		/*
		 * Any other TUR failure is tolerated here: drop the FMA
		 * assessment and continue; the READ CAPACITY below decides
		 * the final return status.
		 */
		sd_ssc_assessment(ssc, SD_FMT_IGNORE);
	}

	/* Now get configuration data */
	if (ISCD(un)) {
		media_info.dki_media_type = DK_CDROM;

		/* Allow SCMD_GET_CONFIGURATION to MMC devices only */
		if (un->un_f_mmc_cap == TRUE) {
			rtn = sd_send_scsi_GET_CONFIGURATION(ssc, &com, rqbuf,
			    SENSE_LENGTH, out_data, SD_PROFILE_HEADER_LEN,
			    SD_PATH_STANDARD);

			if (rtn) {
				/*
				 * Failed for other than an illegal request
				 * or command not supported
				 */
				if ((com.uscsi_status == STATUS_CHECK) &&
				    (com.uscsi_rqstatus == STATUS_GOOD)) {
					/*
					 * rqbuf[2] is the sense key; rqbuf[12]
					 * is presumably the ASC (0x20 =
					 * invalid command opcode per SPC —
					 * confirm). Anything else is a real
					 * error.
					 */
					if ((rqbuf[2] != KEY_ILLEGAL_REQUEST) ||
					    (rqbuf[12] != 0x20)) {
						rval = EIO;
						goto done;
					}
				}
				else
					sd_ssc_assessment(ssc, SD_FMT_IGNORE);
			} else {
				/*
				 * The GET CONFIGURATION command succeeded
				 * so set the media type according to the
				 * returned data
				 */
				media_info.dki_media_type = out_data[6];
				media_info.dki_media_type <<= 8;
				media_info.dki_media_type |= out_data[7];
			}
		}
	} else {
		/*
		 * The profile list is not available, so we attempt to identify
		 * the media type based on the inquiry data
		 */
		sinq = un->un_sd->sd_inq;
		if ((sinq->inq_dtype == DTYPE_DIRECT) ||
		    (sinq->inq_dtype == DTYPE_OPTICAL)) {
			/* This is a direct access device or optical disk */
			media_info.dki_media_type = DK_FIXED_DISK;

			/*
			 * Special-case Iomega vendor/product IDs so ZIP and
			 * JAZ removable cartridges are reported distinctly.
			 */
			if ((bcmp(sinq->inq_vid, "IOMEGA", 6) == 0) ||
			    (bcmp(sinq->inq_vid, "iomega", 6) == 0)) {
				if ((bcmp(sinq->inq_pid, "ZIP", 3) == 0)) {
					media_info.dki_media_type = DK_ZIP;
				} else if (
				(bcmp(sinq->inq_pid, "jaz", 3) == 0)) {
					media_info.dki_media_type = DK_JAZ;
				}
			}
		} else {
			/*
			 * Not a CD, direct access or optical disk so return
			 * unknown media
			 */
			media_info.dki_media_type = DK_UNKNOWN;
		}
	}

	/* Now read the capacity so we can provide the lbasize and capacity */
	rval = sd_send_scsi_READ_CAPACITY(ssc, &capacity, &lbasize,
	    SD_PATH_DIRECT);
	switch (rval) {
	case 0:
		break;
	case EACCES:
		rval = EACCES;
		goto done;
	default:
		rval = EIO;
		goto done;
	}

	/*
	 * If lun is expanded dynamically, update the un structure.
	 */
	mutex_enter(SD_MUTEX(un));
	if ((un->un_f_blockcount_is_valid == TRUE) &&
	    (un->un_f_tgt_blocksize_is_valid == TRUE) &&
	    (capacity > un->un_blockcount)) {
		sd_update_block_info(un, lbasize, capacity);
	}
	mutex_exit(SD_MUTEX(un));

	media_info.dki_lbsize = lbasize;
	media_capacity = capacity;

	/*
	 * sd_send_scsi_READ_CAPACITY() reports capacity in
	 * un->un_sys_blocksize chunks. So we need to convert it into
	 * cap.lbasize chunks.
	 */
	media_capacity *= un->un_sys_blocksize;
	media_capacity /= lbasize;
	media_info.dki_capacity = media_capacity;

	if (ddi_copyout(&media_info, arg, sizeof (struct dk_minfo), flag)) {
		rval = EFAULT;
		/* Put goto. Anybody might add some code below in future */
		goto no_assessment;
	}
done:
	/*
	 * EIO maps to a status-check assessment (feeds FMA); everything
	 * else that failed is deliberately ignored for assessment purposes.
	 */
	if (rval != 0) {
		if (rval == EIO)
			sd_ssc_assessment(ssc, SD_FMT_STATUS_CHECK);
		else
			sd_ssc_assessment(ssc, SD_FMT_IGNORE);
	}
no_assessment:
	sd_ssc_fini(ssc);
	kmem_free(out_data, SD_PROFILE_HEADER_LEN);
	kmem_free(rqbuf, SENSE_LENGTH);
	return (rval);
}


/*
 * Function: sd_check_media
 *
 * Description: This utility routine implements the functionality for the
 *		DKIOCSTATE ioctl. This ioctl blocks the user thread until the
 *		driver state changes from that specified by the user
 *		(inserted or ejected). For example, if the user specifies
 *		DKIO_EJECTED and the current media state is inserted this
 *		routine will immediately return DKIO_INSERTED. However, if the
 *		current media state is not inserted the user thread will be
 *		blocked until the drive state changes. If DKIO_NONE is specified
 *		the user thread will block until a drive state change occurs.
 *
 * Arguments:	dev   - the device number
 *		state - user pointer to a dkio_state, updated with the current
 *			drive state at return.
 *
 * Return Code: ENXIO  - invalid device (no soft state for this unit)
 *		EIO    - the device could not be powered up for the check
 *		EAGAIN - the scsi watch request could not be submitted
 *		EINTR  - the wait for a media state change was interrupted
 *			 by a signal
 */

static int
sd_check_media(dev_t dev, enum dkio_state state)
{
	struct sd_lun		*un = NULL;
	enum dkio_state		prev_state;
	opaque_t		token = NULL;
	int			rval = 0;
	sd_ssc_t		*ssc;

	if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) {
		return (ENXIO);
	}

	SD_TRACE(SD_LOG_COMMON, un, "sd_check_media: entry\n");

	ssc = sd_ssc_init(un);

	mutex_enter(SD_MUTEX(un));

	SD_TRACE(SD_LOG_COMMON, un, "sd_check_media: "
	    "state=%x, mediastate=%x\n", state, un->un_mediastate);

	/* Remember the state on entry so we can detect transitions below. */
	prev_state = un->un_mediastate;

	/* is there anything to do? */
	if (state == un->un_mediastate || un->un_mediastate == DKIO_NONE) {
		/*
		 * submit the request to the scsi_watch service;
		 * scsi_media_watch_cb() does the real work
		 */
		mutex_exit(SD_MUTEX(un));

		/*
		 * This change handles the case where a scsi watch request is
		 * added to a device that is powered down. To accomplish this
		 * we power up the device before adding the scsi watch request,
		 * since the scsi watch sends a TUR directly to the device
		 * which the device cannot handle if it is powered down.
		 */
		if (sd_pm_entry(un) != DDI_SUCCESS) {
			mutex_enter(SD_MUTEX(un));
			goto done;
		}

		token = scsi_watch_request_submit(SD_SCSI_DEVP(un),
		    sd_check_media_time, SENSE_LENGTH, sd_media_watch_cb,
		    (caddr_t)dev);

		sd_pm_exit(un);

		mutex_enter(SD_MUTEX(un));
		if (token == NULL) {
			rval = EAGAIN;
			goto done;
		}

		/*
		 * This is a special case IOCTL that doesn't return
		 * until the media state changes. Routine sdpower
		 * knows about and handles this so don't count it
		 * as an active cmd in the driver, which would
		 * keep the device busy to the pm framework.
		 * If the count isn't decremented the device can't
		 * be powered down.
		 */
		un->un_ncmds_in_driver--;
		ASSERT(un->un_ncmds_in_driver >= 0);

		/*
		 * if a prior request had been made, this will be the same
		 * token, as scsi_watch was designed that way.
		 */
		un->un_swr_token = token;
		un->un_specified_mediastate = state;

		/*
		 * now wait for media change
		 * we will not be signalled unless mediastate == state but it is
		 * still better to test for this condition, since there is a
		 * 2 sec cv_broadcast delay when mediastate == DKIO_INSERTED
		 */
		SD_TRACE(SD_LOG_COMMON, un,
		    "sd_check_media: waiting for media state change\n");
		while (un->un_mediastate == state) {
			if (cv_wait_sig(&un->un_state_cv, SD_MUTEX(un)) == 0) {
				SD_TRACE(SD_LOG_COMMON, un,
				    "sd_check_media: waiting for media state "
				    "was interrupted\n");
				/*
				 * Signal delivered: restore the active command
				 * count decremented above before bailing out.
				 */
				un->un_ncmds_in_driver++;
				rval = EINTR;
				goto done;
			}
			SD_TRACE(SD_LOG_COMMON, un,
			    "sd_check_media: received signal, state=%x\n",
			    un->un_mediastate);
		}
		/*
		 * Inc the counter to indicate the device once again
		 * has an active outstanding cmd.
		 */
		un->un_ncmds_in_driver++;
	}

	/* invalidate geometry */
	if (prev_state == DKIO_INSERTED && un->un_mediastate == DKIO_EJECTED) {
		sr_ejected(un);
	}

	if (un->un_mediastate == DKIO_INSERTED && prev_state != DKIO_INSERTED) {
		uint64_t	capacity;
		uint_t		lbasize;

		SD_TRACE(SD_LOG_COMMON, un, "sd_check_media: media inserted\n");
		mutex_exit(SD_MUTEX(un));
		/*
		 * Since the following routines use SD_PATH_DIRECT, we must
		 * call PM directly before the upcoming disk accesses. This
		 * may cause the disk to be power/spin up.
		 */

		if (sd_pm_entry(un) == DDI_SUCCESS) {
			rval = sd_send_scsi_READ_CAPACITY(ssc,
			    &capacity, &lbasize, SD_PATH_DIRECT);
			if (rval != 0) {
				sd_pm_exit(un);
				if (rval == EIO)
					sd_ssc_assessment(ssc,
					    SD_FMT_STATUS_CHECK);
				else
					sd_ssc_assessment(ssc, SD_FMT_IGNORE);
				mutex_enter(SD_MUTEX(un));
				goto done;
			}
		} else {
			rval = EIO;
			mutex_enter(SD_MUTEX(un));
			goto done;
		}
		mutex_enter(SD_MUTEX(un));

		sd_update_block_info(un, lbasize, capacity);

		/*
		 * Check if the media in the device is writable or not
		 */
		if (ISCD(un)) {
			sd_check_for_writable_cd(ssc, SD_PATH_DIRECT);
		}

		mutex_exit(SD_MUTEX(un));
		cmlb_invalidate(un->un_cmlbhandle, (void *)SD_PATH_DIRECT);
		if ((cmlb_validate(un->un_cmlbhandle, 0,
		    (void *)SD_PATH_DIRECT) == 0) && un->un_f_pkstats_enabled) {
			sd_set_pstats(un);
			SD_TRACE(SD_LOG_IO_PARTITION, un,
			    "sd_check_media: un:0x%p pstats created and "
			    "set\n", un);
		}

		rval = sd_send_scsi_DOORLOCK(ssc, SD_REMOVAL_PREVENT,
		    SD_PATH_DIRECT);

		sd_pm_exit(un);

		if (rval != 0) {
			if (rval == EIO)
				sd_ssc_assessment(ssc, SD_FMT_STATUS_CHECK);
			else
				sd_ssc_assessment(ssc, SD_FMT_IGNORE);
		}

		mutex_enter(SD_MUTEX(un));
	}
done:
	sd_ssc_fini(ssc);
	un->un_f_watcht_stopped = FALSE;
	/*
	 * Use of this local token and the mutex ensures that we avoid
	 * some race conditions associated with terminating the
	 * scsi watch.
	 */
	if (token) {
		un->un_swr_token = (opaque_t)NULL;
		mutex_exit(SD_MUTEX(un));
		(void) scsi_watch_request_terminate(token,
		    SCSI_WATCH_TERMINATE_WAIT);
		mutex_enter(SD_MUTEX(un));
	}

	/*
	 * Update the capacity kstat value, if no media previously
	 * (capacity kstat is 0) and a media has been inserted
	 * (un_f_blockcount_is_valid == TRUE)
	 */
	if (un->un_errstats) {
		struct sd_errstats *stp = NULL;

		stp = (struct sd_errstats *)un->un_errstats->ks_data;
		if ((stp->sd_capacity.value.ui64 == 0) &&
		    (un->un_f_blockcount_is_valid == TRUE)) {
			stp->sd_capacity.value.ui64 =
			    (uint64_t)((uint64_t)un->un_blockcount *
			    un->un_sys_blocksize);
		}
	}
	mutex_exit(SD_MUTEX(un));
	SD_TRACE(SD_LOG_COMMON, un, "sd_check_media: done\n");
	return (rval);
}


/*
 * Function:	sd_delayed_cv_broadcast
 *
 * Description: Delayed cv_broadcast to allow for target to recover from media
 *		insertion.  Scheduled via timeout(9F) by sd_media_watch_cb()
 *		when a DKIO_INSERTED transition is observed.
 *
 * Arguments:	arg - driver soft state (unit) structure
 */

static void
sd_delayed_cv_broadcast(void *arg)
{
	struct sd_lun *un = arg;

	SD_TRACE(SD_LOG_COMMON, un, "sd_delayed_cv_broadcast\n");

	mutex_enter(SD_MUTEX(un));
	/* The timeout has fired; clear its id before waking any waiters. */
	un->un_dcvb_timeid = NULL;
	cv_broadcast(&un->un_state_cv);
	mutex_exit(SD_MUTEX(un));
}


/*
 * Function:	sd_media_watch_cb
 *
 * Description: Callback routine used for support of the DKIOCSTATE ioctl. This
 *		routine processes the TUR sense data and updates the driver
 *		state if a transition has occurred. The user thread
 *		(sd_check_media) is then signalled.
 *
 * Arguments:	arg -	the device 'dev_t' is used for context to discriminate
 *			among multiple watches that share this callback function
 *		resultp - scsi watch facility result packet containing scsi
 *			  packet, status byte and sense data
 *
 * Return Code: 0 for success, -1 for failure
 */

static int
sd_media_watch_cb(caddr_t arg, struct scsi_watch_result *resultp)
{
	struct sd_lun		*un;
	struct scsi_status	*statusp = resultp->statusp;
	uint8_t			*sensep = (uint8_t *)resultp->sensep;
	enum dkio_state		state = DKIO_NONE;
	dev_t			dev = (dev_t)arg;
	uchar_t			actual_sense_length;
	uint8_t			skey, asc, ascq;

	if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) {
		return (-1);
	}
	actual_sense_length = resultp->actual_sense_length;

	mutex_enter(SD_MUTEX(un));
	SD_TRACE(SD_LOG_COMMON, un,
	    "sd_media_watch_cb: status=%x, sensep=%p, len=%x\n",
	    *((char *)statusp), (void *)sensep, actual_sense_length);

	/* Transport says the device is gone: wake waiters and bail out. */
	if (resultp->pkt->pkt_reason == CMD_DEV_GONE) {
		un->un_mediastate = DKIO_DEV_GONE;
		cv_broadcast(&un->un_state_cv);
		mutex_exit(SD_MUTEX(un));

		return (0);
	}

	/*
	 * If there was a check condition then sensep points to valid sense data
	 * If status was not a check condition but a reservation or busy status
	 * then the new state is DKIO_NONE
	 */
	if (sensep != NULL) {
		skey = scsi_sense_key(sensep);
		asc = scsi_sense_asc(sensep);
		ascq = scsi_sense_ascq(sensep);

		SD_INFO(SD_LOG_COMMON, un,
		    "sd_media_watch_cb: sense KEY=%x, ASC=%x, ASCQ=%x\n",
		    skey, asc, ascq);
		/* This routine only uses up to 13 bytes of sense data. */
		if (actual_sense_length >= 13) {
			if (skey == KEY_UNIT_ATTENTION) {
				/* ASC 0x28: medium may have changed (SPC) */
				if (asc == 0x28) {
					state = DKIO_INSERTED;
				}
			} else if (skey == KEY_NOT_READY) {
				/*
				 * if 02/04/02 means that the host
				 * should send start command. Explicitly
				 * leave the media state as is
				 * (inserted) as the media is inserted
				 * and host has stopped device for PM
				 * reasons. Upon next true read/write
				 * to this media will bring the
				 * device to the right state good for
				 * media access.
				 */
				if (asc == 0x3a) {
					/* ASC 0x3a: medium not present */
					state = DKIO_EJECTED;
				} else {
					/*
					 * If the drive is busy with an
					 * operation or long write, keep the
					 * media in an inserted state.
					 */

					if ((asc == 0x04) &&
					    ((ascq == 0x02) ||
					    (ascq == 0x07) ||
					    (ascq == 0x08))) {
						state = DKIO_INSERTED;
					}
				}
			} else if (skey == KEY_NO_SENSE) {
				if ((asc == 0x00) && (ascq == 0x00)) {
					/*
					 * Sense Data 00/00/00 does not provide
					 * any information about the state of
					 * the media. Ignore it.
					 */
					mutex_exit(SD_MUTEX(un));
					return (0);
				}
			}
		}
	} else if ((*((char *)statusp) == STATUS_GOOD) &&
	    (resultp->pkt->pkt_reason == CMD_CMPLT)) {
		/* TUR completed cleanly with no sense: media is present. */
		state = DKIO_INSERTED;
	}

	SD_TRACE(SD_LOG_COMMON, un,
	    "sd_media_watch_cb: state=%x, specified=%x\n",
	    state, un->un_specified_mediastate);

	/*
	 * now signal the waiting thread if this is *not* the specified state;
	 * delay the signal if the state is DKIO_INSERTED to allow the target
	 * to recover
	 */
	if (state != un->un_specified_mediastate) {
		un->un_mediastate = state;
		if (state == DKIO_INSERTED) {
			/*
			 * delay the signal to give the drive a chance
			 * to do what it apparently needs to do
			 */
			SD_TRACE(SD_LOG_COMMON, un,
			    "sd_media_watch_cb: delayed cv_broadcast\n");
			if (un->un_dcvb_timeid == NULL) {
				un->un_dcvb_timeid =
				    timeout(sd_delayed_cv_broadcast, un,
				    drv_usectohz((clock_t)MEDIA_ACCESS_DELAY));
			}
		} else {
			SD_TRACE(SD_LOG_COMMON, un,
			    "sd_media_watch_cb: immediate cv_broadcast\n");
			cv_broadcast(&un->un_state_cv);
		}
	}
	mutex_exit(SD_MUTEX(un));
	return (0);
}


/*
 * Function:	sd_dkio_get_temp
 *
 * Description: This routine is the driver entry point for handling ioctl
 *		requests to get the disk temperature (DKIOCGTEMPERATURE).
 *
 * Arguments:	dev  - the device number
 *		arg  - pointer to user provided dk_temperature structure.
 *		flag - this argument is a pass through to ddi_copyxxx()
 *		       directly from the mode argument of ioctl().
22838 * 22839 * Return Code: 0 22840 * EFAULT 22841 * ENXIO 22842 * EAGAIN 22843 */ 22844 22845 static int 22846 sd_dkio_get_temp(dev_t dev, caddr_t arg, int flag) 22847 { 22848 struct sd_lun *un = NULL; 22849 struct dk_temperature *dktemp = NULL; 22850 uchar_t *temperature_page; 22851 int rval = 0; 22852 int path_flag = SD_PATH_STANDARD; 22853 sd_ssc_t *ssc; 22854 22855 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 22856 return (ENXIO); 22857 } 22858 22859 ssc = sd_ssc_init(un); 22860 dktemp = kmem_zalloc(sizeof (struct dk_temperature), KM_SLEEP); 22861 22862 /* copyin the disk temp argument to get the user flags */ 22863 if (ddi_copyin((void *)arg, dktemp, 22864 sizeof (struct dk_temperature), flag) != 0) { 22865 rval = EFAULT; 22866 goto done; 22867 } 22868 22869 /* Initialize the temperature to invalid. */ 22870 dktemp->dkt_cur_temp = (short)DKT_INVALID_TEMP; 22871 dktemp->dkt_ref_temp = (short)DKT_INVALID_TEMP; 22872 22873 /* 22874 * Note: Investigate removing the "bypass pm" semantic. 22875 * Can we just bypass PM always? 22876 */ 22877 if (dktemp->dkt_flags & DKT_BYPASS_PM) { 22878 path_flag = SD_PATH_DIRECT; 22879 ASSERT(!mutex_owned(&un->un_pm_mutex)); 22880 mutex_enter(&un->un_pm_mutex); 22881 if (SD_DEVICE_IS_IN_LOW_POWER(un)) { 22882 /* 22883 * If DKT_BYPASS_PM is set, and the drive happens to be 22884 * in low power mode, we can not wake it up, Need to 22885 * return EAGAIN. 22886 */ 22887 mutex_exit(&un->un_pm_mutex); 22888 rval = EAGAIN; 22889 goto done; 22890 } else { 22891 /* 22892 * Indicate to PM the device is busy. This is required 22893 * to avoid a race - i.e. the ioctl is issuing a 22894 * command and the pm framework brings down the device 22895 * to low power mode (possible power cut-off on some 22896 * platforms). 
22897 */ 22898 mutex_exit(&un->un_pm_mutex); 22899 if (sd_pm_entry(un) != DDI_SUCCESS) { 22900 rval = EAGAIN; 22901 goto done; 22902 } 22903 } 22904 } 22905 22906 temperature_page = kmem_zalloc(TEMPERATURE_PAGE_SIZE, KM_SLEEP); 22907 22908 rval = sd_send_scsi_LOG_SENSE(ssc, temperature_page, 22909 TEMPERATURE_PAGE_SIZE, TEMPERATURE_PAGE, 1, 0, path_flag); 22910 if (rval != 0) 22911 goto done2; 22912 22913 /* 22914 * For the current temperature verify that the parameter length is 0x02 22915 * and the parameter code is 0x00 22916 */ 22917 if ((temperature_page[7] == 0x02) && (temperature_page[4] == 0x00) && 22918 (temperature_page[5] == 0x00)) { 22919 if (temperature_page[9] == 0xFF) { 22920 dktemp->dkt_cur_temp = (short)DKT_INVALID_TEMP; 22921 } else { 22922 dktemp->dkt_cur_temp = (short)(temperature_page[9]); 22923 } 22924 } 22925 22926 /* 22927 * For the reference temperature verify that the parameter 22928 * length is 0x02 and the parameter code is 0x01 22929 */ 22930 if ((temperature_page[13] == 0x02) && (temperature_page[10] == 0x00) && 22931 (temperature_page[11] == 0x01)) { 22932 if (temperature_page[15] == 0xFF) { 22933 dktemp->dkt_ref_temp = (short)DKT_INVALID_TEMP; 22934 } else { 22935 dktemp->dkt_ref_temp = (short)(temperature_page[15]); 22936 } 22937 } 22938 22939 /* Do the copyout regardless of the temperature commands status. 
*/ 22940 if (ddi_copyout(dktemp, (void *)arg, sizeof (struct dk_temperature), 22941 flag) != 0) { 22942 rval = EFAULT; 22943 goto done1; 22944 } 22945 22946 done2: 22947 if (rval != 0) { 22948 if (rval == EIO) 22949 sd_ssc_assessment(ssc, SD_FMT_STATUS_CHECK); 22950 else 22951 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 22952 } 22953 done1: 22954 if (path_flag == SD_PATH_DIRECT) { 22955 sd_pm_exit(un); 22956 } 22957 22958 kmem_free(temperature_page, TEMPERATURE_PAGE_SIZE); 22959 done: 22960 sd_ssc_fini(ssc); 22961 if (dktemp != NULL) { 22962 kmem_free(dktemp, sizeof (struct dk_temperature)); 22963 } 22964 22965 return (rval); 22966 } 22967 22968 22969 /* 22970 * Function: sd_log_page_supported 22971 * 22972 * Description: This routine uses sd_send_scsi_LOG_SENSE to find the list of 22973 * supported log pages. 22974 * 22975 * Arguments: ssc - ssc contains pointer to driver soft state (unit) 22976 * structure for this target. 22977 * log_page - 22978 * 22979 * Return Code: -1 - on error (log sense is optional and may not be supported). 22980 * 0 - log page not found. 22981 * 1 - log page found. 22982 */ 22983 22984 static int 22985 sd_log_page_supported(sd_ssc_t *ssc, int log_page) 22986 { 22987 uchar_t *log_page_data; 22988 int i; 22989 int match = 0; 22990 int log_size; 22991 int status = 0; 22992 struct sd_lun *un; 22993 22994 ASSERT(ssc != NULL); 22995 un = ssc->ssc_un; 22996 ASSERT(un != NULL); 22997 22998 log_page_data = kmem_zalloc(0xFF, KM_SLEEP); 22999 23000 status = sd_send_scsi_LOG_SENSE(ssc, log_page_data, 0xFF, 0, 0x01, 0, 23001 SD_PATH_DIRECT); 23002 23003 if (status != 0) { 23004 if (status == EIO) { 23005 /* 23006 * Some disks do not support log sense, we 23007 * should ignore this kind of error(sense key is 23008 * 0x5 - illegal request). 
23009 */ 23010 uint8_t *sensep; 23011 int senlen; 23012 23013 sensep = (uint8_t *)ssc->ssc_uscsi_cmd->uscsi_rqbuf; 23014 senlen = (int)(ssc->ssc_uscsi_cmd->uscsi_rqlen - 23015 ssc->ssc_uscsi_cmd->uscsi_rqresid); 23016 23017 if (senlen > 0 && 23018 scsi_sense_key(sensep) == KEY_ILLEGAL_REQUEST) { 23019 sd_ssc_assessment(ssc, 23020 SD_FMT_IGNORE_COMPROMISE); 23021 } else { 23022 sd_ssc_assessment(ssc, SD_FMT_STATUS_CHECK); 23023 } 23024 } else { 23025 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 23026 } 23027 23028 SD_ERROR(SD_LOG_COMMON, un, 23029 "sd_log_page_supported: failed log page retrieval\n"); 23030 kmem_free(log_page_data, 0xFF); 23031 return (-1); 23032 } 23033 23034 log_size = log_page_data[3]; 23035 23036 /* 23037 * The list of supported log pages start from the fourth byte. Check 23038 * until we run out of log pages or a match is found. 23039 */ 23040 for (i = 4; (i < (log_size + 4)) && !match; i++) { 23041 if (log_page_data[i] == log_page) { 23042 match++; 23043 } 23044 } 23045 kmem_free(log_page_data, 0xFF); 23046 return (match); 23047 } 23048 23049 23050 /* 23051 * Function: sd_mhdioc_failfast 23052 * 23053 * Description: This routine is the driver entry point for handling ioctl 23054 * requests to enable/disable the multihost failfast option. 23055 * (MHIOCENFAILFAST) 23056 * 23057 * Arguments: dev - the device number 23058 * arg - user specified probing interval. 23059 * flag - this argument is a pass through to ddi_copyxxx() 23060 * directly from the mode argument of ioctl(). 
23061 * 23062 * Return Code: 0 23063 * EFAULT 23064 * ENXIO 23065 */ 23066 23067 static int 23068 sd_mhdioc_failfast(dev_t dev, caddr_t arg, int flag) 23069 { 23070 struct sd_lun *un = NULL; 23071 int mh_time; 23072 int rval = 0; 23073 23074 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 23075 return (ENXIO); 23076 } 23077 23078 if (ddi_copyin((void *)arg, &mh_time, sizeof (int), flag)) 23079 return (EFAULT); 23080 23081 if (mh_time) { 23082 mutex_enter(SD_MUTEX(un)); 23083 un->un_resvd_status |= SD_FAILFAST; 23084 mutex_exit(SD_MUTEX(un)); 23085 /* 23086 * If mh_time is INT_MAX, then this ioctl is being used for 23087 * SCSI-3 PGR purposes, and we don't need to spawn watch thread. 23088 */ 23089 if (mh_time != INT_MAX) { 23090 rval = sd_check_mhd(dev, mh_time); 23091 } 23092 } else { 23093 (void) sd_check_mhd(dev, 0); 23094 mutex_enter(SD_MUTEX(un)); 23095 un->un_resvd_status &= ~SD_FAILFAST; 23096 mutex_exit(SD_MUTEX(un)); 23097 } 23098 return (rval); 23099 } 23100 23101 23102 /* 23103 * Function: sd_mhdioc_takeown 23104 * 23105 * Description: This routine is the driver entry point for handling ioctl 23106 * requests to forcefully acquire exclusive access rights to the 23107 * multihost disk (MHIOCTKOWN). 23108 * 23109 * Arguments: dev - the device number 23110 * arg - user provided structure specifying the delay 23111 * parameters in milliseconds 23112 * flag - this argument is a pass through to ddi_copyxxx() 23113 * directly from the mode argument of ioctl(). 
 *
 * Return Code: 0
 *		EFAULT
 *		ENXIO
 */

static int
sd_mhdioc_takeown(dev_t dev, caddr_t arg, int flag)
{
	struct sd_lun		*un = NULL;
	struct mhioctkown	*tkown = NULL;
	int			rval = 0;

	if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) {
		return (ENXIO);
	}

	/* arg may legitimately be NULL; defaults are used below in that case */
	if (arg != NULL) {
		tkown = (struct mhioctkown *)
		    kmem_zalloc(sizeof (struct mhioctkown), KM_SLEEP);
		rval = ddi_copyin(arg, tkown, sizeof (struct mhioctkown), flag);
		if (rval != 0) {
			rval = EFAULT;
			goto error;
		}
	}

	rval = sd_take_ownership(dev, tkown);
	mutex_enter(SD_MUTEX(un));
	if (rval == 0) {
		un->un_resvd_status |= SD_RESERVE;
		/* user delay is in milliseconds; store as microseconds */
		if (tkown != NULL && tkown->reinstate_resv_delay != 0) {
			sd_reinstate_resv_delay =
			    tkown->reinstate_resv_delay * 1000;
		} else {
			sd_reinstate_resv_delay = SD_REINSTATE_RESV_DELAY;
		}
		/*
		 * Give the scsi_watch routine interval set by
		 * the MHIOCENFAILFAST ioctl precedence here.
		 */
		if ((un->un_resvd_status & SD_FAILFAST) == 0) {
			mutex_exit(SD_MUTEX(un));
			(void) sd_check_mhd(dev, sd_reinstate_resv_delay/1000);
			SD_TRACE(SD_LOG_IOCTL_MHD, un,
			    "sd_mhdioc_takeown : %d\n",
			    sd_reinstate_resv_delay);
		} else {
			mutex_exit(SD_MUTEX(un));
		}
		(void) scsi_reset_notify(SD_ADDRESS(un), SCSI_RESET_NOTIFY,
		    sd_mhd_reset_notify_cb, (caddr_t)un);
	} else {
		un->un_resvd_status &= ~SD_RESERVE;
		mutex_exit(SD_MUTEX(un));
	}

error:
	if (tkown != NULL) {
		kmem_free(tkown, sizeof (struct mhioctkown));
	}
	return (rval);
}


/*
 * Function:	sd_mhdioc_release
 *
 * Description: This routine is the driver entry point for handling ioctl
 *		requests to release exclusive access rights to the multihost
 *		disk (MHIOCRELEASE).
 *
 * Arguments:	dev	- the device number
 *
 * Return Code: 0
 *		ENXIO
 */

static int
sd_mhdioc_release(dev_t dev)
{
	struct sd_lun		*un = NULL;
	timeout_id_t		resvd_timeid_save;
	int			resvd_status_save;
	int			rval = 0;

	if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) {
		return (ENXIO);
	}

	/*
	 * Save the current reservation status so it can be restored if the
	 * release fails, and cancel any pending reservation-reinstate timeout.
	 */
	mutex_enter(SD_MUTEX(un));
	resvd_status_save = un->un_resvd_status;
	un->un_resvd_status &=
	    ~(SD_RESERVE | SD_LOST_RESERVE | SD_WANT_RESERVE);
	if (un->un_resvd_timeid) {
		resvd_timeid_save = un->un_resvd_timeid;
		un->un_resvd_timeid = NULL;
		mutex_exit(SD_MUTEX(un));
		(void) untimeout(resvd_timeid_save);
	} else {
		mutex_exit(SD_MUTEX(un));
	}

	/*
	 * destroy any pending timeout thread that may be attempting to
	 * reinstate reservation on this device.
	 */
	sd_rmv_resv_reclaim_req(dev);

	if ((rval = sd_reserve_release(dev, SD_RELEASE)) == 0) {
		mutex_enter(SD_MUTEX(un));
		if ((un->un_mhd_token) &&
		    ((un->un_resvd_status & SD_FAILFAST) == 0)) {
			mutex_exit(SD_MUTEX(un));
			/* terminate the watch now that nothing is reserved */
			(void) sd_check_mhd(dev, 0);
		} else {
			mutex_exit(SD_MUTEX(un));
		}
		(void) scsi_reset_notify(SD_ADDRESS(un), SCSI_RESET_CANCEL,
		    sd_mhd_reset_notify_cb, (caddr_t)un);
	} else {
		/*
		 * sd_mhd_watch_cb will restart the resvd recover timeout thread
		 */
		mutex_enter(SD_MUTEX(un));
		un->un_resvd_status = resvd_status_save;
		mutex_exit(SD_MUTEX(un));
	}
	return (rval);
}


/*
 * Function:	sd_mhdioc_register_devid
 *
 * Description: This routine is the driver entry point for handling ioctl
 *		requests to register the device id (MHIOCREREGISTERDEVID).
 *
 *		Note: The implementation for this ioctl has been updated to
 *		be consistent with the original PSARC case (1999/357)
 *		(4375899, 4241671, 4220005)
 *
 * Arguments:	dev	- the device number
 *
 * Return Code: 0
 *		ENXIO
 */

static int
sd_mhdioc_register_devid(dev_t dev)
{
	struct sd_lun	*un = NULL;
	int		rval = 0;
	sd_ssc_t	*ssc;

	if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) {
		return (ENXIO);
	}

	ASSERT(!mutex_owned(SD_MUTEX(un)));

	mutex_enter(SD_MUTEX(un));

	/* If a devid already exists, de-register it */
	if (un->un_devid != NULL) {
		ddi_devid_unregister(SD_DEVINFO(un));
		/*
		 * After unregister devid, needs to free devid memory
		 */
		ddi_devid_free(un->un_devid);
		un->un_devid = NULL;
	}

	/* Check for reservation conflict */
	mutex_exit(SD_MUTEX(un));
	ssc = sd_ssc_init(un);
	rval = sd_send_scsi_TEST_UNIT_READY(ssc, 0);
	mutex_enter(SD_MUTEX(un));

	switch (rval) {
	case 0:
		sd_register_devid(ssc, SD_DEVINFO(un), SD_TARGET_IS_UNRESERVED);
		break;
	case EACCES:
		/* reservation conflict: leave the devid unregistered */
		break;
	default:
		rval = EIO;
	}

	mutex_exit(SD_MUTEX(un));
	if (rval != 0) {
		if (rval == EIO)
			sd_ssc_assessment(ssc, SD_FMT_STATUS_CHECK);
		else
			sd_ssc_assessment(ssc, SD_FMT_IGNORE);
	}
	sd_ssc_fini(ssc);
	return (rval);
}


/*
 * Function:	sd_mhdioc_inkeys
 *
 * Description: This routine is the driver entry point for handling ioctl
 *		requests to issue the SCSI-3 Persistent In Read Keys command
 *		to the device (MHIOCGRP_INKEYS).
 *
 * Arguments:	dev	- the device number
 *		arg	- user provided in_keys structure
 *		flag	- this argument is a pass through to ddi_copyxxx()
 *			  directly from the mode argument of ioctl().
 *
 * Return Code: code returned by sd_persistent_reservation_in_read_keys()
 *		ENXIO
 *		EFAULT
 */

static int
sd_mhdioc_inkeys(dev_t dev, caddr_t arg, int flag)
{
	struct sd_lun		*un;
	mhioc_inkeys_t		inkeys;
	int			rval = 0;

	if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) {
		return (ENXIO);
	}

#ifdef _MULTI_DATAMODEL
	switch (ddi_model_convert_from(flag & FMODELS)) {
	case DDI_MODEL_ILP32: {
		struct mhioc_inkeys32	inkeys32;

		if (ddi_copyin(arg, &inkeys32,
		    sizeof (struct mhioc_inkeys32), flag) != 0) {
			return (EFAULT);
		}
		/* widen the 32-bit user list pointer for the native struct */
		inkeys.li = (mhioc_key_list_t *)(uintptr_t)inkeys32.li;
		if ((rval = sd_persistent_reservation_in_read_keys(un,
		    &inkeys, flag)) != 0) {
			return (rval);
		}
		inkeys32.generation = inkeys.generation;
		if (ddi_copyout(&inkeys32, arg, sizeof (struct mhioc_inkeys32),
		    flag) != 0) {
			return (EFAULT);
		}
		break;
	}
	case DDI_MODEL_NONE:
		if (ddi_copyin(arg, &inkeys, sizeof (mhioc_inkeys_t),
		    flag) != 0) {
			return (EFAULT);
		}
		if ((rval = sd_persistent_reservation_in_read_keys(un,
		    &inkeys, flag)) != 0) {
			return (rval);
		}
		if (ddi_copyout(&inkeys, arg, sizeof (mhioc_inkeys_t),
		    flag) != 0) {
			return (EFAULT);
		}
		break;
	}

#else /* ! _MULTI_DATAMODEL */

	if (ddi_copyin(arg, &inkeys, sizeof (mhioc_inkeys_t), flag) != 0) {
		return (EFAULT);
	}
	rval = sd_persistent_reservation_in_read_keys(un, &inkeys, flag);
	if (rval != 0) {
		return (rval);
	}
	if (ddi_copyout(&inkeys, arg, sizeof (mhioc_inkeys_t), flag) != 0) {
		return (EFAULT);
	}

#endif /* _MULTI_DATAMODEL */

	return (rval);
}


/*
 * Function:	sd_mhdioc_inresv
 *
 * Description: This routine is the driver entry point for handling ioctl
 *		requests to issue the SCSI-3 Persistent In Read Reservations
 *		command to the device (MHIOCGRP_INRESV).
 *
 * Arguments:	dev	- the device number
 *		arg	- user provided in_resv structure
 *		flag	- this argument is a pass through to ddi_copyxxx()
 *			  directly from the mode argument of ioctl().
 *
 * Return Code: code returned by sd_persistent_reservation_in_read_resv()
 *		ENXIO
 *		EFAULT
 */

static int
sd_mhdioc_inresv(dev_t dev, caddr_t arg, int flag)
{
	struct sd_lun		*un;
	mhioc_inresvs_t		inresvs;
	int			rval = 0;

	if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) {
		return (ENXIO);
	}

#ifdef _MULTI_DATAMODEL

	switch (ddi_model_convert_from(flag & FMODELS)) {
	case DDI_MODEL_ILP32: {
		struct mhioc_inresvs32	inresvs32;

		if (ddi_copyin(arg, &inresvs32,
		    sizeof (struct mhioc_inresvs32), flag) != 0) {
			return (EFAULT);
		}
		/* widen the 32-bit user list pointer for the native struct */
		inresvs.li = (mhioc_resv_desc_list_t *)(uintptr_t)inresvs32.li;
		if ((rval = sd_persistent_reservation_in_read_resv(un,
		    &inresvs, flag)) != 0) {
			return (rval);
		}
		inresvs32.generation = inresvs.generation;
		if (ddi_copyout(&inresvs32, arg,
		    sizeof (struct mhioc_inresvs32), flag) != 0) {
			return (EFAULT);
		}
		break;
	}
	case DDI_MODEL_NONE:
		if (ddi_copyin(arg, &inresvs,
		    sizeof (mhioc_inresvs_t), flag) != 0) {
			return (EFAULT);
		}
		if ((rval = sd_persistent_reservation_in_read_resv(un,
		    &inresvs, flag)) != 0) {
			return (rval);
		}
		if (ddi_copyout(&inresvs, arg,
		    sizeof (mhioc_inresvs_t), flag) != 0) {
			return (EFAULT);
		}
		break;
	}

#else /* ! _MULTI_DATAMODEL */

	if (ddi_copyin(arg, &inresvs, sizeof (mhioc_inresvs_t), flag) != 0) {
		return (EFAULT);
	}
	rval = sd_persistent_reservation_in_read_resv(un, &inresvs, flag);
	if (rval != 0) {
		return (rval);
	}
	if (ddi_copyout(&inresvs, arg, sizeof (mhioc_inresvs_t), flag)) {
		return (EFAULT);
	}

#endif /* ! _MULTI_DATAMODEL */

	return (rval);
}


/*
 * The following routines support the clustering functionality described below
 * and implement lost reservation reclaim functionality.
 *
 * Clustering
 * ----------
 * The clustering code uses two different, independent forms of SCSI
 * reservation. Traditional SCSI-2 Reserve/Release and the newer SCSI-3
 * Persistent Group Reservations. For any particular disk, it will use either
 * SCSI-2 or SCSI-3 PGR but never both at the same time for the same disk.
 *
 * SCSI-2
 * The cluster software takes ownership of a multi-hosted disk by issuing the
 * MHIOCTKOWN ioctl to the disk driver. It releases ownership by issuing the
 * MHIOCRELEASE ioctl. Closely related is the MHIOCENFAILFAST ioctl -- a
 * cluster, just after taking ownership of the disk with the MHIOCTKOWN ioctl
 * then issues the MHIOCENFAILFAST ioctl. This ioctl "enables failfast" in the
 * driver. The meaning of failfast is that if the driver (on this host) ever
 * encounters the scsi error return code RESERVATION_CONFLICT from the device,
 * it should immediately panic the host. The motivation for this ioctl is that
 * if this host does encounter reservation conflict, the underlying cause is
 * that some other host of the cluster has decided that this host is no longer
 * in the cluster and has seized control of the disks for itself. Since this
 * host is no longer in the cluster, it ought to panic itself. The
 * MHIOCENFAILFAST ioctl does two things:
 *	(a) it sets a flag that will cause any returned RESERVATION_CONFLICT
 *	    error to panic the host
 *	(b) it sets up a periodic timer to test whether this host still has
 *	    "access" (in that no other host has reserved the device): if the
 *	    periodic timer gets RESERVATION_CONFLICT, the host is panicked. The
 *	    purpose of that periodic timer is to handle scenarios where the
 *	    host is otherwise temporarily quiescent, temporarily doing no real
 *	    i/o.
 * The MHIOCTKOWN ioctl will "break" a reservation that is held by another host,
 * by issuing a SCSI Bus Device Reset. It will then issue a SCSI Reserve for
 * the device itself.
 *
 * SCSI-3 PGR
 * A direct semantic implementation of the SCSI-3 Persistent Reservation
 * facility is supported through the shared multihost disk ioctls
 * (MHIOCGRP_INKEYS, MHIOCGRP_INRESV, MHIOCGRP_REGISTER, MHIOCGRP_RESERVE,
 * MHIOCGRP_PREEMPTANDABORT)
 *
 * Reservation Reclaim:
 * --------------------
 * To support the lost reservation reclaim operations this driver creates a
 * single thread to handle reinstating reservations on all devices that have
 * lost reservations. sd_resv_reclaim_requests are logged for all devices
 * that have LOST RESERVATIONS when the scsi watch facility calls back
 * sd_mhd_watch_cb and the reservation reclaim thread loops through the
 * requests to regain the lost reservations.
 */

/*
 * Function:	sd_check_mhd()
 *
 * Description: This function sets up and submits a scsi watch request or
 *		terminates an existing watch request. This routine is used in
 *		support of reservation reclaim.
 *
 * Arguments:	dev	- the device 'dev_t' is used for context to
 *			  discriminate among multiple watches that share the
 *			  callback function
 *		interval - the watch interval, in milliseconds, for issuing
 *			  TEST UNIT READY commands (converted to microseconds
 *			  below before being handed to the scsi_watch
 *			  facility). If set to 0 the watch should be
 *			  terminated. If the interval is set to 0 and if the
 *			  device is required to hold reservation while
 *			  disabling failfast, the watch is restarted with an
 *			  interval of reinstate_resv_delay.
 *
 * Return Code: 0	- Successful submit/terminate of scsi watch request
 *		ENXIO	- Indicates an invalid device was specified
 *		EAGAIN	- Unable to submit the scsi watch request
 */

static int
sd_check_mhd(dev_t dev, int interval)
{
	struct sd_lun	*un;
	opaque_t	token;

	if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) {
		return (ENXIO);
	}

	/* is this a watch termination request? */
	if (interval == 0) {
		mutex_enter(SD_MUTEX(un));
		/* if there is an existing watch task then terminate it */
		if (un->un_mhd_token) {
			token = un->un_mhd_token;
			un->un_mhd_token = NULL;
			mutex_exit(SD_MUTEX(un));
			(void) scsi_watch_request_terminate(token,
			    SCSI_WATCH_TERMINATE_ALL_WAIT);
			mutex_enter(SD_MUTEX(un));
		} else {
			mutex_exit(SD_MUTEX(un));
			/*
			 * Note: If we return here we don't check for the
			 * failfast case. This is the original legacy
			 * implementation but perhaps we should be checking
			 * the failfast case.
			 */
			return (0);
		}
		/*
		 * If the device is required to hold reservation while
		 * disabling failfast, we need to restart the scsi_watch
		 * routine with an interval of reinstate_resv_delay.
		 */
		if (un->un_resvd_status & SD_RESERVE) {
			/* sd_reinstate_resv_delay is in usecs; watch wants ms */
			interval = sd_reinstate_resv_delay/1000;
		} else {
			/* no failfast so bail */
			mutex_exit(SD_MUTEX(un));
			return (0);
		}
		mutex_exit(SD_MUTEX(un));
	}

	/*
	 * adjust minimum time interval to 1 second,
	 * and convert from msecs to usecs
	 */
	if (interval > 0 && interval < 1000) {
		interval = 1000;
	}
	interval *= 1000;

	/*
	 * submit the request to the scsi_watch service
	 */
	token = scsi_watch_request_submit(SD_SCSI_DEVP(un), interval,
	    SENSE_LENGTH, sd_mhd_watch_cb, (caddr_t)dev);
	if (token == NULL) {
		return (EAGAIN);
	}

	/*
	 * save token for termination later on
	 */
	mutex_enter(SD_MUTEX(un));
	un->un_mhd_token = token;
	mutex_exit(SD_MUTEX(un));
	return (0);
}


/*
 * Function:	sd_mhd_watch_cb()
 *
 * Description: This function is the call back function used by the scsi watch
 *		facility. The scsi watch facility sends the "Test Unit Ready"
 *		and processes the status. If applicable (i.e. a "Unit Attention"
 *		status and automatic "Request Sense" not used) the scsi watch
 *		facility will send a "Request Sense" and retrieve the sense data
 *		to be passed to this callback function. In either case the
 *		automatic "Request Sense" or the facility submitting one, this
 *		callback is passed the status and sense data.
23643 * 23644 * Arguments: arg - the device 'dev_t' is used for context to discriminate 23645 * among multiple watches that share this callback function 23646 * resultp - scsi watch facility result packet containing scsi 23647 * packet, status byte and sense data 23648 * 23649 * Return Code: 0 - continue the watch task 23650 * non-zero - terminate the watch task 23651 */ 23652 23653 static int 23654 sd_mhd_watch_cb(caddr_t arg, struct scsi_watch_result *resultp) 23655 { 23656 struct sd_lun *un; 23657 struct scsi_status *statusp; 23658 uint8_t *sensep; 23659 struct scsi_pkt *pkt; 23660 uchar_t actual_sense_length; 23661 dev_t dev = (dev_t)arg; 23662 23663 ASSERT(resultp != NULL); 23664 statusp = resultp->statusp; 23665 sensep = (uint8_t *)resultp->sensep; 23666 pkt = resultp->pkt; 23667 actual_sense_length = resultp->actual_sense_length; 23668 23669 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 23670 return (ENXIO); 23671 } 23672 23673 SD_TRACE(SD_LOG_IOCTL_MHD, un, 23674 "sd_mhd_watch_cb: reason '%s', status '%s'\n", 23675 scsi_rname(pkt->pkt_reason), sd_sname(*((unsigned char *)statusp))); 23676 23677 /* Begin processing of the status and/or sense data */ 23678 if (pkt->pkt_reason != CMD_CMPLT) { 23679 /* Handle the incomplete packet */ 23680 sd_mhd_watch_incomplete(un, pkt); 23681 return (0); 23682 } else if (*((unsigned char *)statusp) != STATUS_GOOD) { 23683 if (*((unsigned char *)statusp) 23684 == STATUS_RESERVATION_CONFLICT) { 23685 /* 23686 * Handle a reservation conflict by panicking if 23687 * configured for failfast or by logging the conflict 23688 * and updating the reservation status 23689 */ 23690 mutex_enter(SD_MUTEX(un)); 23691 if ((un->un_resvd_status & SD_FAILFAST) && 23692 (sd_failfast_enable)) { 23693 sd_panic_for_res_conflict(un); 23694 /*NOTREACHED*/ 23695 } 23696 SD_INFO(SD_LOG_IOCTL_MHD, un, 23697 "sd_mhd_watch_cb: Reservation Conflict\n"); 23698 un->un_resvd_status |= SD_RESERVATION_CONFLICT; 23699 mutex_exit(SD_MUTEX(un)); 
23700 } 23701 } 23702 23703 if (sensep != NULL) { 23704 if (actual_sense_length >= (SENSE_LENGTH - 2)) { 23705 mutex_enter(SD_MUTEX(un)); 23706 if ((scsi_sense_asc(sensep) == 23707 SD_SCSI_RESET_SENSE_CODE) && 23708 (un->un_resvd_status & SD_RESERVE)) { 23709 /* 23710 * The additional sense code indicates a power 23711 * on or bus device reset has occurred; update 23712 * the reservation status. 23713 */ 23714 un->un_resvd_status |= 23715 (SD_LOST_RESERVE | SD_WANT_RESERVE); 23716 SD_INFO(SD_LOG_IOCTL_MHD, un, 23717 "sd_mhd_watch_cb: Lost Reservation\n"); 23718 } 23719 } else { 23720 return (0); 23721 } 23722 } else { 23723 mutex_enter(SD_MUTEX(un)); 23724 } 23725 23726 if ((un->un_resvd_status & SD_RESERVE) && 23727 (un->un_resvd_status & SD_LOST_RESERVE)) { 23728 if (un->un_resvd_status & SD_WANT_RESERVE) { 23729 /* 23730 * A reset occurred in between the last probe and this 23731 * one so if a timeout is pending cancel it. 23732 */ 23733 if (un->un_resvd_timeid) { 23734 timeout_id_t temp_id = un->un_resvd_timeid; 23735 un->un_resvd_timeid = NULL; 23736 mutex_exit(SD_MUTEX(un)); 23737 (void) untimeout(temp_id); 23738 mutex_enter(SD_MUTEX(un)); 23739 } 23740 un->un_resvd_status &= ~SD_WANT_RESERVE; 23741 } 23742 if (un->un_resvd_timeid == 0) { 23743 /* Schedule a timeout to handle the lost reservation */ 23744 un->un_resvd_timeid = timeout(sd_mhd_resvd_recover, 23745 (void *)dev, 23746 drv_usectohz(sd_reinstate_resv_delay)); 23747 } 23748 } 23749 mutex_exit(SD_MUTEX(un)); 23750 return (0); 23751 } 23752 23753 23754 /* 23755 * Function: sd_mhd_watch_incomplete() 23756 * 23757 * Description: This function is used to find out why a scsi pkt sent by the 23758 * scsi watch facility was not completed. Under some scenarios this 23759 * routine will return. Otherwise it will send a bus reset to see 23760 * if the drive is still online. 
23761 * 23762 * Arguments: un - driver soft state (unit) structure 23763 * pkt - incomplete scsi pkt 23764 */ 23765 23766 static void 23767 sd_mhd_watch_incomplete(struct sd_lun *un, struct scsi_pkt *pkt) 23768 { 23769 int be_chatty; 23770 int perr; 23771 23772 ASSERT(pkt != NULL); 23773 ASSERT(un != NULL); 23774 be_chatty = (!(pkt->pkt_flags & FLAG_SILENT)); 23775 perr = (pkt->pkt_statistics & STAT_PERR); 23776 23777 mutex_enter(SD_MUTEX(un)); 23778 if (un->un_state == SD_STATE_DUMPING) { 23779 mutex_exit(SD_MUTEX(un)); 23780 return; 23781 } 23782 23783 switch (pkt->pkt_reason) { 23784 case CMD_UNX_BUS_FREE: 23785 /* 23786 * If we had a parity error that caused the target to drop BSY*, 23787 * don't be chatty about it. 23788 */ 23789 if (perr && be_chatty) { 23790 be_chatty = 0; 23791 } 23792 break; 23793 case CMD_TAG_REJECT: 23794 /* 23795 * The SCSI-2 spec states that a tag reject will be sent by the 23796 * target if tagged queuing is not supported. A tag reject may 23797 * also be sent during certain initialization periods or to 23798 * control internal resources. For the latter case the target 23799 * may also return Queue Full. 23800 * 23801 * If this driver receives a tag reject from a target that is 23802 * going through an init period or controlling internal 23803 * resources tagged queuing will be disabled. 
This is a less 23804 * than optimal behavior but the driver is unable to determine 23805 * the target state and assumes tagged queueing is not supported 23806 */ 23807 pkt->pkt_flags = 0; 23808 un->un_tagflags = 0; 23809 23810 if (un->un_f_opt_queueing == TRUE) { 23811 un->un_throttle = min(un->un_throttle, 3); 23812 } else { 23813 un->un_throttle = 1; 23814 } 23815 mutex_exit(SD_MUTEX(un)); 23816 (void) scsi_ifsetcap(SD_ADDRESS(un), "tagged-qing", 0, 1); 23817 mutex_enter(SD_MUTEX(un)); 23818 break; 23819 case CMD_INCOMPLETE: 23820 /* 23821 * The transport stopped with an abnormal state, fallthrough and 23822 * reset the target and/or bus unless selection did not complete 23823 * (indicated by STATE_GOT_BUS) in which case we don't want to 23824 * go through a target/bus reset 23825 */ 23826 if (pkt->pkt_state == STATE_GOT_BUS) { 23827 break; 23828 } 23829 /*FALLTHROUGH*/ 23830 23831 case CMD_TIMEOUT: 23832 default: 23833 /* 23834 * The lun may still be running the command, so a lun reset 23835 * should be attempted. If the lun reset fails or cannot be 23836 * issued, than try a target reset. Lastly try a bus reset. 23837 */ 23838 if ((pkt->pkt_statistics & 23839 (STAT_BUS_RESET|STAT_DEV_RESET|STAT_ABORTED)) == 0) { 23840 int reset_retval = 0; 23841 mutex_exit(SD_MUTEX(un)); 23842 if (un->un_f_allow_bus_device_reset == TRUE) { 23843 if (un->un_f_lun_reset_enabled == TRUE) { 23844 reset_retval = 23845 scsi_reset(SD_ADDRESS(un), 23846 RESET_LUN); 23847 } 23848 if (reset_retval == 0) { 23849 reset_retval = 23850 scsi_reset(SD_ADDRESS(un), 23851 RESET_TARGET); 23852 } 23853 } 23854 if (reset_retval == 0) { 23855 (void) scsi_reset(SD_ADDRESS(un), RESET_ALL); 23856 } 23857 mutex_enter(SD_MUTEX(un)); 23858 } 23859 break; 23860 } 23861 23862 /* A device/bus reset has occurred; update the reservation status. 
*/ 23863 if ((pkt->pkt_reason == CMD_RESET) || (pkt->pkt_statistics & 23864 (STAT_BUS_RESET | STAT_DEV_RESET))) { 23865 if ((un->un_resvd_status & SD_RESERVE) == SD_RESERVE) { 23866 un->un_resvd_status |= 23867 (SD_LOST_RESERVE | SD_WANT_RESERVE); 23868 SD_INFO(SD_LOG_IOCTL_MHD, un, 23869 "sd_mhd_watch_incomplete: Lost Reservation\n"); 23870 } 23871 } 23872 23873 /* 23874 * The disk has been turned off; Update the device state. 23875 * 23876 * Note: Should we be offlining the disk here? 23877 */ 23878 if (pkt->pkt_state == STATE_GOT_BUS) { 23879 SD_INFO(SD_LOG_IOCTL_MHD, un, "sd_mhd_watch_incomplete: " 23880 "Disk not responding to selection\n"); 23881 if (un->un_state != SD_STATE_OFFLINE) { 23882 New_state(un, SD_STATE_OFFLINE); 23883 } 23884 } else if (be_chatty) { 23885 /* 23886 * suppress messages if they are all the same pkt reason; 23887 * with TQ, many (up to 256) are returned with the same 23888 * pkt_reason 23889 */ 23890 if (pkt->pkt_reason != un->un_last_pkt_reason) { 23891 SD_ERROR(SD_LOG_IOCTL_MHD, un, 23892 "sd_mhd_watch_incomplete: " 23893 "SCSI transport failed: reason '%s'\n", 23894 scsi_rname(pkt->pkt_reason)); 23895 } 23896 } 23897 un->un_last_pkt_reason = pkt->pkt_reason; 23898 mutex_exit(SD_MUTEX(un)); 23899 } 23900 23901 23902 /* 23903 * Function: sd_sname() 23904 * 23905 * Description: This is a simple little routine to return a string containing 23906 * a printable description of command status byte for use in 23907 * logging. 23908 * 23909 * Arguments: status - pointer to a status byte 23910 * 23911 * Return Code: char * - string containing status description. 
23912 */ 23913 23914 static char * 23915 sd_sname(uchar_t status) 23916 { 23917 switch (status & STATUS_MASK) { 23918 case STATUS_GOOD: 23919 return ("good status"); 23920 case STATUS_CHECK: 23921 return ("check condition"); 23922 case STATUS_MET: 23923 return ("condition met"); 23924 case STATUS_BUSY: 23925 return ("busy"); 23926 case STATUS_INTERMEDIATE: 23927 return ("intermediate"); 23928 case STATUS_INTERMEDIATE_MET: 23929 return ("intermediate - condition met"); 23930 case STATUS_RESERVATION_CONFLICT: 23931 return ("reservation_conflict"); 23932 case STATUS_TERMINATED: 23933 return ("command terminated"); 23934 case STATUS_QFULL: 23935 return ("queue full"); 23936 default: 23937 return ("<unknown status>"); 23938 } 23939 } 23940 23941 23942 /* 23943 * Function: sd_mhd_resvd_recover() 23944 * 23945 * Description: This function adds a reservation entry to the 23946 * sd_resv_reclaim_request list and signals the reservation 23947 * reclaim thread that there is work pending. If the reservation 23948 * reclaim thread has not been previously created this function 23949 * will kick it off. 23950 * 23951 * Arguments: arg - the device 'dev_t' is used for context to discriminate 23952 * among multiple watches that share this callback function 23953 * 23954 * Context: This routine is called by timeout() and is run in interrupt 23955 * context. It must not sleep or call other functions which may 23956 * sleep. 
23957 */ 23958 23959 static void 23960 sd_mhd_resvd_recover(void *arg) 23961 { 23962 dev_t dev = (dev_t)arg; 23963 struct sd_lun *un; 23964 struct sd_thr_request *sd_treq = NULL; 23965 struct sd_thr_request *sd_cur = NULL; 23966 struct sd_thr_request *sd_prev = NULL; 23967 int already_there = 0; 23968 23969 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 23970 return; 23971 } 23972 23973 mutex_enter(SD_MUTEX(un)); 23974 un->un_resvd_timeid = NULL; 23975 if (un->un_resvd_status & SD_WANT_RESERVE) { 23976 /* 23977 * There was a reset so don't issue the reserve, allow the 23978 * sd_mhd_watch_cb callback function to notice this and 23979 * reschedule the timeout for reservation. 23980 */ 23981 mutex_exit(SD_MUTEX(un)); 23982 return; 23983 } 23984 mutex_exit(SD_MUTEX(un)); 23985 23986 /* 23987 * Add this device to the sd_resv_reclaim_request list and the 23988 * sd_resv_reclaim_thread should take care of the rest. 23989 * 23990 * Note: We can't sleep in this context so if the memory allocation 23991 * fails allow the sd_mhd_watch_cb callback function to notice this and 23992 * reschedule the timeout for reservation. 
(4378460) 23993 */ 23994 sd_treq = (struct sd_thr_request *) 23995 kmem_zalloc(sizeof (struct sd_thr_request), KM_NOSLEEP); 23996 if (sd_treq == NULL) { 23997 return; 23998 } 23999 24000 sd_treq->sd_thr_req_next = NULL; 24001 sd_treq->dev = dev; 24002 mutex_enter(&sd_tr.srq_resv_reclaim_mutex); 24003 if (sd_tr.srq_thr_req_head == NULL) { 24004 sd_tr.srq_thr_req_head = sd_treq; 24005 } else { 24006 sd_cur = sd_prev = sd_tr.srq_thr_req_head; 24007 for (; sd_cur != NULL; sd_cur = sd_cur->sd_thr_req_next) { 24008 if (sd_cur->dev == dev) { 24009 /* 24010 * already in Queue so don't log 24011 * another request for the device 24012 */ 24013 already_there = 1; 24014 break; 24015 } 24016 sd_prev = sd_cur; 24017 } 24018 if (!already_there) { 24019 SD_INFO(SD_LOG_IOCTL_MHD, un, "sd_mhd_resvd_recover: " 24020 "logging request for %lx\n", dev); 24021 sd_prev->sd_thr_req_next = sd_treq; 24022 } else { 24023 kmem_free(sd_treq, sizeof (struct sd_thr_request)); 24024 } 24025 } 24026 24027 /* 24028 * Create a kernel thread to do the reservation reclaim and free up this 24029 * thread. 
We cannot block this thread while we go away to do the 24030 * reservation reclaim 24031 */ 24032 if (sd_tr.srq_resv_reclaim_thread == NULL) 24033 sd_tr.srq_resv_reclaim_thread = thread_create(NULL, 0, 24034 sd_resv_reclaim_thread, NULL, 24035 0, &p0, TS_RUN, v.v_maxsyspri - 2); 24036 24037 /* Tell the reservation reclaim thread that it has work to do */ 24038 cv_signal(&sd_tr.srq_resv_reclaim_cv); 24039 mutex_exit(&sd_tr.srq_resv_reclaim_mutex); 24040 } 24041 24042 /* 24043 * Function: sd_resv_reclaim_thread() 24044 * 24045 * Description: This function implements the reservation reclaim operations 24046 * 24047 * Arguments: arg - the device 'dev_t' is used for context to discriminate 24048 * among multiple watches that share this callback function 24049 */ 24050 24051 static void 24052 sd_resv_reclaim_thread() 24053 { 24054 struct sd_lun *un; 24055 struct sd_thr_request *sd_mhreq; 24056 24057 /* Wait for work */ 24058 mutex_enter(&sd_tr.srq_resv_reclaim_mutex); 24059 if (sd_tr.srq_thr_req_head == NULL) { 24060 cv_wait(&sd_tr.srq_resv_reclaim_cv, 24061 &sd_tr.srq_resv_reclaim_mutex); 24062 } 24063 24064 /* Loop while we have work */ 24065 while ((sd_tr.srq_thr_cur_req = sd_tr.srq_thr_req_head) != NULL) { 24066 un = ddi_get_soft_state(sd_state, 24067 SDUNIT(sd_tr.srq_thr_cur_req->dev)); 24068 if (un == NULL) { 24069 /* 24070 * softstate structure is NULL so just 24071 * dequeue the request and continue 24072 */ 24073 sd_tr.srq_thr_req_head = 24074 sd_tr.srq_thr_cur_req->sd_thr_req_next; 24075 kmem_free(sd_tr.srq_thr_cur_req, 24076 sizeof (struct sd_thr_request)); 24077 continue; 24078 } 24079 24080 /* dequeue the request */ 24081 sd_mhreq = sd_tr.srq_thr_cur_req; 24082 sd_tr.srq_thr_req_head = 24083 sd_tr.srq_thr_cur_req->sd_thr_req_next; 24084 mutex_exit(&sd_tr.srq_resv_reclaim_mutex); 24085 24086 /* 24087 * Reclaim reservation only if SD_RESERVE is still set. There 24088 * may have been a call to MHIOCRELEASE before we got here. 
24089 */ 24090 mutex_enter(SD_MUTEX(un)); 24091 if ((un->un_resvd_status & SD_RESERVE) == SD_RESERVE) { 24092 /* 24093 * Note: The SD_LOST_RESERVE flag is cleared before 24094 * reclaiming the reservation. If this is done after the 24095 * call to sd_reserve_release a reservation loss in the 24096 * window between pkt completion of reserve cmd and 24097 * mutex_enter below may not be recognized 24098 */ 24099 un->un_resvd_status &= ~SD_LOST_RESERVE; 24100 mutex_exit(SD_MUTEX(un)); 24101 24102 if (sd_reserve_release(sd_mhreq->dev, 24103 SD_RESERVE) == 0) { 24104 mutex_enter(SD_MUTEX(un)); 24105 un->un_resvd_status |= SD_RESERVE; 24106 mutex_exit(SD_MUTEX(un)); 24107 SD_INFO(SD_LOG_IOCTL_MHD, un, 24108 "sd_resv_reclaim_thread: " 24109 "Reservation Recovered\n"); 24110 } else { 24111 mutex_enter(SD_MUTEX(un)); 24112 un->un_resvd_status |= SD_LOST_RESERVE; 24113 mutex_exit(SD_MUTEX(un)); 24114 SD_INFO(SD_LOG_IOCTL_MHD, un, 24115 "sd_resv_reclaim_thread: Failed " 24116 "Reservation Recovery\n"); 24117 } 24118 } else { 24119 mutex_exit(SD_MUTEX(un)); 24120 } 24121 mutex_enter(&sd_tr.srq_resv_reclaim_mutex); 24122 ASSERT(sd_mhreq == sd_tr.srq_thr_cur_req); 24123 kmem_free(sd_mhreq, sizeof (struct sd_thr_request)); 24124 sd_mhreq = sd_tr.srq_thr_cur_req = NULL; 24125 /* 24126 * wakeup the destroy thread if anyone is waiting on 24127 * us to complete. 
24128 */ 24129 cv_signal(&sd_tr.srq_inprocess_cv); 24130 SD_TRACE(SD_LOG_IOCTL_MHD, un, 24131 "sd_resv_reclaim_thread: cv_signalling current request \n"); 24132 } 24133 24134 /* 24135 * cleanup the sd_tr structure now that this thread will not exist 24136 */ 24137 ASSERT(sd_tr.srq_thr_req_head == NULL); 24138 ASSERT(sd_tr.srq_thr_cur_req == NULL); 24139 sd_tr.srq_resv_reclaim_thread = NULL; 24140 mutex_exit(&sd_tr.srq_resv_reclaim_mutex); 24141 thread_exit(); 24142 } 24143 24144 24145 /* 24146 * Function: sd_rmv_resv_reclaim_req() 24147 * 24148 * Description: This function removes any pending reservation reclaim requests 24149 * for the specified device. 24150 * 24151 * Arguments: dev - the device 'dev_t' 24152 */ 24153 24154 static void 24155 sd_rmv_resv_reclaim_req(dev_t dev) 24156 { 24157 struct sd_thr_request *sd_mhreq; 24158 struct sd_thr_request *sd_prev; 24159 24160 /* Remove a reservation reclaim request from the list */ 24161 mutex_enter(&sd_tr.srq_resv_reclaim_mutex); 24162 if (sd_tr.srq_thr_cur_req && sd_tr.srq_thr_cur_req->dev == dev) { 24163 /* 24164 * We are attempting to reinstate reservation for 24165 * this device. We wait for sd_reserve_release() 24166 * to return before we return. 
24167 */ 24168 cv_wait(&sd_tr.srq_inprocess_cv, 24169 &sd_tr.srq_resv_reclaim_mutex); 24170 } else { 24171 sd_prev = sd_mhreq = sd_tr.srq_thr_req_head; 24172 if (sd_mhreq && sd_mhreq->dev == dev) { 24173 sd_tr.srq_thr_req_head = sd_mhreq->sd_thr_req_next; 24174 kmem_free(sd_mhreq, sizeof (struct sd_thr_request)); 24175 mutex_exit(&sd_tr.srq_resv_reclaim_mutex); 24176 return; 24177 } 24178 for (; sd_mhreq != NULL; sd_mhreq = sd_mhreq->sd_thr_req_next) { 24179 if (sd_mhreq && sd_mhreq->dev == dev) { 24180 break; 24181 } 24182 sd_prev = sd_mhreq; 24183 } 24184 if (sd_mhreq != NULL) { 24185 sd_prev->sd_thr_req_next = sd_mhreq->sd_thr_req_next; 24186 kmem_free(sd_mhreq, sizeof (struct sd_thr_request)); 24187 } 24188 } 24189 mutex_exit(&sd_tr.srq_resv_reclaim_mutex); 24190 } 24191 24192 24193 /* 24194 * Function: sd_mhd_reset_notify_cb() 24195 * 24196 * Description: This is a call back function for scsi_reset_notify. This 24197 * function updates the softstate reserved status and logs the 24198 * reset. The driver scsi watch facility callback function 24199 * (sd_mhd_watch_cb) and reservation reclaim thread functionality 24200 * will reclaim the reservation. 24201 * 24202 * Arguments: arg - driver soft state (unit) structure 24203 */ 24204 24205 static void 24206 sd_mhd_reset_notify_cb(caddr_t arg) 24207 { 24208 struct sd_lun *un = (struct sd_lun *)arg; 24209 24210 mutex_enter(SD_MUTEX(un)); 24211 if ((un->un_resvd_status & SD_RESERVE) == SD_RESERVE) { 24212 un->un_resvd_status |= (SD_LOST_RESERVE | SD_WANT_RESERVE); 24213 SD_INFO(SD_LOG_IOCTL_MHD, un, 24214 "sd_mhd_reset_notify_cb: Lost Reservation\n"); 24215 } 24216 mutex_exit(SD_MUTEX(un)); 24217 } 24218 24219 24220 /* 24221 * Function: sd_take_ownership() 24222 * 24223 * Description: This routine implements an algorithm to achieve a stable 24224 * reservation on disks which don't implement priority reserve, 24225 * and makes sure that other host lose re-reservation attempts. 
24226 * This algorithm contains of a loop that keeps issuing the RESERVE 24227 * for some period of time (min_ownership_delay, default 6 seconds) 24228 * During that loop, it looks to see if there has been a bus device 24229 * reset or bus reset (both of which cause an existing reservation 24230 * to be lost). If the reservation is lost issue RESERVE until a 24231 * period of min_ownership_delay with no resets has gone by, or 24232 * until max_ownership_delay has expired. This loop ensures that 24233 * the host really did manage to reserve the device, in spite of 24234 * resets. The looping for min_ownership_delay (default six 24235 * seconds) is important to early generation clustering products, 24236 * Solstice HA 1.x and Sun Cluster 2.x. Those products use an 24237 * MHIOCENFAILFAST periodic timer of two seconds. By having 24238 * MHIOCTKOWN issue Reserves in a loop for six seconds, and having 24239 * MHIOCENFAILFAST poll every two seconds, the idea is that by the 24240 * time the MHIOCTKOWN ioctl returns, the other host (if any) will 24241 * have already noticed, via the MHIOCENFAILFAST polling, that it 24242 * no longer "owns" the disk and will have panicked itself. Thus, 24243 * the host issuing the MHIOCTKOWN is assured (with timing 24244 * dependencies) that by the time it actually starts to use the 24245 * disk for real work, the old owner is no longer accessing it. 24246 * 24247 * min_ownership_delay is the minimum amount of time for which the 24248 * disk must be reserved continuously devoid of resets before the 24249 * MHIOCTKOWN ioctl will return success. 24250 * 24251 * max_ownership_delay indicates the amount of time by which the 24252 * take ownership should succeed or timeout with an error. 24253 * 24254 * Arguments: dev - the device 'dev_t' 24255 * *p - struct containing timing info. 
24256 * 24257 * Return Code: 0 for success or error code 24258 */ 24259 24260 static int 24261 sd_take_ownership(dev_t dev, struct mhioctkown *p) 24262 { 24263 struct sd_lun *un; 24264 int rval; 24265 int err; 24266 int reservation_count = 0; 24267 int min_ownership_delay = 6000000; /* in usec */ 24268 int max_ownership_delay = 30000000; /* in usec */ 24269 clock_t start_time; /* starting time of this algorithm */ 24270 clock_t end_time; /* time limit for giving up */ 24271 clock_t ownership_time; /* time limit for stable ownership */ 24272 clock_t current_time; 24273 clock_t previous_current_time; 24274 24275 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 24276 return (ENXIO); 24277 } 24278 24279 /* 24280 * Attempt a device reservation. A priority reservation is requested. 24281 */ 24282 if ((rval = sd_reserve_release(dev, SD_PRIORITY_RESERVE)) 24283 != SD_SUCCESS) { 24284 SD_ERROR(SD_LOG_IOCTL_MHD, un, 24285 "sd_take_ownership: return(1)=%d\n", rval); 24286 return (rval); 24287 } 24288 24289 /* Update the softstate reserved status to indicate the reservation */ 24290 mutex_enter(SD_MUTEX(un)); 24291 un->un_resvd_status |= SD_RESERVE; 24292 un->un_resvd_status &= 24293 ~(SD_LOST_RESERVE | SD_WANT_RESERVE | SD_RESERVATION_CONFLICT); 24294 mutex_exit(SD_MUTEX(un)); 24295 24296 if (p != NULL) { 24297 if (p->min_ownership_delay != 0) { 24298 min_ownership_delay = p->min_ownership_delay * 1000; 24299 } 24300 if (p->max_ownership_delay != 0) { 24301 max_ownership_delay = p->max_ownership_delay * 1000; 24302 } 24303 } 24304 SD_INFO(SD_LOG_IOCTL_MHD, un, 24305 "sd_take_ownership: min, max delays: %d, %d\n", 24306 min_ownership_delay, max_ownership_delay); 24307 24308 start_time = ddi_get_lbolt(); 24309 current_time = start_time; 24310 ownership_time = current_time + drv_usectohz(min_ownership_delay); 24311 end_time = start_time + drv_usectohz(max_ownership_delay); 24312 24313 while (current_time - end_time < 0) { 24314 delay(drv_usectohz(500000)); 24315 
24316 if ((err = sd_reserve_release(dev, SD_RESERVE)) != 0) { 24317 if ((sd_reserve_release(dev, SD_RESERVE)) != 0) { 24318 mutex_enter(SD_MUTEX(un)); 24319 rval = (un->un_resvd_status & 24320 SD_RESERVATION_CONFLICT) ? EACCES : EIO; 24321 mutex_exit(SD_MUTEX(un)); 24322 break; 24323 } 24324 } 24325 previous_current_time = current_time; 24326 current_time = ddi_get_lbolt(); 24327 mutex_enter(SD_MUTEX(un)); 24328 if (err || (un->un_resvd_status & SD_LOST_RESERVE)) { 24329 ownership_time = ddi_get_lbolt() + 24330 drv_usectohz(min_ownership_delay); 24331 reservation_count = 0; 24332 } else { 24333 reservation_count++; 24334 } 24335 un->un_resvd_status |= SD_RESERVE; 24336 un->un_resvd_status &= ~(SD_LOST_RESERVE | SD_WANT_RESERVE); 24337 mutex_exit(SD_MUTEX(un)); 24338 24339 SD_INFO(SD_LOG_IOCTL_MHD, un, 24340 "sd_take_ownership: ticks for loop iteration=%ld, " 24341 "reservation=%s\n", (current_time - previous_current_time), 24342 reservation_count ? "ok" : "reclaimed"); 24343 24344 if (current_time - ownership_time >= 0 && 24345 reservation_count >= 4) { 24346 rval = 0; /* Achieved a stable ownership */ 24347 break; 24348 } 24349 if (current_time - end_time >= 0) { 24350 rval = EACCES; /* No ownership in max possible time */ 24351 break; 24352 } 24353 } 24354 SD_TRACE(SD_LOG_IOCTL_MHD, un, 24355 "sd_take_ownership: return(2)=%d\n", rval); 24356 return (rval); 24357 } 24358 24359 24360 /* 24361 * Function: sd_reserve_release() 24362 * 24363 * Description: This function builds and sends scsi RESERVE, RELEASE, and 24364 * PRIORITY RESERVE commands based on a user specified command type 24365 * 24366 * Arguments: dev - the device 'dev_t' 24367 * cmd - user specified command type; one of SD_PRIORITY_RESERVE, 24368 * SD_RESERVE, SD_RELEASE 24369 * 24370 * Return Code: 0 or Error Code 24371 */ 24372 24373 static int 24374 sd_reserve_release(dev_t dev, int cmd) 24375 { 24376 struct uscsi_cmd *com = NULL; 24377 struct sd_lun *un = NULL; 24378 char cdb[CDB_GROUP0]; 24379 int 
rval; 24380 24381 ASSERT((cmd == SD_RELEASE) || (cmd == SD_RESERVE) || 24382 (cmd == SD_PRIORITY_RESERVE)); 24383 24384 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 24385 return (ENXIO); 24386 } 24387 24388 /* instantiate and initialize the command and cdb */ 24389 com = kmem_zalloc(sizeof (*com), KM_SLEEP); 24390 bzero(cdb, CDB_GROUP0); 24391 com->uscsi_flags = USCSI_SILENT; 24392 com->uscsi_timeout = un->un_reserve_release_time; 24393 com->uscsi_cdblen = CDB_GROUP0; 24394 com->uscsi_cdb = cdb; 24395 if (cmd == SD_RELEASE) { 24396 cdb[0] = SCMD_RELEASE; 24397 } else { 24398 cdb[0] = SCMD_RESERVE; 24399 } 24400 24401 /* Send the command. */ 24402 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_SYSSPACE, 24403 SD_PATH_STANDARD); 24404 24405 /* 24406 * "break" a reservation that is held by another host, by issuing a 24407 * reset if priority reserve is desired, and we could not get the 24408 * device. 24409 */ 24410 if ((cmd == SD_PRIORITY_RESERVE) && 24411 (rval != 0) && (com->uscsi_status == STATUS_RESERVATION_CONFLICT)) { 24412 /* 24413 * First try to reset the LUN. If we cannot, then try a target 24414 * reset, followed by a bus reset if the target reset fails. 24415 */ 24416 int reset_retval = 0; 24417 if (un->un_f_lun_reset_enabled == TRUE) { 24418 reset_retval = scsi_reset(SD_ADDRESS(un), RESET_LUN); 24419 } 24420 if (reset_retval == 0) { 24421 /* The LUN reset either failed or was not issued */ 24422 reset_retval = scsi_reset(SD_ADDRESS(un), RESET_TARGET); 24423 } 24424 if ((reset_retval == 0) && 24425 (scsi_reset(SD_ADDRESS(un), RESET_ALL) == 0)) { 24426 rval = EIO; 24427 kmem_free(com, sizeof (*com)); 24428 return (rval); 24429 } 24430 24431 bzero(com, sizeof (struct uscsi_cmd)); 24432 com->uscsi_flags = USCSI_SILENT; 24433 com->uscsi_cdb = cdb; 24434 com->uscsi_cdblen = CDB_GROUP0; 24435 com->uscsi_timeout = 5; 24436 24437 /* 24438 * Reissue the last reserve command, this time without request 24439 * sense. 
	 * Assume that it is just a regular reserve command.
	 */
	rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_SYSSPACE,
	    SD_PATH_STANDARD);
	}

	/* Return an error if still getting a reservation conflict. */
	if ((rval != 0) && (com->uscsi_status == STATUS_RESERVATION_CONFLICT)) {
		rval = EACCES;
	}

	kmem_free(com, sizeof (*com));
	return (rval);
}


#define	SD_NDUMP_RETRIES	12
/*
 * System Crash Dump routine
 *
 * Writes nblk system blocks from addr to the dump device, starting at the
 * partition-relative block blkno.  All I/O is done in polled (FLAG_NOINTR)
 * mode since interrupts are not available at crash-dump time.  Devices with
 * a target block size other than the system block size go through a
 * read-modify-write pass when the request is not target-block aligned.
 */

static int
sddump(dev_t dev, caddr_t addr, daddr_t blkno, int nblk)
{
	int		instance;
	int		partition;
	int		i;
	int		err;
	struct sd_lun	*un;
	struct scsi_pkt *wr_pktp;
	struct buf	*wr_bp;
	struct buf	wr_buf;
	daddr_t		tgt_byte_offset;    /* rmw - byte offset for target */
	daddr_t		tgt_blkno;	    /* rmw - blkno for target */
	size_t		tgt_byte_count;	    /* rmw - # of bytes to xfer */
	size_t		tgt_nblk;	    /* rmw - # of tgt blks to xfer */
	size_t		io_start_offset;
	int		doing_rmw = FALSE;
	int		rval;
	ssize_t		dma_resid;
	daddr_t		oblkno;
	diskaddr_t	nblks = 0;
	diskaddr_t	start_block;

	instance = SDUNIT(dev);
	if (((un = ddi_get_soft_state(sd_state, instance)) == NULL) ||
	    !SD_IS_VALID_LABEL(un) || ISCD(un)) {
		return (ENXIO);
	}

	_NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*un))

	SD_TRACE(SD_LOG_DUMP, un, "sddump: entry\n");

	partition = SDPART(dev);
	SD_INFO(SD_LOG_DUMP, un, "sddump: partition = %d\n", partition);

	/* Validate blocks to dump at against partition size. */

	(void) cmlb_partinfo(un->un_cmlbhandle, partition,
	    &nblks, &start_block, NULL, NULL, (void *)SD_PATH_DIRECT);

	if ((blkno + nblk) > nblks) {
		SD_TRACE(SD_LOG_DUMP, un,
		    "sddump: dump range larger than partition: "
		    "blkno = 0x%x, nblk = 0x%x, dkl_nblk = 0x%x\n",
		    blkno, nblk, nblks);
		return (EINVAL);
	}

	mutex_enter(&un->un_pm_mutex);
	if (SD_DEVICE_IS_IN_LOW_POWER(un)) {
		struct scsi_pkt *start_pktp;

		mutex_exit(&un->un_pm_mutex);

		/*
		 * use pm framework to power on HBA 1st
		 */
		(void) pm_raise_power(SD_DEVINFO(un), 0, SD_SPINDLE_ON);

		/*
		 * Dump no longer uses sdpower to power on a device, it's
		 * in-line here so it can be done in polled mode.
		 */

		SD_INFO(SD_LOG_DUMP, un, "sddump: starting device\n");

		start_pktp = scsi_init_pkt(SD_ADDRESS(un), NULL, NULL,
		    CDB_GROUP0, un->un_status_len, 0, 0, NULL_FUNC, NULL);

		if (start_pktp == NULL) {
			/* We were not given a SCSI packet, fail. */
			return (EIO);
		}
		bzero(start_pktp->pkt_cdbp, CDB_GROUP0);
		start_pktp->pkt_cdbp[0] = SCMD_START_STOP;
		start_pktp->pkt_cdbp[4] = SD_TARGET_START;
		start_pktp->pkt_flags = FLAG_NOINTR;

		mutex_enter(SD_MUTEX(un));
		SD_FILL_SCSI1_LUN(un, start_pktp);
		mutex_exit(SD_MUTEX(un));
		/*
		 * Scsi_poll returns 0 (success) if the command completes and
		 * the status block is STATUS_GOOD.
		 */
		if (sd_scsi_poll(un, start_pktp) != 0) {
			scsi_destroy_pkt(start_pktp);
			return (EIO);
		}
		scsi_destroy_pkt(start_pktp);
		(void) sd_ddi_pm_resume(un);
	} else {
		mutex_exit(&un->un_pm_mutex);
	}

	mutex_enter(SD_MUTEX(un));
	un->un_throttle = 0;

	/*
	 * The first time through, reset the specific target device.
	 * However, when cpr calls sddump we know that sd is in a
	 * good state so no bus reset is required.
	 * Clear sense data via Request Sense cmd.
	 * In sddump we don't care about allow_bus_device_reset anymore
	 */

	if ((un->un_state != SD_STATE_SUSPENDED) &&
	    (un->un_state != SD_STATE_DUMPING)) {

		New_state(un, SD_STATE_DUMPING);

		if (un->un_f_is_fibre == FALSE) {
			mutex_exit(SD_MUTEX(un));
			/*
			 * Attempt a bus reset for parallel scsi.
			 *
			 * Note: A bus reset is required because on some host
			 * systems (i.e. E420R) a bus device reset is
			 * insufficient to reset the state of the target.
			 *
			 * Note: Don't issue the reset for fibre-channel,
			 * because this tends to hang the bus (loop) for
			 * too long while everyone is logging out and in
			 * and the deadman timer for dumping will fire
			 * before the dump is complete.
			 */
			if (scsi_reset(SD_ADDRESS(un), RESET_ALL) == 0) {
				mutex_enter(SD_MUTEX(un));
				Restore_state(un);
				mutex_exit(SD_MUTEX(un));
				return (EIO);
			}

			/* Delay to give the device some recovery time. */
			drv_usecwait(10000);

			if (sd_send_polled_RQS(un) == SD_FAILURE) {
				SD_INFO(SD_LOG_DUMP, un,
				    "sddump: sd_send_polled_RQS failed\n");
			}
			mutex_enter(SD_MUTEX(un));
		}
	}

	/*
	 * Convert the partition-relative block number to a
	 * disk physical block number.
	 */
	blkno += start_block;

	SD_INFO(SD_LOG_DUMP, un, "sddump: disk blkno = 0x%x\n", blkno);


	/*
	 * Check if the device has a non-512 block size.
	 */
	wr_bp = NULL;
	if (NOT_DEVBSIZE(un)) {
		tgt_byte_offset = blkno * un->un_sys_blocksize;
		tgt_byte_count = nblk * un->un_sys_blocksize;
		if ((tgt_byte_offset % un->un_tgt_blocksize) ||
		    (tgt_byte_count % un->un_tgt_blocksize)) {
			doing_rmw = TRUE;
			/*
			 * Calculate the block number and number of block
			 * in terms of the media block size.
			 */
			tgt_blkno = tgt_byte_offset / un->un_tgt_blocksize;
			tgt_nblk =
			    ((tgt_byte_offset + tgt_byte_count +
			    (un->un_tgt_blocksize - 1)) /
			    un->un_tgt_blocksize) - tgt_blkno;

			/*
			 * Invoke the routine which is going to do read part
			 * of read-modify-write.
			 * Note that this routine returns a pointer to
			 * a valid bp in wr_bp.
			 */
			err = sddump_do_read_of_rmw(un, tgt_blkno, tgt_nblk,
			    &wr_bp);
			if (err) {
				mutex_exit(SD_MUTEX(un));
				return (err);
			}
			/*
			 * Offset is being calculated as -
			 * (original block # * system block size) -
			 * (new block # * target block size)
			 */
			io_start_offset =
			    ((uint64_t)(blkno * un->un_sys_blocksize)) -
			    ((uint64_t)(tgt_blkno * un->un_tgt_blocksize));

			ASSERT((io_start_offset >= 0) &&
			    (io_start_offset < un->un_tgt_blocksize));
			/*
			 * Do the modify portion of read modify write.
			 */
			bcopy(addr, &wr_bp->b_un.b_addr[io_start_offset],
			    (size_t)nblk * un->un_sys_blocksize);
		} else {
			doing_rmw = FALSE;
			tgt_blkno = tgt_byte_offset / un->un_tgt_blocksize;
			tgt_nblk = tgt_byte_count / un->un_tgt_blocksize;
		}

		/* Convert blkno and nblk to target blocks */
		blkno = tgt_blkno;
		nblk = tgt_nblk;
	} else {
		wr_bp = &wr_buf;
		bzero(wr_bp, sizeof (struct buf));
		wr_bp->b_flags		= B_BUSY;
		wr_bp->b_un.b_addr	= addr;
		wr_bp->b_bcount		= nblk << DEV_BSHIFT;
		wr_bp->b_resid		= 0;
	}

	mutex_exit(SD_MUTEX(un));

	/*
	 * Obtain a SCSI packet for the write command.
	 * It should be safe to call the allocator here without
	 * worrying about being locked for DVMA mapping because
	 * the address we're passed is already a DVMA mapping
	 *
	 * We are also not going to worry about semaphore ownership
	 * in the dump buffer. Dumping is single threaded at present.
	 */

	wr_pktp = NULL;

	dma_resid = wr_bp->b_bcount;
	oblkno = blkno;

	while (dma_resid != 0) {

		for (i = 0; i < SD_NDUMP_RETRIES; i++) {
			wr_bp->b_flags &= ~B_ERROR;

			if (un->un_partial_dma_supported == 1) {
				blkno = oblkno +
				    ((wr_bp->b_bcount - dma_resid) /
				    un->un_tgt_blocksize);
				nblk = dma_resid / un->un_tgt_blocksize;

				if (wr_pktp) {
					/*
					 * Partial DMA transfers after initial
					 * transfer
					 */
					rval = sd_setup_next_rw_pkt(un,
					    wr_pktp, wr_bp, blkno, nblk);
				} else {
					/* Initial transfer */
					rval = sd_setup_rw_pkt(un, &wr_pktp,
					    wr_bp, un->un_pkt_flags,
					    NULL_FUNC, NULL, blkno, nblk);
				}
			} else {
				rval = sd_setup_rw_pkt(un, &wr_pktp, wr_bp,
				    0, NULL_FUNC, NULL, blkno, nblk);
			}

			if (rval == 0) {
				/* We were given a SCSI packet, continue. */
				break;
			}

			if (i == 0) {
				if (wr_bp->b_flags & B_ERROR) {
					scsi_log(SD_DEVINFO(un), sd_label,
					    CE_WARN,
					    "no resources for dumping; "
					    "error code: 0x%x, retrying",
					    geterror(wr_bp));
				} else {
					scsi_log(SD_DEVINFO(un), sd_label,
					    CE_WARN,
					    "no resources for dumping; "
					    "retrying");
				}
			} else if (i != (SD_NDUMP_RETRIES - 1)) {
				if (wr_bp->b_flags & B_ERROR) {
					scsi_log(SD_DEVINFO(un), sd_label,
					    CE_CONT,
					    "no resources for dumping; "
					    "error code: 0x%x, retrying\n",
					    geterror(wr_bp));
				}
			} else {
				if (wr_bp->b_flags & B_ERROR) {
					scsi_log(SD_DEVINFO(un), sd_label,
					    CE_CONT,
					    "no resources for dumping; "
					    "error code: 0x%x, retries failed, "
					    "giving up.\n", geterror(wr_bp));
				} else {
					scsi_log(SD_DEVINFO(un), sd_label,
					    CE_CONT,
					    "no resources for dumping; "
					    "retries failed, giving up.\n");
				}
				mutex_enter(SD_MUTEX(un));
				Restore_state(un);
				if (NOT_DEVBSIZE(un) && (doing_rmw == TRUE)) {
					mutex_exit(SD_MUTEX(un));
					scsi_free_consistent_buf(wr_bp);
				} else {
					mutex_exit(SD_MUTEX(un));
				}
				return (EIO);
			}
			drv_usecwait(10000);
		}

		if (un->un_partial_dma_supported == 1) {
			/*
			 * save the resid from PARTIAL_DMA
			 */
			dma_resid = wr_pktp->pkt_resid;
			if (dma_resid != 0)
				nblk -= SD_BYTES2TGTBLOCKS(un, dma_resid);
			wr_pktp->pkt_resid = 0;
		} else {
			dma_resid = 0;
		}

		/* SunBug 1222170 */
		wr_pktp->pkt_flags = FLAG_NOINTR;

		err = EIO;
		for (i = 0; i < SD_NDUMP_RETRIES; i++) {

			/*
			 * Scsi_poll returns 0 (success) if the command
			 * completes and the status block is STATUS_GOOD.
			 * We should only check errors if this condition is
			 * not true.  Even then we should send our own request
			 * sense packet only if we have a check condition and
			 * auto request sense has not been performed by the
			 * hba.
			 */
			SD_TRACE(SD_LOG_DUMP, un, "sddump: sending write\n");

			if ((sd_scsi_poll(un, wr_pktp) == 0) &&
			    (wr_pktp->pkt_resid == 0)) {
				err = SD_SUCCESS;
				break;
			}

			/*
			 * Check CMD_DEV_GONE 1st, give up if device is gone.
			 */
			if (wr_pktp->pkt_reason == CMD_DEV_GONE) {
				scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
				    "Error while dumping state..."
				    "Device is gone\n");
				break;
			}

			if (SD_GET_PKT_STATUS(wr_pktp) == STATUS_CHECK) {
				SD_INFO(SD_LOG_DUMP, un,
				    "sddump: write failed with CHECK, "
				    "try # %d\n", i);
				if (((wr_pktp->pkt_state & STATE_ARQ_DONE)
				    == 0)) {
					(void) sd_send_polled_RQS(un);
				}

				continue;
			}

			if (SD_GET_PKT_STATUS(wr_pktp) == STATUS_BUSY) {
				int reset_retval = 0;

				SD_INFO(SD_LOG_DUMP, un,
				    "sddump: write failed with BUSY, "
				    "try # %d\n", i);

				if (un->un_f_lun_reset_enabled == TRUE) {
					reset_retval = scsi_reset(
					    SD_ADDRESS(un), RESET_LUN);
				}
				if (reset_retval == 0) {
					(void) scsi_reset(SD_ADDRESS(un),
					    RESET_TARGET);
				}
				(void) sd_send_polled_RQS(un);

			} else {
				SD_INFO(SD_LOG_DUMP, un,
				    "sddump: write failed with 0x%x, "
				    "try # %d\n",
				    SD_GET_PKT_STATUS(wr_pktp), i);
				mutex_enter(SD_MUTEX(un));
				sd_reset_target(un, wr_pktp);
				mutex_exit(SD_MUTEX(un));
			}

			/*
			 * If we are not getting anywhere with lun/target
			 * resets, let's reset the bus.
			 */
			if (i == SD_NDUMP_RETRIES/2) {
				(void) scsi_reset(SD_ADDRESS(un), RESET_ALL);
				(void) sd_send_polled_RQS(un);
			}
		}
	}

	scsi_destroy_pkt(wr_pktp);
	mutex_enter(SD_MUTEX(un));
	if ((NOT_DEVBSIZE(un)) && (doing_rmw == TRUE)) {
		mutex_exit(SD_MUTEX(un));
		scsi_free_consistent_buf(wr_bp);
	} else {
		mutex_exit(SD_MUTEX(un));
	}
	SD_TRACE(SD_LOG_DUMP, un, "sddump: exit: err = %d\n", err);
	return (err);
}

/*
 *    Function: sd_scsi_poll()
 *
 * Description: This is a wrapper for the scsi_poll call.
 *
 *   Arguments: sd_lun - The unit structure
 *              scsi_pkt - The scsi packet being sent to the device.
 *
 * Return Code: 0 - Command completed successfully with good status
 *             -1 - Command failed.  This could indicate a check condition
 *                  or other status value requiring recovery action.
 *
 * NOTE: This code is only called off sddump().
 */

static int
sd_scsi_poll(struct sd_lun *un, struct scsi_pkt *pktp)
{
	int status;

	ASSERT(un != NULL);
	ASSERT(!mutex_owned(SD_MUTEX(un)));
	ASSERT(pktp != NULL);

	status = SD_SUCCESS;

	/* Preserve tagged queueing flags if the HBA supports tagged-qing. */
	if (scsi_ifgetcap(&pktp->pkt_address, "tagged-qing", 1) == 1) {
		pktp->pkt_flags |= un->un_tagflags;
		pktp->pkt_flags &= ~FLAG_NODISCON;
	}

	status = sd_ddi_scsi_poll(pktp);
	/*
	 * Scsi_poll returns 0 (success) if the command completes and the
	 * status block is STATUS_GOOD.  We should only check errors if this
	 * condition is not true.  Even then we should send our own request
	 * sense packet only if we have a check condition and auto
	 * request sense has not been performed by the hba.
	 * Don't get RQS data if pkt_reason is CMD_DEV_GONE.
	 */
	if ((status != SD_SUCCESS) &&
	    (SD_GET_PKT_STATUS(pktp) == STATUS_CHECK) &&
	    (pktp->pkt_state & STATE_ARQ_DONE) == 0 &&
	    (pktp->pkt_reason != CMD_DEV_GONE))
		(void) sd_send_polled_RQS(un);

	return (status);
}

/*
 *    Function: sd_send_polled_RQS()
 *
 * Description: This sends the request sense command to a device.
 *		The pre-allocated request sense packet/buffer in the soft
 *		state (un_rqs_pktp/un_rqs_bp) is reused, guarded by the
 *		un_sense_isbusy flag.
 *
 *   Arguments: sd_lun - The unit structure
 *
 * Return Code: 0 - Command completed successfully with good status
 *             -1 - Command failed.
 *
 */

static int
sd_send_polled_RQS(struct sd_lun *un)
{
	int	ret_val;
	struct	scsi_pkt	*rqs_pktp;
	struct	buf		*rqs_bp;

	ASSERT(un != NULL);
	ASSERT(!mutex_owned(SD_MUTEX(un)));

	ret_val = SD_SUCCESS;

	rqs_pktp = un->un_rqs_pktp;
	rqs_bp	 = un->un_rqs_bp;

	mutex_enter(SD_MUTEX(un));

	if (un->un_sense_isbusy) {
		/* The request sense packet is already in flight; fail. */
		ret_val = SD_FAILURE;
		mutex_exit(SD_MUTEX(un));
		return (ret_val);
	}

	/*
	 * If the request sense buffer (and packet) is not in use,
	 * let's set the un_sense_isbusy and send our packet
	 */
	un->un_sense_isbusy	= 1;
	rqs_pktp->pkt_resid	= 0;
	rqs_pktp->pkt_reason	= 0;
	rqs_pktp->pkt_flags	|= FLAG_NOINTR;
	bzero(rqs_bp->b_un.b_addr, SENSE_LENGTH);

	mutex_exit(SD_MUTEX(un));

	SD_INFO(SD_LOG_COMMON, un, "sd_send_polled_RQS: req sense buf at"
	    " 0x%p\n", rqs_bp->b_un.b_addr);

	/*
	 * Can't send this to sd_scsi_poll, we wrap ourselves around the
	 * axle - it has a call into us!
	 */
	if ((ret_val = sd_ddi_scsi_poll(rqs_pktp)) != 0) {
		SD_INFO(SD_LOG_COMMON, un,
		    "sd_send_polled_RQS: RQS failed\n");
	}

	SD_DUMP_MEMORY(un, SD_LOG_COMMON, "sd_send_polled_RQS:",
	    (uchar_t *)rqs_bp->b_un.b_addr, SENSE_LENGTH, SD_LOG_HEX);

	/* Release the shared request sense resources. */
	mutex_enter(SD_MUTEX(un));
	un->un_sense_isbusy = 0;
	mutex_exit(SD_MUTEX(un));

	return (ret_val);
}

/*
 * Defines needed for localized version of the scsi_poll routine.
 */
#define	CSEC		10000			/* usecs */
#define	SEC_TO_CSEC	(1000000/CSEC)

/*
 *    Function: sd_ddi_scsi_poll()
 *
 * Description: Localized version of the scsi_poll routine.  The purpose is to
 *		send a scsi_pkt to a device as a polled command.  This version
 *		is to ensure more robust handling of transport errors.
 *		Specifically this routine cures not ready, coming ready
 *		transition for power up and reset of sonoma's.  This can take
 *		up to 45 seconds for power-on and 20 seconds for reset of a
 *		sonoma lun.
 *
 *   Arguments: scsi_pkt - The scsi_pkt being sent to a device
 *
 * Return Code: 0 - Command completed successfully with good status
 *	       -1 - Command failed.
 *
 * NOTE: This code is almost identical to scsi_poll, however before 6668774 can
 * be fixed (removing this code), we need to determine how to handle the
 * KEY_UNIT_ATTENTION condition below in conditions not as limited as sddump().
 *
 * NOTE: This code is only called off sddump().
 */
static int
sd_ddi_scsi_poll(struct scsi_pkt *pkt)
{
	int rval = -1;
	int savef;
	long savet;
	void (*savec)();
	int timeout;
	int busy_count;
	int poll_delay;
	int rc;
	uint8_t *sensep;
	struct scsi_arq_status *arqstat;
	extern int do_polled_io;

	ASSERT(pkt->pkt_scbp);

	/*
	 * save old flags..
	 */
	savef = pkt->pkt_flags;
	savec = pkt->pkt_comp;
	savet = pkt->pkt_time;

	pkt->pkt_flags |= FLAG_NOINTR;

	/*
	 * XXX there is nothing in the SCSA spec that states that we should not
	 * do a callback for polled cmds; however, removing this will break sd
	 * and probably other target drivers
	 */
	pkt->pkt_comp = NULL;

	/*
	 * we don't like a polled command without timeout.
	 * 60 seconds seems long enough.
	 */
	if (pkt->pkt_time == 0)
		pkt->pkt_time = SCSI_POLL_TIMEOUT;

	/*
	 * Send polled cmd.
	 *
	 * We do some error recovery for various errors.  Tran_busy,
	 * queue full, and non-dispatched commands are retried every 10 msec.
	 * as they are typically transient failures.  Busy status and Not
	 * Ready are retried every second as this status takes a while to
	 * change.
	 */
	timeout = pkt->pkt_time * SEC_TO_CSEC;

	for (busy_count = 0; busy_count < timeout; busy_count++) {
		/*
		 * Initialize pkt status variables.
		 */
		*pkt->pkt_scbp = pkt->pkt_reason = pkt->pkt_state = 0;

		if ((rc = scsi_transport(pkt)) != TRAN_ACCEPT) {
			if (rc != TRAN_BUSY) {
				/* Transport failed - give up. */
				break;
			} else {
				/* Transport busy - try again. */
				poll_delay = 1 * CSEC;		/* 10 msec. */
			}
		} else {
			/*
			 * Transport accepted - check pkt status.
			 */
			rc = (*pkt->pkt_scbp) & STATUS_MASK;
			if ((pkt->pkt_reason == CMD_CMPLT) &&
			    (rc == STATUS_CHECK) &&
			    (pkt->pkt_state & STATE_ARQ_DONE)) {
				arqstat =
				    (struct scsi_arq_status *)(pkt->pkt_scbp);
				sensep = (uint8_t *)&arqstat->sts_sensedata;
			} else {
				sensep = NULL;
			}

			if ((pkt->pkt_reason == CMD_CMPLT) &&
			    (rc == STATUS_GOOD)) {
				/* No error - we're done */
				rval = 0;
				break;

			} else if (pkt->pkt_reason == CMD_DEV_GONE) {
				/* Lost connection - give up */
				break;

			} else if ((pkt->pkt_reason == CMD_INCOMPLETE) &&
			    (pkt->pkt_state == 0)) {
				/* Pkt not dispatched - try again. */
				poll_delay = 1 * CSEC;		/* 10 msec. */

			} else if ((pkt->pkt_reason == CMD_CMPLT) &&
			    (rc == STATUS_QFULL)) {
				/* Queue full - try again. */
				poll_delay = 1 * CSEC;		/* 10 msec. */

			} else if ((pkt->pkt_reason == CMD_CMPLT) &&
			    (rc == STATUS_BUSY)) {
				/* Busy - try again. */
				poll_delay = 100 * CSEC;	/* 1 sec. */
				busy_count += (SEC_TO_CSEC - 1);

			} else if ((sensep != NULL) &&
			    (scsi_sense_key(sensep) == KEY_UNIT_ATTENTION)) {
				/*
				 * Unit Attention - try again.
				 * Pretend it took 1 sec.
				 * NOTE: 'continue' avoids poll_delay
				 */
				busy_count += (SEC_TO_CSEC - 1);
				continue;

			} else if ((sensep != NULL) &&
			    (scsi_sense_key(sensep) == KEY_NOT_READY) &&
			    (scsi_sense_asc(sensep) == 0x04) &&
			    (scsi_sense_ascq(sensep) == 0x01)) {
				/*
				 * Not ready -> ready - try again.
				 * 04h/01h: LUN IS IN PROCESS OF BECOMING READY
				 * ...same as STATUS_BUSY
				 */
				poll_delay = 100 * CSEC;	/* 1 sec. */
				busy_count += (SEC_TO_CSEC - 1);

			} else {
				/* BAD status - give up. */
				break;
			}
		}

		if (((curthread->t_flag & T_INTR_THREAD) == 0) &&
		    !do_polled_io) {
			delay(drv_usectohz(poll_delay));
		} else {
			/* we busy wait during cpr_dump or interrupt threads */
			drv_usecwait(poll_delay);
		}
	}

	pkt->pkt_flags = savef;
	pkt->pkt_comp = savec;
	pkt->pkt_time = savet;

	/* return on error */
	if (rval)
		return (rval);

	/*
	 * This is not a performance critical code path.
	 *
	 * As an accommodation for scsi_poll callers, to avoid ddi_dma_sync()
	 * issues associated with looking at DMA memory prior to
	 * scsi_pkt_destroy(), we scsi_sync_pkt() prior to return.
	 */
	scsi_sync_pkt(pkt);
	return (0);
}



/*
 *    Function: sd_persistent_reservation_in_read_keys
 *
 * Description: This routine is the driver entry point for handling CD-ROM
 *		multi-host persistent reservation requests (MHIOCGRP_INKEYS)
 *		by sending the SCSI-3 PRIN commands to the device.
 *		Processes the read keys command response by copying the
 *		reservation key information into the user provided buffer.
 *		Support for the 32/64 bit _MULTI_DATAMODEL is implemented.
 *
 *   Arguments: un   -  Pointer to soft state struct for the target.
 *		usrp -	user provided pointer to multihost Persistent In Read
 *			Keys structure (mhioc_inkeys_t)
 *		flag -	this argument is a pass through to ddi_copyxxx()
 *			directly from the mode argument of ioctl().
 *
 * Return Code: 0   - Success
 *		EACCES
 *		ENOTSUP
 *		errno return code from sd_send_scsi_cmd()
 *
 *     Context: Can sleep. Does not return until command is completed.
 */

static int
sd_persistent_reservation_in_read_keys(struct sd_lun *un,
    mhioc_inkeys_t *usrp, int flag)
{
#ifdef _MULTI_DATAMODEL
	struct mhioc_key_list32	li32;
#endif
	sd_prin_readkeys_t	*in;
	mhioc_inkeys_t		*ptr;
	mhioc_key_list_t	li;
	uchar_t			*data_bufp;
	int			data_len;
	int			rval = 0;
	size_t			copysz;
	sd_ssc_t		*ssc;

	if ((ptr = (mhioc_inkeys_t *)usrp) == NULL) {
		return (EINVAL);
	}
	bzero(&li, sizeof (mhioc_key_list_t));

	ssc = sd_ssc_init(un);

	/*
	 * Get the listsize from user
	 */
#ifdef _MULTI_DATAMODEL

	switch (ddi_model_convert_from(flag & FMODELS)) {
	case DDI_MODEL_ILP32:
		copysz = sizeof (struct mhioc_key_list32);
		if (ddi_copyin(ptr->li, &li32, copysz, flag)) {
			SD_ERROR(SD_LOG_IOCTL_MHD, un,
			    "sd_persistent_reservation_in_read_keys: "
			    "failed ddi_copyin: mhioc_key_list32_t\n");
			rval = EFAULT;
			goto done;
		}
		li.listsize = li32.listsize;
		li.list = (mhioc_resv_key_t *)(uintptr_t)li32.list;
		break;

	case DDI_MODEL_NONE:
		copysz = sizeof (mhioc_key_list_t);
		if (ddi_copyin(ptr->li, &li, copysz, flag)) {
			SD_ERROR(SD_LOG_IOCTL_MHD, un,
			    "sd_persistent_reservation_in_read_keys: "
			    "failed ddi_copyin: mhioc_key_list_t\n");
			rval = EFAULT;
			goto done;
		}
		break;
	}

#else /* ! _MULTI_DATAMODEL */
	copysz = sizeof (mhioc_key_list_t);
	if (ddi_copyin(ptr->li, &li, copysz, flag)) {
		SD_ERROR(SD_LOG_IOCTL_MHD, un,
		    "sd_persistent_reservation_in_read_keys: "
		    "failed ddi_copyin: mhioc_key_list_t\n");
		rval = EFAULT;
		goto done;
	}
#endif

	/*
	 * Size the PRIN data buffer for listsize keys plus the fixed
	 * read-keys header (the flexible key array replaces the caddr_t).
	 */
	data_len  = li.listsize * MHIOC_RESV_KEY_SIZE;
	data_len += (sizeof (sd_prin_readkeys_t) - sizeof (caddr_t));
	data_bufp = kmem_zalloc(data_len, KM_SLEEP);

	rval = sd_send_scsi_PERSISTENT_RESERVE_IN(ssc, SD_READ_KEYS,
	    data_len, data_bufp);
	if (rval != 0) {
		if (rval == EIO)
			sd_ssc_assessment(ssc, SD_FMT_IGNORE_COMPROMISE);
		else
			sd_ssc_assessment(ssc, SD_FMT_IGNORE);
		goto done;
	}
	in = (sd_prin_readkeys_t *)data_bufp;
	ptr->generation = BE_32(in->generation);
	li.listlen = BE_32(in->len) / MHIOC_RESV_KEY_SIZE;

	/*
	 * Return the min(listsize, listlen) keys
	 */
#ifdef _MULTI_DATAMODEL

	switch (ddi_model_convert_from(flag & FMODELS)) {
	case DDI_MODEL_ILP32:
		li32.listlen = li.listlen;
		if (ddi_copyout(&li32, ptr->li, copysz, flag)) {
			SD_ERROR(SD_LOG_IOCTL_MHD, un,
			    "sd_persistent_reservation_in_read_keys: "
			    "failed ddi_copyout: mhioc_key_list32_t\n");
			rval = EFAULT;
			goto done;
		}
		break;

	case DDI_MODEL_NONE:
		if (ddi_copyout(&li, ptr->li, copysz, flag)) {
			SD_ERROR(SD_LOG_IOCTL_MHD, un,
			    "sd_persistent_reservation_in_read_keys: "
			    "failed ddi_copyout: mhioc_key_list_t\n");
			rval = EFAULT;
			goto done;
		}
		break;
	}

#else /* !
_MULTI_DATAMODEL */

	if (ddi_copyout(&li, ptr->li, copysz, flag)) {
		SD_ERROR(SD_LOG_IOCTL_MHD, un,
		    "sd_persistent_reservation_in_read_keys: "
		    "failed ddi_copyout: mhioc_key_list_t\n");
		rval = EFAULT;
		goto done;
	}

#endif /* _MULTI_DATAMODEL */

	/* Copy out no more keys than the user buffer can hold. */
	copysz = min(li.listlen * MHIOC_RESV_KEY_SIZE,
	    li.listsize * MHIOC_RESV_KEY_SIZE);
	if (ddi_copyout(&in->keylist, li.list, copysz, flag)) {
		SD_ERROR(SD_LOG_IOCTL_MHD, un,
		    "sd_persistent_reservation_in_read_keys: "
		    "failed ddi_copyout: keylist\n");
		rval = EFAULT;
	}
done:
	sd_ssc_fini(ssc);
	kmem_free(data_bufp, data_len);
	return (rval);
}


/*
 *    Function: sd_persistent_reservation_in_read_resv
 *
 * Description: This routine is the driver entry point for handling CD-ROM
 *		multi-host persistent reservation requests (MHIOCGRP_INRESV)
 *		by sending the SCSI-3 PRIN commands to the device.
 *		Process the read persistent reservations command response by
 *		copying the reservation information into the user provided
 *		buffer. Support for the 32/64 _MULTI_DATAMODEL is implemented.
 *
 *   Arguments: un   -  Pointer to soft state struct for the target.
 *		usrp -	user provided pointer to multihost Persistent In Read
 *			Keys structure (mhioc_inkeys_t)
 *		flag -	this argument is a pass through to ddi_copyxxx()
 *			directly from the mode argument of ioctl().
 *
 * Return Code: 0   - Success
 *		EACCES
 *		ENOTSUP
 *		errno return code from sd_send_scsi_cmd()
 *
 *     Context: Can sleep. Does not return until command is completed.
 */

static int
sd_persistent_reservation_in_read_resv(struct sd_lun *un,
    mhioc_inresvs_t *usrp, int flag)
{
#ifdef _MULTI_DATAMODEL
	struct mhioc_resv_desc_list32 resvlist32;
#endif
	sd_prin_readresv_t	*in;
	mhioc_inresvs_t		*ptr;
	sd_readresv_desc_t	*readresv_ptr;
	mhioc_resv_desc_list_t	resvlist;
	mhioc_resv_desc_t	resvdesc;
	uchar_t			*data_bufp = NULL;
	int			data_len;
	int			rval = 0;
	int			i;
	size_t			copysz;
	mhioc_resv_desc_t	*bufp;
	sd_ssc_t		*ssc;

	if ((ptr = usrp) == NULL) {
		return (EINVAL);
	}

	ssc = sd_ssc_init(un);

	/*
	 * Get the listsize from user
	 */
#ifdef _MULTI_DATAMODEL
	switch (ddi_model_convert_from(flag & FMODELS)) {
	case DDI_MODEL_ILP32:
		copysz = sizeof (struct mhioc_resv_desc_list32);
		if (ddi_copyin(ptr->li, &resvlist32, copysz, flag)) {
			SD_ERROR(SD_LOG_IOCTL_MHD, un,
			    "sd_persistent_reservation_in_read_resv: "
			    "failed ddi_copyin: mhioc_resv_desc_list_t\n");
			rval = EFAULT;
			goto done;
		}
		resvlist.listsize = resvlist32.listsize;
		resvlist.list = (mhioc_resv_desc_t *)(uintptr_t)resvlist32.list;
		break;

	case DDI_MODEL_NONE:
		copysz = sizeof (mhioc_resv_desc_list_t);
		if (ddi_copyin(ptr->li, &resvlist, copysz, flag)) {
			SD_ERROR(SD_LOG_IOCTL_MHD, un,
			    "sd_persistent_reservation_in_read_resv: "
			    "failed ddi_copyin: mhioc_resv_desc_list_t\n");
			rval = EFAULT;
			goto done;
		}
		break;
	}
#else /* ! _MULTI_DATAMODEL */
	copysz = sizeof (mhioc_resv_desc_list_t);
	if (ddi_copyin(ptr->li, &resvlist, copysz, flag)) {
		SD_ERROR(SD_LOG_IOCTL_MHD, un,
		    "sd_persistent_reservation_in_read_resv: "
		    "failed ddi_copyin: mhioc_resv_desc_list_t\n");
		rval = EFAULT;
		goto done;
	}
#endif /* ! _MULTI_DATAMODEL */

	/*
	 * Size the PRIN data buffer for listsize reservation descriptors
	 * plus the fixed read-reservations header.
	 */
	data_len  = resvlist.listsize * SCSI3_RESV_DESC_LEN;
	data_len += (sizeof (sd_prin_readresv_t) - sizeof (caddr_t));
	data_bufp = kmem_zalloc(data_len, KM_SLEEP);

	rval = sd_send_scsi_PERSISTENT_RESERVE_IN(ssc, SD_READ_RESV,
	    data_len, data_bufp);
	if (rval != 0) {
		if (rval == EIO)
			sd_ssc_assessment(ssc, SD_FMT_IGNORE_COMPROMISE);
		else
			sd_ssc_assessment(ssc, SD_FMT_IGNORE);
		goto done;
	}
	in = (sd_prin_readresv_t *)data_bufp;
	ptr->generation = BE_32(in->generation);
	resvlist.listlen = BE_32(in->len) / SCSI3_RESV_DESC_LEN;

	/*
	 * Return the min(listsize, listlen) keys
	 */
#ifdef _MULTI_DATAMODEL

	switch (ddi_model_convert_from(flag & FMODELS)) {
	case DDI_MODEL_ILP32:
		resvlist32.listlen = resvlist.listlen;
		if (ddi_copyout(&resvlist32, ptr->li, copysz, flag)) {
			SD_ERROR(SD_LOG_IOCTL_MHD, un,
			    "sd_persistent_reservation_in_read_resv: "
			    "failed ddi_copyout: mhioc_resv_desc_list_t\n");
			rval = EFAULT;
			goto done;
		}
		break;

	case DDI_MODEL_NONE:
		if (ddi_copyout(&resvlist, ptr->li, copysz, flag)) {
			SD_ERROR(SD_LOG_IOCTL_MHD, un,
			    "sd_persistent_reservation_in_read_resv: "
			    "failed ddi_copyout: mhioc_resv_desc_list_t\n");
			rval = EFAULT;
			goto done;
		}
		break;
	}

#else /* ! _MULTI_DATAMODEL */

	if (ddi_copyout(&resvlist, ptr->li, copysz, flag)) {
		SD_ERROR(SD_LOG_IOCTL_MHD, un,
		    "sd_persistent_reservation_in_read_resv: "
		    "failed ddi_copyout: mhioc_resv_desc_list_t\n");
		rval = EFAULT;
		goto done;
	}

#endif /* !
_MULTI_DATAMODEL */

	/*
	 * Convert each returned descriptor from wire format (big-endian
	 * scope-specific address) and copy it out one at a time, stopping
	 * at whichever of listlen/listsize is smaller.
	 */
	readresv_ptr = (sd_readresv_desc_t *)&in->readresv_desc;
	bufp = resvlist.list;
	copysz = sizeof (mhioc_resv_desc_t);
	for (i = 0; i < min(resvlist.listlen, resvlist.listsize);
	    i++, readresv_ptr++, bufp++) {

		bcopy(&readresv_ptr->resvkey, &resvdesc.key,
		    MHIOC_RESV_KEY_SIZE);
		resvdesc.type  = readresv_ptr->type;
		resvdesc.scope = readresv_ptr->scope;
		resvdesc.scope_specific_addr =
		    BE_32(readresv_ptr->scope_specific_addr);

		if (ddi_copyout(&resvdesc, bufp, copysz, flag)) {
			SD_ERROR(SD_LOG_IOCTL_MHD, un,
			    "sd_persistent_reservation_in_read_resv: "
			    "failed ddi_copyout: resvlist\n");
			rval = EFAULT;
			goto done;
		}
	}
done:
	sd_ssc_fini(ssc);
	/* only if data_bufp is allocated, we need to free it */
	if (data_bufp) {
		kmem_free(data_bufp, data_len);
	}
	return (rval);
}


/*
 *    Function: sr_change_blkmode()
 *
 * Description: This routine is the driver entry point for handling CD-ROM
 *		block mode ioctl requests. Support for returning and changing
 *		the current block size in use by the device is implemented. The
 *		LBA size is changed via a MODE SELECT Block Descriptor.
 *
 *		This routine issues a mode sense with an allocation length of
 *		12 bytes for the mode page header and a single block descriptor.
 *
 *   Arguments: dev - the device 'dev_t'
 *		cmd - the request type; one of CDROMGBLKMODE (get) or
 *		      CDROMSBLKMODE (set)
 *		data - current block size or requested block size
 *		flag - this argument is a pass through to ddi_copyxxx() directly
 *		       from the mode argument of ioctl().
 *
 * Return Code: the code returned by sd_send_scsi_cmd()
 *		EINVAL if invalid arguments are provided
 *		EFAULT if ddi_copyxxx() fails
 *		ENXIO if fail ddi_get_soft_state
 *		EIO if invalid mode sense block descriptor length
 *
 */

static int
sr_change_blkmode(dev_t dev, int cmd, intptr_t data, int flag)
{
	struct sd_lun			*un = NULL;
	struct mode_header		*sense_mhp, *select_mhp;
	struct block_descriptor		*sense_desc, *select_desc;
	int				current_bsize;
	int				rval = EINVAL;
	uchar_t				*sense = NULL;
	uchar_t				*select = NULL;
	sd_ssc_t			*ssc;

	ASSERT((cmd == CDROMGBLKMODE) || (cmd == CDROMSBLKMODE));

	if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) {
		return (ENXIO);
	}

	/*
	 * The block length is changed via the Mode Select block descriptor, the
	 * "Read/Write Error Recovery" mode page (0x1) contents are not actually
	 * required as part of this routine. Therefore the mode sense allocation
	 * length is specified to be the length of a mode page header and a
	 * block descriptor.
	 */
	sense = kmem_zalloc(BUFLEN_CHG_BLK_MODE, KM_SLEEP);

	ssc = sd_ssc_init(un);
	rval = sd_send_scsi_MODE_SENSE(ssc, CDB_GROUP0, sense,
	    BUFLEN_CHG_BLK_MODE, MODEPAGE_ERR_RECOV, SD_PATH_STANDARD);
	sd_ssc_fini(ssc);
	if (rval != 0) {
		scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
		    "sr_change_blkmode: Mode Sense Failed\n");
		kmem_free(sense, BUFLEN_CHG_BLK_MODE);
		return (rval);
	}

	/* Check the block descriptor len to handle only 1 block descriptor */
	sense_mhp = (struct mode_header *)sense;
	if ((sense_mhp->bdesc_length == 0) ||
	    (sense_mhp->bdesc_length > MODE_BLK_DESC_LENGTH)) {
		scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
		    "sr_change_blkmode: Mode Sense returned invalid block"
		    " descriptor length\n");
		kmem_free(sense, BUFLEN_CHG_BLK_MODE);
		return (EIO);
	}
	sense_desc = (struct block_descriptor *)(sense + MODE_HEADER_LENGTH);
	current_bsize = ((sense_desc->blksize_hi << 16) |
	    (sense_desc->blksize_mid << 8) | sense_desc->blksize_lo);

	/* Process command */
	switch (cmd) {
	case CDROMGBLKMODE:
		/* Return the block size obtained during the mode sense */
		if (ddi_copyout(&current_bsize, (void *)data,
		    sizeof (int), flag) != 0)
			rval = EFAULT;
		break;
	case CDROMSBLKMODE:
		/* Validate the requested block size */
		switch (data) {
		case CDROM_BLK_512:
		case CDROM_BLK_1024:
		case CDROM_BLK_2048:
		case CDROM_BLK_2056:
		case CDROM_BLK_2336:
		case CDROM_BLK_2340:
		case CDROM_BLK_2352:
		case CDROM_BLK_2368:
		case CDROM_BLK_2448:
		case CDROM_BLK_2646:
		case CDROM_BLK_2647:
			break;
		default:
			scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
			    "sr_change_blkmode: "
			    "Block Size '%ld' Not Supported\n", data);
			kmem_free(sense, BUFLEN_CHG_BLK_MODE);
			return (EINVAL);
		}

		/*
		 * The current block size matches the requested block size so
		 * there is no need to send the mode select to change the size
		 */
		if (current_bsize == data) {
			break;
		}

		/* Build the select data for the requested block size */
		select = kmem_zalloc(BUFLEN_CHG_BLK_MODE, KM_SLEEP);
		select_mhp = (struct mode_header *)select;
		select_desc =
		    (struct block_descriptor *)(select + MODE_HEADER_LENGTH);
		/*
		 * The LBA size is changed via the block descriptor, so the
		 * descriptor is built according to the user data
		 */
		select_mhp->bdesc_length = MODE_BLK_DESC_LENGTH;
		select_desc->blksize_hi = (char)(((data) & 0x00ff0000) >> 16);
		select_desc->blksize_mid = (char)(((data) & 0x0000ff00) >> 8);
		select_desc->blksize_lo = (char)((data) & 0x000000ff);

		/* Send the mode select for the requested block size */
		ssc = sd_ssc_init(un);
		rval = sd_send_scsi_MODE_SELECT(ssc, CDB_GROUP0,
		    select, BUFLEN_CHG_BLK_MODE, SD_DONTSAVE_PAGE,
		    SD_PATH_STANDARD);
		sd_ssc_fini(ssc);
		if (rval != 0) {
			scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
			    "sr_change_blkmode: Mode Select Failed\n");
			/*
			 * The mode select failed for the requested block size,
			 * so reset the data for the original block size and
			 * send it to the target. The error is indicated by the
			 * return value for the failed mode select.
			 */
			select_desc->blksize_hi = sense_desc->blksize_hi;
			select_desc->blksize_mid = sense_desc->blksize_mid;
			select_desc->blksize_lo = sense_desc->blksize_lo;
			ssc = sd_ssc_init(un);
			(void) sd_send_scsi_MODE_SELECT(ssc, CDB_GROUP0,
			    select, BUFLEN_CHG_BLK_MODE, SD_DONTSAVE_PAGE,
			    SD_PATH_STANDARD);
			sd_ssc_fini(ssc);
		} else {
			ASSERT(!mutex_owned(SD_MUTEX(un)));
			mutex_enter(SD_MUTEX(un));
			sd_update_block_info(un, (uint32_t)data, 0);
			mutex_exit(SD_MUTEX(un));
		}
		break;
	default:
		/* should not reach here, but check anyway */
		scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
		    "sr_change_blkmode: Command '%x' Not Supported\n", cmd);
		rval = EINVAL;
		break;
	}

	if (select) {
		kmem_free(select, BUFLEN_CHG_BLK_MODE);
	}
	if (sense) {
		kmem_free(sense, BUFLEN_CHG_BLK_MODE);
	}
	return (rval);
}


/*
 * Note: The following sr_change_speed() and sr_atapi_change_speed() routines
 * implement driver support for getting and setting the CD speed. The command
 * set used will be based on the device type. If the device has not been
 * identified as MMC the Toshiba vendor specific mode page will be used. If
 * the device is MMC but does not support the Real Time Streaming feature
 * the SET CD SPEED command will be used to set speed and mode page 0x2A will
 * be used to read the speed.
 */

/*
 *    Function: sr_change_speed()
 *
 * Description: This routine is the driver entry point for handling CD-ROM
 *		drive speed ioctl requests for devices supporting the Toshiba
 *		vendor specific drive speed mode page. Support for returning
 *		and changing the current drive speed in use by the device is
 *		implemented.
25716 * 25717 * Arguments: dev - the device 'dev_t' 25718 * cmd - the request type; one of CDROMGDRVSPEED (get) or 25719 * CDROMSDRVSPEED (set) 25720 * data - current drive speed or requested drive speed 25721 * flag - this argument is a pass through to ddi_copyxxx() directly 25722 * from the mode argument of ioctl(). 25723 * 25724 * Return Code: the code returned by sd_send_scsi_cmd() 25725 * EINVAL if invalid arguments are provided 25726 * EFAULT if ddi_copyxxx() fails 25727 * ENXIO if fail ddi_get_soft_state 25728 * EIO if invalid mode sense block descriptor length 25729 */ 25730 25731 static int 25732 sr_change_speed(dev_t dev, int cmd, intptr_t data, int flag) 25733 { 25734 struct sd_lun *un = NULL; 25735 struct mode_header *sense_mhp, *select_mhp; 25736 struct mode_speed *sense_page, *select_page; 25737 int current_speed; 25738 int rval = EINVAL; 25739 int bd_len; 25740 uchar_t *sense = NULL; 25741 uchar_t *select = NULL; 25742 sd_ssc_t *ssc; 25743 25744 ASSERT((cmd == CDROMGDRVSPEED) || (cmd == CDROMSDRVSPEED)); 25745 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 25746 return (ENXIO); 25747 } 25748 25749 /* 25750 * Note: The drive speed is being modified here according to a Toshiba 25751 * vendor specific mode page (0x31). 
25752 */ 25753 sense = kmem_zalloc(BUFLEN_MODE_CDROM_SPEED, KM_SLEEP); 25754 25755 ssc = sd_ssc_init(un); 25756 rval = sd_send_scsi_MODE_SENSE(ssc, CDB_GROUP0, sense, 25757 BUFLEN_MODE_CDROM_SPEED, CDROM_MODE_SPEED, 25758 SD_PATH_STANDARD); 25759 sd_ssc_fini(ssc); 25760 if (rval != 0) { 25761 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 25762 "sr_change_speed: Mode Sense Failed\n"); 25763 kmem_free(sense, BUFLEN_MODE_CDROM_SPEED); 25764 return (rval); 25765 } 25766 sense_mhp = (struct mode_header *)sense; 25767 25768 /* Check the block descriptor len to handle only 1 block descriptor */ 25769 bd_len = sense_mhp->bdesc_length; 25770 if (bd_len > MODE_BLK_DESC_LENGTH) { 25771 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 25772 "sr_change_speed: Mode Sense returned invalid block " 25773 "descriptor length\n"); 25774 kmem_free(sense, BUFLEN_MODE_CDROM_SPEED); 25775 return (EIO); 25776 } 25777 25778 sense_page = (struct mode_speed *) 25779 (sense + MODE_HEADER_LENGTH + sense_mhp->bdesc_length); 25780 current_speed = sense_page->speed; 25781 25782 /* Process command */ 25783 switch (cmd) { 25784 case CDROMGDRVSPEED: 25785 /* Return the drive speed obtained during the mode sense */ 25786 if (current_speed == 0x2) { 25787 current_speed = CDROM_TWELVE_SPEED; 25788 } 25789 if (ddi_copyout(¤t_speed, (void *)data, 25790 sizeof (int), flag) != 0) { 25791 rval = EFAULT; 25792 } 25793 break; 25794 case CDROMSDRVSPEED: 25795 /* Validate the requested drive speed */ 25796 switch ((uchar_t)data) { 25797 case CDROM_TWELVE_SPEED: 25798 data = 0x2; 25799 /*FALLTHROUGH*/ 25800 case CDROM_NORMAL_SPEED: 25801 case CDROM_DOUBLE_SPEED: 25802 case CDROM_QUAD_SPEED: 25803 case CDROM_MAXIMUM_SPEED: 25804 break; 25805 default: 25806 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 25807 "sr_change_speed: " 25808 "Drive Speed '%d' Not Supported\n", (uchar_t)data); 25809 kmem_free(sense, BUFLEN_MODE_CDROM_SPEED); 25810 return (EINVAL); 25811 } 25812 25813 /* 25814 * The current drive speed matches the 
requested drive speed so 25815 * there is no need to send the mode select to change the speed 25816 */ 25817 if (current_speed == data) { 25818 break; 25819 } 25820 25821 /* Build the select data for the requested drive speed */ 25822 select = kmem_zalloc(BUFLEN_MODE_CDROM_SPEED, KM_SLEEP); 25823 select_mhp = (struct mode_header *)select; 25824 select_mhp->bdesc_length = 0; 25825 select_page = 25826 (struct mode_speed *)(select + MODE_HEADER_LENGTH); 25827 select_page = 25828 (struct mode_speed *)(select + MODE_HEADER_LENGTH); 25829 select_page->mode_page.code = CDROM_MODE_SPEED; 25830 select_page->mode_page.length = 2; 25831 select_page->speed = (uchar_t)data; 25832 25833 /* Send the mode select for the requested block size */ 25834 ssc = sd_ssc_init(un); 25835 rval = sd_send_scsi_MODE_SELECT(ssc, CDB_GROUP0, select, 25836 MODEPAGE_CDROM_SPEED_LEN + MODE_HEADER_LENGTH, 25837 SD_DONTSAVE_PAGE, SD_PATH_STANDARD); 25838 sd_ssc_fini(ssc); 25839 if (rval != 0) { 25840 /* 25841 * The mode select failed for the requested drive speed, 25842 * so reset the data for the original drive speed and 25843 * send it to the target. The error is indicated by the 25844 * return value for the failed mode select. 
25845 */ 25846 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 25847 "sr_drive_speed: Mode Select Failed\n"); 25848 select_page->speed = sense_page->speed; 25849 ssc = sd_ssc_init(un); 25850 (void) sd_send_scsi_MODE_SELECT(ssc, CDB_GROUP0, select, 25851 MODEPAGE_CDROM_SPEED_LEN + MODE_HEADER_LENGTH, 25852 SD_DONTSAVE_PAGE, SD_PATH_STANDARD); 25853 sd_ssc_fini(ssc); 25854 } 25855 break; 25856 default: 25857 /* should not reach here, but check anyway */ 25858 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 25859 "sr_change_speed: Command '%x' Not Supported\n", cmd); 25860 rval = EINVAL; 25861 break; 25862 } 25863 25864 if (select) { 25865 kmem_free(select, BUFLEN_MODE_CDROM_SPEED); 25866 } 25867 if (sense) { 25868 kmem_free(sense, BUFLEN_MODE_CDROM_SPEED); 25869 } 25870 25871 return (rval); 25872 } 25873 25874 25875 /* 25876 * Function: sr_atapi_change_speed() 25877 * 25878 * Description: This routine is the driver entry point for handling CD-ROM 25879 * drive speed ioctl requests for MMC devices that do not support 25880 * the Real Time Streaming feature (0x107). 25881 * 25882 * Note: This routine will use the SET SPEED command which may not 25883 * be supported by all devices. 25884 * 25885 * Arguments: dev- the device 'dev_t' 25886 * cmd- the request type; one of CDROMGDRVSPEED (get) or 25887 * CDROMSDRVSPEED (set) 25888 * data- current drive speed or requested drive speed 25889 * flag- this argument is a pass through to ddi_copyxxx() directly 25890 * from the mode argument of ioctl(). 
 *
 * Return Code: the code returned by sd_send_scsi_cmd()
 *		EINVAL if invalid arguments are provided
 *		EFAULT if ddi_copyxxx() fails
 *		ENXIO if fail ddi_get_soft_state
 *		EIO if invalid mode sense block descriptor length
 */

static int
sr_atapi_change_speed(dev_t dev, int cmd, intptr_t data, int flag)
{
	struct sd_lun			*un;
	struct uscsi_cmd		*com = NULL;
	struct mode_header_grp2		*sense_mhp;
	uchar_t				*sense_page;
	uchar_t				*sense = NULL;
	char				cdb[CDB_GROUP5];
	int				bd_len;
	int				current_speed = 0;
	int				max_speed = 0;
	int				rval;
	sd_ssc_t			*ssc;

	ASSERT((cmd == CDROMGDRVSPEED) || (cmd == CDROMSDRVSPEED));

	if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) {
		return (ENXIO);
	}

	/* Fetch the CD capabilities page (0x2A) to learn current/max speed */
	sense = kmem_zalloc(BUFLEN_MODE_CDROM_CAP, KM_SLEEP);

	ssc = sd_ssc_init(un);
	rval = sd_send_scsi_MODE_SENSE(ssc, CDB_GROUP1, sense,
	    BUFLEN_MODE_CDROM_CAP, MODEPAGE_CDROM_CAP,
	    SD_PATH_STANDARD);
	sd_ssc_fini(ssc);
	if (rval != 0) {
		scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
		    "sr_atapi_change_speed: Mode Sense Failed\n");
		kmem_free(sense, BUFLEN_MODE_CDROM_CAP);
		return (rval);
	}

	/* Check the block descriptor len to handle only 1 block descriptor */
	sense_mhp = (struct mode_header_grp2 *)sense;
	bd_len = (sense_mhp->bdesc_length_hi << 8) | sense_mhp->bdesc_length_lo;
	if (bd_len > MODE_BLK_DESC_LENGTH) {
		scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
		    "sr_atapi_change_speed: Mode Sense returned invalid "
		    "block descriptor length\n");
		kmem_free(sense, BUFLEN_MODE_CDROM_CAP);
		return (EIO);
	}

	/* Calculate the current and maximum drive speeds */
	/* Byte offsets 8/9 and 14/15 are big-endian KB/sec fields of page 2A */
	sense_page = (uchar_t *)(sense + MODE_HEADER_LENGTH_GRP2 + bd_len);
	current_speed = (sense_page[14] << 8) | sense_page[15];
	max_speed = (sense_page[8] << 8) | sense_page[9];

	/* Process the command */
	switch (cmd) {
	case CDROMGDRVSPEED:
		/* Convert KB/sec to the 1X-multiple the caller expects */
		current_speed /= SD_SPEED_1X;
		if (ddi_copyout(&current_speed, (void *)data,
		    sizeof (int), flag) != 0)
			rval = EFAULT;
		break;
	case CDROMSDRVSPEED:
		/* Convert the speed code to KB/sec */
		switch ((uchar_t)data) {
		case CDROM_NORMAL_SPEED:
			current_speed = SD_SPEED_1X;
			break;
		case CDROM_DOUBLE_SPEED:
			current_speed = 2 * SD_SPEED_1X;
			break;
		case CDROM_QUAD_SPEED:
			current_speed = 4 * SD_SPEED_1X;
			break;
		case CDROM_TWELVE_SPEED:
			current_speed = 12 * SD_SPEED_1X;
			break;
		case CDROM_MAXIMUM_SPEED:
			/* 0xffff requests the drive's maximum speed */
			current_speed = 0xffff;
			break;
		default:
			scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
			    "sr_atapi_change_speed: invalid drive speed %d\n",
			    (uchar_t)data);
			kmem_free(sense, BUFLEN_MODE_CDROM_CAP);
			return (EINVAL);
		}

		/* Check the request against the drive's max speed. */
		if (current_speed != 0xffff) {
			if (current_speed > max_speed) {
				kmem_free(sense, BUFLEN_MODE_CDROM_CAP);
				return (EINVAL);
			}
		}

		/*
		 * Build and send the SET SPEED command
		 *
		 * Note: The SET SPEED (0xBB) command used in this routine is
		 * obsolete per the SCSI MMC spec but still supported in the
		 * MT FUJI vendor spec. Most equipment is adhereing to MT FUJI
		 * therefore the command is still implemented in this routine.
		 */
		bzero(cdb, sizeof (cdb));
		cdb[0] = (char)SCMD_SET_CDROM_SPEED;
		cdb[2] = (uchar_t)(current_speed >> 8);
		cdb[3] = (uchar_t)current_speed;
		com = kmem_zalloc(sizeof (*com), KM_SLEEP);
		com->uscsi_cdb = (caddr_t)cdb;
		com->uscsi_cdblen = CDB_GROUP5;
		com->uscsi_bufaddr = NULL;
		com->uscsi_buflen = 0;
		com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT;
		rval = sd_send_scsi_cmd(dev, com, FKIOCTL, 0, SD_PATH_STANDARD);
		break;
	default:
		scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
		    "sr_atapi_change_speed: Command '%x' Not Supported\n", cmd);
		rval = EINVAL;
	}

	if (sense) {
		kmem_free(sense, BUFLEN_MODE_CDROM_CAP);
	}
	if (com) {
		kmem_free(com, sizeof (*com));
	}
	return (rval);
}


/*
 * Function: sr_pause_resume()
 *
 * Description: This routine is the driver entry point for handling CD-ROM
 *		pause/resume ioctl requests. This only affects the audio play
 *		operation.
 *
 * Arguments: dev - the device 'dev_t'
 *		cmd - the request type; one of CDROMPAUSE or CDROMRESUME, used
 *		for setting the resume bit of the cdb.
26038 * 26039 * Return Code: the code returned by sd_send_scsi_cmd() 26040 * EINVAL if invalid mode specified 26041 * 26042 */ 26043 26044 static int 26045 sr_pause_resume(dev_t dev, int cmd) 26046 { 26047 struct sd_lun *un; 26048 struct uscsi_cmd *com; 26049 char cdb[CDB_GROUP1]; 26050 int rval; 26051 26052 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 26053 return (ENXIO); 26054 } 26055 26056 com = kmem_zalloc(sizeof (*com), KM_SLEEP); 26057 bzero(cdb, CDB_GROUP1); 26058 cdb[0] = SCMD_PAUSE_RESUME; 26059 switch (cmd) { 26060 case CDROMRESUME: 26061 cdb[8] = 1; 26062 break; 26063 case CDROMPAUSE: 26064 cdb[8] = 0; 26065 break; 26066 default: 26067 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, "sr_pause_resume:" 26068 " Command '%x' Not Supported\n", cmd); 26069 rval = EINVAL; 26070 goto done; 26071 } 26072 26073 com->uscsi_cdb = cdb; 26074 com->uscsi_cdblen = CDB_GROUP1; 26075 com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT; 26076 26077 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_SYSSPACE, 26078 SD_PATH_STANDARD); 26079 26080 done: 26081 kmem_free(com, sizeof (*com)); 26082 return (rval); 26083 } 26084 26085 26086 /* 26087 * Function: sr_play_msf() 26088 * 26089 * Description: This routine is the driver entry point for handling CD-ROM 26090 * ioctl requests to output the audio signals at the specified 26091 * starting address and continue the audio play until the specified 26092 * ending address (CDROMPLAYMSF) The address is in Minute Second 26093 * Frame (MSF) format. 26094 * 26095 * Arguments: dev - the device 'dev_t' 26096 * data - pointer to user provided audio msf structure, 26097 * specifying start/end addresses. 26098 * flag - this argument is a pass through to ddi_copyxxx() 26099 * directly from the mode argument of ioctl(). 
26100 * 26101 * Return Code: the code returned by sd_send_scsi_cmd() 26102 * EFAULT if ddi_copyxxx() fails 26103 * ENXIO if fail ddi_get_soft_state 26104 * EINVAL if data pointer is NULL 26105 */ 26106 26107 static int 26108 sr_play_msf(dev_t dev, caddr_t data, int flag) 26109 { 26110 struct sd_lun *un; 26111 struct uscsi_cmd *com; 26112 struct cdrom_msf msf_struct; 26113 struct cdrom_msf *msf = &msf_struct; 26114 char cdb[CDB_GROUP1]; 26115 int rval; 26116 26117 if (data == NULL) { 26118 return (EINVAL); 26119 } 26120 26121 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 26122 return (ENXIO); 26123 } 26124 26125 if (ddi_copyin(data, msf, sizeof (struct cdrom_msf), flag)) { 26126 return (EFAULT); 26127 } 26128 26129 com = kmem_zalloc(sizeof (*com), KM_SLEEP); 26130 bzero(cdb, CDB_GROUP1); 26131 cdb[0] = SCMD_PLAYAUDIO_MSF; 26132 if (un->un_f_cfg_playmsf_bcd == TRUE) { 26133 cdb[3] = BYTE_TO_BCD(msf->cdmsf_min0); 26134 cdb[4] = BYTE_TO_BCD(msf->cdmsf_sec0); 26135 cdb[5] = BYTE_TO_BCD(msf->cdmsf_frame0); 26136 cdb[6] = BYTE_TO_BCD(msf->cdmsf_min1); 26137 cdb[7] = BYTE_TO_BCD(msf->cdmsf_sec1); 26138 cdb[8] = BYTE_TO_BCD(msf->cdmsf_frame1); 26139 } else { 26140 cdb[3] = msf->cdmsf_min0; 26141 cdb[4] = msf->cdmsf_sec0; 26142 cdb[5] = msf->cdmsf_frame0; 26143 cdb[6] = msf->cdmsf_min1; 26144 cdb[7] = msf->cdmsf_sec1; 26145 cdb[8] = msf->cdmsf_frame1; 26146 } 26147 com->uscsi_cdb = cdb; 26148 com->uscsi_cdblen = CDB_GROUP1; 26149 com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT; 26150 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_SYSSPACE, 26151 SD_PATH_STANDARD); 26152 kmem_free(com, sizeof (*com)); 26153 return (rval); 26154 } 26155 26156 26157 /* 26158 * Function: sr_play_trkind() 26159 * 26160 * Description: This routine is the driver entry point for handling CD-ROM 26161 * ioctl requests to output the audio signals at the specified 26162 * starting address and continue the audio play until the specified 26163 * ending address (CDROMPLAYTRKIND). 
The address is in Track Index 26164 * format. 26165 * 26166 * Arguments: dev - the device 'dev_t' 26167 * data - pointer to user provided audio track/index structure, 26168 * specifying start/end addresses. 26169 * flag - this argument is a pass through to ddi_copyxxx() 26170 * directly from the mode argument of ioctl(). 26171 * 26172 * Return Code: the code returned by sd_send_scsi_cmd() 26173 * EFAULT if ddi_copyxxx() fails 26174 * ENXIO if fail ddi_get_soft_state 26175 * EINVAL if data pointer is NULL 26176 */ 26177 26178 static int 26179 sr_play_trkind(dev_t dev, caddr_t data, int flag) 26180 { 26181 struct cdrom_ti ti_struct; 26182 struct cdrom_ti *ti = &ti_struct; 26183 struct uscsi_cmd *com = NULL; 26184 char cdb[CDB_GROUP1]; 26185 int rval; 26186 26187 if (data == NULL) { 26188 return (EINVAL); 26189 } 26190 26191 if (ddi_copyin(data, ti, sizeof (struct cdrom_ti), flag)) { 26192 return (EFAULT); 26193 } 26194 26195 com = kmem_zalloc(sizeof (*com), KM_SLEEP); 26196 bzero(cdb, CDB_GROUP1); 26197 cdb[0] = SCMD_PLAYAUDIO_TI; 26198 cdb[4] = ti->cdti_trk0; 26199 cdb[5] = ti->cdti_ind0; 26200 cdb[7] = ti->cdti_trk1; 26201 cdb[8] = ti->cdti_ind1; 26202 com->uscsi_cdb = cdb; 26203 com->uscsi_cdblen = CDB_GROUP1; 26204 com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT; 26205 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_SYSSPACE, 26206 SD_PATH_STANDARD); 26207 kmem_free(com, sizeof (*com)); 26208 return (rval); 26209 } 26210 26211 26212 /* 26213 * Function: sr_read_all_subcodes() 26214 * 26215 * Description: This routine is the driver entry point for handling CD-ROM 26216 * ioctl requests to return raw subcode data while the target is 26217 * playing audio (CDROMSUBCODE). 26218 * 26219 * Arguments: dev - the device 'dev_t' 26220 * data - pointer to user provided cdrom subcode structure, 26221 * specifying the transfer length and address. 26222 * flag - this argument is a pass through to ddi_copyxxx() 26223 * directly from the mode argument of ioctl(). 
 *
 * Return Code: the code returned by sd_send_scsi_cmd()
 *		EFAULT if ddi_copyxxx() fails
 *		ENXIO if fail ddi_get_soft_state
 *		EINVAL if data pointer is NULL
 */

static int
sr_read_all_subcodes(dev_t dev, caddr_t data, int flag)
{
	struct sd_lun		*un = NULL;
	struct uscsi_cmd	*com = NULL;
	struct cdrom_subcode	*subcode = NULL;
	int			rval;
	size_t			buflen;
	char			cdb[CDB_GROUP5];

#ifdef _MULTI_DATAMODEL
	/* To support ILP32 applications in an LP64 world */
	struct cdrom_subcode32		cdrom_subcode32;
	struct cdrom_subcode32		*cdsc32 = &cdrom_subcode32;
#endif
	if (data == NULL) {
		return (EINVAL);
	}

	if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) {
		return (ENXIO);
	}

	subcode = kmem_zalloc(sizeof (struct cdrom_subcode), KM_SLEEP);

#ifdef _MULTI_DATAMODEL
	/* Copy in the user request, widening a 32-bit layout if needed */
	switch (ddi_model_convert_from(flag & FMODELS)) {
	case DDI_MODEL_ILP32:
		if (ddi_copyin(data, cdsc32, sizeof (*cdsc32), flag)) {
			scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
			    "sr_read_all_subcodes: ddi_copyin Failed\n");
			kmem_free(subcode, sizeof (struct cdrom_subcode));
			return (EFAULT);
		}
		/* Convert the ILP32 uscsi data from the application to LP64 */
		cdrom_subcode32tocdrom_subcode(cdsc32, subcode);
		break;
	case DDI_MODEL_NONE:
		if (ddi_copyin(data, subcode,
		    sizeof (struct cdrom_subcode), flag)) {
			scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
			    "sr_read_all_subcodes: ddi_copyin Failed\n");
			kmem_free(subcode, sizeof (struct cdrom_subcode));
			return (EFAULT);
		}
		break;
	}
#else /* ! _MULTI_DATAMODEL */
	if (ddi_copyin(data, subcode, sizeof (struct cdrom_subcode), flag)) {
		scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
		    "sr_read_all_subcodes: ddi_copyin Failed\n");
		kmem_free(subcode, sizeof (struct cdrom_subcode));
		return (EFAULT);
	}
#endif /* _MULTI_DATAMODEL */

	/*
	 * Since MMC-2 expects max 3 bytes for length, check if the
	 * length input is greater than 3 bytes
	 */
	if ((subcode->cdsc_length & 0xFF000000) != 0) {
		scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
		    "sr_read_all_subcodes: "
		    "cdrom transfer length too large: %d (limit %d)\n",
		    subcode->cdsc_length, 0xFFFFFF);
		kmem_free(subcode, sizeof (struct cdrom_subcode));
		return (EINVAL);
	}

	/* Transfer length is in subcode blocks of CDROM_BLK_SUBCODE bytes */
	buflen = CDROM_BLK_SUBCODE * subcode->cdsc_length;
	com = kmem_zalloc(sizeof (*com), KM_SLEEP);
	bzero(cdb, CDB_GROUP5);

	if (un->un_f_mmc_cap == TRUE) {
		/* MMC drive: use READ CD with an all-ones LBA (current play) */
		cdb[0] = (char)SCMD_READ_CD;
		cdb[2] = (char)0xff;
		cdb[3] = (char)0xff;
		cdb[4] = (char)0xff;
		cdb[5] = (char)0xff;
		cdb[6] = (((subcode->cdsc_length) & 0x00ff0000) >> 16);
		cdb[7] = (((subcode->cdsc_length) & 0x0000ff00) >> 8);
		cdb[8] = ((subcode->cdsc_length) & 0x000000ff);
		cdb[10] = 1;
	} else {
		/*
		 * Note: A vendor specific command (0xDF) is being used here to
		 * request a read of all subcodes.
		 */
		cdb[0] = (char)SCMD_READ_ALL_SUBCODES;
		cdb[6] = (((subcode->cdsc_length) & 0xff000000) >> 24);
		cdb[7] = (((subcode->cdsc_length) & 0x00ff0000) >> 16);
		cdb[8] = (((subcode->cdsc_length) & 0x0000ff00) >> 8);
		cdb[9] = ((subcode->cdsc_length) & 0x000000ff);
	}
	com->uscsi_cdb = cdb;
	com->uscsi_cdblen = CDB_GROUP5;
	/* Data lands directly in the caller's buffer (UIO_USERSPACE below) */
	com->uscsi_bufaddr = (caddr_t)subcode->cdsc_addr;
	com->uscsi_buflen = buflen;
	com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT|USCSI_READ;
	rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_USERSPACE,
	    SD_PATH_STANDARD);
	kmem_free(subcode, sizeof (struct cdrom_subcode));
	kmem_free(com, sizeof (*com));
	return (rval);
}


/*
 * Function: sr_read_subchannel()
 *
 * Description: This routine is the driver entry point for handling CD-ROM
 *		ioctl requests to return the Q sub-channel data of the CD
 *		current position block. (CDROMSUBCHNL) The data includes the
 *		track number, index number, absolute CD-ROM address (LBA or MSF
 *		format per the user) , track relative CD-ROM address (LBA or MSF
 *		format per the user), control data and audio status.
 *
 * Arguments: dev - the device 'dev_t'
 *		data - pointer to user provided cdrom sub-channel structure
 *		flag - this argument is a pass through to ddi_copyxxx()
 *		directly from the mode argument of ioctl().
 *
 * Return Code: the code returned by sd_send_scsi_cmd()
 *		EFAULT if ddi_copyxxx() fails
 *		ENXIO if fail ddi_get_soft_state
 *		EINVAL if data pointer is NULL
 */

static int
sr_read_subchannel(dev_t dev, caddr_t data, int flag)
{
	struct sd_lun		*un;
	struct uscsi_cmd	*com;
	struct cdrom_subchnl	subchanel;	/* (sic) historic spelling */
	struct cdrom_subchnl	*subchnl = &subchanel;
	char			cdb[CDB_GROUP1];
	caddr_t			buffer;
	int			rval;

	if (data == NULL) {
		return (EINVAL);
	}

	if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL ||
	    (un->un_state == SD_STATE_OFFLINE)) {
		return (ENXIO);
	}

	if (ddi_copyin(data, subchnl, sizeof (struct cdrom_subchnl), flag)) {
		return (EFAULT);
	}

	/* 16 bytes: READ SUB-CHANNEL current position response */
	buffer = kmem_zalloc((size_t)16, KM_SLEEP);
	bzero(cdb, CDB_GROUP1);
	cdb[0] = SCMD_READ_SUBCHANNEL;
	/* Set the MSF bit based on the user requested address format */
	cdb[1] = (subchnl->cdsc_format & CDROM_LBA) ? 0 : 0x02;
	/*
	 * Set the Q bit in byte 2 to indicate that Q sub-channel data be
	 * returned
	 */
	cdb[2] = 0x40;
	/*
	 * Set byte 3 to specify the return data format. A value of 0x01
	 * indicates that the CD-ROM current position should be returned.
	 */
	cdb[3] = 0x01;
	cdb[8] = 0x10;
	com = kmem_zalloc(sizeof (*com), KM_SLEEP);
	com->uscsi_cdb = cdb;
	com->uscsi_cdblen = CDB_GROUP1;
	com->uscsi_bufaddr = buffer;
	com->uscsi_buflen = 16;
	com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT|USCSI_READ;
	rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_SYSSPACE,
	    SD_PATH_STANDARD);
	if (rval != 0) {
		kmem_free(buffer, 16);
		kmem_free(com, sizeof (*com));
		return (rval);
	}

	/* Process the returned Q sub-channel data */
	subchnl->cdsc_audiostatus = buffer[1];
	subchnl->cdsc_adr	= (buffer[5] & 0xF0);
	subchnl->cdsc_ctrl	= (buffer[5] & 0x0F);
	subchnl->cdsc_trk	= buffer[6];
	subchnl->cdsc_ind	= buffer[7];
	if (subchnl->cdsc_format & CDROM_LBA) {
		/* Bytes 8-11 / 12-15 are big-endian absolute/relative LBAs */
		subchnl->cdsc_absaddr.lba =
		    ((uchar_t)buffer[8] << 24) + ((uchar_t)buffer[9] << 16) +
		    ((uchar_t)buffer[10] << 8) + ((uchar_t)buffer[11]);
		subchnl->cdsc_reladdr.lba =
		    ((uchar_t)buffer[12] << 24) + ((uchar_t)buffer[13] << 16) +
		    ((uchar_t)buffer[14] << 8) + ((uchar_t)buffer[15]);
	} else if (un->un_f_cfg_readsub_bcd == TRUE) {
		/* Drive reports MSF values in BCD; decode before returning */
		subchnl->cdsc_absaddr.msf.minute = BCD_TO_BYTE(buffer[9]);
		subchnl->cdsc_absaddr.msf.second = BCD_TO_BYTE(buffer[10]);
		subchnl->cdsc_absaddr.msf.frame = BCD_TO_BYTE(buffer[11]);
		subchnl->cdsc_reladdr.msf.minute = BCD_TO_BYTE(buffer[13]);
		subchnl->cdsc_reladdr.msf.second = BCD_TO_BYTE(buffer[14]);
		subchnl->cdsc_reladdr.msf.frame = BCD_TO_BYTE(buffer[15]);
	} else {
		subchnl->cdsc_absaddr.msf.minute = buffer[9];
		subchnl->cdsc_absaddr.msf.second = buffer[10];
		subchnl->cdsc_absaddr.msf.frame = buffer[11];
		subchnl->cdsc_reladdr.msf.minute = buffer[13];
		subchnl->cdsc_reladdr.msf.second = buffer[14];
		subchnl->cdsc_reladdr.msf.frame = buffer[15];
	}
	kmem_free(buffer, 16);
	kmem_free(com, sizeof (*com));
	if (ddi_copyout(subchnl, data, sizeof (struct cdrom_subchnl), flag)
	    != 0) {
		return (EFAULT);
	}
	return (rval);
}


/*
 * Function: sr_read_tocentry()
 *
 * Description: This routine is the driver entry point for handling CD-ROM
 *		ioctl requests to read from the Table of Contents (TOC)
 *		(CDROMREADTOCENTRY). This routine provides the ADR and CTRL
 *		fields, the starting address (LBA or MSF format per the user)
 *		and the data mode if the user specified track is a data track.
 *
 *		Note: The READ HEADER (0x44) command used in this routine is
 *		obsolete per the SCSI MMC spec but still supported in the
 *		MT FUJI vendor spec. Most equipment is adhereing to MT FUJI
 *		therefore the command is still implemented in this routine.
 *
 * Arguments: dev - the device 'dev_t'
 *		data - pointer to user provided toc entry structure,
 *		specifying the track # and the address format
 *		(LBA or MSF).
 *		flag - this argument is a pass through to ddi_copyxxx()
 *		directly from the mode argument of ioctl().
26471 * 26472 * Return Code: the code returned by sd_send_scsi_cmd() 26473 * EFAULT if ddi_copyxxx() fails 26474 * ENXIO if fail ddi_get_soft_state 26475 * EINVAL if data pointer is NULL 26476 */ 26477 26478 static int 26479 sr_read_tocentry(dev_t dev, caddr_t data, int flag) 26480 { 26481 struct sd_lun *un = NULL; 26482 struct uscsi_cmd *com; 26483 struct cdrom_tocentry toc_entry; 26484 struct cdrom_tocentry *entry = &toc_entry; 26485 caddr_t buffer; 26486 int rval; 26487 char cdb[CDB_GROUP1]; 26488 26489 if (data == NULL) { 26490 return (EINVAL); 26491 } 26492 26493 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL || 26494 (un->un_state == SD_STATE_OFFLINE)) { 26495 return (ENXIO); 26496 } 26497 26498 if (ddi_copyin(data, entry, sizeof (struct cdrom_tocentry), flag)) { 26499 return (EFAULT); 26500 } 26501 26502 /* Validate the requested track and address format */ 26503 if (!(entry->cdte_format & (CDROM_LBA | CDROM_MSF))) { 26504 return (EINVAL); 26505 } 26506 26507 if (entry->cdte_track == 0) { 26508 return (EINVAL); 26509 } 26510 26511 buffer = kmem_zalloc((size_t)12, KM_SLEEP); 26512 com = kmem_zalloc(sizeof (*com), KM_SLEEP); 26513 bzero(cdb, CDB_GROUP1); 26514 26515 cdb[0] = SCMD_READ_TOC; 26516 /* Set the MSF bit based on the user requested address format */ 26517 cdb[1] = ((entry->cdte_format & CDROM_LBA) ? 0 : 2); 26518 if (un->un_f_cfg_read_toc_trk_bcd == TRUE) { 26519 cdb[6] = BYTE_TO_BCD(entry->cdte_track); 26520 } else { 26521 cdb[6] = entry->cdte_track; 26522 } 26523 26524 /* 26525 * Bytes 7 & 8 are the 12 byte allocation length for a single entry. 
26526 * (4 byte TOC response header + 8 byte track descriptor) 26527 */ 26528 cdb[8] = 12; 26529 com->uscsi_cdb = cdb; 26530 com->uscsi_cdblen = CDB_GROUP1; 26531 com->uscsi_bufaddr = buffer; 26532 com->uscsi_buflen = 0x0C; 26533 com->uscsi_flags = (USCSI_DIAGNOSE | USCSI_SILENT | USCSI_READ); 26534 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_SYSSPACE, 26535 SD_PATH_STANDARD); 26536 if (rval != 0) { 26537 kmem_free(buffer, 12); 26538 kmem_free(com, sizeof (*com)); 26539 return (rval); 26540 } 26541 26542 /* Process the toc entry */ 26543 entry->cdte_adr = (buffer[5] & 0xF0) >> 4; 26544 entry->cdte_ctrl = (buffer[5] & 0x0F); 26545 if (entry->cdte_format & CDROM_LBA) { 26546 entry->cdte_addr.lba = 26547 ((uchar_t)buffer[8] << 24) + ((uchar_t)buffer[9] << 16) + 26548 ((uchar_t)buffer[10] << 8) + ((uchar_t)buffer[11]); 26549 } else if (un->un_f_cfg_read_toc_addr_bcd == TRUE) { 26550 entry->cdte_addr.msf.minute = BCD_TO_BYTE(buffer[9]); 26551 entry->cdte_addr.msf.second = BCD_TO_BYTE(buffer[10]); 26552 entry->cdte_addr.msf.frame = BCD_TO_BYTE(buffer[11]); 26553 /* 26554 * Send a READ TOC command using the LBA address format to get 26555 * the LBA for the track requested so it can be used in the 26556 * READ HEADER request 26557 * 26558 * Note: The MSF bit of the READ HEADER command specifies the 26559 * output format. The block address specified in that command 26560 * must be in LBA format. 
26561 */ 26562 cdb[1] = 0; 26563 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_SYSSPACE, 26564 SD_PATH_STANDARD); 26565 if (rval != 0) { 26566 kmem_free(buffer, 12); 26567 kmem_free(com, sizeof (*com)); 26568 return (rval); 26569 } 26570 } else { 26571 entry->cdte_addr.msf.minute = buffer[9]; 26572 entry->cdte_addr.msf.second = buffer[10]; 26573 entry->cdte_addr.msf.frame = buffer[11]; 26574 /* 26575 * Send a READ TOC command using the LBA address format to get 26576 * the LBA for the track requested so it can be used in the 26577 * READ HEADER request 26578 * 26579 * Note: The MSF bit of the READ HEADER command specifies the 26580 * output format. The block address specified in that command 26581 * must be in LBA format. 26582 */ 26583 cdb[1] = 0; 26584 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_SYSSPACE, 26585 SD_PATH_STANDARD); 26586 if (rval != 0) { 26587 kmem_free(buffer, 12); 26588 kmem_free(com, sizeof (*com)); 26589 return (rval); 26590 } 26591 } 26592 26593 /* 26594 * Build and send the READ HEADER command to determine the data mode of 26595 * the user specified track. 26596 */ 26597 if ((entry->cdte_ctrl & CDROM_DATA_TRACK) && 26598 (entry->cdte_track != CDROM_LEADOUT)) { 26599 bzero(cdb, CDB_GROUP1); 26600 cdb[0] = SCMD_READ_HEADER; 26601 cdb[2] = buffer[8]; 26602 cdb[3] = buffer[9]; 26603 cdb[4] = buffer[10]; 26604 cdb[5] = buffer[11]; 26605 cdb[8] = 0x08; 26606 com->uscsi_buflen = 0x08; 26607 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_SYSSPACE, 26608 SD_PATH_STANDARD); 26609 if (rval == 0) { 26610 entry->cdte_datamode = buffer[0]; 26611 } else { 26612 /* 26613 * READ HEADER command failed, since this is 26614 * obsoleted in one spec, its better to return 26615 * -1 for an invlid track so that we can still 26616 * receive the rest of the TOC data. 
26617 */ 26618 entry->cdte_datamode = (uchar_t)-1; 26619 } 26620 } else { 26621 entry->cdte_datamode = (uchar_t)-1; 26622 } 26623 26624 kmem_free(buffer, 12); 26625 kmem_free(com, sizeof (*com)); 26626 if (ddi_copyout(entry, data, sizeof (struct cdrom_tocentry), flag) != 0) 26627 return (EFAULT); 26628 26629 return (rval); 26630 } 26631 26632 26633 /* 26634 * Function: sr_read_tochdr() 26635 * 26636 * Description: This routine is the driver entry point for handling CD-ROM 26637 * ioctl requests to read the Table of Contents (TOC) header 26638 * (CDROMREADTOHDR). The TOC header consists of the disk starting 26639 * and ending track numbers 26640 * 26641 * Arguments: dev - the device 'dev_t' 26642 * data - pointer to user provided toc header structure, 26643 * specifying the starting and ending track numbers. 26644 * flag - this argument is a pass through to ddi_copyxxx() 26645 * directly from the mode argument of ioctl(). 26646 * 26647 * Return Code: the code returned by sd_send_scsi_cmd() 26648 * EFAULT if ddi_copyxxx() fails 26649 * ENXIO if fail ddi_get_soft_state 26650 * EINVAL if data pointer is NULL 26651 */ 26652 26653 static int 26654 sr_read_tochdr(dev_t dev, caddr_t data, int flag) 26655 { 26656 struct sd_lun *un; 26657 struct uscsi_cmd *com; 26658 struct cdrom_tochdr toc_header; 26659 struct cdrom_tochdr *hdr = &toc_header; 26660 char cdb[CDB_GROUP1]; 26661 int rval; 26662 caddr_t buffer; 26663 26664 if (data == NULL) { 26665 return (EINVAL); 26666 } 26667 26668 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL || 26669 (un->un_state == SD_STATE_OFFLINE)) { 26670 return (ENXIO); 26671 } 26672 26673 buffer = kmem_zalloc(4, KM_SLEEP); 26674 bzero(cdb, CDB_GROUP1); 26675 cdb[0] = SCMD_READ_TOC; 26676 /* 26677 * Specifying a track number of 0x00 in the READ TOC command indicates 26678 * that the TOC header should be returned 26679 */ 26680 cdb[6] = 0x00; 26681 /* 26682 * Bytes 7 & 8 are the 4 byte allocation length for TOC header. 
26683 * (2 byte data len + 1 byte starting track # + 1 byte ending track #) 26684 */ 26685 cdb[8] = 0x04; 26686 com = kmem_zalloc(sizeof (*com), KM_SLEEP); 26687 com->uscsi_cdb = cdb; 26688 com->uscsi_cdblen = CDB_GROUP1; 26689 com->uscsi_bufaddr = buffer; 26690 com->uscsi_buflen = 0x04; 26691 com->uscsi_timeout = 300; 26692 com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT|USCSI_READ; 26693 26694 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_SYSSPACE, 26695 SD_PATH_STANDARD); 26696 if (un->un_f_cfg_read_toc_trk_bcd == TRUE) { 26697 hdr->cdth_trk0 = BCD_TO_BYTE(buffer[2]); 26698 hdr->cdth_trk1 = BCD_TO_BYTE(buffer[3]); 26699 } else { 26700 hdr->cdth_trk0 = buffer[2]; 26701 hdr->cdth_trk1 = buffer[3]; 26702 } 26703 kmem_free(buffer, 4); 26704 kmem_free(com, sizeof (*com)); 26705 if (ddi_copyout(hdr, data, sizeof (struct cdrom_tochdr), flag) != 0) { 26706 return (EFAULT); 26707 } 26708 return (rval); 26709 } 26710 26711 26712 /* 26713 * Note: The following sr_read_mode1(), sr_read_cd_mode2(), sr_read_mode2(), 26714 * sr_read_cdda(), sr_read_cdxa(), routines implement driver support for 26715 * handling CDROMREAD ioctl requests for mode 1 user data, mode 2 user data, 26716 * digital audio and extended architecture digital audio. These modes are 26717 * defined in the IEC908 (Red Book), ISO10149 (Yellow Book), and the SCSI3 26718 * MMC specs. 26719 * 26720 * In addition to support for the various data formats these routines also 26721 * include support for devices that implement only the direct access READ 26722 * commands (0x08, 0x28), devices that implement the READ_CD commands 26723 * (0xBE, 0xD4), and devices that implement the vendor unique READ CDDA and 26724 * READ CDXA commands (0xD8, 0xDB) 26725 */ 26726 26727 /* 26728 * Function: sr_read_mode1() 26729 * 26730 * Description: This routine is the driver entry point for handling CD-ROM 26731 * ioctl read mode1 requests (CDROMREADMODE1). 
26732 * 26733 * Arguments: dev - the device 'dev_t' 26734 * data - pointer to user provided cd read structure specifying 26735 * the lba buffer address and length. 26736 * flag - this argument is a pass through to ddi_copyxxx() 26737 * directly from the mode argument of ioctl(). 26738 * 26739 * Return Code: the code returned by sd_send_scsi_cmd() 26740 * EFAULT if ddi_copyxxx() fails 26741 * ENXIO if fail ddi_get_soft_state 26742 * EINVAL if data pointer is NULL 26743 */ 26744 26745 static int 26746 sr_read_mode1(dev_t dev, caddr_t data, int flag) 26747 { 26748 struct sd_lun *un; 26749 struct cdrom_read mode1_struct; 26750 struct cdrom_read *mode1 = &mode1_struct; 26751 int rval; 26752 sd_ssc_t *ssc; 26753 26754 #ifdef _MULTI_DATAMODEL 26755 /* To support ILP32 applications in an LP64 world */ 26756 struct cdrom_read32 cdrom_read32; 26757 struct cdrom_read32 *cdrd32 = &cdrom_read32; 26758 #endif /* _MULTI_DATAMODEL */ 26759 26760 if (data == NULL) { 26761 return (EINVAL); 26762 } 26763 26764 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL || 26765 (un->un_state == SD_STATE_OFFLINE)) { 26766 return (ENXIO); 26767 } 26768 26769 SD_TRACE(SD_LOG_ATTACH_DETACH, un, 26770 "sd_read_mode1: entry: un:0x%p\n", un); 26771 26772 #ifdef _MULTI_DATAMODEL 26773 switch (ddi_model_convert_from(flag & FMODELS)) { 26774 case DDI_MODEL_ILP32: 26775 if (ddi_copyin(data, cdrd32, sizeof (*cdrd32), flag) != 0) { 26776 return (EFAULT); 26777 } 26778 /* Convert the ILP32 uscsi data from the application to LP64 */ 26779 cdrom_read32tocdrom_read(cdrd32, mode1); 26780 break; 26781 case DDI_MODEL_NONE: 26782 if (ddi_copyin(data, mode1, sizeof (struct cdrom_read), flag)) { 26783 return (EFAULT); 26784 } 26785 } 26786 #else /* ! 
_MULTI_DATAMODEL */ 26787 if (ddi_copyin(data, mode1, sizeof (struct cdrom_read), flag)) { 26788 return (EFAULT); 26789 } 26790 #endif /* _MULTI_DATAMODEL */ 26791 26792 ssc = sd_ssc_init(un); 26793 rval = sd_send_scsi_READ(ssc, mode1->cdread_bufaddr, 26794 mode1->cdread_buflen, mode1->cdread_lba, SD_PATH_STANDARD); 26795 sd_ssc_fini(ssc); 26796 26797 SD_TRACE(SD_LOG_ATTACH_DETACH, un, 26798 "sd_read_mode1: exit: un:0x%p\n", un); 26799 26800 return (rval); 26801 } 26802 26803 26804 /* 26805 * Function: sr_read_cd_mode2() 26806 * 26807 * Description: This routine is the driver entry point for handling CD-ROM 26808 * ioctl read mode2 requests (CDROMREADMODE2) for devices that 26809 * support the READ CD (0xBE) command or the 1st generation 26810 * READ CD (0xD4) command. 26811 * 26812 * Arguments: dev - the device 'dev_t' 26813 * data - pointer to user provided cd read structure specifying 26814 * the lba buffer address and length. 26815 * flag - this argument is a pass through to ddi_copyxxx() 26816 * directly from the mode argument of ioctl(). 
26817 * 26818 * Return Code: the code returned by sd_send_scsi_cmd() 26819 * EFAULT if ddi_copyxxx() fails 26820 * ENXIO if fail ddi_get_soft_state 26821 * EINVAL if data pointer is NULL 26822 */ 26823 26824 static int 26825 sr_read_cd_mode2(dev_t dev, caddr_t data, int flag) 26826 { 26827 struct sd_lun *un; 26828 struct uscsi_cmd *com; 26829 struct cdrom_read mode2_struct; 26830 struct cdrom_read *mode2 = &mode2_struct; 26831 uchar_t cdb[CDB_GROUP5]; 26832 int nblocks; 26833 int rval; 26834 #ifdef _MULTI_DATAMODEL 26835 /* To support ILP32 applications in an LP64 world */ 26836 struct cdrom_read32 cdrom_read32; 26837 struct cdrom_read32 *cdrd32 = &cdrom_read32; 26838 #endif /* _MULTI_DATAMODEL */ 26839 26840 if (data == NULL) { 26841 return (EINVAL); 26842 } 26843 26844 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL || 26845 (un->un_state == SD_STATE_OFFLINE)) { 26846 return (ENXIO); 26847 } 26848 26849 #ifdef _MULTI_DATAMODEL 26850 switch (ddi_model_convert_from(flag & FMODELS)) { 26851 case DDI_MODEL_ILP32: 26852 if (ddi_copyin(data, cdrd32, sizeof (*cdrd32), flag) != 0) { 26853 return (EFAULT); 26854 } 26855 /* Convert the ILP32 uscsi data from the application to LP64 */ 26856 cdrom_read32tocdrom_read(cdrd32, mode2); 26857 break; 26858 case DDI_MODEL_NONE: 26859 if (ddi_copyin(data, mode2, sizeof (*mode2), flag) != 0) { 26860 return (EFAULT); 26861 } 26862 break; 26863 } 26864 26865 #else /* ! 
_MULTI_DATAMODEL */ 26866 if (ddi_copyin(data, mode2, sizeof (*mode2), flag) != 0) { 26867 return (EFAULT); 26868 } 26869 #endif /* _MULTI_DATAMODEL */ 26870 26871 bzero(cdb, sizeof (cdb)); 26872 if (un->un_f_cfg_read_cd_xd4 == TRUE) { 26873 /* Read command supported by 1st generation atapi drives */ 26874 cdb[0] = SCMD_READ_CDD4; 26875 } else { 26876 /* Universal CD Access Command */ 26877 cdb[0] = SCMD_READ_CD; 26878 } 26879 26880 /* 26881 * Set expected sector type to: 2336s byte, Mode 2 Yellow Book 26882 */ 26883 cdb[1] = CDROM_SECTOR_TYPE_MODE2; 26884 26885 /* set the start address */ 26886 cdb[2] = (uchar_t)((mode2->cdread_lba >> 24) & 0XFF); 26887 cdb[3] = (uchar_t)((mode2->cdread_lba >> 16) & 0XFF); 26888 cdb[4] = (uchar_t)((mode2->cdread_lba >> 8) & 0xFF); 26889 cdb[5] = (uchar_t)(mode2->cdread_lba & 0xFF); 26890 26891 /* set the transfer length */ 26892 nblocks = mode2->cdread_buflen / 2336; 26893 cdb[6] = (uchar_t)(nblocks >> 16); 26894 cdb[7] = (uchar_t)(nblocks >> 8); 26895 cdb[8] = (uchar_t)nblocks; 26896 26897 /* set the filter bits */ 26898 cdb[9] = CDROM_READ_CD_USERDATA; 26899 26900 com = kmem_zalloc(sizeof (*com), KM_SLEEP); 26901 com->uscsi_cdb = (caddr_t)cdb; 26902 com->uscsi_cdblen = sizeof (cdb); 26903 com->uscsi_bufaddr = mode2->cdread_bufaddr; 26904 com->uscsi_buflen = mode2->cdread_buflen; 26905 com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT|USCSI_READ; 26906 26907 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_USERSPACE, 26908 SD_PATH_STANDARD); 26909 kmem_free(com, sizeof (*com)); 26910 return (rval); 26911 } 26912 26913 26914 /* 26915 * Function: sr_read_mode2() 26916 * 26917 * Description: This routine is the driver entry point for handling CD-ROM 26918 * ioctl read mode2 requests (CDROMREADMODE2) for devices that 26919 * do not support the READ CD (0xBE) command. 26920 * 26921 * Arguments: dev - the device 'dev_t' 26922 * data - pointer to user provided cd read structure specifying 26923 * the lba buffer address and length. 
 *		flag	- this argument is a pass through to ddi_copyxxx()
 *			  directly from the mode argument of ioctl().
 *
 * Return Code: the code returned by sd_send_scsi_cmd()
 *		EFAULT if ddi_copyxxx() fails
 *		ENXIO if fail ddi_get_soft_state
 *		EINVAL if data pointer is NULL
 *		EIO if fail to reset block size
 *		EAGAIN if commands are in progress in the driver
 */

static int
sr_read_mode2(dev_t dev, caddr_t data, int flag)
{
	struct sd_lun		*un;
	struct cdrom_read	mode2_struct;
	struct cdrom_read	*mode2 = &mode2_struct;
	int			rval;
	uint32_t		restore_blksize;	/* block size to restore on exit */
	struct uscsi_cmd	*com;
	uchar_t			cdb[CDB_GROUP0];
	int			nblocks;

#ifdef _MULTI_DATAMODEL
	/* To support ILP32 applications in an LP64 world */
	struct cdrom_read32	cdrom_read32;
	struct cdrom_read32	*cdrd32 = &cdrom_read32;
#endif /* _MULTI_DATAMODEL */

	if (data == NULL) {
		return (EINVAL);
	}

	if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL ||
	    (un->un_state == SD_STATE_OFFLINE)) {
		return (ENXIO);
	}

	/*
	 * Because this routine will update the device and driver block size
	 * being used we want to make sure there are no commands in progress.
	 * If commands are in progress the user will have to try again.
	 *
	 * We check for 1 instead of 0 because we increment un_ncmds_in_driver
	 * in sdioctl to protect commands from sdioctl through to the top of
	 * sd_uscsi_strategy. See sdioctl for details.
	 */
	mutex_enter(SD_MUTEX(un));
	if (un->un_ncmds_in_driver != 1) {
		mutex_exit(SD_MUTEX(un));
		return (EAGAIN);
	}
	mutex_exit(SD_MUTEX(un));

	SD_TRACE(SD_LOG_ATTACH_DETACH, un,
	    "sd_read_mode2: entry: un:0x%p\n", un);

	/* Copy in the caller's request, converting from ILP32 if needed */
#ifdef _MULTI_DATAMODEL
	switch (ddi_model_convert_from(flag & FMODELS)) {
	case DDI_MODEL_ILP32:
		if (ddi_copyin(data, cdrd32, sizeof (*cdrd32), flag) != 0) {
			return (EFAULT);
		}
		/* Convert the ILP32 uscsi data from the application to LP64 */
		cdrom_read32tocdrom_read(cdrd32, mode2);
		break;
	case DDI_MODEL_NONE:
		if (ddi_copyin(data, mode2, sizeof (*mode2), flag) != 0) {
			return (EFAULT);
		}
		break;
	}
#else /* ! _MULTI_DATAMODEL */
	if (ddi_copyin(data, mode2, sizeof (*mode2), flag)) {
		return (EFAULT);
	}
#endif /* _MULTI_DATAMODEL */

	/* Store the current target block size for restoration later */
	restore_blksize = un->un_tgt_blocksize;

	/* Change the device and soft state target block size to 2336 */
	if (sr_sector_mode(dev, SD_MODE2_BLKSIZE) != 0) {
		rval = EIO;
		goto done;
	}


	bzero(cdb, sizeof (cdb));

	/* set READ operation */
	cdb[0] = SCMD_READ;

	/*
	 * adjust lba for 2kbyte blocks from 512 byte blocks
	 * (assumes cdread_lba arrives in 512-byte units — see callers)
	 */
	mode2->cdread_lba >>= 2;

	/* set the start address (Group 0 READ: 21-bit LBA) */
	cdb[1] = (uchar_t)((mode2->cdread_lba >> 16) & 0X1F);
	cdb[2] = (uchar_t)((mode2->cdread_lba >> 8) & 0xFF);
	cdb[3] = (uchar_t)(mode2->cdread_lba & 0xFF);

	/* set the transfer length, in whole 2336-byte sectors */
	nblocks = mode2->cdread_buflen / 2336;
	cdb[4] = (uchar_t)nblocks & 0xFF;

	/* build command */
	com = kmem_zalloc(sizeof (*com), KM_SLEEP);
	com->uscsi_cdb = (caddr_t)cdb;
	com->uscsi_cdblen = sizeof (cdb);
	com->uscsi_bufaddr = mode2->cdread_bufaddr;
	com->uscsi_buflen = mode2->cdread_buflen;
	com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT|USCSI_READ;

	/*
	 * Issue SCSI command with user space address for read buffer.
	 *
	 * This sends the command through main channel in the driver.
	 *
	 * Since this is accessed via an IOCTL call, we go through the
	 * standard path, so that if the device was powered down, then
	 * it would be 'awakened' to handle the command.
	 */
	rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_USERSPACE,
	    SD_PATH_STANDARD);

	kmem_free(com, sizeof (*com));

	/* Restore the device and soft state target block size */
	if (sr_sector_mode(dev, restore_blksize) != 0) {
		scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
		    "can't do switch back to mode 1\n");
		/*
		 * If sd_send_scsi_READ succeeded we still need to report
		 * an error because we failed to reset the block size
		 */
		if (rval == 0) {
			rval = EIO;
		}
	}

done:
	SD_TRACE(SD_LOG_ATTACH_DETACH, un,
	    "sd_read_mode2: exit: un:0x%p\n", un);

	return (rval);
}


/*
 * Function: sr_sector_mode()
 *
 * Description: This utility function is used by sr_read_mode2 to set the target
 *		block size based on the user specified size. This is a legacy
 *		implementation based upon a vendor specific mode page
 *
 * Arguments: dev	- the device 'dev_t'
 *		data	- flag indicating if block size is being set to 2336 or
 *			  512.
 *
 * Return Code: the code returned by sd_send_scsi_cmd()
 *		EFAULT if ddi_copyxxx() fails
 *		ENXIO if fail ddi_get_soft_state
 *		EINVAL if data pointer is NULL
 */

static int
sr_sector_mode(dev_t dev, uint32_t blksize)
{
	struct sd_lun	*un;
	uchar_t		*sense;
	uchar_t		*select;
	int		rval;
	sd_ssc_t	*ssc;

	if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL ||
	    (un->un_state == SD_STATE_OFFLINE)) {
		return (ENXIO);
	}

	sense = kmem_zalloc(20, KM_SLEEP);

	/* Note: This is a vendor specific mode page (0x81) */
	ssc = sd_ssc_init(un);
	rval = sd_send_scsi_MODE_SENSE(ssc, CDB_GROUP0, sense, 20, 0x81,
	    SD_PATH_STANDARD);
	sd_ssc_fini(ssc);
	if (rval != 0) {
		SD_ERROR(SD_LOG_IOCTL_RMMEDIA, un,
		    "sr_sector_mode: Mode Sense failed\n");
		kmem_free(sense, 20);
		return (rval);
	}
	/*
	 * Build the MODE SELECT data. NOTE(review): byte offsets appear to
	 * be mode header + block descriptor (blksize at 10-11) followed by
	 * vendor page 0x81 data copied from the sense buffer — legacy,
	 * vendor specific layout; confirm against the device documentation.
	 */
	select = kmem_zalloc(20, KM_SLEEP);
	select[3] = 0x08;
	select[10] = ((blksize >> 8) & 0xff);
	select[11] = (blksize & 0xff);
	select[12] = 0x01;
	select[13] = 0x06;
	select[14] = sense[14];
	select[15] = sense[15];
	if (blksize == SD_MODE2_BLKSIZE) {
		select[14] |= 0x01;
	}

	ssc = sd_ssc_init(un);
	rval = sd_send_scsi_MODE_SELECT(ssc, CDB_GROUP0, select, 20,
	    SD_DONTSAVE_PAGE, SD_PATH_STANDARD);
	sd_ssc_fini(ssc);
	if (rval != 0) {
		SD_ERROR(SD_LOG_IOCTL_RMMEDIA, un,
		    "sr_sector_mode: Mode Select failed\n");
	} else {
		/*
		 * Only update the softstate block size if we successfully
		 * changed the device block mode.
		 */
		mutex_enter(SD_MUTEX(un));
		sd_update_block_info(un, blksize, 0);
		mutex_exit(SD_MUTEX(un));
	}
	kmem_free(sense, 20);
	kmem_free(select, 20);
	return (rval);
}


/*
 * Function: sr_read_cdda()
 *
 * Description: This routine is the driver entry point for handling CD-ROM
 *		ioctl requests to return CD-DA or subcode data. (CDROMCDDA) If
 *		the target supports CDDA these requests are handled via a vendor
 *		specific command (0xD8) If the target does not support CDDA
 *		these requests are handled via the READ CD command (0xBE).
 *
 * Arguments: dev	- the device 'dev_t'
 *		data	- pointer to user provided CD-DA structure specifying
 *			  the track starting address, transfer length, and
 *			  subcode options.
 *		flag	- this argument is a pass through to ddi_copyxxx()
 *			  directly from the mode argument of ioctl().
 *
 * Return Code: the code returned by sd_send_scsi_cmd()
 *		EFAULT if ddi_copyxxx() fails
 *		ENXIO if fail ddi_get_soft_state
 *		EINVAL if invalid arguments are provided
 *		ENOTTY
 */

static int
sr_read_cdda(dev_t dev, caddr_t data, int flag)
{
	struct sd_lun		*un;
	struct uscsi_cmd	*com;
	struct cdrom_cdda	*cdda;
	int			rval;
	size_t			buflen;
	char			cdb[CDB_GROUP5];

#ifdef _MULTI_DATAMODEL
	/* To support ILP32 applications in an LP64 world */
	struct cdrom_cdda32	cdrom_cdda32;
	struct cdrom_cdda32	*cdda32 = &cdrom_cdda32;
#endif /* _MULTI_DATAMODEL */

	if (data == NULL) {
		return (EINVAL);
	}

	if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) {
		return (ENXIO);
	}

	cdda = kmem_zalloc(sizeof (struct cdrom_cdda), KM_SLEEP);

	/* Copy in the caller's request, converting from ILP32 if needed */
#ifdef _MULTI_DATAMODEL
	switch (ddi_model_convert_from(flag & FMODELS)) {
	case DDI_MODEL_ILP32:
		if (ddi_copyin(data, cdda32, sizeof (*cdda32), flag)) {
			scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
			    "sr_read_cdda: ddi_copyin Failed\n");
			kmem_free(cdda, sizeof (struct cdrom_cdda));
			return (EFAULT);
		}
		/* Convert the ILP32 uscsi data from the application to LP64 */
		cdrom_cdda32tocdrom_cdda(cdda32, cdda);
		break;
	case DDI_MODEL_NONE:
		if (ddi_copyin(data, cdda, sizeof (struct cdrom_cdda), flag)) {
			scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
			    "sr_read_cdda: ddi_copyin Failed\n");
			kmem_free(cdda, sizeof (struct cdrom_cdda));
			return (EFAULT);
		}
		break;
	}
#else /* ! _MULTI_DATAMODEL */
	if (ddi_copyin(data, cdda, sizeof (struct cdrom_cdda), flag)) {
		scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
		    "sr_read_cdda: ddi_copyin Failed\n");
		kmem_free(cdda, sizeof (struct cdrom_cdda));
		return (EFAULT);
	}
#endif /* _MULTI_DATAMODEL */

	/*
	 * Since MMC-2 expects max 3 bytes for length, check if the
	 * length input is greater than 3 bytes
	 */
	if ((cdda->cdda_length & 0xFF000000) != 0) {
		scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, "sr_read_cdda: "
		    "cdrom transfer length too large: %d (limit %d)\n",
		    cdda->cdda_length, 0xFFFFFF);
		kmem_free(cdda, sizeof (struct cdrom_cdda));
		return (EINVAL);
	}

	/* Per-sector size depends on which subcode data is requested */
	switch (cdda->cdda_subcode) {
	case CDROM_DA_NO_SUBCODE:
		buflen = CDROM_BLK_2352 * cdda->cdda_length;
		break;
	case CDROM_DA_SUBQ:
		buflen = CDROM_BLK_2368 * cdda->cdda_length;
		break;
	case CDROM_DA_ALL_SUBCODE:
		buflen = CDROM_BLK_2448 * cdda->cdda_length;
		break;
	case CDROM_DA_SUBCODE_ONLY:
		buflen = CDROM_BLK_SUBCODE * cdda->cdda_length;
		break;
	default:
		scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
		    "sr_read_cdda: Subcode '0x%x' Not Supported\n",
		    cdda->cdda_subcode);
		kmem_free(cdda, sizeof (struct cdrom_cdda));
		return (EINVAL);
	}

	/* Build and send the command */
	com = kmem_zalloc(sizeof (*com), KM_SLEEP);
	bzero(cdb, CDB_GROUP5);

	if (un->un_f_cfg_cdda == TRUE) {
		/* Target supports READ CD (0xBE): 3-byte transfer length */
		cdb[0] = (char)SCMD_READ_CD;
		cdb[1] = 0x04;
		cdb[2] = (((cdda->cdda_addr) & 0xff000000) >> 24);
		cdb[3] = (((cdda->cdda_addr) & 0x00ff0000) >> 16);
		cdb[4] = (((cdda->cdda_addr) & 0x0000ff00) >> 8);
		cdb[5] = ((cdda->cdda_addr) & 0x000000ff);
		cdb[6] = (((cdda->cdda_length) & 0x00ff0000) >> 16);
		cdb[7] = (((cdda->cdda_length) & 0x0000ff00) >> 8);
		cdb[8] = ((cdda->cdda_length) & 0x000000ff);
		cdb[9] = 0x10;
		/* Map the requested subcode selection to READ CD byte 10 */
		switch (cdda->cdda_subcode) {
		case CDROM_DA_NO_SUBCODE :
			cdb[10] = 0x0;
			break;
		case CDROM_DA_SUBQ :
			cdb[10] = 0x2;
			break;
		case CDROM_DA_ALL_SUBCODE :
			cdb[10] = 0x1;
			break;
		case CDROM_DA_SUBCODE_ONLY :
			/* FALLTHROUGH */
		default :
			/* READ CD cannot return subcode-only data */
			kmem_free(cdda, sizeof (struct cdrom_cdda));
			kmem_free(com, sizeof (*com));
			return (ENOTTY);
		}
	} else {
		/* Vendor specific READ CDDA (0xD8): 4-byte transfer length */
		cdb[0] = (char)SCMD_READ_CDDA;
		cdb[2] = (((cdda->cdda_addr) & 0xff000000) >> 24);
		cdb[3] = (((cdda->cdda_addr) & 0x00ff0000) >> 16);
		cdb[4] = (((cdda->cdda_addr) & 0x0000ff00) >> 8);
		cdb[5] = ((cdda->cdda_addr) & 0x000000ff);
		cdb[6] = (((cdda->cdda_length) & 0xff000000) >> 24);
		cdb[7] = (((cdda->cdda_length) & 0x00ff0000) >> 16);
		cdb[8] = (((cdda->cdda_length) & 0x0000ff00) >> 8);
		cdb[9] = ((cdda->cdda_length) & 0x000000ff);
		cdb[10] = cdda->cdda_subcode;
	}

	com->uscsi_cdb = cdb;
	com->uscsi_cdblen = CDB_GROUP5;
	com->uscsi_bufaddr = (caddr_t)cdda->cdda_data;
	com->uscsi_buflen = buflen;
	com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT|USCSI_READ;

	rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_USERSPACE,
	    SD_PATH_STANDARD);

	kmem_free(cdda, sizeof (struct cdrom_cdda));
	kmem_free(com, sizeof (*com));
	return (rval);
}


/*
 * Function: sr_read_cdxa()
 *
 * Description: This routine is the driver entry point for handling CD-ROM
 *		ioctl requests to return CD-XA (Extended Architecture) data.
 *		(CDROMCDXA).
 *
 * Arguments: dev	- the device 'dev_t'
 *		data	- pointer to user provided CD-XA structure specifying
 *			  the data starting address, transfer length, and format
 *		flag	- this argument is a pass through to ddi_copyxxx()
 *			  directly from the mode argument of ioctl().
 *
 * Return Code: the code returned by sd_send_scsi_cmd()
 *		EFAULT if ddi_copyxxx() fails
 *		ENXIO if fail ddi_get_soft_state
 *		EINVAL if data pointer is NULL
 */

static int
sr_read_cdxa(dev_t dev, caddr_t data, int flag)
{
	struct sd_lun		*un;
	struct uscsi_cmd	*com;
	struct cdrom_cdxa	*cdxa;
	int			rval;
	size_t			buflen;
	char			cdb[CDB_GROUP5];
	uchar_t			read_flags;

#ifdef _MULTI_DATAMODEL
	/* To support ILP32 applications in an LP64 world */
	struct cdrom_cdxa32		cdrom_cdxa32;
	struct cdrom_cdxa32		*cdxa32 = &cdrom_cdxa32;
#endif /* _MULTI_DATAMODEL */

	if (data == NULL) {
		return (EINVAL);
	}

	if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) {
		return (ENXIO);
	}

	cdxa = kmem_zalloc(sizeof (struct cdrom_cdxa), KM_SLEEP);

	/* Copy in the caller's request, converting from ILP32 if needed */
#ifdef _MULTI_DATAMODEL
	switch (ddi_model_convert_from(flag & FMODELS)) {
	case DDI_MODEL_ILP32:
		if (ddi_copyin(data, cdxa32, sizeof (*cdxa32), flag)) {
			kmem_free(cdxa, sizeof (struct cdrom_cdxa));
			return (EFAULT);
		}
		/*
		 * Convert the ILP32 uscsi data from the
		 * application to LP64 for internal use.
		 */
		cdrom_cdxa32tocdrom_cdxa(cdxa32, cdxa);
		break;
	case DDI_MODEL_NONE:
		if (ddi_copyin(data, cdxa, sizeof (struct cdrom_cdxa), flag)) {
			kmem_free(cdxa, sizeof (struct cdrom_cdxa));
			return (EFAULT);
		}
		break;
	}
#else /* ! _MULTI_DATAMODEL */
	if (ddi_copyin(data, cdxa, sizeof (struct cdrom_cdxa), flag)) {
		kmem_free(cdxa, sizeof (struct cdrom_cdxa));
		return (EFAULT);
	}
#endif /* _MULTI_DATAMODEL */

	/*
	 * Since MMC-2 expects max 3 bytes for length, check if the
	 * length input is greater than 3 bytes
	 */
	if ((cdxa->cdxa_length & 0xFF000000) != 0) {
		scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, "sr_read_cdxa: "
		    "cdrom transfer length too large: %d (limit %d)\n",
		    cdxa->cdxa_length, 0xFFFFFF);
		kmem_free(cdxa, sizeof (struct cdrom_cdxa));
		return (EINVAL);
	}

	/* Per-sector size and READ CD filter bits depend on the format */
	switch (cdxa->cdxa_format) {
	case CDROM_XA_DATA:
		buflen = CDROM_BLK_2048 * cdxa->cdxa_length;
		read_flags = 0x10;
		break;
	case CDROM_XA_SECTOR_DATA:
		buflen = CDROM_BLK_2352 * cdxa->cdxa_length;
		read_flags = 0xf8;
		break;
	case CDROM_XA_DATA_W_ERROR:
		buflen = CDROM_BLK_2646 * cdxa->cdxa_length;
		read_flags = 0xfc;
		break;
	default:
		scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
		    "sr_read_cdxa: Format '0x%x' Not Supported\n",
		    cdxa->cdxa_format);
		kmem_free(cdxa, sizeof (struct cdrom_cdxa));
		return (EINVAL);
	}

	com = kmem_zalloc(sizeof (*com), KM_SLEEP);
	bzero(cdb, CDB_GROUP5);
	if (un->un_f_mmc_cap == TRUE) {
		/* MMC-capable target: use READ CD (0xBE) */
		cdb[0] = (char)SCMD_READ_CD;
		cdb[2] = (((cdxa->cdxa_addr) & 0xff000000) >> 24);
		cdb[3] = (((cdxa->cdxa_addr) & 0x00ff0000) >> 16);
		cdb[4] = (((cdxa->cdxa_addr) & 0x0000ff00) >> 8);
		cdb[5] = ((cdxa->cdxa_addr) & 0x000000ff);
		cdb[6] = (((cdxa->cdxa_length) & 0x00ff0000) >> 16);
		cdb[7] = (((cdxa->cdxa_length) & 0x0000ff00) >> 8);
		cdb[8] = ((cdxa->cdxa_length) & 0x000000ff);
		cdb[9] = (char)read_flags;
	} else {
		/*
		 * Note: A vendor specific command (0xDB) is being used here to
		 * request a read of all subcodes.
		 */
		cdb[0] = (char)SCMD_READ_CDXA;
		cdb[2] = (((cdxa->cdxa_addr) & 0xff000000) >> 24);
		cdb[3] = (((cdxa->cdxa_addr) & 0x00ff0000) >> 16);
		cdb[4] = (((cdxa->cdxa_addr) & 0x0000ff00) >> 8);
		cdb[5] = ((cdxa->cdxa_addr) & 0x000000ff);
		cdb[6] = (((cdxa->cdxa_length) & 0xff000000) >> 24);
		cdb[7] = (((cdxa->cdxa_length) & 0x00ff0000) >> 16);
		cdb[8] = (((cdxa->cdxa_length) & 0x0000ff00) >> 8);
		cdb[9] = ((cdxa->cdxa_length) & 0x000000ff);
		cdb[10] = cdxa->cdxa_format;
	}
	com->uscsi_cdb = cdb;
	com->uscsi_cdblen = CDB_GROUP5;
	com->uscsi_bufaddr = (caddr_t)cdxa->cdxa_data;
	com->uscsi_buflen = buflen;
	com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT|USCSI_READ;
	rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_USERSPACE,
	    SD_PATH_STANDARD);
	kmem_free(cdxa, sizeof (struct cdrom_cdxa));
	kmem_free(com, sizeof (*com));
	return (rval);
}


/*
 * Function: sr_eject()
 *
 * Description: This routine is the driver entry point for handling CD-ROM
 *		eject ioctl requests (FDEJECT, DKIOCEJECT, CDROMEJECT)
 *
 * Arguments: dev	- the device 'dev_t'
 *
 * Return Code: the code returned by sd_send_scsi_cmd()
 */

static int
sr_eject(dev_t dev)
{
	struct sd_lun	*un;
	int		rval;
	sd_ssc_t	*ssc;

	if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL ||
	    (un->un_state == SD_STATE_OFFLINE)) {
		return (ENXIO);
	}

	/*
	 * To prevent race conditions with the eject
	 * command, keep track of an eject command as
	 * it progresses. If we are already handling
	 * an eject command in the driver for the given
	 * unit and another request to eject is received
	 * immediately return EAGAIN so we don't lose
	 * the command if the current eject command fails.
	 */
	mutex_enter(SD_MUTEX(un));
	if (un->un_f_ejecting == TRUE) {
		mutex_exit(SD_MUTEX(un));
		return (EAGAIN);
	}
	un->un_f_ejecting = TRUE;
	mutex_exit(SD_MUTEX(un));

	/* Unlock the door first; a locked door would block the eject */
	ssc = sd_ssc_init(un);
	rval = sd_send_scsi_DOORLOCK(ssc, SD_REMOVAL_ALLOW,
	    SD_PATH_STANDARD);
	sd_ssc_fini(ssc);

	if (rval != 0) {
		mutex_enter(SD_MUTEX(un));
		un->un_f_ejecting = FALSE;
		mutex_exit(SD_MUTEX(un));
		return (rval);
	}

	/* Now issue the actual eject via START STOP UNIT */
	ssc = sd_ssc_init(un);
	rval = sd_send_scsi_START_STOP_UNIT(ssc, SD_TARGET_EJECT,
	    SD_PATH_STANDARD);
	sd_ssc_fini(ssc);

	if (rval == 0) {
		/* Invalidate geometry and wake any media-state waiters */
		mutex_enter(SD_MUTEX(un));
		sr_ejected(un);
		un->un_mediastate = DKIO_EJECTED;
		un->un_f_ejecting = FALSE;
		cv_broadcast(&un->un_state_cv);
		mutex_exit(SD_MUTEX(un));
	} else {
		mutex_enter(SD_MUTEX(un));
		un->un_f_ejecting = FALSE;
		mutex_exit(SD_MUTEX(un));
	}
	return (rval);
}


/*
 * Function: sr_ejected()
 *
 * Description: This routine updates the soft state structure to invalidate the
 *		geometry information after the media has been ejected or a
 *		media eject has been detected.
 *
 * Arguments: un - driver soft state (unit) structure
 */

static void
sr_ejected(struct sd_lun *un)
{
	struct sd_errstats *stp;

	ASSERT(un != NULL);
	ASSERT(mutex_owned(SD_MUTEX(un)));

	un->un_f_blockcount_is_valid = FALSE;
	un->un_f_tgt_blocksize_is_valid = FALSE;
	/*
	 * cmlb_invalidate() is called without holding SD_MUTEX; the mutex
	 * is dropped and re-acquired around the call.
	 */
	mutex_exit(SD_MUTEX(un));
	cmlb_invalidate(un->un_cmlbhandle, (void *)SD_PATH_DIRECT_PRIORITY);
	mutex_enter(SD_MUTEX(un));

	/* Reset the reported capacity in the error kstats, if present. */
	if (un->un_errstats != NULL) {
		stp = (struct sd_errstats *)un->un_errstats->ks_data;
		stp->sd_capacity.value.ui64 = 0;
	}
}


/*
 * Function: sr_check_wp()
 *
 * Description: This routine checks the write protection of a removable
 *      media disk and hotpluggable devices via the write protect bit of
 *      the Mode Page Header device specific field. Some devices choke
 *      on unsupported mode page. In order to workaround this issue,
 *      this routine has been implemented to use 0x3f mode page(request
 *      for all pages) for all device types.
 *
 * Arguments: dev		- the device 'dev_t'
 *
 * Return Code: int indicating if the device is write protected (1) or not (0)
 *
 * Context: Kernel thread.
 *
 */

static int
sr_check_wp(dev_t dev)
{
	struct sd_lun	*un;
	uchar_t		device_specific;
	uchar_t		*sense;
	int		hdrlen;
	int		rval = FALSE;
	int		status;
	sd_ssc_t	*ssc;

	/*
	 * Note: The return codes for this routine should be reworked to
	 * properly handle the case of a NULL softstate.
	 */
	if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) {
		return (FALSE);
	}

	if (un->un_f_cfg_is_atapi == TRUE) {
		/*
		 * The mode page contents are not required; set the allocation
		 * length for the mode page header only
		 */
		hdrlen = MODE_HEADER_LENGTH_GRP2;
		sense = kmem_zalloc(hdrlen, KM_SLEEP);
		ssc = sd_ssc_init(un);
		status = sd_send_scsi_MODE_SENSE(ssc, CDB_GROUP1, sense, hdrlen,
		    MODEPAGE_ALLPAGES, SD_PATH_STANDARD);
		sd_ssc_fini(ssc);
		if (status != 0)
			goto err_exit;
		device_specific =
		    ((struct mode_header_grp2 *)sense)->device_specific;
	} else {
		hdrlen = MODE_HEADER_LENGTH;
		sense = kmem_zalloc(hdrlen, KM_SLEEP);
		ssc = sd_ssc_init(un);
		status = sd_send_scsi_MODE_SENSE(ssc, CDB_GROUP0, sense, hdrlen,
		    MODEPAGE_ALLPAGES, SD_PATH_STANDARD);
		sd_ssc_fini(ssc);
		if (status != 0)
			goto err_exit;
		device_specific =
		    ((struct mode_header *)sense)->device_specific;
	}


	/*
	 * The WP bit of the device-specific field in the mode header
	 * reflects write protection. If the MODE SENSE above failed we
	 * jumped straight to err_exit and return FALSE: not all disks
	 * understand this query, and such devices are assumed writable.
	 */
	if (device_specific & WRITE_PROTECT) {
		rval = TRUE;
	}

err_exit:
	kmem_free(sense, hdrlen);
	return (rval);
}

/*
 * Function: sr_volume_ctrl()
 *
 * Description: This routine is the driver entry point for handling CD-ROM
 *		audio output volume ioctl requests. (CDROMVOLCTRL)
 *
 * Arguments: dev	- the device 'dev_t'
 *		data	- pointer to user audio volume control structure
 *		flag	- this argument is a pass through to ddi_copyxxx()
 *			  directly from the mode argument of ioctl().
 *
 * Return Code: the code returned by sd_send_scsi_cmd()
 *		EFAULT if ddi_copyxxx() fails
 *		ENXIO if fail ddi_get_soft_state
 *		EINVAL if data pointer is NULL
 *
 */

static int
sr_volume_ctrl(dev_t dev, caddr_t data, int flag)
{
	struct sd_lun		*un;
	struct cdrom_volctrl	volume;
	struct cdrom_volctrl	*vol = &volume;
	uchar_t			*sense_page;
	uchar_t			*select_page;
	uchar_t			*sense;
	uchar_t			*select;
	int			sense_buflen;
	int			select_buflen;
	int			rval;
	sd_ssc_t		*ssc;

	if (data == NULL) {
		return (EINVAL);
	}

	if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL ||
	    (un->un_state == SD_STATE_OFFLINE)) {
		return (ENXIO);
	}

	if (ddi_copyin(data, vol, sizeof (struct cdrom_volctrl), flag)) {
		return (EFAULT);
	}

	if ((un->un_f_cfg_is_atapi == TRUE) || (un->un_f_mmc_cap == TRUE)) {
		/* ATAPI/MMC devices: use 10-byte (group 1) mode commands. */
		struct mode_header_grp2	*sense_mhp;
		struct mode_header_grp2	*select_mhp;
		int			bd_len;

		sense_buflen = MODE_PARAM_LENGTH_GRP2 + MODEPAGE_AUDIO_CTRL_LEN;
		select_buflen = MODE_HEADER_LENGTH_GRP2 +
		    MODEPAGE_AUDIO_CTRL_LEN;
		sense = kmem_zalloc(sense_buflen, KM_SLEEP);
		select = kmem_zalloc(select_buflen, KM_SLEEP);
		ssc = sd_ssc_init(un);
		rval = sd_send_scsi_MODE_SENSE(ssc, CDB_GROUP1, sense,
		    sense_buflen, MODEPAGE_AUDIO_CTRL,
		    SD_PATH_STANDARD);
		sd_ssc_fini(ssc);

		if (rval != 0) {
			SD_ERROR(SD_LOG_IOCTL_RMMEDIA, un,
			    "sr_volume_ctrl: Mode Sense Failed\n");
			kmem_free(sense, sense_buflen);
			kmem_free(select, select_buflen);
			return (rval);
		}
		sense_mhp = (struct mode_header_grp2 *)sense;
		select_mhp = (struct mode_header_grp2 *)select;
		bd_len = (sense_mhp->bdesc_length_hi << 8) |
		    sense_mhp->bdesc_length_lo;
		if (bd_len > MODE_BLK_DESC_LENGTH) {
			scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
			    "sr_volume_ctrl: Mode Sense returned invalid "
			    "block descriptor length\n");
			kmem_free(sense, sense_buflen);
			kmem_free(select, select_buflen);
			return (EIO);
		}
		/* Skip mode header plus any block descriptors to the page. */
		sense_page = (uchar_t *)
		    (sense + MODE_HEADER_LENGTH_GRP2 + bd_len);
		select_page = (uchar_t *)(select + MODE_HEADER_LENGTH_GRP2);
		select_mhp->length_msb = 0;
		select_mhp->length_lsb = 0;
		select_mhp->bdesc_length_hi = 0;
		select_mhp->bdesc_length_lo = 0;
	} else {
		/* Legacy SCSI devices: use 6-byte (group 0) mode commands. */
		struct mode_header	*sense_mhp, *select_mhp;

		sense_buflen = MODE_PARAM_LENGTH + MODEPAGE_AUDIO_CTRL_LEN;
		select_buflen = MODE_HEADER_LENGTH + MODEPAGE_AUDIO_CTRL_LEN;
		sense = kmem_zalloc(sense_buflen, KM_SLEEP);
		select = kmem_zalloc(select_buflen, KM_SLEEP);
		ssc = sd_ssc_init(un);
		rval = sd_send_scsi_MODE_SENSE(ssc, CDB_GROUP0, sense,
		    sense_buflen, MODEPAGE_AUDIO_CTRL,
		    SD_PATH_STANDARD);
		sd_ssc_fini(ssc);

		if (rval != 0) {
			scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
			    "sr_volume_ctrl: Mode Sense Failed\n");
			kmem_free(sense, sense_buflen);
			kmem_free(select, select_buflen);
			return (rval);
		}
		sense_mhp = (struct mode_header *)sense;
		select_mhp = (struct mode_header *)select;
		if (sense_mhp->bdesc_length > MODE_BLK_DESC_LENGTH) {
			scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
			    "sr_volume_ctrl: Mode Sense returned invalid "
			    "block descriptor length\n");
			kmem_free(sense, sense_buflen);
			kmem_free(select, select_buflen);
			return (EIO);
		}
		sense_page = (uchar_t *)
		    (sense + MODE_HEADER_LENGTH + sense_mhp->bdesc_length);
		select_page = (uchar_t *)(select + MODE_HEADER_LENGTH);
		select_mhp->length = 0;
		select_mhp->bdesc_length = 0;
	}
	/*
	 * Note: An audio control data structure could be created and overlayed
	 * on the following in place of the array indexing method implemented.
	 */

	/* Build the select data for the user volume data */
	select_page[0] = MODEPAGE_AUDIO_CTRL;
	/* Mode page length field: bytes following the first two bytes */
	select_page[1] = 0xE;
	/* Set the immediate bit */
	select_page[2] = 0x04;
	/* Zero out reserved fields */
	select_page[3] = 0x00;
	select_page[4] = 0x00;
	/* Return sense data for fields not to be modified */
	select_page[5] = sense_page[5];
	select_page[6] = sense_page[6];
	select_page[7] = sense_page[7];
	/* Set the user specified volume levels for channel 0 and 1 */
	select_page[8] = 0x01;
	select_page[9] = vol->channel0;
	select_page[10] = 0x02;
	select_page[11] = vol->channel1;
	/* Channel 2 and 3 are currently unsupported so return the sense data */
	select_page[12] = sense_page[12];
	select_page[13] = sense_page[13];
	select_page[14] = sense_page[14];
	select_page[15] = sense_page[15];

	ssc = sd_ssc_init(un);
	if ((un->un_f_cfg_is_atapi == TRUE) || (un->un_f_mmc_cap == TRUE)) {
		rval = sd_send_scsi_MODE_SELECT(ssc, CDB_GROUP1, select,
		    select_buflen, SD_DONTSAVE_PAGE, SD_PATH_STANDARD);
	} else {
		rval = sd_send_scsi_MODE_SELECT(ssc, CDB_GROUP0, select,
		    select_buflen, SD_DONTSAVE_PAGE, SD_PATH_STANDARD);
	}
	sd_ssc_fini(ssc);

	kmem_free(sense, sense_buflen);
	kmem_free(select, select_buflen);
	return (rval);
}


/*
 * Function: sr_read_sony_session_offset()
 *
 * Description: This routine is the driver entry point for handling CD-ROM
 *		ioctl requests for session offset information. (CDROMREADOFFSET)
 *		The address of the first track in the last session of a
 *		multi-session CD-ROM is returned
 *
 *		Note: This routine uses a vendor specific key value in the
 *		command control field without implementing any vendor check here
 *		or in the ioctl routine.
 *
 * Arguments: dev	- the device 'dev_t'
 *		data	- pointer to an int to hold the requested address
 *		flag	- this argument is a pass through to ddi_copyxxx()
 *			  directly from the mode argument of ioctl().
 *
 * Return Code: the code returned by sd_send_scsi_cmd()
 *		EFAULT if ddi_copyxxx() fails
 *		ENXIO if fail ddi_get_soft_state
 *		EINVAL if data pointer is NULL
 */

static int
sr_read_sony_session_offset(dev_t dev, caddr_t data, int flag)
{
	struct sd_lun		*un;
	struct uscsi_cmd	*com;
	caddr_t			buffer;
	char			cdb[CDB_GROUP1];
	int			session_offset = 0;
	int			rval;

	if (data == NULL) {
		return (EINVAL);
	}

	if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL ||
	    (un->un_state == SD_STATE_OFFLINE)) {
		return (ENXIO);
	}

	buffer = kmem_zalloc((size_t)SONY_SESSION_OFFSET_LEN, KM_SLEEP);
	bzero(cdb, CDB_GROUP1);
	cdb[0] = SCMD_READ_TOC;
	/*
	 * Bytes 7 & 8 are the 12 byte allocation length for a single entry.
	 * (4 byte TOC response header + 8 byte response data)
	 */
	cdb[8] = SONY_SESSION_OFFSET_LEN;
	/* Byte 9 is the control byte. A vendor specific value is used */
	cdb[9] = SONY_SESSION_OFFSET_KEY;
	com = kmem_zalloc(sizeof (*com), KM_SLEEP);
	com->uscsi_cdb = cdb;
	com->uscsi_cdblen = CDB_GROUP1;
	com->uscsi_bufaddr = buffer;
	com->uscsi_buflen = SONY_SESSION_OFFSET_LEN;
	com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT|USCSI_READ;

	rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_SYSSPACE,
	    SD_PATH_STANDARD);
	if (rval != 0) {
		kmem_free(buffer, SONY_SESSION_OFFSET_LEN);
		kmem_free(com, sizeof (*com));
		return (rval);
	}
	if (buffer[1] == SONY_SESSION_OFFSET_VALID) {
		/* Assemble the big-endian 32-bit offset from bytes 8..11 */
		session_offset =
		    ((uchar_t)buffer[8] << 24) + ((uchar_t)buffer[9] << 16) +
		    ((uchar_t)buffer[10] << 8) + ((uchar_t)buffer[11]);
		/*
		 * The offset is returned in units of the current target
		 * block size; convert to 2K blocks before returning it
		 * to the user.
		 */
		if (un->un_tgt_blocksize == CDROM_BLK_512) {
			session_offset >>= 2;
		} else if (un->un_tgt_blocksize == CDROM_BLK_1024) {
			session_offset >>= 1;
		}
	}

	if (ddi_copyout(&session_offset, data, sizeof (int), flag) != 0) {
		rval = EFAULT;
	}

	kmem_free(buffer, SONY_SESSION_OFFSET_LEN);
	kmem_free(com, sizeof (*com));
	return (rval);
}


/*
 * Function: sd_wm_cache_constructor()
 *
 * Description: Cache Constructor for the wmap cache for the read/modify/write
 *		devices.
 *
 * Arguments: wm	- A pointer to the sd_w_map to be initialized.
 *		un	- sd_lun structure for the device.
 *		flag	- the km flags passed to constructor
 *
 * Return Code: 0 on success.
 *		-1 on failure.
 */

/*ARGSUSED*/
static int
sd_wm_cache_constructor(void *wm, void *un, int flags)
{
	bzero(wm, sizeof (struct sd_w_map));
	cv_init(&((struct sd_w_map *)wm)->wm_avail, NULL, CV_DRIVER, NULL);
	return (0);
}


/*
 * Function: sd_wm_cache_destructor()
 *
 * Description: Cache destructor for the wmap cache for the read/modify/write
 *		devices.
 *
 * Arguments: wm	- A pointer to the sd_w_map to be initialized.
 *		un	- sd_lun structure for the device.
 */
/*ARGSUSED*/
static void
sd_wm_cache_destructor(void *wm, void *un)
{
	cv_destroy(&((struct sd_w_map *)wm)->wm_avail);
}


/*
 * Function: sd_range_lock()
 *
 * Description: Lock the range of blocks specified as parameter to ensure
 *		that read, modify write is atomic and no other i/o writes
 *		to the same location. The range is specified in terms
 *		of start and end blocks. Block numbers are the actual
 *		media block numbers and not system.
 *
 * Arguments: un	- sd_lun structure for the device.
 *		startb - The starting block number
 *		endb - The end block number
 *		typ - type of i/o - simple/read_modify_write
 *
 * Return Code: wm  - pointer to the wmap structure.
 *
 * Context: This routine can sleep.
 */

static struct sd_w_map *
sd_range_lock(struct sd_lun *un, daddr_t startb, daddr_t endb, ushort_t typ)
{
	struct sd_w_map *wmp = NULL;
	struct sd_w_map *sl_wmp = NULL;
	struct sd_w_map *tmp_wmp;
	wm_state state = SD_WM_CHK_LIST;


	ASSERT(un != NULL);
	ASSERT(!mutex_owned(SD_MUTEX(un)));

	mutex_enter(SD_MUTEX(un));

	/* Small state machine: check list -> (wait | lock range) -> done. */
	while (state != SD_WM_DONE) {

		switch (state) {
		case SD_WM_CHK_LIST:
			/*
			 * This is the starting state. Check the wmap list
			 * to see if the range is currently available.
			 */
			if (!(typ & SD_WTYPE_RMW) && !(un->un_rmw_count)) {
				/*
				 * If this is a simple write and no rmw
				 * i/o is pending then try to lock the
				 * range as the range should be available.
				 */
				state = SD_WM_LOCK_RANGE;
			} else {
				tmp_wmp = sd_get_range(un, startb, endb);
				if (tmp_wmp != NULL) {
					if ((wmp != NULL) && ONLIST(un, wmp)) {
						/*
						 * Should not keep onlist wmps
						 * while waiting this macro
						 * will also do wmp = NULL;
						 */
						FREE_ONLIST_WMAP(un, wmp);
					}
					/*
					 * sl_wmp is the wmap on which wait
					 * is done, since the tmp_wmp points
					 * to the inuse wmap, set sl_wmp to
					 * tmp_wmp and change the state to sleep
					 */
					sl_wmp = tmp_wmp;
					state = SD_WM_WAIT_MAP;
				} else {
					state = SD_WM_LOCK_RANGE;
				}

			}
			break;

		case SD_WM_LOCK_RANGE:
			ASSERT(un->un_wm_cache);
			/*
			 * The range needs to be locked, try to get a wmap.
			 * First attempt it with NO_SLEEP, want to avoid a sleep
			 * if possible as we will have to release the sd mutex
			 * if we have to sleep.
			 */
			if (wmp == NULL)
				wmp = kmem_cache_alloc(un->un_wm_cache,
				    KM_NOSLEEP);
			if (wmp == NULL) {
				mutex_exit(SD_MUTEX(un));
				_NOTE(DATA_READABLE_WITHOUT_LOCK
				    (sd_lun::un_wm_cache))
				wmp = kmem_cache_alloc(un->un_wm_cache,
				    KM_SLEEP);
				mutex_enter(SD_MUTEX(un));
				/*
				 * we released the mutex so recheck and go to
				 * check list state.
				 */
				state = SD_WM_CHK_LIST;
			} else {
				/*
				 * We exit out of state machine since we
				 * have the wmap. Do the housekeeping first.
				 * place the wmap on the wmap list if it is not
				 * on it already and then set the state to done.
				 */
				wmp->wm_start = startb;
				wmp->wm_end = endb;
				wmp->wm_flags = typ | SD_WM_BUSY;
				if (typ & SD_WTYPE_RMW) {
					un->un_rmw_count++;
				}
				/*
				 * If not already on the list then link
				 */
				if (!ONLIST(un, wmp)) {
					wmp->wm_next = un->un_wm;
					wmp->wm_prev = NULL;
					if (wmp->wm_next)
						wmp->wm_next->wm_prev = wmp;
					un->un_wm = wmp;
				}
				state = SD_WM_DONE;
			}
			break;

		case SD_WM_WAIT_MAP:
			ASSERT(sl_wmp->wm_flags & SD_WM_BUSY);
			/*
			 * Wait is done on sl_wmp, which is set in the
			 * check_list state.
			 */
			sl_wmp->wm_wanted_count++;
			cv_wait(&sl_wmp->wm_avail, SD_MUTEX(un));
			sl_wmp->wm_wanted_count--;
			/*
			 * We can reuse the memory from the completed sl_wmp
			 * lock range for our new lock, but only if no one is
			 * waiting for it.
			 */
			ASSERT(!(sl_wmp->wm_flags & SD_WM_BUSY));
			if (sl_wmp->wm_wanted_count == 0) {
				if (wmp != NULL)
					CHK_N_FREEWMP(un, wmp);
				wmp = sl_wmp;
			}
			sl_wmp = NULL;
			/*
			 * After waking up, need to recheck for availability of
			 * range.
			 */
			state = SD_WM_CHK_LIST;
			break;

		default:
			panic("sd_range_lock: "
			    "Unknown state %d in sd_range_lock", state);
			/*NOTREACHED*/
		} /* switch(state) */

	} /* while(state != SD_WM_DONE) */

	mutex_exit(SD_MUTEX(un));

	ASSERT(wmp != NULL);

	return (wmp);
}


/*
 * Function: sd_get_range()
 *
 * Description: Find if there any overlapping I/O to this one
 *		Returns the write-map of 1st such I/O, NULL otherwise.
 *
 * Arguments: un	- sd_lun structure for the device.
 *		startb - The starting block number
 *		endb - The end block number
 *
 * Return Code: wm  - pointer to the wmap structure.
 */

static struct sd_w_map *
sd_get_range(struct sd_lun *un, daddr_t startb, daddr_t endb)
{
	struct sd_w_map *wmp;

	ASSERT(un != NULL);

	/*
	 * Linear scan of the active write-map list; only BUSY entries are
	 * considered, and an entry matches if either endpoint of the
	 * requested range falls inside it.
	 */
	for (wmp = un->un_wm; wmp != NULL; wmp = wmp->wm_next) {
		if (!(wmp->wm_flags & SD_WM_BUSY)) {
			continue;
		}
		if ((startb >= wmp->wm_start) && (startb <= wmp->wm_end)) {
			break;
		}
		if ((endb >= wmp->wm_start) && (endb <= wmp->wm_end)) {
			break;
		}
	}

	return (wmp);
}


/*
 * Function: sd_free_inlist_wmap()
 *
 * Description: Unlink and free a write map struct.
 *
 * Arguments: un	- sd_lun structure for the device.
 *		wmp	- sd_w_map which needs to be unlinked.
 */

static void
sd_free_inlist_wmap(struct sd_lun *un, struct sd_w_map *wmp)
{
	ASSERT(un != NULL);

	/* Unlink wmp from the doubly-linked un_wm list. */
	if (un->un_wm == wmp) {
		un->un_wm = wmp->wm_next;
	} else {
		wmp->wm_prev->wm_next = wmp->wm_next;
	}

	if (wmp->wm_next) {
		wmp->wm_next->wm_prev = wmp->wm_prev;
	}

	wmp->wm_next = wmp->wm_prev = NULL;

	kmem_cache_free(un->un_wm_cache, wmp);
}


/*
 * Function: sd_range_unlock()
 *
 * Description: Unlock the range locked by wm.
 *		Free write map if nobody else is waiting on it.
 *
 * Arguments: un	- sd_lun structure for the device.
 *		wmp	- sd_w_map which needs to be unlinked.
 */

static void
sd_range_unlock(struct sd_lun *un, struct sd_w_map *wm)
{
	ASSERT(un != NULL);
	ASSERT(wm != NULL);
	ASSERT(!mutex_owned(SD_MUTEX(un)));

	mutex_enter(SD_MUTEX(un));

	if (wm->wm_flags & SD_WTYPE_RMW) {
		un->un_rmw_count--;
	}

	if (wm->wm_wanted_count) {
		wm->wm_flags = 0;
		/*
		 * Broadcast that the wmap is available now.
		 */
		cv_broadcast(&wm->wm_avail);
	} else {
		/*
		 * If no one is waiting on the map, it should be free'ed.
		 */
		sd_free_inlist_wmap(un, wm);
	}

	mutex_exit(SD_MUTEX(un));
}


/*
 * Function: sd_read_modify_write_task
 *
 * Description: Called from a taskq thread to initiate the write phase of
 *		a read-modify-write request. This is used for targets where
 *		un->un_sys_blocksize != un->un_tgt_blocksize.
 *
 * Arguments: arg - a pointer to the buf(9S) struct for the write command.
 *
 * Context: Called under taskq thread context.
 */

static void
sd_read_modify_write_task(void *arg)
{
	struct sd_mapblocksize_info	*bsp;
	struct buf	*bp;
	struct sd_xbuf	*xp;
	struct sd_lun	*un;

	bp = arg;	/* The bp is given in arg */
	ASSERT(bp != NULL);

	/* Get the pointer to the layer-private data struct */
	xp = SD_GET_XBUF(bp);
	ASSERT(xp != NULL);
	bsp = xp->xb_private;
	ASSERT(bsp != NULL);

	un = SD_GET_UN(bp);
	ASSERT(un != NULL);
	ASSERT(!mutex_owned(SD_MUTEX(un)));

	SD_TRACE(SD_LOG_IO_RMMEDIA, un,
	    "sd_read_modify_write_task: entry: buf:0x%p\n", bp);

	/*
	 * This is the write phase of a read-modify-write request, called
	 * under the context of a taskq thread in response to the completion
	 * of the read portion of the rmw request completing under interrupt
	 * context. The write request must be sent from here down the iostart
	 * chain as if it were being sent from sd_mapblocksize_iostart(), so
	 * we use the layer index saved in the layer-private data area.
	 */
	SD_NEXT_IOSTART(bsp->mbs_layer_index, un, bp);

	SD_TRACE(SD_LOG_IO_RMMEDIA, un,
	    "sd_read_modify_write_task: exit: buf:0x%p\n", bp);
}


/*
 * Function: sddump_do_read_of_rmw()
 *
 * Description: This routine will be called from sddump, If sddump is called
 *		with an I/O which not aligned on device blocksize boundary
 *		then the write has to be converted to read-modify-write.
 *		Do the read part here in order to keep sddump simple.
 *		Note - That the sd_mutex is held across the call to this
 *		routine.
 *
 * Arguments: un	- sd_lun
 *		blkno	- block number in terms of media block size.
 *		nblk	- number of blocks.
 *		bpp	- pointer to pointer to the buf structure. On return
 *			from this function, *bpp points to the valid buffer
 *			to which the write has to be done.
 *
 * Return Code: 0 for success or errno-type return code
 */

static int
sddump_do_read_of_rmw(struct sd_lun *un, uint64_t blkno, uint64_t nblk,
    struct buf **bpp)
{
	int err;
	int i;
	int rval;
	struct buf *bp;
	struct scsi_pkt *pkt = NULL;
	uint32_t target_blocksize;

	ASSERT(un != NULL);
	ASSERT(mutex_owned(SD_MUTEX(un)));

	target_blocksize = un->un_tgt_blocksize;

	/* Drop the mutex while allocating and polling; re-taken at done: */
	mutex_exit(SD_MUTEX(un));

	bp = scsi_alloc_consistent_buf(SD_ADDRESS(un), (struct buf *)NULL,
	    (size_t)(nblk * target_blocksize), B_READ, NULL_FUNC, NULL);
	if (bp == NULL) {
		scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
		    "no resources for dumping; giving up");
		err = ENOMEM;
		goto done;
	}

	rval = sd_setup_rw_pkt(un, &pkt, bp, 0, NULL_FUNC, NULL,
	    blkno, nblk);
	if (rval != 0) {
		/*
		 * NOTE(review): assumes sd_setup_rw_pkt() leaves *pkt
		 * unallocated on failure, so there is no pkt to destroy
		 * here -- confirm against sd_setup_rw_pkt().
		 */
		scsi_free_consistent_buf(bp);
		scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
		    "no resources for dumping; giving up");
		err = ENOMEM;
		goto done;
	}

	/* Dump context: command must complete by polling, not interrupts. */
	pkt->pkt_flags |= FLAG_NOINTR;

	err = EIO;
	for (i = 0; i < SD_NDUMP_RETRIES; i++) {

		/*
		 * Scsi_poll returns 0 (success) if the command completes and
		 * the status block is STATUS_GOOD. We should only check
		 * errors if this condition is not true. Even then we should
		 * send our own request sense packet only if we have a check
		 * condition and auto request sense has not been performed by
		 * the hba.
		 */
		SD_TRACE(SD_LOG_DUMP, un, "sddump: sending read\n");

		if ((sd_scsi_poll(un, pkt) == 0) && (pkt->pkt_resid == 0)) {
			err = 0;
			break;
		}

		/*
		 * Check CMD_DEV_GONE 1st, give up if device is gone,
		 * no need to read RQS data.
		 */
		if (pkt->pkt_reason == CMD_DEV_GONE) {
			scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
			    "Error while dumping state with rmw..."
			    "Device is gone\n");
			break;
		}

		if (SD_GET_PKT_STATUS(pkt) == STATUS_CHECK) {
			SD_INFO(SD_LOG_DUMP, un,
			    "sddump: read failed with CHECK, try # %d\n", i);
			if (((pkt->pkt_state & STATE_ARQ_DONE) == 0)) {
				(void) sd_send_polled_RQS(un);
			}

			continue;
		}

		if (SD_GET_PKT_STATUS(pkt) == STATUS_BUSY) {
			int reset_retval = 0;

			SD_INFO(SD_LOG_DUMP, un,
			    "sddump: read failed with BUSY, try # %d\n", i);

			/* Try a LUN reset first (if enabled), then target. */
			if (un->un_f_lun_reset_enabled == TRUE) {
				reset_retval = scsi_reset(SD_ADDRESS(un),
				    RESET_LUN);
			}
			if (reset_retval == 0) {
				(void) scsi_reset(SD_ADDRESS(un), RESET_TARGET);
			}
			(void) sd_send_polled_RQS(un);

		} else {
			SD_INFO(SD_LOG_DUMP, un,
			    "sddump: read failed with 0x%x, try # %d\n",
			    SD_GET_PKT_STATUS(pkt), i);
			mutex_enter(SD_MUTEX(un));
			sd_reset_target(un, pkt);
			mutex_exit(SD_MUTEX(un));
		}

		/*
		 * If we are not getting anywhere with lun/target resets,
		 * let's reset the bus.
		 */
		if (i > SD_NDUMP_RETRIES/2) {
			(void) scsi_reset(SD_ADDRESS(un), RESET_ALL);
			(void) sd_send_polled_RQS(un);
		}

	}
	scsi_destroy_pkt(pkt);

	if (err != 0) {
		scsi_free_consistent_buf(bp);
		*bpp = NULL;
	} else {
		*bpp = bp;
	}

done:
	mutex_enter(SD_MUTEX(un));
	return (err);
}


/*
 * Function: sd_failfast_flushq
 *
 * Description: Take all bp's on the wait queue that have B_FAILFAST set
 *		in b_flags and move them onto the failfast queue, then kick
 *		off a thread to return all bp's on the failfast queue to
 *		their owners with an error set.
 *
 * Arguments: un - pointer to the soft state struct for the instance.
 *
 * Context: may execute in interrupt context.
 */

static void
sd_failfast_flushq(struct sd_lun *un)
{
	struct buf *bp;
	struct buf *next_waitq_bp;
	struct buf *prev_waitq_bp = NULL;

	ASSERT(un != NULL);
	ASSERT(mutex_owned(SD_MUTEX(un)));
	ASSERT(un->un_failfast_state == SD_FAILFAST_ACTIVE);
	ASSERT(un->un_failfast_bp == NULL);

	SD_TRACE(SD_LOG_IO_FAILFAST, un,
	    "sd_failfast_flushq: entry: un:0x%p\n", un);

	/*
	 * Check if we should flush all bufs when entering failfast state, or
	 * just those with B_FAILFAST set.
	 */
	if (sd_failfast_flushctl & SD_FAILFAST_FLUSH_ALL_BUFS) {
		/*
		 * Move *all* bp's on the wait queue to the failfast flush
		 * queue, including those that do NOT have B_FAILFAST set.
		 */
		if (un->un_failfast_headp == NULL) {
			ASSERT(un->un_failfast_tailp == NULL);
			un->un_failfast_headp = un->un_waitq_headp;
		} else {
			ASSERT(un->un_failfast_tailp != NULL);
			un->un_failfast_tailp->av_forw = un->un_waitq_headp;
		}

		un->un_failfast_tailp = un->un_waitq_tailp;

		/* update kstat for each bp moved out of the waitq */
		for (bp = un->un_waitq_headp; bp != NULL; bp = bp->av_forw) {
			SD_UPDATE_KSTATS(un, kstat_waitq_exit, bp);
		}

		/* empty the waitq */
		un->un_waitq_headp = un->un_waitq_tailp = NULL;

	} else {
		/*
		 * Go thru the wait queue, pick off all entries with
		 * B_FAILFAST set, and move these onto the failfast queue.
		 */
		for (bp = un->un_waitq_headp; bp != NULL; bp = next_waitq_bp) {
			/*
			 * Save the pointer to the next bp on the wait queue,
			 * so we get to it on the next iteration of this loop.
			 */
			next_waitq_bp = bp->av_forw;

			/*
			 * If this bp from the wait queue does NOT have
			 * B_FAILFAST set, just move on to the next element
			 * in the wait queue. Note, this is the only place
			 * where it is correct to set prev_waitq_bp.
			 */
			if ((bp->b_flags & B_FAILFAST) == 0) {
				prev_waitq_bp = bp;
				continue;
			}

			/*
			 * Remove the bp from the wait queue.
			 */
			if (bp == un->un_waitq_headp) {
				/* The bp is the first element of the waitq. */
				un->un_waitq_headp = next_waitq_bp;
				if (un->un_waitq_headp == NULL) {
					/* The wait queue is now empty */
					un->un_waitq_tailp = NULL;
				}
			} else {
				/*
				 * The bp is either somewhere in the middle
				 * or at the end of the wait queue.
				 */
				ASSERT(un->un_waitq_headp != NULL);
				ASSERT(prev_waitq_bp != NULL);
				ASSERT((prev_waitq_bp->b_flags & B_FAILFAST)
				    == 0);
				if (bp == un->un_waitq_tailp) {
					/* bp is the last entry on the waitq. */
					ASSERT(next_waitq_bp == NULL);
					un->un_waitq_tailp = prev_waitq_bp;
				}
				prev_waitq_bp->av_forw = next_waitq_bp;
			}
			bp->av_forw = NULL;

			/*
			 * update kstat since the bp is moved out of
			 * the waitq
			 */
			SD_UPDATE_KSTATS(un, kstat_waitq_exit, bp);

			/*
			 * Now put the bp onto the failfast queue.
			 */
			if (un->un_failfast_headp == NULL) {
				/* failfast queue is currently empty */
				ASSERT(un->un_failfast_tailp == NULL);
				un->un_failfast_headp =
				    un->un_failfast_tailp = bp;
			} else {
				/* Add the bp to the end of the failfast q */
				ASSERT(un->un_failfast_tailp != NULL);
				ASSERT(un->un_failfast_tailp->b_flags &
				    B_FAILFAST);
				un->un_failfast_tailp->av_forw = bp;
				un->un_failfast_tailp = bp;
			}
		}
	}

	/*
	 * Now return all bp's on the failfast queue to their owners.
	 */
	while ((bp = un->un_failfast_headp) != NULL) {

		un->un_failfast_headp = bp->av_forw;
		if (un->un_failfast_headp == NULL) {
			un->un_failfast_tailp = NULL;
		}

		/*
		 * We want to return the bp with a failure error code, but
		 * we do not want a call to sd_start_cmds() to occur here,
		 * so use sd_return_failed_command_no_restart() instead of
		 * sd_return_failed_command().
		 */
		sd_return_failed_command_no_restart(un, bp, EIO);
	}

	/* Flush the xbuf queues if required. */
	if (sd_failfast_flushctl & SD_FAILFAST_FLUSH_ALL_QUEUES) {
		ddi_xbuf_flushq(un->un_xbuf_attr, sd_failfast_flushq_callback);
	}

	SD_TRACE(SD_LOG_IO_FAILFAST, un,
	    "sd_failfast_flushq: exit: un:0x%p\n", un);
}


/*
 * Function: sd_failfast_flushq_callback
 *
 * Description: Return TRUE if the given bp meets the criteria for failfast
 *		flushing. Used with ddi_xbuf_flushq(9F).
 *
 * Arguments: bp - ptr to buf struct to be examined.
 *
 * Context: Any
 */

static int
sd_failfast_flushq_callback(struct buf *bp)
{
	/*
	 * Return TRUE if (1) we want to flush ALL bufs when the failfast
	 * state is entered; OR (2) the given bp has B_FAILFAST set.
	 */
	return (((sd_failfast_flushctl & SD_FAILFAST_FLUSH_ALL_BUFS) ||
	    (bp->b_flags & B_FAILFAST)) ? TRUE : FALSE);
}



/*
 * Function: sd_setup_next_xfer
 *
 * Description: Prepare next I/O operation using DMA_PARTIAL
 *
 */

static int
sd_setup_next_xfer(struct sd_lun *un, struct buf *bp,
    struct scsi_pkt *pkt, struct sd_xbuf *xp)
{
	ssize_t	num_blks_not_xfered;
	daddr_t	strt_blk_num;
	ssize_t	bytes_not_xfered;
	int	rval;

	ASSERT(pkt->pkt_resid == 0);

	/*
	 * Calculate next block number and amount to be transferred.
	 *
	 * How much data NOT transfered to the HBA yet.
	 */
	bytes_not_xfered = xp->xb_dma_resid;

	/*
	 * figure how many blocks NOT transfered to the HBA yet.
	 */
	num_blks_not_xfered = SD_BYTES2TGTBLOCKS(un, bytes_not_xfered);

	/*
	 * set starting block number to the end of what WAS transfered.
	 */
	strt_blk_num = xp->xb_blkno +
	    SD_BYTES2TGTBLOCKS(un, bp->b_bcount - bytes_not_xfered);

	/*
	 * Move pkt to the next portion of the xfer.
sd_setup_next_rw_pkt 28647 * will call scsi_initpkt with NULL_FUNC so we do not have to release 28648 * the disk mutex here. 28649 */ 28650 rval = sd_setup_next_rw_pkt(un, pkt, bp, 28651 strt_blk_num, num_blks_not_xfered); 28652 28653 if (rval == 0) { 28654 28655 /* 28656 * Success. 28657 * 28658 * Adjust things if there are still more blocks to be 28659 * transfered. 28660 */ 28661 xp->xb_dma_resid = pkt->pkt_resid; 28662 pkt->pkt_resid = 0; 28663 28664 return (1); 28665 } 28666 28667 /* 28668 * There's really only one possible return value from 28669 * sd_setup_next_rw_pkt which occurs when scsi_init_pkt 28670 * returns NULL. 28671 */ 28672 ASSERT(rval == SD_PKT_ALLOC_FAILURE); 28673 28674 bp->b_resid = bp->b_bcount; 28675 bp->b_flags |= B_ERROR; 28676 28677 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 28678 "Error setting up next portion of DMA transfer\n"); 28679 28680 return (0); 28681 } 28682 28683 /* 28684 * Function: sd_panic_for_res_conflict 28685 * 28686 * Description: Call panic with a string formatted with "Reservation Conflict" 28687 * and a human readable identifier indicating the SD instance 28688 * that experienced the reservation conflict. 28689 * 28690 * Arguments: un - pointer to the soft state struct for the instance. 28691 * 28692 * Context: may execute in interrupt context. 28693 */ 28694 28695 #define SD_RESV_CONFLICT_FMT_LEN 40 28696 void 28697 sd_panic_for_res_conflict(struct sd_lun *un) 28698 { 28699 char panic_str[SD_RESV_CONFLICT_FMT_LEN+MAXPATHLEN]; 28700 char path_str[MAXPATHLEN]; 28701 28702 (void) snprintf(panic_str, sizeof (panic_str), 28703 "Reservation Conflict\nDisk: %s", 28704 ddi_pathname(SD_DEVINFO(un), path_str)); 28705 28706 panic(panic_str); 28707 } 28708 28709 /* 28710 * Note: The following sd_faultinjection_ioctl( ) routines implement 28711 * driver support for handling fault injection for error analysis 28712 * causing faults in multiple layers of the driver. 
28713 * 28714 */ 28715 28716 #ifdef SD_FAULT_INJECTION 28717 static uint_t sd_fault_injection_on = 0; 28718 28719 /* 28720 * Function: sd_faultinjection_ioctl() 28721 * 28722 * Description: This routine is the driver entry point for handling 28723 * faultinjection ioctls to inject errors into the 28724 * layer model 28725 * 28726 * Arguments: cmd - the ioctl cmd received 28727 * arg - the arguments from user and returns 28728 */ 28729 28730 static void 28731 sd_faultinjection_ioctl(int cmd, intptr_t arg, struct sd_lun *un) { 28732 28733 uint_t i = 0; 28734 uint_t rval; 28735 28736 SD_TRACE(SD_LOG_IOERR, un, "sd_faultinjection_ioctl: entry\n"); 28737 28738 mutex_enter(SD_MUTEX(un)); 28739 28740 switch (cmd) { 28741 case SDIOCRUN: 28742 /* Allow pushed faults to be injected */ 28743 SD_INFO(SD_LOG_SDTEST, un, 28744 "sd_faultinjection_ioctl: Injecting Fault Run\n"); 28745 28746 sd_fault_injection_on = 1; 28747 28748 SD_INFO(SD_LOG_IOERR, un, 28749 "sd_faultinjection_ioctl: run finished\n"); 28750 break; 28751 28752 case SDIOCSTART: 28753 /* Start Injection Session */ 28754 SD_INFO(SD_LOG_SDTEST, un, 28755 "sd_faultinjection_ioctl: Injecting Fault Start\n"); 28756 28757 sd_fault_injection_on = 0; 28758 un->sd_injection_mask = 0xFFFFFFFF; 28759 for (i = 0; i < SD_FI_MAX_ERROR; i++) { 28760 un->sd_fi_fifo_pkt[i] = NULL; 28761 un->sd_fi_fifo_xb[i] = NULL; 28762 un->sd_fi_fifo_un[i] = NULL; 28763 un->sd_fi_fifo_arq[i] = NULL; 28764 } 28765 un->sd_fi_fifo_start = 0; 28766 un->sd_fi_fifo_end = 0; 28767 28768 mutex_enter(&(un->un_fi_mutex)); 28769 un->sd_fi_log[0] = '\0'; 28770 un->sd_fi_buf_len = 0; 28771 mutex_exit(&(un->un_fi_mutex)); 28772 28773 SD_INFO(SD_LOG_IOERR, un, 28774 "sd_faultinjection_ioctl: start finished\n"); 28775 break; 28776 28777 case SDIOCSTOP: 28778 /* Stop Injection Session */ 28779 SD_INFO(SD_LOG_SDTEST, un, 28780 "sd_faultinjection_ioctl: Injecting Fault Stop\n"); 28781 sd_fault_injection_on = 0; 28782 un->sd_injection_mask = 0x0; 28783 28784 /* 
Empty stray or unuseds structs from fifo */ 28785 for (i = 0; i < SD_FI_MAX_ERROR; i++) { 28786 if (un->sd_fi_fifo_pkt[i] != NULL) { 28787 kmem_free(un->sd_fi_fifo_pkt[i], 28788 sizeof (struct sd_fi_pkt)); 28789 } 28790 if (un->sd_fi_fifo_xb[i] != NULL) { 28791 kmem_free(un->sd_fi_fifo_xb[i], 28792 sizeof (struct sd_fi_xb)); 28793 } 28794 if (un->sd_fi_fifo_un[i] != NULL) { 28795 kmem_free(un->sd_fi_fifo_un[i], 28796 sizeof (struct sd_fi_un)); 28797 } 28798 if (un->sd_fi_fifo_arq[i] != NULL) { 28799 kmem_free(un->sd_fi_fifo_arq[i], 28800 sizeof (struct sd_fi_arq)); 28801 } 28802 un->sd_fi_fifo_pkt[i] = NULL; 28803 un->sd_fi_fifo_un[i] = NULL; 28804 un->sd_fi_fifo_xb[i] = NULL; 28805 un->sd_fi_fifo_arq[i] = NULL; 28806 } 28807 un->sd_fi_fifo_start = 0; 28808 un->sd_fi_fifo_end = 0; 28809 28810 SD_INFO(SD_LOG_IOERR, un, 28811 "sd_faultinjection_ioctl: stop finished\n"); 28812 break; 28813 28814 case SDIOCINSERTPKT: 28815 /* Store a packet struct to be pushed onto fifo */ 28816 SD_INFO(SD_LOG_SDTEST, un, 28817 "sd_faultinjection_ioctl: Injecting Fault Insert Pkt\n"); 28818 28819 i = un->sd_fi_fifo_end % SD_FI_MAX_ERROR; 28820 28821 sd_fault_injection_on = 0; 28822 28823 /* No more that SD_FI_MAX_ERROR allowed in Queue */ 28824 if (un->sd_fi_fifo_pkt[i] != NULL) { 28825 kmem_free(un->sd_fi_fifo_pkt[i], 28826 sizeof (struct sd_fi_pkt)); 28827 } 28828 if (arg != NULL) { 28829 un->sd_fi_fifo_pkt[i] = 28830 kmem_alloc(sizeof (struct sd_fi_pkt), KM_NOSLEEP); 28831 if (un->sd_fi_fifo_pkt[i] == NULL) { 28832 /* Alloc failed don't store anything */ 28833 break; 28834 } 28835 rval = ddi_copyin((void *)arg, un->sd_fi_fifo_pkt[i], 28836 sizeof (struct sd_fi_pkt), 0); 28837 if (rval == -1) { 28838 kmem_free(un->sd_fi_fifo_pkt[i], 28839 sizeof (struct sd_fi_pkt)); 28840 un->sd_fi_fifo_pkt[i] = NULL; 28841 } 28842 } else { 28843 SD_INFO(SD_LOG_IOERR, un, 28844 "sd_faultinjection_ioctl: pkt null\n"); 28845 } 28846 break; 28847 28848 case SDIOCINSERTXB: 28849 /* Store a xb struct to 
be pushed onto fifo */ 28850 SD_INFO(SD_LOG_SDTEST, un, 28851 "sd_faultinjection_ioctl: Injecting Fault Insert XB\n"); 28852 28853 i = un->sd_fi_fifo_end % SD_FI_MAX_ERROR; 28854 28855 sd_fault_injection_on = 0; 28856 28857 if (un->sd_fi_fifo_xb[i] != NULL) { 28858 kmem_free(un->sd_fi_fifo_xb[i], 28859 sizeof (struct sd_fi_xb)); 28860 un->sd_fi_fifo_xb[i] = NULL; 28861 } 28862 if (arg != NULL) { 28863 un->sd_fi_fifo_xb[i] = 28864 kmem_alloc(sizeof (struct sd_fi_xb), KM_NOSLEEP); 28865 if (un->sd_fi_fifo_xb[i] == NULL) { 28866 /* Alloc failed don't store anything */ 28867 break; 28868 } 28869 rval = ddi_copyin((void *)arg, un->sd_fi_fifo_xb[i], 28870 sizeof (struct sd_fi_xb), 0); 28871 28872 if (rval == -1) { 28873 kmem_free(un->sd_fi_fifo_xb[i], 28874 sizeof (struct sd_fi_xb)); 28875 un->sd_fi_fifo_xb[i] = NULL; 28876 } 28877 } else { 28878 SD_INFO(SD_LOG_IOERR, un, 28879 "sd_faultinjection_ioctl: xb null\n"); 28880 } 28881 break; 28882 28883 case SDIOCINSERTUN: 28884 /* Store a un struct to be pushed onto fifo */ 28885 SD_INFO(SD_LOG_SDTEST, un, 28886 "sd_faultinjection_ioctl: Injecting Fault Insert UN\n"); 28887 28888 i = un->sd_fi_fifo_end % SD_FI_MAX_ERROR; 28889 28890 sd_fault_injection_on = 0; 28891 28892 if (un->sd_fi_fifo_un[i] != NULL) { 28893 kmem_free(un->sd_fi_fifo_un[i], 28894 sizeof (struct sd_fi_un)); 28895 un->sd_fi_fifo_un[i] = NULL; 28896 } 28897 if (arg != NULL) { 28898 un->sd_fi_fifo_un[i] = 28899 kmem_alloc(sizeof (struct sd_fi_un), KM_NOSLEEP); 28900 if (un->sd_fi_fifo_un[i] == NULL) { 28901 /* Alloc failed don't store anything */ 28902 break; 28903 } 28904 rval = ddi_copyin((void *)arg, un->sd_fi_fifo_un[i], 28905 sizeof (struct sd_fi_un), 0); 28906 if (rval == -1) { 28907 kmem_free(un->sd_fi_fifo_un[i], 28908 sizeof (struct sd_fi_un)); 28909 un->sd_fi_fifo_un[i] = NULL; 28910 } 28911 28912 } else { 28913 SD_INFO(SD_LOG_IOERR, un, 28914 "sd_faultinjection_ioctl: un null\n"); 28915 } 28916 28917 break; 28918 28919 case SDIOCINSERTARQ: 28920 /* 
Store a arq struct to be pushed onto fifo */ 28921 SD_INFO(SD_LOG_SDTEST, un, 28922 "sd_faultinjection_ioctl: Injecting Fault Insert ARQ\n"); 28923 i = un->sd_fi_fifo_end % SD_FI_MAX_ERROR; 28924 28925 sd_fault_injection_on = 0; 28926 28927 if (un->sd_fi_fifo_arq[i] != NULL) { 28928 kmem_free(un->sd_fi_fifo_arq[i], 28929 sizeof (struct sd_fi_arq)); 28930 un->sd_fi_fifo_arq[i] = NULL; 28931 } 28932 if (arg != NULL) { 28933 un->sd_fi_fifo_arq[i] = 28934 kmem_alloc(sizeof (struct sd_fi_arq), KM_NOSLEEP); 28935 if (un->sd_fi_fifo_arq[i] == NULL) { 28936 /* Alloc failed don't store anything */ 28937 break; 28938 } 28939 rval = ddi_copyin((void *)arg, un->sd_fi_fifo_arq[i], 28940 sizeof (struct sd_fi_arq), 0); 28941 if (rval == -1) { 28942 kmem_free(un->sd_fi_fifo_arq[i], 28943 sizeof (struct sd_fi_arq)); 28944 un->sd_fi_fifo_arq[i] = NULL; 28945 } 28946 28947 } else { 28948 SD_INFO(SD_LOG_IOERR, un, 28949 "sd_faultinjection_ioctl: arq null\n"); 28950 } 28951 28952 break; 28953 28954 case SDIOCPUSH: 28955 /* Push stored xb, pkt, un, and arq onto fifo */ 28956 sd_fault_injection_on = 0; 28957 28958 if (arg != NULL) { 28959 rval = ddi_copyin((void *)arg, &i, sizeof (uint_t), 0); 28960 if (rval != -1 && 28961 un->sd_fi_fifo_end + i < SD_FI_MAX_ERROR) { 28962 un->sd_fi_fifo_end += i; 28963 } 28964 } else { 28965 SD_INFO(SD_LOG_IOERR, un, 28966 "sd_faultinjection_ioctl: push arg null\n"); 28967 if (un->sd_fi_fifo_end + i < SD_FI_MAX_ERROR) { 28968 un->sd_fi_fifo_end++; 28969 } 28970 } 28971 SD_INFO(SD_LOG_IOERR, un, 28972 "sd_faultinjection_ioctl: push to end=%d\n", 28973 un->sd_fi_fifo_end); 28974 break; 28975 28976 case SDIOCRETRIEVE: 28977 /* Return buffer of log from Injection session */ 28978 SD_INFO(SD_LOG_SDTEST, un, 28979 "sd_faultinjection_ioctl: Injecting Fault Retreive"); 28980 28981 sd_fault_injection_on = 0; 28982 28983 mutex_enter(&(un->un_fi_mutex)); 28984 rval = ddi_copyout(un->sd_fi_log, (void *)arg, 28985 un->sd_fi_buf_len+1, 0); 28986 
mutex_exit(&(un->un_fi_mutex)); 28987 28988 if (rval == -1) { 28989 /* 28990 * arg is possibly invalid setting 28991 * it to NULL for return 28992 */ 28993 arg = NULL; 28994 } 28995 break; 28996 } 28997 28998 mutex_exit(SD_MUTEX(un)); 28999 SD_TRACE(SD_LOG_IOERR, un, "sd_faultinjection_ioctl:" 29000 " exit\n"); 29001 } 29002 29003 29004 /* 29005 * Function: sd_injection_log() 29006 * 29007 * Description: This routine adds buff to the already existing injection log 29008 * for retrieval via faultinjection_ioctl for use in fault 29009 * detection and recovery 29010 * 29011 * Arguments: buf - the string to add to the log 29012 */ 29013 29014 static void 29015 sd_injection_log(char *buf, struct sd_lun *un) 29016 { 29017 uint_t len; 29018 29019 ASSERT(un != NULL); 29020 ASSERT(buf != NULL); 29021 29022 mutex_enter(&(un->un_fi_mutex)); 29023 29024 len = min(strlen(buf), 255); 29025 /* Add logged value to Injection log to be returned later */ 29026 if (len + un->sd_fi_buf_len < SD_FI_MAX_BUF) { 29027 uint_t offset = strlen((char *)un->sd_fi_log); 29028 char *destp = (char *)un->sd_fi_log + offset; 29029 int i; 29030 for (i = 0; i < len; i++) { 29031 *destp++ = *buf++; 29032 } 29033 un->sd_fi_buf_len += len; 29034 un->sd_fi_log[un->sd_fi_buf_len] = '\0'; 29035 } 29036 29037 mutex_exit(&(un->un_fi_mutex)); 29038 } 29039 29040 29041 /* 29042 * Function: sd_faultinjection() 29043 * 29044 * Description: This routine takes the pkt and changes its 29045 * content based on error injection scenerio. 
 *
 *   Arguments: pktp	- packet to be changed
 */

static void
sd_faultinjection(struct scsi_pkt *pktp)
{
	uint_t i;
	struct sd_fi_pkt *fi_pkt;
	struct sd_fi_xb *fi_xb;
	struct sd_fi_un *fi_un;
	struct sd_fi_arq *fi_arq;
	struct buf *bp;
	struct sd_xbuf *xb;
	struct sd_lun *un;

	ASSERT(pktp != NULL);

	/* pull bp xb and un from pktp */
	bp = (struct buf *)pktp->pkt_private;
	xb = SD_GET_XBUF(bp);
	un = SD_GET_UN(bp);

	ASSERT(un != NULL);

	mutex_enter(SD_MUTEX(un));

	SD_TRACE(SD_LOG_SDTEST, un,
	    "sd_faultinjection: entry Injection from sdintr\n");

	/* if injection is off return */
	if (sd_fault_injection_on == 0 ||
	    un->sd_fi_fifo_start == un->sd_fi_fifo_end) {
		mutex_exit(SD_MUTEX(un));
		return;
	}

	SD_INFO(SD_LOG_SDTEST, un,
	    "sd_faultinjection: is working for copying\n");

	/* take next set off fifo */
	i = un->sd_fi_fifo_start % SD_FI_MAX_ERROR;

	fi_pkt = un->sd_fi_fifo_pkt[i];
	fi_xb = un->sd_fi_fifo_xb[i];
	fi_un = un->sd_fi_fifo_un[i];
	fi_arq = un->sd_fi_fifo_arq[i];


	/* set variables accordingly */
	/*
	 * set pkt if it was on fifo
	 *
	 * NOTE(review): SD_CONDSET is called with both "pktp" and "*pktp"
	 * as its first argument below; presumably the macro distinguishes
	 * scalar fields from pointer/array fields — confirm against the
	 * SD_CONDSET definition before restructuring.
	 */
	if (fi_pkt != NULL) {
		SD_CONDSET(pktp, pkt, pkt_flags, "pkt_flags");
		SD_CONDSET(*pktp, pkt, pkt_scbp, "pkt_scbp");
		if (fi_pkt->pkt_cdbp != 0xff)
			SD_CONDSET(*pktp, pkt, pkt_cdbp, "pkt_cdbp");
		SD_CONDSET(pktp, pkt, pkt_state, "pkt_state");
		SD_CONDSET(pktp, pkt, pkt_statistics, "pkt_statistics");
		SD_CONDSET(pktp, pkt, pkt_reason, "pkt_reason");

	}
	/* set xb if it was on fifo */
	if (fi_xb != NULL) {
		SD_CONDSET(xb, xb, xb_blkno, "xb_blkno");
		SD_CONDSET(xb, xb, xb_dma_resid, "xb_dma_resid");
		if (fi_xb->xb_retry_count != 0)
			SD_CONDSET(xb, xb, xb_retry_count, "xb_retry_count");
		SD_CONDSET(xb, xb, xb_victim_retry_count,
		    "xb_victim_retry_count");
		SD_CONDSET(xb, xb, xb_sense_status, "xb_sense_status");
		SD_CONDSET(xb, xb, xb_sense_state, "xb_sense_state");
		SD_CONDSET(xb, xb, xb_sense_resid, "xb_sense_resid");

		/* copy in block data from sense */
		/*
		 * if (fi_xb->xb_sense_data[0] != -1) {
		 *	bcopy(fi_xb->xb_sense_data, xb->xb_sense_data,
		 *	SENSE_LENGTH);
		 * }
		 */
		bcopy(fi_xb->xb_sense_data, xb->xb_sense_data, SENSE_LENGTH);

		/* copy in extended sense codes */
		SD_CONDSET(((struct scsi_extended_sense *)xb->xb_sense_data),
		    xb, es_code, "es_code");
		SD_CONDSET(((struct scsi_extended_sense *)xb->xb_sense_data),
		    xb, es_key, "es_key");
		SD_CONDSET(((struct scsi_extended_sense *)xb->xb_sense_data),
		    xb, es_add_code, "es_add_code");
		SD_CONDSET(((struct scsi_extended_sense *)xb->xb_sense_data),
		    xb, es_qual_code, "es_qual_code");
		/* Force the fabricated sense data to look like valid sense. */
		struct scsi_extended_sense *esp;
		esp = (struct scsi_extended_sense *)xb->xb_sense_data;
		esp->es_class = CLASS_EXTENDED_SENSE;
	}

	/* set un if it was on fifo */
	if (fi_un != NULL) {
		SD_CONDSET(un->un_sd->sd_inq, un, inq_rmb, "inq_rmb");
		SD_CONDSET(un, un, un_ctype, "un_ctype");
		SD_CONDSET(un, un, un_reset_retry_count,
		    "un_reset_retry_count");
		SD_CONDSET(un, un, un_reservation_type, "un_reservation_type");
		SD_CONDSET(un, un, un_resvd_status, "un_resvd_status");
		SD_CONDSET(un, un, un_f_arq_enabled, "un_f_arq_enabled");
		SD_CONDSET(un, un, un_f_allow_bus_device_reset,
		    "un_f_allow_bus_device_reset");
		SD_CONDSET(un, un, un_f_opt_queueing, "un_f_opt_queueing");

	}

	/* copy in auto request sense if it was on fifo */
	if (fi_arq != NULL) {
		bcopy(fi_arq, pktp->pkt_scbp, sizeof (struct sd_fi_arq));
	}

	/* free structs */
	if (un->sd_fi_fifo_pkt[i] != NULL) {
		kmem_free(un->sd_fi_fifo_pkt[i], sizeof (struct sd_fi_pkt));
	}
	if (un->sd_fi_fifo_xb[i] != NULL) {
		kmem_free(un->sd_fi_fifo_xb[i], sizeof (struct sd_fi_xb));
	}
	if (un->sd_fi_fifo_un[i] != NULL) {
		kmem_free(un->sd_fi_fifo_un[i], sizeof (struct sd_fi_un));
	}
	if (un->sd_fi_fifo_arq[i] != NULL) {
		kmem_free(un->sd_fi_fifo_arq[i], sizeof (struct sd_fi_arq));
	}

	/*
	 * kmem_free does not guarantee to set to NULL
	 * since we use these to determine if we set
	 * values or not lets confirm they are always
	 * NULL after free
	 */
	un->sd_fi_fifo_pkt[i] = NULL;
	un->sd_fi_fifo_un[i] = NULL;
	un->sd_fi_fifo_xb[i] = NULL;
	un->sd_fi_fifo_arq[i] = NULL;

	/* Consume this slot; start chases end around the circular FIFO. */
	un->sd_fi_fifo_start++;

	mutex_exit(SD_MUTEX(un));

	SD_INFO(SD_LOG_SDTEST, un, "sd_faultinjection: exit\n");
}

#endif /* SD_FAULT_INJECTION */

/*
 * This routine is invoked in sd_unit_attach(). Before calling it, the
 * properties in conf file should be processed already, and "hotpluggable"
 * property was processed also.
 *
 * The sd driver distinguishes 3 different type of devices: removable media,
 * non-removable media, and hotpluggable. Below the differences are defined:
 *
 * 1. Device ID
 *
 *     The device ID of a device is used to identify this device. Refer to
 *     ddi_devid_register(9F).
 *
 *     For a non-removable media disk device which can provide 0x80 or 0x83
 *     VPD page (refer to INQUIRY command of SCSI SPC specification), a unique
 *     device ID is created to identify this device. For other non-removable
 *     media devices, a default device ID is created only if this device has
 *     at least 2 alternate cylinders. Otherwise, this device has no devid.
29214 * 29215 * ------------------------------------------------------- 29216 * removable media hotpluggable | Can Have Device ID 29217 * ------------------------------------------------------- 29218 * false false | Yes 29219 * false true | Yes 29220 * true x | No 29221 * ------------------------------------------------------ 29222 * 29223 * 29224 * 2. SCSI group 4 commands 29225 * 29226 * In SCSI specs, only some commands in group 4 command set can use 29227 * 8-byte addresses that can be used to access >2TB storage spaces. 29228 * Other commands have no such capability. Without supporting group4, 29229 * it is impossible to make full use of storage spaces of a disk with 29230 * capacity larger than 2TB. 29231 * 29232 * ----------------------------------------------- 29233 * removable media hotpluggable LP64 | Group 29234 * ----------------------------------------------- 29235 * false false false | 1 29236 * false false true | 4 29237 * false true false | 1 29238 * false true true | 4 29239 * true x x | 5 29240 * ----------------------------------------------- 29241 * 29242 * 29243 * 3. Check for VTOC Label 29244 * 29245 * If a direct-access disk has no EFI label, sd will check if it has a 29246 * valid VTOC label. Now, sd also does that check for removable media 29247 * and hotpluggable devices. 29248 * 29249 * -------------------------------------------------------------- 29250 * Direct-Access removable media hotpluggable | Check Label 29251 * ------------------------------------------------------------- 29252 * false false false | No 29253 * false false true | No 29254 * false true false | Yes 29255 * false true true | Yes 29256 * true x x | Yes 29257 * -------------------------------------------------------------- 29258 * 29259 * 29260 * 4. Building default VTOC label 29261 * 29262 * As section 3 says, sd checks if some kinds of devices have VTOC label. 
29263 * If those devices have no valid VTOC label, sd(7d) will attempt to 29264 * create default VTOC for them. Currently sd creates default VTOC label 29265 * for all devices on x86 platform (VTOC_16), but only for removable 29266 * media devices on SPARC (VTOC_8). 29267 * 29268 * ----------------------------------------------------------- 29269 * removable media hotpluggable platform | Default Label 29270 * ----------------------------------------------------------- 29271 * false false sparc | No 29272 * false true x86 | Yes 29273 * false true sparc | Yes 29274 * true x x | Yes 29275 * ---------------------------------------------------------- 29276 * 29277 * 29278 * 5. Supported blocksizes of target devices 29279 * 29280 * Sd supports non-512-byte blocksize for removable media devices only. 29281 * For other devices, only 512-byte blocksize is supported. This may be 29282 * changed in near future because some RAID devices require non-512-byte 29283 * blocksize 29284 * 29285 * ----------------------------------------------------------- 29286 * removable media hotpluggable | non-512-byte blocksize 29287 * ----------------------------------------------------------- 29288 * false false | No 29289 * false true | No 29290 * true x | Yes 29291 * ----------------------------------------------------------- 29292 * 29293 * 29294 * 6. Automatic mount & unmount 29295 * 29296 * Sd(7d) driver provides DKIOCREMOVABLE ioctl. This ioctl is used to query 29297 * if a device is removable media device. It return 1 for removable media 29298 * devices, and 0 for others. 29299 * 29300 * The automatic mounting subsystem should distinguish between the types 29301 * of devices and apply automounting policies to each. 29302 * 29303 * 29304 * 7. fdisk partition management 29305 * 29306 * Fdisk is traditional partition method on x86 platform. Sd(7d) driver 29307 * just supports fdisk partitions on x86 platform. On sparc platform, sd 29308 * doesn't support fdisk partitions at all. 
Note: pcfs(7fs) can recognize 29309 * fdisk partitions on both x86 and SPARC platform. 29310 * 29311 * ----------------------------------------------------------- 29312 * platform removable media USB/1394 | fdisk supported 29313 * ----------------------------------------------------------- 29314 * x86 X X | true 29315 * ------------------------------------------------------------ 29316 * sparc X X | false 29317 * ------------------------------------------------------------ 29318 * 29319 * 29320 * 8. MBOOT/MBR 29321 * 29322 * Although sd(7d) doesn't support fdisk on SPARC platform, it does support 29323 * read/write mboot for removable media devices on sparc platform. 29324 * 29325 * ----------------------------------------------------------- 29326 * platform removable media USB/1394 | mboot supported 29327 * ----------------------------------------------------------- 29328 * x86 X X | true 29329 * ------------------------------------------------------------ 29330 * sparc false false | false 29331 * sparc false true | true 29332 * sparc true false | true 29333 * sparc true true | true 29334 * ------------------------------------------------------------ 29335 * 29336 * 29337 * 9. error handling during opening device 29338 * 29339 * If failed to open a disk device, an errno is returned. For some kinds 29340 * of errors, different errno is returned depending on if this device is 29341 * a removable media device. This brings USB/1394 hard disks in line with 29342 * expected hard disk behavior. It is not expected that this breaks any 29343 * application. 29344 * 29345 * ------------------------------------------------------ 29346 * removable media hotpluggable | errno 29347 * ------------------------------------------------------ 29348 * false false | EIO 29349 * false true | EIO 29350 * true x | ENXIO 29351 * ------------------------------------------------------ 29352 * 29353 * 29354 * 11. 
ioctls: DKIOCEJECT, CDROMEJECT 29355 * 29356 * These IOCTLs are applicable only to removable media devices. 29357 * 29358 * ----------------------------------------------------------- 29359 * removable media hotpluggable |DKIOCEJECT, CDROMEJECT 29360 * ----------------------------------------------------------- 29361 * false false | No 29362 * false true | No 29363 * true x | Yes 29364 * ----------------------------------------------------------- 29365 * 29366 * 29367 * 12. Kstats for partitions 29368 * 29369 * sd creates partition kstat for non-removable media devices. USB and 29370 * Firewire hard disks now have partition kstats 29371 * 29372 * ------------------------------------------------------ 29373 * removable media hotpluggable | kstat 29374 * ------------------------------------------------------ 29375 * false false | Yes 29376 * false true | Yes 29377 * true x | No 29378 * ------------------------------------------------------ 29379 * 29380 * 29381 * 13. Removable media & hotpluggable properties 29382 * 29383 * Sd driver creates a "removable-media" property for removable media 29384 * devices. Parent nexus drivers create a "hotpluggable" property if 29385 * it supports hotplugging. 29386 * 29387 * --------------------------------------------------------------------- 29388 * removable media hotpluggable | "removable-media" " hotpluggable" 29389 * --------------------------------------------------------------------- 29390 * false false | No No 29391 * false true | No Yes 29392 * true false | Yes No 29393 * true true | Yes Yes 29394 * --------------------------------------------------------------------- 29395 * 29396 * 29397 * 14. Power Management 29398 * 29399 * sd only power manages removable media devices or devices that support 29400 * LOG_SENSE or have a "pm-capable" property (PSARC/2002/250) 29401 * 29402 * A parent nexus that supports hotplugging can also set "pm-capable" 29403 * if the disk can be power managed. 
29404 * 29405 * ------------------------------------------------------------ 29406 * removable media hotpluggable pm-capable | power manage 29407 * ------------------------------------------------------------ 29408 * false false false | No 29409 * false false true | Yes 29410 * false true false | No 29411 * false true true | Yes 29412 * true x x | Yes 29413 * ------------------------------------------------------------ 29414 * 29415 * USB and firewire hard disks can now be power managed independently 29416 * of the framebuffer 29417 * 29418 * 29419 * 15. Support for USB disks with capacity larger than 1TB 29420 * 29421 * Currently, sd doesn't permit a fixed disk device with capacity 29422 * larger than 1TB to be used in a 32-bit operating system environment. 29423 * However, sd doesn't do that for removable media devices. Instead, it 29424 * assumes that removable media devices cannot have a capacity larger 29425 * than 1TB. Therefore, using those devices on 32-bit system is partially 29426 * supported, which can cause some unexpected results. 29427 * 29428 * --------------------------------------------------------------------- 29429 * removable media USB/1394 | Capacity > 1TB | Used in 32-bit env 29430 * --------------------------------------------------------------------- 29431 * false false | true | no 29432 * false true | true | no 29433 * true false | true | Yes 29434 * true true | true | Yes 29435 * --------------------------------------------------------------------- 29436 * 29437 * 29438 * 16. Check write-protection at open time 29439 * 29440 * When a removable media device is being opened for writing without NDELAY 29441 * flag, sd will check if this device is writable. If attempting to open 29442 * without NDELAY flag a write-protected device, this operation will abort. 
29443 * 29444 * ------------------------------------------------------------ 29445 * removable media USB/1394 | WP Check 29446 * ------------------------------------------------------------ 29447 * false false | No 29448 * false true | No 29449 * true false | Yes 29450 * true true | Yes 29451 * ------------------------------------------------------------ 29452 * 29453 * 29454 * 17. syslog when corrupted VTOC is encountered 29455 * 29456 * Currently, if an invalid VTOC is encountered, sd only print syslog 29457 * for fixed SCSI disks. 29458 * ------------------------------------------------------------ 29459 * removable media USB/1394 | print syslog 29460 * ------------------------------------------------------------ 29461 * false false | Yes 29462 * false true | No 29463 * true false | No 29464 * true true | No 29465 * ------------------------------------------------------------ 29466 */ 29467 static void 29468 sd_set_unit_attributes(struct sd_lun *un, dev_info_t *devi) 29469 { 29470 int pm_capable_prop; 29471 29472 ASSERT(un->un_sd); 29473 ASSERT(un->un_sd->sd_inq); 29474 29475 /* 29476 * Enable SYNC CACHE support for all devices. 29477 */ 29478 un->un_f_sync_cache_supported = TRUE; 29479 29480 /* 29481 * Set the sync cache required flag to false. 29482 * This would ensure that there is no SYNC CACHE 29483 * sent when there are no writes 29484 */ 29485 un->un_f_sync_cache_required = FALSE; 29486 29487 if (un->un_sd->sd_inq->inq_rmb) { 29488 /* 29489 * The media of this device is removable. And for this kind 29490 * of devices, it is possible to change medium after opening 29491 * devices. Thus we should support this operation. 
29492 */ 29493 un->un_f_has_removable_media = TRUE; 29494 29495 /* 29496 * support non-512-byte blocksize of removable media devices 29497 */ 29498 un->un_f_non_devbsize_supported = TRUE; 29499 29500 /* 29501 * Assume that all removable media devices support DOOR_LOCK 29502 */ 29503 un->un_f_doorlock_supported = TRUE; 29504 29505 /* 29506 * For a removable media device, it is possible to be opened 29507 * with NDELAY flag when there is no media in drive, in this 29508 * case we don't care if device is writable. But if without 29509 * NDELAY flag, we need to check if media is write-protected. 29510 */ 29511 un->un_f_chk_wp_open = TRUE; 29512 29513 /* 29514 * need to start a SCSI watch thread to monitor media state, 29515 * when media is being inserted or ejected, notify syseventd. 29516 */ 29517 un->un_f_monitor_media_state = TRUE; 29518 29519 /* 29520 * Some devices don't support START_STOP_UNIT command. 29521 * Therefore, we'd better check if a device supports it 29522 * before sending it. 29523 */ 29524 un->un_f_check_start_stop = TRUE; 29525 29526 /* 29527 * support eject media ioctl: 29528 * FDEJECT, DKIOCEJECT, CDROMEJECT 29529 */ 29530 un->un_f_eject_media_supported = TRUE; 29531 29532 /* 29533 * Because many removable-media devices don't support 29534 * LOG_SENSE, we couldn't use this command to check if 29535 * a removable media device support power-management. 29536 * We assume that they support power-management via 29537 * START_STOP_UNIT command and can be spun up and down 29538 * without limitations. 29539 */ 29540 un->un_f_pm_supported = TRUE; 29541 29542 /* 29543 * Need to create a zero length (Boolean) property 29544 * removable-media for the removable media devices. 29545 * Note that the return value of the property is not being 29546 * checked, since if unable to create the property 29547 * then do not want the attach to fail altogether. Consistent 29548 * with other property creation in attach. 
29549 */ 29550 (void) ddi_prop_create(DDI_DEV_T_NONE, devi, 29551 DDI_PROP_CANSLEEP, "removable-media", NULL, 0); 29552 29553 } else { 29554 /* 29555 * create device ID for device 29556 */ 29557 un->un_f_devid_supported = TRUE; 29558 29559 /* 29560 * Spin up non-removable-media devices once it is attached 29561 */ 29562 un->un_f_attach_spinup = TRUE; 29563 29564 /* 29565 * According to SCSI specification, Sense data has two kinds of 29566 * format: fixed format, and descriptor format. At present, we 29567 * don't support descriptor format sense data for removable 29568 * media. 29569 */ 29570 if (SD_INQUIRY(un)->inq_dtype == DTYPE_DIRECT) { 29571 un->un_f_descr_format_supported = TRUE; 29572 } 29573 29574 /* 29575 * kstats are created only for non-removable media devices. 29576 * 29577 * Set this in sd.conf to 0 in order to disable kstats. The 29578 * default is 1, so they are enabled by default. 29579 */ 29580 un->un_f_pkstats_enabled = (ddi_prop_get_int(DDI_DEV_T_ANY, 29581 SD_DEVINFO(un), DDI_PROP_DONTPASS, 29582 "enable-partition-kstats", 1)); 29583 29584 /* 29585 * Check if HBA has set the "pm-capable" property. 29586 * If "pm-capable" exists and is non-zero then we can 29587 * power manage the device without checking the start/stop 29588 * cycle count log sense page. 29589 * 29590 * If "pm-capable" exists and is SD_PM_CAPABLE_FALSE (0) 29591 * then we should not power manage the device. 29592 * 29593 * If "pm-capable" doesn't exist then pm_capable_prop will 29594 * be set to SD_PM_CAPABLE_UNDEFINED (-1). In this case, 29595 * sd will check the start/stop cycle count log sense page 29596 * and power manage the device if the cycle count limit has 29597 * not been exceeded. 
29598 */ 29599 pm_capable_prop = ddi_prop_get_int(DDI_DEV_T_ANY, devi, 29600 DDI_PROP_DONTPASS, "pm-capable", SD_PM_CAPABLE_UNDEFINED); 29601 if (pm_capable_prop == SD_PM_CAPABLE_UNDEFINED) { 29602 un->un_f_log_sense_supported = TRUE; 29603 } else { 29604 /* 29605 * pm-capable property exists. 29606 * 29607 * Convert "TRUE" values for pm_capable_prop to 29608 * SD_PM_CAPABLE_TRUE (1) to make it easier to check 29609 * later. "TRUE" values are any values except 29610 * SD_PM_CAPABLE_FALSE (0) and 29611 * SD_PM_CAPABLE_UNDEFINED (-1) 29612 */ 29613 if (pm_capable_prop == SD_PM_CAPABLE_FALSE) { 29614 un->un_f_log_sense_supported = FALSE; 29615 } else { 29616 un->un_f_pm_supported = TRUE; 29617 } 29618 29619 SD_INFO(SD_LOG_ATTACH_DETACH, un, 29620 "sd_unit_attach: un:0x%p pm-capable " 29621 "property set to %d.\n", un, un->un_f_pm_supported); 29622 } 29623 } 29624 29625 if (un->un_f_is_hotpluggable) { 29626 29627 /* 29628 * Have to watch hotpluggable devices as well, since 29629 * that's the only way for userland applications to 29630 * detect hot removal while device is busy/mounted. 29631 */ 29632 un->un_f_monitor_media_state = TRUE; 29633 29634 un->un_f_check_start_stop = TRUE; 29635 29636 } 29637 } 29638 29639 /* 29640 * sd_tg_rdwr: 29641 * Provides rdwr access for cmlb via sd_tgops. The start_block is 29642 * in sys block size, req_length in bytes. 
29643 * 29644 */ 29645 static int 29646 sd_tg_rdwr(dev_info_t *devi, uchar_t cmd, void *bufaddr, 29647 diskaddr_t start_block, size_t reqlength, void *tg_cookie) 29648 { 29649 struct sd_lun *un; 29650 int path_flag = (int)(uintptr_t)tg_cookie; 29651 char *dkl = NULL; 29652 diskaddr_t real_addr = start_block; 29653 diskaddr_t first_byte, end_block; 29654 29655 size_t buffer_size = reqlength; 29656 int rval = 0; 29657 diskaddr_t cap; 29658 uint32_t lbasize; 29659 sd_ssc_t *ssc; 29660 29661 un = ddi_get_soft_state(sd_state, ddi_get_instance(devi)); 29662 if (un == NULL) 29663 return (ENXIO); 29664 29665 if (cmd != TG_READ && cmd != TG_WRITE) 29666 return (EINVAL); 29667 29668 ssc = sd_ssc_init(un); 29669 mutex_enter(SD_MUTEX(un)); 29670 if (un->un_f_tgt_blocksize_is_valid == FALSE) { 29671 mutex_exit(SD_MUTEX(un)); 29672 rval = sd_send_scsi_READ_CAPACITY(ssc, (uint64_t *)&cap, 29673 &lbasize, path_flag); 29674 if (rval != 0) 29675 goto done1; 29676 mutex_enter(SD_MUTEX(un)); 29677 sd_update_block_info(un, lbasize, cap); 29678 if ((un->un_f_tgt_blocksize_is_valid == FALSE)) { 29679 mutex_exit(SD_MUTEX(un)); 29680 rval = EIO; 29681 goto done; 29682 } 29683 } 29684 29685 if (NOT_DEVBSIZE(un)) { 29686 /* 29687 * sys_blocksize != tgt_blocksize, need to re-adjust 29688 * blkno and save the index to beginning of dk_label 29689 */ 29690 first_byte = SD_SYSBLOCKS2BYTES(un, start_block); 29691 real_addr = first_byte / un->un_tgt_blocksize; 29692 29693 end_block = (first_byte + reqlength + 29694 un->un_tgt_blocksize - 1) / un->un_tgt_blocksize; 29695 29696 /* round up buffer size to multiple of target block size */ 29697 buffer_size = (end_block - real_addr) * un->un_tgt_blocksize; 29698 29699 SD_TRACE(SD_LOG_IO_PARTITION, un, "sd_tg_rdwr", 29700 "label_addr: 0x%x allocation size: 0x%x\n", 29701 real_addr, buffer_size); 29702 29703 if (((first_byte % un->un_tgt_blocksize) != 0) || 29704 (reqlength % un->un_tgt_blocksize) != 0) 29705 /* the request is not aligned */ 29706 dkl = 
kmem_zalloc(buffer_size, KM_SLEEP); 29707 } 29708 29709 /* 29710 * The MMC standard allows READ CAPACITY to be 29711 * inaccurate by a bounded amount (in the interest of 29712 * response latency). As a result, failed READs are 29713 * commonplace (due to the reading of metadata and not 29714 * data). Depending on the per-Vendor/drive Sense data, 29715 * the failed READ can cause many (unnecessary) retries. 29716 */ 29717 29718 if (ISCD(un) && (cmd == TG_READ) && 29719 (un->un_f_blockcount_is_valid == TRUE) && 29720 ((start_block == (un->un_blockcount - 1))|| 29721 (start_block == (un->un_blockcount - 2)))) { 29722 path_flag = SD_PATH_DIRECT_PRIORITY; 29723 } 29724 29725 mutex_exit(SD_MUTEX(un)); 29726 if (cmd == TG_READ) { 29727 rval = sd_send_scsi_READ(ssc, (dkl != NULL)? dkl: bufaddr, 29728 buffer_size, real_addr, path_flag); 29729 if (dkl != NULL) 29730 bcopy(dkl + SD_TGTBYTEOFFSET(un, start_block, 29731 real_addr), bufaddr, reqlength); 29732 } else { 29733 if (dkl) { 29734 rval = sd_send_scsi_READ(ssc, dkl, buffer_size, 29735 real_addr, path_flag); 29736 if (rval) { 29737 goto done1; 29738 } 29739 bcopy(bufaddr, dkl + SD_TGTBYTEOFFSET(un, start_block, 29740 real_addr), reqlength); 29741 } 29742 rval = sd_send_scsi_WRITE(ssc, (dkl != NULL)? 
dkl: bufaddr, 29743 buffer_size, real_addr, path_flag); 29744 } 29745 29746 done1: 29747 if (dkl != NULL) 29748 kmem_free(dkl, buffer_size); 29749 29750 if (rval != 0) { 29751 if (rval == EIO) 29752 sd_ssc_assessment(ssc, SD_FMT_STATUS_CHECK); 29753 else 29754 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 29755 } 29756 done: 29757 sd_ssc_fini(ssc); 29758 return (rval); 29759 } 29760 29761 29762 static int 29763 sd_tg_getinfo(dev_info_t *devi, int cmd, void *arg, void *tg_cookie) 29764 { 29765 29766 struct sd_lun *un; 29767 diskaddr_t cap; 29768 uint32_t lbasize; 29769 int path_flag = (int)(uintptr_t)tg_cookie; 29770 int ret = 0; 29771 29772 un = ddi_get_soft_state(sd_state, ddi_get_instance(devi)); 29773 if (un == NULL) 29774 return (ENXIO); 29775 29776 switch (cmd) { 29777 case TG_GETPHYGEOM: 29778 case TG_GETVIRTGEOM: 29779 case TG_GETCAPACITY: 29780 case TG_GETBLOCKSIZE: 29781 mutex_enter(SD_MUTEX(un)); 29782 29783 if ((un->un_f_blockcount_is_valid == TRUE) && 29784 (un->un_f_tgt_blocksize_is_valid == TRUE)) { 29785 cap = un->un_blockcount; 29786 lbasize = un->un_tgt_blocksize; 29787 mutex_exit(SD_MUTEX(un)); 29788 } else { 29789 sd_ssc_t *ssc; 29790 mutex_exit(SD_MUTEX(un)); 29791 ssc = sd_ssc_init(un); 29792 ret = sd_send_scsi_READ_CAPACITY(ssc, (uint64_t *)&cap, 29793 &lbasize, path_flag); 29794 if (ret != 0) { 29795 if (ret == EIO) 29796 sd_ssc_assessment(ssc, 29797 SD_FMT_STATUS_CHECK); 29798 else 29799 sd_ssc_assessment(ssc, 29800 SD_FMT_IGNORE); 29801 sd_ssc_fini(ssc); 29802 return (ret); 29803 } 29804 sd_ssc_fini(ssc); 29805 mutex_enter(SD_MUTEX(un)); 29806 sd_update_block_info(un, lbasize, cap); 29807 if ((un->un_f_blockcount_is_valid == FALSE) || 29808 (un->un_f_tgt_blocksize_is_valid == FALSE)) { 29809 mutex_exit(SD_MUTEX(un)); 29810 return (EIO); 29811 } 29812 mutex_exit(SD_MUTEX(un)); 29813 } 29814 29815 if (cmd == TG_GETCAPACITY) { 29816 *(diskaddr_t *)arg = cap; 29817 return (0); 29818 } 29819 29820 if (cmd == TG_GETBLOCKSIZE) { 29821 *(uint32_t *)arg = 
lbasize; 29822 return (0); 29823 } 29824 29825 if (cmd == TG_GETPHYGEOM) 29826 ret = sd_get_physical_geometry(un, (cmlb_geom_t *)arg, 29827 cap, lbasize, path_flag); 29828 else 29829 /* TG_GETVIRTGEOM */ 29830 ret = sd_get_virtual_geometry(un, 29831 (cmlb_geom_t *)arg, cap, lbasize); 29832 29833 return (ret); 29834 29835 case TG_GETATTR: 29836 mutex_enter(SD_MUTEX(un)); 29837 ((tg_attribute_t *)arg)->media_is_writable = 29838 un->un_f_mmc_writable_media; 29839 mutex_exit(SD_MUTEX(un)); 29840 return (0); 29841 default: 29842 return (ENOTTY); 29843 29844 } 29845 } 29846 29847 /* 29848 * Function: sd_ssc_ereport_post 29849 * 29850 * Description: Will be called when SD driver need to post an ereport. 29851 * 29852 * Context: Kernel thread or interrupt context. 29853 */ 29854 static void 29855 sd_ssc_ereport_post(sd_ssc_t *ssc, enum sd_driver_assessment drv_assess) 29856 { 29857 int uscsi_path_instance = 0; 29858 uchar_t uscsi_pkt_reason; 29859 uint32_t uscsi_pkt_state; 29860 uint32_t uscsi_pkt_statistics; 29861 uint64_t uscsi_ena; 29862 uchar_t op_code; 29863 uint8_t *sensep; 29864 union scsi_cdb *cdbp; 29865 uint_t cdblen = 0; 29866 uint_t senlen = 0; 29867 struct sd_lun *un; 29868 dev_info_t *dip; 29869 char *devid; 29870 int ssc_invalid_flags = SSC_FLAGS_INVALID_PKT_REASON | 29871 SSC_FLAGS_INVALID_STATUS | 29872 SSC_FLAGS_INVALID_SENSE | 29873 SSC_FLAGS_INVALID_DATA; 29874 char assessment[16]; 29875 29876 ASSERT(ssc != NULL); 29877 ASSERT(ssc->ssc_uscsi_cmd != NULL); 29878 ASSERT(ssc->ssc_uscsi_info != NULL); 29879 29880 un = ssc->ssc_un; 29881 ASSERT(un != NULL); 29882 29883 dip = un->un_sd->sd_dev; 29884 29885 /* 29886 * Get the devid: 29887 * devid will only be passed to non-transport error reports. 29888 */ 29889 devid = DEVI(dip)->devi_devid_str; 29890 29891 /* 29892 * If we are syncing or dumping, the command will not be executed 29893 * so we bypass this situation. 
29894 */ 29895 if (ddi_in_panic() || (un->un_state == SD_STATE_SUSPENDED) || 29896 (un->un_state == SD_STATE_DUMPING)) 29897 return; 29898 29899 uscsi_pkt_reason = ssc->ssc_uscsi_info->ui_pkt_reason; 29900 uscsi_path_instance = ssc->ssc_uscsi_cmd->uscsi_path_instance; 29901 uscsi_pkt_state = ssc->ssc_uscsi_info->ui_pkt_state; 29902 uscsi_pkt_statistics = ssc->ssc_uscsi_info->ui_pkt_statistics; 29903 uscsi_ena = ssc->ssc_uscsi_info->ui_ena; 29904 29905 sensep = (uint8_t *)ssc->ssc_uscsi_cmd->uscsi_rqbuf; 29906 cdbp = (union scsi_cdb *)ssc->ssc_uscsi_cmd->uscsi_cdb; 29907 29908 /* In rare cases, EG:DOORLOCK, the cdb could be NULL */ 29909 if (cdbp == NULL) { 29910 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 29911 "sd_ssc_ereport_post meet empty cdb\n"); 29912 return; 29913 } 29914 29915 op_code = cdbp->scc_cmd; 29916 29917 cdblen = (int)ssc->ssc_uscsi_cmd->uscsi_cdblen; 29918 senlen = (int)(ssc->ssc_uscsi_cmd->uscsi_rqlen - 29919 ssc->ssc_uscsi_cmd->uscsi_rqresid); 29920 29921 if (senlen > 0) 29922 ASSERT(sensep != NULL); 29923 29924 /* 29925 * Initialize drv_assess to corresponding values. 29926 * SD_FM_DRV_FATAL will be mapped to "fail" or "fatal" depending 29927 * on the sense-key returned back. 29928 */ 29929 switch (drv_assess) { 29930 case SD_FM_DRV_RECOVERY: 29931 (void) sprintf(assessment, "%s", "recovered"); 29932 break; 29933 case SD_FM_DRV_RETRY: 29934 (void) sprintf(assessment, "%s", "retry"); 29935 break; 29936 case SD_FM_DRV_NOTICE: 29937 (void) sprintf(assessment, "%s", "info"); 29938 break; 29939 case SD_FM_DRV_FATAL: 29940 default: 29941 (void) sprintf(assessment, "%s", "unknown"); 29942 } 29943 /* 29944 * If drv_assess == SD_FM_DRV_RECOVERY, this should be a recovered 29945 * command, we will post ereport.io.scsi.cmd.disk.recovered. 29946 * driver-assessment will always be "recovered" here. 
29947 */ 29948 if (drv_assess == SD_FM_DRV_RECOVERY) { 29949 scsi_fm_ereport_post(un->un_sd, uscsi_path_instance, 29950 "cmd.disk.recovered", uscsi_ena, devid, DDI_NOSLEEP, 29951 FM_VERSION, DATA_TYPE_UINT8, FM_EREPORT_VERS0, 29952 "driver-assessment", DATA_TYPE_STRING, assessment, 29953 "op-code", DATA_TYPE_UINT8, op_code, 29954 "cdb", DATA_TYPE_UINT8_ARRAY, 29955 cdblen, ssc->ssc_uscsi_cmd->uscsi_cdb, 29956 "pkt-reason", DATA_TYPE_UINT8, uscsi_pkt_reason, 29957 "pkt-state", DATA_TYPE_UINT32, uscsi_pkt_state, 29958 "pkt-stats", DATA_TYPE_UINT32, uscsi_pkt_statistics, 29959 NULL); 29960 return; 29961 } 29962 29963 /* 29964 * If there is un-expected/un-decodable data, we should post 29965 * ereport.io.scsi.cmd.disk.dev.uderr. 29966 * driver-assessment will be set based on parameter drv_assess. 29967 * SSC_FLAGS_INVALID_SENSE - invalid sense data sent back. 29968 * SSC_FLAGS_INVALID_PKT_REASON - invalid pkt-reason encountered. 29969 * SSC_FLAGS_INVALID_STATUS - invalid stat-code encountered. 29970 * SSC_FLAGS_INVALID_DATA - invalid data sent back. 29971 */ 29972 if (ssc->ssc_flags & ssc_invalid_flags) { 29973 if (ssc->ssc_flags & SSC_FLAGS_INVALID_SENSE) { 29974 scsi_fm_ereport_post(un->un_sd, uscsi_path_instance, 29975 "cmd.disk.dev.uderr", uscsi_ena, devid, DDI_NOSLEEP, 29976 FM_VERSION, DATA_TYPE_UINT8, FM_EREPORT_VERS0, 29977 "driver-assessment", DATA_TYPE_STRING, 29978 drv_assess == SD_FM_DRV_FATAL ? 
29979 "fail" : assessment, 29980 "op-code", DATA_TYPE_UINT8, op_code, 29981 "cdb", DATA_TYPE_UINT8_ARRAY, 29982 cdblen, ssc->ssc_uscsi_cmd->uscsi_cdb, 29983 "pkt-reason", DATA_TYPE_UINT8, uscsi_pkt_reason, 29984 "pkt-state", DATA_TYPE_UINT32, uscsi_pkt_state, 29985 "pkt-stats", DATA_TYPE_UINT32, 29986 uscsi_pkt_statistics, 29987 "stat-code", DATA_TYPE_UINT8, 29988 ssc->ssc_uscsi_cmd->uscsi_status, 29989 "un-decode-info", DATA_TYPE_STRING, 29990 ssc->ssc_info, 29991 "un-decode-value", DATA_TYPE_UINT8_ARRAY, 29992 senlen, sensep, 29993 NULL); 29994 } else { 29995 /* 29996 * For other type of invalid data, the 29997 * un-decode-value field would be empty because the 29998 * un-decodable content could be seen from upper 29999 * level payload or inside un-decode-info. 30000 */ 30001 scsi_fm_ereport_post(un->un_sd, uscsi_path_instance, 30002 "cmd.disk.dev.uderr", uscsi_ena, devid, DDI_NOSLEEP, 30003 FM_VERSION, DATA_TYPE_UINT8, FM_EREPORT_VERS0, 30004 "driver-assessment", DATA_TYPE_STRING, 30005 drv_assess == SD_FM_DRV_FATAL ? 
30006 "fail" : assessment, 30007 "op-code", DATA_TYPE_UINT8, op_code, 30008 "cdb", DATA_TYPE_UINT8_ARRAY, 30009 cdblen, ssc->ssc_uscsi_cmd->uscsi_cdb, 30010 "pkt-reason", DATA_TYPE_UINT8, uscsi_pkt_reason, 30011 "pkt-state", DATA_TYPE_UINT32, uscsi_pkt_state, 30012 "pkt-stats", DATA_TYPE_UINT32, 30013 uscsi_pkt_statistics, 30014 "stat-code", DATA_TYPE_UINT8, 30015 ssc->ssc_uscsi_cmd->uscsi_status, 30016 "un-decode-info", DATA_TYPE_STRING, 30017 ssc->ssc_info, 30018 "un-decode-value", DATA_TYPE_UINT8_ARRAY, 30019 0, NULL, 30020 NULL); 30021 } 30022 ssc->ssc_flags &= ~ssc_invalid_flags; 30023 return; 30024 } 30025 30026 if (uscsi_pkt_reason != CMD_CMPLT || 30027 (ssc->ssc_flags & SSC_FLAGS_TRAN_ABORT)) { 30028 /* 30029 * pkt-reason != CMD_CMPLT or SSC_FLAGS_TRAN_ABORT was 30030 * set inside sd_start_cmds due to errors(bad packet or 30031 * fatal transport error), we should take it as a 30032 * transport error, so we post ereport.io.scsi.cmd.disk.tran. 30033 * driver-assessment will be set based on drv_assess. 30034 * We will set devid to NULL because it is a transport 30035 * error. 30036 */ 30037 if (ssc->ssc_flags & SSC_FLAGS_TRAN_ABORT) 30038 ssc->ssc_flags &= ~SSC_FLAGS_TRAN_ABORT; 30039 30040 scsi_fm_ereport_post(un->un_sd, uscsi_path_instance, 30041 "cmd.disk.tran", uscsi_ena, NULL, DDI_NOSLEEP, FM_VERSION, 30042 DATA_TYPE_UINT8, FM_EREPORT_VERS0, 30043 "driver-assessment", DATA_TYPE_STRING, 30044 drv_assess == SD_FM_DRV_FATAL ? "fail" : assessment, 30045 "op-code", DATA_TYPE_UINT8, op_code, 30046 "cdb", DATA_TYPE_UINT8_ARRAY, 30047 cdblen, ssc->ssc_uscsi_cmd->uscsi_cdb, 30048 "pkt-reason", DATA_TYPE_UINT8, uscsi_pkt_reason, 30049 "pkt-state", DATA_TYPE_UINT8, uscsi_pkt_state, 30050 "pkt-stats", DATA_TYPE_UINT32, uscsi_pkt_statistics, 30051 NULL); 30052 } else { 30053 /* 30054 * If we got here, we have a completed command, and we need 30055 * to further investigate the sense data to see what kind 30056 * of ereport we should post. 
30057 * Post ereport.io.scsi.cmd.disk.dev.rqs.merr 30058 * if sense-key == 0x3. 30059 * Post ereport.io.scsi.cmd.disk.dev.rqs.derr otherwise. 30060 * driver-assessment will be set based on the parameter 30061 * drv_assess. 30062 */ 30063 if (senlen > 0) { 30064 /* 30065 * Here we have sense data available. 30066 */ 30067 uint8_t sense_key; 30068 sense_key = scsi_sense_key(sensep); 30069 if (sense_key == 0x3) { 30070 /* 30071 * sense-key == 0x3(medium error), 30072 * driver-assessment should be "fatal" if 30073 * drv_assess is SD_FM_DRV_FATAL. 30074 */ 30075 scsi_fm_ereport_post(un->un_sd, 30076 uscsi_path_instance, 30077 "cmd.disk.dev.rqs.merr", 30078 uscsi_ena, devid, DDI_NOSLEEP, FM_VERSION, 30079 DATA_TYPE_UINT8, FM_EREPORT_VERS0, 30080 "driver-assessment", 30081 DATA_TYPE_STRING, 30082 drv_assess == SD_FM_DRV_FATAL ? 30083 "fatal" : assessment, 30084 "op-code", 30085 DATA_TYPE_UINT8, op_code, 30086 "cdb", 30087 DATA_TYPE_UINT8_ARRAY, cdblen, 30088 ssc->ssc_uscsi_cmd->uscsi_cdb, 30089 "pkt-reason", 30090 DATA_TYPE_UINT8, uscsi_pkt_reason, 30091 "pkt-state", 30092 DATA_TYPE_UINT8, uscsi_pkt_state, 30093 "pkt-stats", 30094 DATA_TYPE_UINT32, 30095 uscsi_pkt_statistics, 30096 "stat-code", 30097 DATA_TYPE_UINT8, 30098 ssc->ssc_uscsi_cmd->uscsi_status, 30099 "key", 30100 DATA_TYPE_UINT8, 30101 scsi_sense_key(sensep), 30102 "asc", 30103 DATA_TYPE_UINT8, 30104 scsi_sense_asc(sensep), 30105 "ascq", 30106 DATA_TYPE_UINT8, 30107 scsi_sense_ascq(sensep), 30108 "sense-data", 30109 DATA_TYPE_UINT8_ARRAY, 30110 senlen, sensep, 30111 "lba", 30112 DATA_TYPE_UINT64, 30113 ssc->ssc_uscsi_info->ui_lba, 30114 NULL); 30115 } else { 30116 /* 30117 * if sense-key == 0x4(hardware 30118 * error), driver-assessment should 30119 * be "fatal" if drv_assess is 30120 * SD_FM_DRV_FATAL. 
30121 */ 30122 scsi_fm_ereport_post(un->un_sd, 30123 uscsi_path_instance, 30124 "cmd.disk.dev.rqs.derr", 30125 uscsi_ena, devid, DDI_NOSLEEP, 30126 FM_VERSION, 30127 DATA_TYPE_UINT8, FM_EREPORT_VERS0, 30128 "driver-assessment", 30129 DATA_TYPE_STRING, 30130 drv_assess == SD_FM_DRV_FATAL ? 30131 (sense_key == 0x4 ? 30132 "fatal" : "fail") : assessment, 30133 "op-code", 30134 DATA_TYPE_UINT8, op_code, 30135 "cdb", 30136 DATA_TYPE_UINT8_ARRAY, cdblen, 30137 ssc->ssc_uscsi_cmd->uscsi_cdb, 30138 "pkt-reason", 30139 DATA_TYPE_UINT8, uscsi_pkt_reason, 30140 "pkt-state", 30141 DATA_TYPE_UINT8, uscsi_pkt_state, 30142 "pkt-stats", 30143 DATA_TYPE_UINT32, 30144 uscsi_pkt_statistics, 30145 "stat-code", 30146 DATA_TYPE_UINT8, 30147 ssc->ssc_uscsi_cmd->uscsi_status, 30148 "key", 30149 DATA_TYPE_UINT8, 30150 scsi_sense_key(sensep), 30151 "asc", 30152 DATA_TYPE_UINT8, 30153 scsi_sense_asc(sensep), 30154 "ascq", 30155 DATA_TYPE_UINT8, 30156 scsi_sense_ascq(sensep), 30157 "sense-data", 30158 DATA_TYPE_UINT8_ARRAY, 30159 senlen, sensep, 30160 NULL); 30161 } 30162 } else { 30163 /* 30164 * For stat_code == STATUS_GOOD, this is not a 30165 * hardware error. 30166 */ 30167 if (ssc->ssc_uscsi_cmd->uscsi_status == STATUS_GOOD) 30168 return; 30169 30170 /* 30171 * Post ereport.io.scsi.cmd.disk.dev.serr if we got the 30172 * stat-code but with sense data unavailable. 30173 * driver-assessment will be set based on parameter 30174 * drv_assess. 30175 */ 30176 scsi_fm_ereport_post(un->un_sd, 30177 uscsi_path_instance, "cmd.disk.dev.serr", uscsi_ena, 30178 devid, DDI_NOSLEEP, FM_VERSION, DATA_TYPE_UINT8, 30179 FM_EREPORT_VERS0, 30180 "driver-assessment", DATA_TYPE_STRING, 30181 drv_assess == SD_FM_DRV_FATAL ? 
"fail" : assessment, 30182 "op-code", DATA_TYPE_UINT8, op_code, 30183 "cdb", 30184 DATA_TYPE_UINT8_ARRAY, 30185 cdblen, ssc->ssc_uscsi_cmd->uscsi_cdb, 30186 "pkt-reason", 30187 DATA_TYPE_UINT8, uscsi_pkt_reason, 30188 "pkt-state", 30189 DATA_TYPE_UINT8, uscsi_pkt_state, 30190 "pkt-stats", 30191 DATA_TYPE_UINT32, uscsi_pkt_statistics, 30192 "stat-code", 30193 DATA_TYPE_UINT8, 30194 ssc->ssc_uscsi_cmd->uscsi_status, 30195 NULL); 30196 } 30197 } 30198 } 30199 30200 /* 30201 * Function: sd_ssc_extract_info 30202 * 30203 * Description: Extract information available to help generate ereport. 30204 * 30205 * Context: Kernel thread or interrupt context. 30206 */ 30207 static void 30208 sd_ssc_extract_info(sd_ssc_t *ssc, struct sd_lun *un, struct scsi_pkt *pktp, 30209 struct buf *bp, struct sd_xbuf *xp) 30210 { 30211 size_t senlen = 0; 30212 union scsi_cdb *cdbp; 30213 int path_instance; 30214 /* 30215 * Need scsi_cdb_size array to determine the cdb length. 30216 */ 30217 extern uchar_t scsi_cdb_size[]; 30218 30219 ASSERT(un != NULL); 30220 ASSERT(pktp != NULL); 30221 ASSERT(bp != NULL); 30222 ASSERT(xp != NULL); 30223 ASSERT(ssc != NULL); 30224 ASSERT(mutex_owned(SD_MUTEX(un))); 30225 30226 /* 30227 * Transfer the cdb buffer pointer here. 30228 */ 30229 cdbp = (union scsi_cdb *)pktp->pkt_cdbp; 30230 30231 ssc->ssc_uscsi_cmd->uscsi_cdblen = scsi_cdb_size[GETGROUP(cdbp)]; 30232 ssc->ssc_uscsi_cmd->uscsi_cdb = (caddr_t)cdbp; 30233 30234 /* 30235 * Transfer the sense data buffer pointer if sense data is available, 30236 * calculate the sense data length first. 30237 */ 30238 if ((xp->xb_sense_state & STATE_XARQ_DONE) || 30239 (xp->xb_sense_state & STATE_ARQ_DONE)) { 30240 /* 30241 * For arq case, we will enter here. 30242 */ 30243 if (xp->xb_sense_state & STATE_XARQ_DONE) { 30244 senlen = MAX_SENSE_LENGTH - xp->xb_sense_resid; 30245 } else { 30246 senlen = SENSE_LENGTH; 30247 } 30248 } else { 30249 /* 30250 * For non-arq case, we will enter this branch. 
30251 */ 30252 if (SD_GET_PKT_STATUS(pktp) == STATUS_CHECK && 30253 (xp->xb_sense_state & STATE_XFERRED_DATA)) { 30254 senlen = SENSE_LENGTH - xp->xb_sense_resid; 30255 } 30256 30257 } 30258 30259 ssc->ssc_uscsi_cmd->uscsi_rqlen = (senlen & 0xff); 30260 ssc->ssc_uscsi_cmd->uscsi_rqresid = 0; 30261 ssc->ssc_uscsi_cmd->uscsi_rqbuf = (caddr_t)xp->xb_sense_data; 30262 30263 ssc->ssc_uscsi_cmd->uscsi_status = ((*(pktp)->pkt_scbp) & STATUS_MASK); 30264 30265 /* 30266 * Only transfer path_instance when scsi_pkt was properly allocated. 30267 */ 30268 path_instance = pktp->pkt_path_instance; 30269 if (scsi_pkt_allocated_correctly(pktp) && path_instance) 30270 ssc->ssc_uscsi_cmd->uscsi_path_instance = path_instance; 30271 else 30272 ssc->ssc_uscsi_cmd->uscsi_path_instance = 0; 30273 30274 /* 30275 * Copy in the other fields we may need when posting ereport. 30276 */ 30277 ssc->ssc_uscsi_info->ui_pkt_reason = pktp->pkt_reason; 30278 ssc->ssc_uscsi_info->ui_pkt_state = pktp->pkt_state; 30279 ssc->ssc_uscsi_info->ui_pkt_statistics = pktp->pkt_statistics; 30280 ssc->ssc_uscsi_info->ui_lba = (uint64_t)SD_GET_BLKNO(bp); 30281 30282 /* 30283 * For partially read/write command, we will not create ena 30284 * in case of a successful command be reconized as recovered. 30285 */ 30286 if ((pktp->pkt_reason == CMD_CMPLT) && 30287 (ssc->ssc_uscsi_cmd->uscsi_status == STATUS_GOOD) && 30288 (senlen == 0)) { 30289 return; 30290 } 30291 30292 /* 30293 * To associate ereports of a single command execution flow, we 30294 * need a shared ena for a specific command. 30295 */ 30296 if (xp->xb_ena == 0) 30297 xp->xb_ena = fm_ena_generate(0, FM_ENA_FMT1); 30298 ssc->ssc_uscsi_info->ui_ena = xp->xb_ena; 30299 } 30300