/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

/*
 * SCSI disk target driver.
 */
#include <sys/scsi/scsi.h>
#include <sys/dkbad.h>
#include <sys/dklabel.h>
#include <sys/dkio.h>
#include <sys/fdio.h>
#include <sys/cdio.h>
#include <sys/mhd.h>
#include <sys/vtoc.h>
#include <sys/dktp/fdisk.h>
#include <sys/kstat.h>
#include <sys/vtrace.h>
#include <sys/note.h>
#include <sys/thread.h>
#include <sys/proc.h>
#include <sys/efi_partition.h>
#include <sys/var.h>
#include <sys/aio_req.h>

#ifdef __lock_lint
#define	_LP64
#define	__amd64
#endif

#if (defined(__fibre))
/* Note: is there a leadville version of the following? */
#include <sys/fc4/fcal_linkapp.h>
#endif
#include <sys/taskq.h>
#include <sys/uuid.h>
#include <sys/byteorder.h>
#include <sys/sdt.h>

#include "sd_xbuf.h"

#include <sys/scsi/targets/sddef.h>
#include <sys/cmlb.h>
#include <sys/sysevent/eventdefs.h>
#include <sys/sysevent/dev.h>

#include <sys/fm/protocol.h>

/*
 * Loadable module info.
 */
#if (defined(__fibre))
#define	SD_MODULE_NAME	"SCSI SSA/FCAL Disk Driver"
char _depends_on[]	= "misc/scsi misc/cmlb drv/fcp";
#else
#define	SD_MODULE_NAME	"SCSI Disk Driver"
char _depends_on[]	= "misc/scsi misc/cmlb";
#endif

/*
 * Define the interconnect type, to allow the driver to distinguish
 * between parallel SCSI (sd) and fibre channel (ssd) behaviors.
 *
 * This is really for backward compatibility. In the future, the driver
 * should actually check the "interconnect-type" property as reported by
 * the HBA; however at present this property is not defined by all HBAs,
 * so we will use this #define (1) to permit the driver to run in
 * backward-compatibility mode; and (2) to print a notification message
 * if an FC HBA does not support the "interconnect-type" property. The
 * behavior of the driver will be to assume parallel SCSI behaviors unless
 * the "interconnect-type" property is defined by the HBA **AND** has a
 * value of either INTERCONNECT_FIBRE, INTERCONNECT_SSA, or
 * INTERCONNECT_FABRIC, in which case the driver will assume Fibre
 * Channel behaviors (as per the old ssd). (Note that the
 * INTERCONNECT_1394 and INTERCONNECT_USB types are not supported and
 * will result in the driver assuming parallel SCSI behaviors.)
 *
 * (see common/sys/scsi/impl/services.h)
 *
 * Note: For ssd semantics, don't use INTERCONNECT_FABRIC as the default
 * since some FC HBAs may already support that, and there is some code in
 * the driver that already looks for it. Using INTERCONNECT_FABRIC as the
 * default would confuse that code, and besides things should work fine
 * anyways if the FC HBA already reports INTERCONNECT_FABRIC for the
 * "interconnect-type" property.
 */
#if (defined(__fibre))
#define	SD_DEFAULT_INTERCONNECT_TYPE	SD_INTERCONNECT_FIBRE
#else
#define	SD_DEFAULT_INTERCONNECT_TYPE	SD_INTERCONNECT_PARALLEL
#endif
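
/*
 * Illustrative sketch only (not part of the driver): one way the
 * "interconnect-type" check described above could be phrased.  The
 * hypothetical helper below maps the HBA-reported property onto the
 * fibre/parallel distinction; the driver's real decision logic lives in
 * sd_unit_attach(), and the INTERCONNECT_* values come from
 * common/sys/scsi/impl/services.h.
 */
static int
sd_is_fibre_interconnect(dev_info_t *devi)
{
	/* -1 is returned when the HBA does not define the property */
	int itype = ddi_prop_get_int(DDI_DEV_T_ANY, devi, 0,
	    "interconnect-type", -1);

	return (itype == INTERCONNECT_FIBRE ||
	    itype == INTERCONNECT_SSA ||
	    itype == INTERCONNECT_FABRIC);
}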

/*
 * The name of the driver, established from the module name in _init.
 */
static	char *sd_label			= NULL;

/*
 * Driver name is unfortunately prefixed on some driver.conf properties.
 */
#if (defined(__fibre))
#define	sd_max_xfer_size		ssd_max_xfer_size
#define	sd_config_list			ssd_config_list
static	char *sd_max_xfer_size		= "ssd_max_xfer_size";
static	char *sd_config_list		= "ssd-config-list";
#else
static	char *sd_max_xfer_size		= "sd_max_xfer_size";
static	char *sd_config_list		= "sd-config-list";
#endif

/*
 * Driver global variables
 */

#if (defined(__fibre))
/*
 * These #defines are to avoid namespace collisions that occur because this
 * code is currently used to compile two separate driver modules: sd and ssd.
 * All global variables need to be treated this way (even if declared static)
 * in order to allow the debugger to resolve the names properly.
 * It is anticipated that in the near future the ssd module will be obsoleted,
 * at which time this namespace issue should go away.
 */
#define	sd_state			ssd_state
#define	sd_io_time			ssd_io_time
#define	sd_failfast_enable		ssd_failfast_enable
#define	sd_ua_retry_count		ssd_ua_retry_count
#define	sd_report_pfa			ssd_report_pfa
#define	sd_max_throttle			ssd_max_throttle
#define	sd_min_throttle			ssd_min_throttle
#define	sd_rot_delay			ssd_rot_delay

#define	sd_retry_on_reservation_conflict	\
					ssd_retry_on_reservation_conflict
#define	sd_reinstate_resv_delay		ssd_reinstate_resv_delay
#define	sd_resv_conflict_name		ssd_resv_conflict_name

#define	sd_component_mask		ssd_component_mask
#define	sd_level_mask			ssd_level_mask
#define	sd_debug_un			ssd_debug_un
#define	sd_error_level			ssd_error_level

#define	sd_xbuf_active_limit		ssd_xbuf_active_limit
#define	sd_xbuf_reserve_limit		ssd_xbuf_reserve_limit

#define	sd_tr				ssd_tr
#define	sd_reset_throttle_timeout	ssd_reset_throttle_timeout
#define	sd_qfull_throttle_timeout	ssd_qfull_throttle_timeout
#define	sd_qfull_throttle_enable	ssd_qfull_throttle_enable
#define	sd_check_media_time		ssd_check_media_time
#define	sd_wait_cmds_complete		ssd_wait_cmds_complete
#define	sd_label_mutex			ssd_label_mutex
#define	sd_detach_mutex			ssd_detach_mutex
#define	sd_log_buf			ssd_log_buf
#define	sd_log_mutex			ssd_log_mutex

#define	sd_disk_table			ssd_disk_table
#define	sd_disk_table_size		ssd_disk_table_size
#define	sd_sense_mutex			ssd_sense_mutex
#define	sd_cdbtab			ssd_cdbtab

#define	sd_cb_ops			ssd_cb_ops
#define	sd_ops				ssd_ops
#define	sd_additional_codes		ssd_additional_codes
#define	sd_tgops			ssd_tgops

#define	sd_minor_data			ssd_minor_data
#define	sd_minor_data_efi		ssd_minor_data_efi

#define	sd_tq				ssd_tq
#define	sd_wmr_tq			ssd_wmr_tq
#define	sd_taskq_name			ssd_taskq_name
#define	sd_wmr_taskq_name		ssd_wmr_taskq_name
#define	sd_taskq_minalloc		ssd_taskq_minalloc
#define	sd_taskq_maxalloc		ssd_taskq_maxalloc

#define	sd_dump_format_string		ssd_dump_format_string

#define	sd_iostart_chain		ssd_iostart_chain
#define	sd_iodone_chain			ssd_iodone_chain

#define	sd_pm_idletime			ssd_pm_idletime

#define	sd_force_pm_supported		ssd_force_pm_supported

#define	sd_dtype_optical_bind		ssd_dtype_optical_bind

#define	sd_ssc_init			ssd_ssc_init
#define	sd_ssc_send			ssd_ssc_send
#define	sd_ssc_fini			ssd_ssc_fini
#define	sd_ssc_assessment		ssd_ssc_assessment
#define	sd_ssc_post			ssd_ssc_post
#define	sd_ssc_print			ssd_ssc_print
#define	sd_ssc_ereport_post		ssd_ssc_ereport_post
#define	sd_ssc_set_info			ssd_ssc_set_info
#define	sd_ssc_extract_info		ssd_ssc_extract_info

#endif

#ifdef	SDDEBUG
int	sd_force_pm_supported		= 0;
#endif	/* SDDEBUG */

void	*sd_state			= NULL;
int	sd_io_time			= SD_IO_TIME;
int	sd_failfast_enable		= 1;
int	sd_ua_retry_count		= SD_UA_RETRY_COUNT;
int	sd_report_pfa			= 1;
int	sd_max_throttle			= SD_MAX_THROTTLE;
int	sd_min_throttle			= SD_MIN_THROTTLE;
int	sd_rot_delay			= 4; /* Default 4ms Rotation delay */
int	sd_qfull_throttle_enable	= TRUE;

int	sd_retry_on_reservation_conflict	= 1;
int	sd_reinstate_resv_delay		= SD_REINSTATE_RESV_DELAY;
_NOTE(SCHEME_PROTECTS_DATA("safe sharing", sd_reinstate_resv_delay))

static int	sd_dtype_optical_bind	= -1;

/* Note: the following is not a bug, it really is "sd_" and not "ssd_" */
static	char *sd_resv_conflict_name	= "sd_retry_on_reservation_conflict";

/*
 * Global data for debug logging. To enable debug printing, sd_component_mask
 * and sd_level_mask should be set to the desired bit patterns as outlined in
 * sddef.h.
 */
uint_t	sd_component_mask		= 0x0;
uint_t	sd_level_mask			= 0x0;
struct	sd_lun *sd_debug_un		= NULL;
uint_t	sd_error_level			= SCSI_ERR_RETRYABLE;

/* Note: these may go away in the future... */
static uint32_t	sd_xbuf_active_limit	= 512;
static uint32_t sd_xbuf_reserve_limit	= 16;

static struct sd_resv_reclaim_request	sd_tr = { NULL, NULL, NULL, 0, 0, 0 };

/*
 * Timer value used to reset the throttle after it has been reduced
 * (typically in response to TRAN_BUSY or STATUS_QFULL)
 */
static int	sd_reset_throttle_timeout	= SD_RESET_THROTTLE_TIMEOUT;
static int	sd_qfull_throttle_timeout	= SD_QFULL_THROTTLE_TIMEOUT;

/*
 * Interval value associated with the media change scsi watch.
 */
static int	sd_check_media_time		= 3000000;

/*
 * Wait value used for in progress operations during a DDI_SUSPEND
 */
static int	sd_wait_cmds_complete		= SD_WAIT_CMDS_COMPLETE;

/*
 * sd_label_mutex protects a static buffer used in the disk label
 * component of the driver
 */
static kmutex_t	sd_label_mutex;

/*
 * sd_detach_mutex protects un_layer_count, un_detach_count, and
 * un_opens_in_progress in the sd_lun structure.
 */
static kmutex_t	sd_detach_mutex;

_NOTE(MUTEX_PROTECTS_DATA(sd_detach_mutex,
	sd_lun::{un_layer_count un_detach_count un_opens_in_progress}))

/*
 * Global buffer and mutex for debug logging
 */
static char	sd_log_buf[1024];
static kmutex_t	sd_log_mutex;

/*
 * Structs and globals for recording attached lun information.
 * This maintains a chain. Each node in the chain represents a SCSI controller.
 * The structure records the number of luns attached to each target connected
 * with the controller.
 * For parallel scsi device only.
 */
struct sd_scsi_hba_tgt_lun {
	struct sd_scsi_hba_tgt_lun	*next;
	dev_info_t			*pdip;
	int				nlun[NTARGETS_WIDE];
};

/*
 * Flag to indicate the lun is attached or detached
 */
#define	SD_SCSI_LUN_ATTACH	0
#define	SD_SCSI_LUN_DETACH	1

static kmutex_t	sd_scsi_target_lun_mutex;
static struct sd_scsi_hba_tgt_lun	*sd_scsi_target_lun_head = NULL;

_NOTE(MUTEX_PROTECTS_DATA(sd_scsi_target_lun_mutex,
    sd_scsi_hba_tgt_lun::next sd_scsi_hba_tgt_lun::pdip))

_NOTE(MUTEX_PROTECTS_DATA(sd_scsi_target_lun_mutex,
    sd_scsi_target_lun_head))

/*
 * "Smart" Probe Caching structs, globals, #defines, etc.
 * For parallel scsi and non-self-identify device only.
 */

/*
 * The following resources and routines are implemented to support
 * "smart" probing, which caches the scsi_probe() results in an array,
 * in order to help avoid long probe times.
 */
struct sd_scsi_probe_cache {
	struct	sd_scsi_probe_cache	*next;
	dev_info_t	*pdip;
	int		cache[NTARGETS_WIDE];
};

static kmutex_t	sd_scsi_probe_cache_mutex;
static struct	sd_scsi_probe_cache *sd_scsi_probe_cache_head = NULL;

/*
 * Really we only need protection on the head of the linked list, but
 * better safe than sorry.
 */
_NOTE(MUTEX_PROTECTS_DATA(sd_scsi_probe_cache_mutex,
    sd_scsi_probe_cache::next sd_scsi_probe_cache::pdip))

_NOTE(MUTEX_PROTECTS_DATA(sd_scsi_probe_cache_mutex,
    sd_scsi_probe_cache_head))
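
/*
 * Illustrative sketch only (not part of the driver): a minimal lookup of
 * a cached probe result for a given parent HBA and target, in the spirit
 * of the "smart" probe description above.  sd_scsi_probe_with_cache()
 * implements the real logic; the -1 "not cached" sentinel here is
 * hypothetical, and the caller is assumed to pass tgt < NTARGETS_WIDE.
 */
static int
sd_probe_cache_lookup(dev_info_t *pdip, int tgt)
{
	struct sd_scsi_probe_cache *cp;
	int rval = -1;		/* hypothetical "not cached" sentinel */

	mutex_enter(&sd_scsi_probe_cache_mutex);
	for (cp = sd_scsi_probe_cache_head; cp != NULL; cp = cp->next) {
		if (cp->pdip == pdip) {
			rval = cp->cache[tgt];
			break;
		}
	}
	mutex_exit(&sd_scsi_probe_cache_mutex);
	return (rval);
}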
350 */ 351 _NOTE(MUTEX_PROTECTS_DATA(sd_scsi_probe_cache_mutex, 352 sd_scsi_probe_cache::next sd_scsi_probe_cache::pdip)) 353 354 _NOTE(MUTEX_PROTECTS_DATA(sd_scsi_probe_cache_mutex, 355 sd_scsi_probe_cache_head)) 356 357 358 /* 359 * Vendor specific data name property declarations 360 */ 361 362 #if defined(__fibre) || defined(__i386) ||defined(__amd64) 363 364 static sd_tunables seagate_properties = { 365 SEAGATE_THROTTLE_VALUE, 366 0, 367 0, 368 0, 369 0, 370 0, 371 0, 372 0, 373 0 374 }; 375 376 377 static sd_tunables fujitsu_properties = { 378 FUJITSU_THROTTLE_VALUE, 379 0, 380 0, 381 0, 382 0, 383 0, 384 0, 385 0, 386 0 387 }; 388 389 static sd_tunables ibm_properties = { 390 IBM_THROTTLE_VALUE, 391 0, 392 0, 393 0, 394 0, 395 0, 396 0, 397 0, 398 0 399 }; 400 401 static sd_tunables purple_properties = { 402 PURPLE_THROTTLE_VALUE, 403 0, 404 0, 405 PURPLE_BUSY_RETRIES, 406 PURPLE_RESET_RETRY_COUNT, 407 PURPLE_RESERVE_RELEASE_TIME, 408 0, 409 0, 410 0 411 }; 412 413 static sd_tunables sve_properties = { 414 SVE_THROTTLE_VALUE, 415 0, 416 0, 417 SVE_BUSY_RETRIES, 418 SVE_RESET_RETRY_COUNT, 419 SVE_RESERVE_RELEASE_TIME, 420 SVE_MIN_THROTTLE_VALUE, 421 SVE_DISKSORT_DISABLED_FLAG, 422 0 423 }; 424 425 static sd_tunables maserati_properties = { 426 0, 427 0, 428 0, 429 0, 430 0, 431 0, 432 0, 433 MASERATI_DISKSORT_DISABLED_FLAG, 434 MASERATI_LUN_RESET_ENABLED_FLAG 435 }; 436 437 static sd_tunables pirus_properties = { 438 PIRUS_THROTTLE_VALUE, 439 0, 440 PIRUS_NRR_COUNT, 441 PIRUS_BUSY_RETRIES, 442 PIRUS_RESET_RETRY_COUNT, 443 0, 444 PIRUS_MIN_THROTTLE_VALUE, 445 PIRUS_DISKSORT_DISABLED_FLAG, 446 PIRUS_LUN_RESET_ENABLED_FLAG 447 }; 448 449 #endif 450 451 #if (defined(__sparc) && !defined(__fibre)) || \ 452 (defined(__i386) || defined(__amd64)) 453 454 455 static sd_tunables elite_properties = { 456 ELITE_THROTTLE_VALUE, 457 0, 458 0, 459 0, 460 0, 461 0, 462 0, 463 0, 464 0 465 }; 466 467 static sd_tunables st31200n_properties = { 468 ST31200N_THROTTLE_VALUE, 469 0, 470 0, 471 0, 472 0, 473 0, 474 0, 475 0, 476 0 477 }; 478 479 #endif /* Fibre or not */ 480 481 static sd_tunables lsi_properties_scsi = { 482 LSI_THROTTLE_VALUE, 483 0, 484 LSI_NOTREADY_RETRIES, 485 0, 486 0, 487 0, 488 0, 489 0, 490 0 491 }; 492 493 static sd_tunables symbios_properties = { 494 SYMBIOS_THROTTLE_VALUE, 495 0, 496 SYMBIOS_NOTREADY_RETRIES, 497 0, 498 0, 499 0, 500 0, 501 0, 502 0 503 }; 504 505 static sd_tunables lsi_properties = { 506 0, 507 0, 508 LSI_NOTREADY_RETRIES, 509 0, 510 0, 511 0, 512 0, 513 0, 514 0 515 }; 516 517 static sd_tunables lsi_oem_properties = { 518 0, 519 0, 520 LSI_OEM_NOTREADY_RETRIES, 521 0, 522 0, 523 0, 524 0, 525 0, 526 0, 527 1 528 }; 529 530 531 532 #if (defined(SD_PROP_TST)) 533 534 #define SD_TST_CTYPE_VAL CTYPE_CDROM 535 #define SD_TST_THROTTLE_VAL 16 536 #define SD_TST_NOTREADY_VAL 12 537 #define SD_TST_BUSY_VAL 60 538 #define SD_TST_RST_RETRY_VAL 36 539 #define SD_TST_RSV_REL_TIME 60 540 541 static sd_tunables tst_properties = { 542 SD_TST_THROTTLE_VAL, 543 SD_TST_CTYPE_VAL, 544 SD_TST_NOTREADY_VAL, 545 SD_TST_BUSY_VAL, 546 SD_TST_RST_RETRY_VAL, 547 SD_TST_RSV_REL_TIME, 548 0, 549 0, 550 0 551 }; 552 #endif 553 554 /* This is similar to the ANSI toupper implementation */ 555 #define SD_TOUPPER(C) (((C) >= 'a' && (C) <= 'z') ? (C) - 'a' + 'A' : (C)) 556 557 /* 558 * Static Driver Configuration Table 559 * 560 * This is the table of disks which need throttle adjustment (or, perhaps 561 * something else as defined by the flags at a future time.) 
 * is a string consisting of concatenated vid (vendor), pid (product/model)
 * and revision strings as defined in the scsi_inquiry structure. Offsets of
 * the parts of the string are as defined by the sizes in the scsi_inquiry
 * structure. Device type is searched as far as the device_id string is
 * defined. Flags defines which values are to be set in the driver from the
 * properties list.
 *
 * Entries below which begin and end with a "*" are a special case.
 * These do not have a specific vendor, and the string which follows
 * can appear anywhere in the 16 byte PID portion of the inquiry data.
 *
 * Entries below which begin and end with a " " (blank) are a special
 * case. The comparison function will treat multiple consecutive blanks
 * as equivalent to a single blank. For example, this causes a
 * sd_disk_table entry of " NEC CDROM " to match a device's id string
 * of  "NEC CDROM".
 *
 * Note: The MD21 controller type has been obsoleted.
 *	 ST318202F is a Legacy device
 *	 MAM3182FC, MAM3364FC, MAM3738FC do not appear to have ever been
 *	 made with an FC connection. The entries here are a legacy.
 */
static sd_disk_config_t sd_disk_table[] = {
#if defined(__fibre) || defined(__i386) || defined(__amd64)
	{ "SEAGATE ST34371FC", SD_CONF_BSET_THROTTLE, &seagate_properties },
	{ "SEAGATE ST19171FC", SD_CONF_BSET_THROTTLE, &seagate_properties },
	{ "SEAGATE ST39102FC", SD_CONF_BSET_THROTTLE, &seagate_properties },
	{ "SEAGATE ST39103FC", SD_CONF_BSET_THROTTLE, &seagate_properties },
	{ "SEAGATE ST118273F", SD_CONF_BSET_THROTTLE, &seagate_properties },
	{ "SEAGATE ST318202F", SD_CONF_BSET_THROTTLE, &seagate_properties },
	{ "SEAGATE ST318203F", SD_CONF_BSET_THROTTLE, &seagate_properties },
	{ "SEAGATE ST136403F", SD_CONF_BSET_THROTTLE, &seagate_properties },
	{ "SEAGATE ST318304F", SD_CONF_BSET_THROTTLE, &seagate_properties },
	{ "SEAGATE ST336704F", SD_CONF_BSET_THROTTLE, &seagate_properties },
	{ "SEAGATE ST373405F", SD_CONF_BSET_THROTTLE, &seagate_properties },
	{ "SEAGATE ST336605F", SD_CONF_BSET_THROTTLE, &seagate_properties },
	{ "SEAGATE ST336752F", SD_CONF_BSET_THROTTLE, &seagate_properties },
	{ "SEAGATE ST318452F", SD_CONF_BSET_THROTTLE, &seagate_properties },
	{ "FUJITSU MAG3091F",  SD_CONF_BSET_THROTTLE, &fujitsu_properties },
	{ "FUJITSU MAG3182F",  SD_CONF_BSET_THROTTLE, &fujitsu_properties },
	{ "FUJITSU MAA3182F",  SD_CONF_BSET_THROTTLE, &fujitsu_properties },
	{ "FUJITSU MAF3364F",  SD_CONF_BSET_THROTTLE, &fujitsu_properties },
	{ "FUJITSU MAL3364F",  SD_CONF_BSET_THROTTLE, &fujitsu_properties },
	{ "FUJITSU MAL3738F",  SD_CONF_BSET_THROTTLE, &fujitsu_properties },
	{ "FUJITSU MAM3182FC", SD_CONF_BSET_THROTTLE, &fujitsu_properties },
	{ "FUJITSU MAM3364FC", SD_CONF_BSET_THROTTLE, &fujitsu_properties },
	{ "FUJITSU MAM3738FC", SD_CONF_BSET_THROTTLE, &fujitsu_properties },
	{ "IBM     DDYFT1835",  SD_CONF_BSET_THROTTLE, &ibm_properties },
	{ "IBM     DDYFT3695",  SD_CONF_BSET_THROTTLE, &ibm_properties },
	{ "IBM     IC35LF2D2",  SD_CONF_BSET_THROTTLE, &ibm_properties },
	{ "IBM     IC35LF2PR",  SD_CONF_BSET_THROTTLE, &ibm_properties },
	{ "IBM     1724-100",   SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "IBM     1726-2xx",   SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "IBM     1726-22x",   SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "IBM     1726-4xx",   SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "IBM     1726-42x",   SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "IBM     1726-3xx",   SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "IBM     3526",       SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "IBM     3542",       SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "IBM     3552",       SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "IBM     1722",       SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "IBM     1742",       SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "IBM     1815",       SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "IBM     FAStT",      SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "IBM     1814",       SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "IBM     1814-200",   SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "IBM     1818",       SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "DELL    MD3000",     SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "DELL    MD3000i",    SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "LSI     INF",        SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "ENGENIO INF",        SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "SGI     TP",         SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "SGI     IS",         SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "*CSM100_*",          SD_CONF_BSET_NRR_COUNT |
			SD_CONF_BSET_CACHE_IS_NV, &lsi_oem_properties },
	{ "*CSM200_*",          SD_CONF_BSET_NRR_COUNT |
			SD_CONF_BSET_CACHE_IS_NV, &lsi_oem_properties },
	{ "Fujitsu SX300",      SD_CONF_BSET_THROTTLE,  &lsi_oem_properties },
	{ "LSI",                SD_CONF_BSET_NRR_COUNT, &lsi_properties },
	{ "SUN     T3", SD_CONF_BSET_THROTTLE |
			SD_CONF_BSET_BSY_RETRY_COUNT|
			SD_CONF_BSET_RST_RETRIES|
			SD_CONF_BSET_RSV_REL_TIME,
		&purple_properties },
	{ "SUN     SESS01", SD_CONF_BSET_THROTTLE |
		SD_CONF_BSET_BSY_RETRY_COUNT|
		SD_CONF_BSET_RST_RETRIES|
		SD_CONF_BSET_RSV_REL_TIME|
		SD_CONF_BSET_MIN_THROTTLE|
		SD_CONF_BSET_DISKSORT_DISABLED,
		&sve_properties },
	{ "SUN     T4", SD_CONF_BSET_THROTTLE |
			SD_CONF_BSET_BSY_RETRY_COUNT|
			SD_CONF_BSET_RST_RETRIES|
			SD_CONF_BSET_RSV_REL_TIME,
		&purple_properties },
	{ "SUN     SVE01", SD_CONF_BSET_DISKSORT_DISABLED |
		SD_CONF_BSET_LUN_RESET_ENABLED,
		&maserati_properties },
	{ "SUN     SE6920", SD_CONF_BSET_THROTTLE |
		SD_CONF_BSET_NRR_COUNT|
		SD_CONF_BSET_BSY_RETRY_COUNT|
		SD_CONF_BSET_RST_RETRIES|
		SD_CONF_BSET_MIN_THROTTLE|
		SD_CONF_BSET_DISKSORT_DISABLED|
		SD_CONF_BSET_LUN_RESET_ENABLED,
		&pirus_properties },
	{ "SUN     SE6940", SD_CONF_BSET_THROTTLE |
		SD_CONF_BSET_NRR_COUNT|
		SD_CONF_BSET_BSY_RETRY_COUNT|
		SD_CONF_BSET_RST_RETRIES|
		SD_CONF_BSET_MIN_THROTTLE|
		SD_CONF_BSET_DISKSORT_DISABLED|
		SD_CONF_BSET_LUN_RESET_ENABLED,
		&pirus_properties },
	{ "SUN     StorageTek 6920", SD_CONF_BSET_THROTTLE |
		SD_CONF_BSET_NRR_COUNT|
		SD_CONF_BSET_BSY_RETRY_COUNT|
		SD_CONF_BSET_RST_RETRIES|
		SD_CONF_BSET_MIN_THROTTLE|
		SD_CONF_BSET_DISKSORT_DISABLED|
		SD_CONF_BSET_LUN_RESET_ENABLED,
		&pirus_properties },
	{ "SUN     StorageTek 6940", SD_CONF_BSET_THROTTLE |
		SD_CONF_BSET_NRR_COUNT|
		SD_CONF_BSET_BSY_RETRY_COUNT|
		SD_CONF_BSET_RST_RETRIES|
		SD_CONF_BSET_MIN_THROTTLE|
		SD_CONF_BSET_DISKSORT_DISABLED|
		SD_CONF_BSET_LUN_RESET_ENABLED,
		&pirus_properties },
	{ "SUN     PSX1000", SD_CONF_BSET_THROTTLE |
		SD_CONF_BSET_NRR_COUNT|
		SD_CONF_BSET_BSY_RETRY_COUNT|
		SD_CONF_BSET_RST_RETRIES|
		SD_CONF_BSET_MIN_THROTTLE|
		SD_CONF_BSET_DISKSORT_DISABLED|
		SD_CONF_BSET_LUN_RESET_ENABLED,
		&pirus_properties },
	{ "SUN     SE6330", SD_CONF_BSET_THROTTLE |
		SD_CONF_BSET_NRR_COUNT|
		SD_CONF_BSET_BSY_RETRY_COUNT|
		SD_CONF_BSET_RST_RETRIES|
		SD_CONF_BSET_MIN_THROTTLE|
		SD_CONF_BSET_DISKSORT_DISABLED|
		SD_CONF_BSET_LUN_RESET_ENABLED,
		&pirus_properties },
	{ "SUN     STK6580_6780", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "STK     OPENstorage", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "STK     OpenStorage", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "STK     BladeCtlr",  SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "STK     FLEXLINE",   SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "SYMBIOS", SD_CONF_BSET_NRR_COUNT, &symbios_properties },
#endif /* fibre or NON-sparc platforms */
#if ((defined(__sparc) && !defined(__fibre)) ||\
	(defined(__i386) || defined(__amd64)))
	{ "SEAGATE ST42400N", SD_CONF_BSET_THROTTLE, &elite_properties },
	{ "SEAGATE ST31200N", SD_CONF_BSET_THROTTLE, &st31200n_properties },
	{ "SEAGATE ST41600N", SD_CONF_BSET_TUR_CHECK, NULL },
	{ "CONNER  CP30540",  SD_CONF_BSET_NOCACHE,  NULL },
	{ "*SUN0104*", SD_CONF_BSET_FAB_DEVID, NULL },
	{ "*SUN0207*", SD_CONF_BSET_FAB_DEVID, NULL },
	{ "*SUN0327*", SD_CONF_BSET_FAB_DEVID, NULL },
	{ "*SUN0340*", SD_CONF_BSET_FAB_DEVID, NULL },
	{ "*SUN0424*", SD_CONF_BSET_FAB_DEVID, NULL },
	{ "*SUN0669*", SD_CONF_BSET_FAB_DEVID, NULL },
	{ "*SUN1.0G*", SD_CONF_BSET_FAB_DEVID, NULL },
	{ "SYMBIOS INF-01-00 ", SD_CONF_BSET_FAB_DEVID, NULL },
	{ "SYMBIOS", SD_CONF_BSET_THROTTLE|SD_CONF_BSET_NRR_COUNT,
	    &symbios_properties },
	{ "LSI", SD_CONF_BSET_THROTTLE | SD_CONF_BSET_NRR_COUNT,
	    &lsi_properties_scsi },
#if defined(__i386) || defined(__amd64)
	{ " NEC CD-ROM DRIVE:260 ", (SD_CONF_BSET_PLAYMSF_BCD
				    | SD_CONF_BSET_READSUB_BCD
				    | SD_CONF_BSET_READ_TOC_ADDR_BCD
				    | SD_CONF_BSET_NO_READ_HEADER
				    | SD_CONF_BSET_READ_CD_XD4), NULL },

	{ " NEC CD-ROM DRIVE:270 ", (SD_CONF_BSET_PLAYMSF_BCD
				    | SD_CONF_BSET_READSUB_BCD
				    | SD_CONF_BSET_READ_TOC_ADDR_BCD
				    | SD_CONF_BSET_NO_READ_HEADER
				    | SD_CONF_BSET_READ_CD_XD4), NULL },
#endif /* __i386 || __amd64 */
#endif /* sparc NON-fibre or NON-sparc platforms */

#if (defined(SD_PROP_TST))
	{ "VENDOR  PRODUCT ", (SD_CONF_BSET_THROTTLE
				| SD_CONF_BSET_CTYPE
				| SD_CONF_BSET_NRR_COUNT
				| SD_CONF_BSET_FAB_DEVID
				| SD_CONF_BSET_NOCACHE
				| SD_CONF_BSET_BSY_RETRY_COUNT
				| SD_CONF_BSET_PLAYMSF_BCD
				| SD_CONF_BSET_READSUB_BCD
				| SD_CONF_BSET_READ_TOC_TRK_BCD
				| SD_CONF_BSET_READ_TOC_ADDR_BCD
				| SD_CONF_BSET_NO_READ_HEADER
				| SD_CONF_BSET_READ_CD_XD4
				| SD_CONF_BSET_RST_RETRIES
				| SD_CONF_BSET_RSV_REL_TIME
				| SD_CONF_BSET_TUR_CHECK), &tst_properties},
#endif
};

static const int sd_disk_table_size =
	sizeof (sd_disk_table)/ sizeof (sd_disk_config_t);
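
/*
 * Illustrative sketch only (not part of the driver): a simplified
 * version of the blank-collapsing comparison described in the table
 * comment above, where a run of blanks in the table entry matches a
 * single blank (or no blank at the string ends) in the device's id
 * string.  The real comparison, sd_blank_cmp(), also honors the
 * scsi_inquiry field boundaries.
 */
static int
sd_blank_collapse_match(const char *table_id, const char *inq_id)
{
	while (*table_id != '\0' && *inq_id != '\0') {
		if (*table_id == ' ') {
			while (*table_id == ' ')	/* collapse the run */
				table_id++;
			if (*inq_id == ' ')
				inq_id++;
			continue;
		}
		if (SD_TOUPPER(*table_id) != SD_TOUPPER(*inq_id))
			return (0);
		table_id++;
		inq_id++;
	}
	while (*table_id == ' ')	/* trailing blanks in the entry */
		table_id++;
	return (*table_id == '\0' && *inq_id == '\0');
}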


#define	SD_INTERCONNECT_PARALLEL	0
#define	SD_INTERCONNECT_FABRIC		1
#define	SD_INTERCONNECT_FIBRE		2
#define	SD_INTERCONNECT_SSA		3
#define	SD_INTERCONNECT_SATA		4
#define	SD_IS_PARALLEL_SCSI(un)		\
	((un)->un_interconnect_type == SD_INTERCONNECT_PARALLEL)
#define	SD_IS_SERIAL(un)		\
	((un)->un_interconnect_type == SD_INTERCONNECT_SATA)

/*
 * Definitions used by device id registration routines
 */
#define	VPD_HEAD_OFFSET		3	/* size of head for vpd page */
#define	VPD_PAGE_LENGTH		3	/* offset for page length data */
#define	VPD_MODE_PAGE		1	/* offset into vpd pg for "page code" */

static kmutex_t	sd_sense_mutex = {0};

/*
 * Macros for updates of the driver state
 */
#define	New_state(un, s)	\
	(un)->un_last_state = (un)->un_state, (un)->un_state = (s)
#define	Restore_state(un)	\
	{ uchar_t tmp = (un)->un_last_state; New_state((un), tmp); }

static struct sd_cdbinfo sd_cdbtab[] = {
	{ CDB_GROUP0, 0x00,	   0x1FFFFF,   0xFF,	    },
	{ CDB_GROUP1, SCMD_GROUP1, 0xFFFFFFFF, 0xFFFF,	    },
	{ CDB_GROUP5, SCMD_GROUP5, 0xFFFFFFFF, 0xFFFFFFFF,  },
	{ CDB_GROUP4, SCMD_GROUP4, 0xFFFFFFFFFFFFFFFF, 0xFFFFFFFF, },
};

/*
 * Specifies the number of seconds that must have elapsed since the last
 * command has completed for a device to be declared idle to the PM framework.
 */
static int	sd_pm_idletime = 1;

/*
 * Internal function prototypes
 */

#if (defined(__fibre))
/*
 * These #defines are to avoid namespace collisions that occur because this
 * code is currently used to compile two separate driver modules: sd and ssd.
 * All function names need to be treated this way (even if declared static)
 * in order to allow the debugger to resolve the names properly.
 * It is anticipated that in the near future the ssd module will be obsoleted,
 * at which time this ugliness should go away.
 */
#define	sd_log_trace			ssd_log_trace
#define	sd_log_info			ssd_log_info
#define	sd_log_err			ssd_log_err
#define	sdprobe				ssdprobe
#define	sdinfo				ssdinfo
#define	sd_prop_op			ssd_prop_op
#define	sd_scsi_probe_cache_init	ssd_scsi_probe_cache_init
#define	sd_scsi_probe_cache_fini	ssd_scsi_probe_cache_fini
#define	sd_scsi_clear_probe_cache	ssd_scsi_clear_probe_cache
#define	sd_scsi_probe_with_cache	ssd_scsi_probe_with_cache
#define	sd_scsi_target_lun_init		ssd_scsi_target_lun_init
#define	sd_scsi_target_lun_fini		ssd_scsi_target_lun_fini
#define	sd_scsi_get_target_lun_count	ssd_scsi_get_target_lun_count
#define	sd_scsi_update_lun_on_target	ssd_scsi_update_lun_on_target
#define	sd_spin_up_unit			ssd_spin_up_unit
#define	sd_enable_descr_sense		ssd_enable_descr_sense
#define	sd_reenable_dsense_task		ssd_reenable_dsense_task
#define	sd_set_mmc_caps			ssd_set_mmc_caps
#define	sd_read_unit_properties		ssd_read_unit_properties
#define	sd_process_sdconf_file		ssd_process_sdconf_file
#define	sd_process_sdconf_table		ssd_process_sdconf_table
#define	sd_sdconf_id_match		ssd_sdconf_id_match
#define	sd_blank_cmp			ssd_blank_cmp
#define	sd_chk_vers1_data		ssd_chk_vers1_data
#define	sd_set_vers1_properties		ssd_set_vers1_properties

#define	sd_get_physical_geometry	ssd_get_physical_geometry
#define	sd_get_virtual_geometry		ssd_get_virtual_geometry
#define	sd_update_block_info		ssd_update_block_info
#define	sd_register_devid		ssd_register_devid
#define	sd_get_devid			ssd_get_devid
#define	sd_create_devid			ssd_create_devid
#define	sd_write_deviceid		ssd_write_deviceid
#define	sd_check_vpd_page_support	ssd_check_vpd_page_support
#define	sd_setup_pm			ssd_setup_pm
#define	sd_create_pm_components		ssd_create_pm_components
#define	sd_ddi_suspend			ssd_ddi_suspend
#define	sd_ddi_pm_suspend		ssd_ddi_pm_suspend
#define	sd_ddi_resume			ssd_ddi_resume
#define	sd_ddi_pm_resume		ssd_ddi_pm_resume
#define	sdpower				ssdpower
#define	sdattach			ssdattach
#define	sddetach			ssddetach
#define	sd_unit_attach			ssd_unit_attach
#define	sd_unit_detach			ssd_unit_detach
#define	sd_set_unit_attributes		ssd_set_unit_attributes
#define	sd_create_errstats		ssd_create_errstats
#define	sd_set_errstats			ssd_set_errstats
#define	sd_set_pstats			ssd_set_pstats
#define	sddump				ssddump
#define	sd_scsi_poll			ssd_scsi_poll
#define	sd_send_polled_RQS		ssd_send_polled_RQS
#define	sd_ddi_scsi_poll		ssd_ddi_scsi_poll
#define	sd_init_event_callbacks		ssd_init_event_callbacks
#define	sd_event_callback		ssd_event_callback
#define	sd_cache_control		ssd_cache_control
#define	sd_get_write_cache_enabled	ssd_get_write_cache_enabled
#define	sd_get_nv_sup			ssd_get_nv_sup
#define	sd_make_device			ssd_make_device
#define	sdopen				ssdopen
#define	sdclose				ssdclose
#define	sd_ready_and_valid		ssd_ready_and_valid
#define	sdmin				ssdmin
#define	sdread				ssdread
#define	sdwrite				ssdwrite
#define	sdaread				ssdaread
#define	sdawrite			ssdawrite
#define	sdstrategy			ssdstrategy
#define	sdioctl				ssdioctl
#define	sd_mapblockaddr_iostart		ssd_mapblockaddr_iostart
#define	sd_mapblocksize_iostart		ssd_mapblocksize_iostart
#define	sd_checksum_iostart		ssd_checksum_iostart
#define	sd_checksum_uscsi_iostart	ssd_checksum_uscsi_iostart
#define	sd_pm_iostart			ssd_pm_iostart
#define	sd_core_iostart			ssd_core_iostart
#define	sd_mapblockaddr_iodone		ssd_mapblockaddr_iodone
#define	sd_mapblocksize_iodone		ssd_mapblocksize_iodone
#define	sd_checksum_iodone		ssd_checksum_iodone
#define	sd_checksum_uscsi_iodone	ssd_checksum_uscsi_iodone
#define	sd_pm_iodone			ssd_pm_iodone
#define	sd_initpkt_for_buf		ssd_initpkt_for_buf
#define	sd_destroypkt_for_buf		ssd_destroypkt_for_buf
#define	sd_setup_rw_pkt			ssd_setup_rw_pkt
#define	sd_setup_next_rw_pkt		ssd_setup_next_rw_pkt
#define	sd_buf_iodone			ssd_buf_iodone
#define	sd_uscsi_strategy		ssd_uscsi_strategy
#define	sd_initpkt_for_uscsi		ssd_initpkt_for_uscsi
#define	sd_destroypkt_for_uscsi		ssd_destroypkt_for_uscsi
#define	sd_uscsi_iodone			ssd_uscsi_iodone
#define	sd_xbuf_strategy		ssd_xbuf_strategy
#define	sd_xbuf_init			ssd_xbuf_init
#define	sd_pm_entry			ssd_pm_entry
#define	sd_pm_exit			ssd_pm_exit

#define	sd_pm_idletimeout_handler	ssd_pm_idletimeout_handler
#define	sd_pm_timeout_handler		ssd_pm_timeout_handler

#define	sd_add_buf_to_waitq		ssd_add_buf_to_waitq
#define	sdintr				ssdintr
#define	sd_start_cmds			ssd_start_cmds
#define	sd_send_scsi_cmd		ssd_send_scsi_cmd
#define	sd_bioclone_alloc		ssd_bioclone_alloc
#define	sd_bioclone_free		ssd_bioclone_free
#define	sd_shadow_buf_alloc		ssd_shadow_buf_alloc
#define	sd_shadow_buf_free		ssd_shadow_buf_free
#define	sd_print_transport_rejected_message	\
					ssd_print_transport_rejected_message
#define	sd_retry_command		ssd_retry_command
#define	sd_set_retry_bp			ssd_set_retry_bp
#define	sd_send_request_sense_command	ssd_send_request_sense_command
#define	sd_start_retry_command		ssd_start_retry_command
#define	sd_start_direct_priority_command	\
					ssd_start_direct_priority_command
#define	sd_return_failed_command	ssd_return_failed_command
#define	sd_return_failed_command_no_restart	\
					ssd_return_failed_command_no_restart
#define	sd_return_command		ssd_return_command
#define	sd_sync_with_callback		ssd_sync_with_callback
#define	sdrunout			ssdrunout
#define	sd_mark_rqs_busy		ssd_mark_rqs_busy
#define	sd_mark_rqs_idle		ssd_mark_rqs_idle
#define	sd_reduce_throttle		ssd_reduce_throttle
#define	sd_restore_throttle		ssd_restore_throttle
#define	sd_print_incomplete_msg		ssd_print_incomplete_msg
#define	sd_init_cdb_limits		ssd_init_cdb_limits
#define	sd_pkt_status_good		ssd_pkt_status_good
#define	sd_pkt_status_check_condition	ssd_pkt_status_check_condition
#define	sd_pkt_status_busy		ssd_pkt_status_busy
#define	sd_pkt_status_reservation_conflict	\
					ssd_pkt_status_reservation_conflict
#define	sd_pkt_status_qfull		ssd_pkt_status_qfull
#define	sd_handle_request_sense		ssd_handle_request_sense
#define	sd_handle_auto_request_sense	ssd_handle_auto_request_sense
#define	sd_print_sense_failed_msg	ssd_print_sense_failed_msg
#define	sd_validate_sense_data		ssd_validate_sense_data
#define	sd_decode_sense			ssd_decode_sense
#define	sd_print_sense_msg		ssd_print_sense_msg
#define	sd_sense_key_no_sense		ssd_sense_key_no_sense
#define	sd_sense_key_recoverable_error	ssd_sense_key_recoverable_error
#define	sd_sense_key_not_ready		ssd_sense_key_not_ready
#define	sd_sense_key_medium_or_hardware_error	\
					ssd_sense_key_medium_or_hardware_error
#define	sd_sense_key_illegal_request	ssd_sense_key_illegal_request
#define	sd_sense_key_unit_attention	ssd_sense_key_unit_attention
#define	sd_sense_key_fail_command	ssd_sense_key_fail_command
#define	sd_sense_key_blank_check	ssd_sense_key_blank_check
#define	sd_sense_key_aborted_command	ssd_sense_key_aborted_command
#define	sd_sense_key_default		ssd_sense_key_default
#define	sd_print_retry_msg		ssd_print_retry_msg
#define	sd_print_cmd_incomplete_msg	ssd_print_cmd_incomplete_msg
#define	sd_pkt_reason_cmd_incomplete	ssd_pkt_reason_cmd_incomplete
#define	sd_pkt_reason_cmd_tran_err	ssd_pkt_reason_cmd_tran_err
#define	sd_pkt_reason_cmd_reset		ssd_pkt_reason_cmd_reset
#define	sd_pkt_reason_cmd_aborted	ssd_pkt_reason_cmd_aborted
#define	sd_pkt_reason_cmd_timeout	ssd_pkt_reason_cmd_timeout
#define	sd_pkt_reason_cmd_unx_bus_free	ssd_pkt_reason_cmd_unx_bus_free
#define	sd_pkt_reason_cmd_tag_reject	ssd_pkt_reason_cmd_tag_reject
#define	sd_pkt_reason_default		ssd_pkt_reason_default
#define	sd_reset_target			ssd_reset_target
#define	sd_start_stop_unit_callback	ssd_start_stop_unit_callback
#define	sd_start_stop_unit_task		ssd_start_stop_unit_task
#define	sd_taskq_create			ssd_taskq_create
#define	sd_taskq_delete			ssd_taskq_delete
#define	sd_target_change_task		ssd_target_change_task
#define	sd_log_lun_expansion_event	ssd_log_lun_expansion_event
#define	sd_media_change_task		ssd_media_change_task
#define	sd_handle_mchange		ssd_handle_mchange
#define	sd_send_scsi_DOORLOCK		ssd_send_scsi_DOORLOCK
#define	sd_send_scsi_READ_CAPACITY	ssd_send_scsi_READ_CAPACITY
#define	sd_send_scsi_READ_CAPACITY_16	ssd_send_scsi_READ_CAPACITY_16
#define	sd_send_scsi_GET_CONFIGURATION	ssd_send_scsi_GET_CONFIGURATION
#define	sd_send_scsi_feature_GET_CONFIGURATION	\
					ssd_send_scsi_feature_GET_CONFIGURATION
#define	sd_send_scsi_START_STOP_UNIT	ssd_send_scsi_START_STOP_UNIT
#define	sd_send_scsi_INQUIRY		ssd_send_scsi_INQUIRY
#define	sd_send_scsi_TEST_UNIT_READY	ssd_send_scsi_TEST_UNIT_READY
#define	sd_send_scsi_PERSISTENT_RESERVE_IN	\
					ssd_send_scsi_PERSISTENT_RESERVE_IN
#define	sd_send_scsi_PERSISTENT_RESERVE_OUT	\
					ssd_send_scsi_PERSISTENT_RESERVE_OUT
#define	sd_send_scsi_SYNCHRONIZE_CACHE	ssd_send_scsi_SYNCHRONIZE_CACHE
#define	sd_send_scsi_SYNCHRONIZE_CACHE_biodone	\
					ssd_send_scsi_SYNCHRONIZE_CACHE_biodone
#define	sd_send_scsi_MODE_SENSE		ssd_send_scsi_MODE_SENSE
#define	sd_send_scsi_MODE_SELECT	ssd_send_scsi_MODE_SELECT
#define	sd_send_scsi_RDWR		ssd_send_scsi_RDWR
#define	sd_send_scsi_LOG_SENSE		ssd_send_scsi_LOG_SENSE
#define	sd_alloc_rqs			ssd_alloc_rqs
#define	sd_free_rqs			ssd_free_rqs
#define	sd_dump_memory			ssd_dump_memory
#define	sd_get_media_info		ssd_get_media_info
#define	sd_dkio_ctrl_info		ssd_dkio_ctrl_info
#define	sd_nvpair_str_decode		ssd_nvpair_str_decode
#define	sd_strtok_r			ssd_strtok_r
#define	sd_set_properties		ssd_set_properties
#define	sd_get_tunables_from_conf	ssd_get_tunables_from_conf
#define	sd_setup_next_xfer		ssd_setup_next_xfer
#define	sd_dkio_get_temp		ssd_dkio_get_temp
#define	sd_check_mhd			ssd_check_mhd
#define	sd_mhd_watch_cb			ssd_mhd_watch_cb
#define	sd_mhd_watch_incomplete		ssd_mhd_watch_incomplete
#define	sd_sname			ssd_sname
#define	sd_mhd_resvd_recover		ssd_mhd_resvd_recover
#define	sd_resv_reclaim_thread		ssd_resv_reclaim_thread
#define	sd_take_ownership		ssd_take_ownership
#define	sd_reserve_release		ssd_reserve_release
#define	sd_rmv_resv_reclaim_req		ssd_rmv_resv_reclaim_req
#define	sd_mhd_reset_notify_cb		ssd_mhd_reset_notify_cb
#define	sd_persistent_reservation_in_read_keys	\
					ssd_persistent_reservation_in_read_keys
#define	sd_persistent_reservation_in_read_resv	\
					ssd_persistent_reservation_in_read_resv
#define	sd_mhdioc_takeown		ssd_mhdioc_takeown
#define	sd_mhdioc_failfast		ssd_mhdioc_failfast
#define	sd_mhdioc_release		ssd_mhdioc_release
#define	sd_mhdioc_register_devid	ssd_mhdioc_register_devid
#define	sd_mhdioc_inkeys		ssd_mhdioc_inkeys
#define	sd_mhdioc_inresv		ssd_mhdioc_inresv
#define	sr_change_blkmode		ssr_change_blkmode
#define	sr_change_speed			ssr_change_speed
#define	sr_atapi_change_speed		ssr_atapi_change_speed
#define	sr_pause_resume			ssr_pause_resume
#define	sr_play_msf			ssr_play_msf
#define	sr_play_trkind			ssr_play_trkind
#define	sr_read_all_subcodes		ssr_read_all_subcodes
#define	sr_read_subchannel		ssr_read_subchannel
#define	sr_read_tocentry		ssr_read_tocentry
#define	sr_read_tochdr			ssr_read_tochdr
#define	sr_read_cdda			ssr_read_cdda
#define	sr_read_cdxa			ssr_read_cdxa
#define	sr_read_mode1			ssr_read_mode1
#define	sr_read_mode2			ssr_read_mode2
#define	sr_read_cd_mode2		ssr_read_cd_mode2
#define	sr_sector_mode			ssr_sector_mode
#define	sr_eject			ssr_eject
#define	sr_ejected			ssr_ejected
#define	sr_check_wp			ssr_check_wp
#define	sd_check_media			ssd_check_media
#define	sd_media_watch_cb		ssd_media_watch_cb
#define	sd_delayed_cv_broadcast		ssd_delayed_cv_broadcast
#define	sr_volume_ctrl			ssr_volume_ctrl
#define	sr_read_sony_session_offset	ssr_read_sony_session_offset
#define	sd_log_page_supported		ssd_log_page_supported
#define	sd_check_for_writable_cd	ssd_check_for_writable_cd
#define	sd_wm_cache_constructor		ssd_wm_cache_constructor
#define	sd_wm_cache_destructor		ssd_wm_cache_destructor
#define	sd_range_lock			ssd_range_lock
#define	sd_get_range			ssd_get_range
#define	sd_free_inlist_wmap		ssd_free_inlist_wmap
#define	sd_range_unlock			ssd_range_unlock
#define	sd_read_modify_write_task	ssd_read_modify_write_task
#define	sddump_do_read_of_rmw		ssddump_do_read_of_rmw

#define	sd_iostart_chain		ssd_iostart_chain
#define	sd_iodone_chain			ssd_iodone_chain
#define	sd_initpkt_map			ssd_initpkt_map
#define	sd_destroypkt_map		ssd_destroypkt_map
#define	sd_chain_type_map		ssd_chain_type_map
#define	sd_chain_index_map		ssd_chain_index_map

#define	sd_failfast_flushctl		ssd_failfast_flushctl
#define	sd_failfast_flushq		ssd_failfast_flushq
#define	sd_failfast_flushq_callback	ssd_failfast_flushq_callback

#define	sd_is_lsi			ssd_is_lsi
#define	sd_tg_rdwr			ssd_tg_rdwr
#define	sd_tg_getinfo			ssd_tg_getinfo

#endif	/* #if (defined(__fibre)) */


int _init(void);
int _fini(void);
int _info(struct modinfo *modinfop);

/*PRINTFLIKE3*/
static void sd_log_trace(uint_t comp, struct sd_lun *un, const char *fmt, ...);
/*PRINTFLIKE3*/
static void sd_log_info(uint_t comp, struct sd_lun *un, const char *fmt, ...);
/*PRINTFLIKE3*/
static void sd_log_err(uint_t comp, struct sd_lun *un, const char *fmt, ...);

static int sdprobe(dev_info_t *devi);
static int sdinfo(dev_info_t *dip, ddi_info_cmd_t infocmd, void *arg,
    void **result);
static int sd_prop_op(dev_t dev, dev_info_t *dip, ddi_prop_op_t prop_op,
    int mod_flags, char *name, caddr_t valuep, int *lengthp);

/*
 * Smart probe for parallel scsi
 */
static void sd_scsi_probe_cache_init(void);
static void sd_scsi_probe_cache_fini(void);
static void sd_scsi_clear_probe_cache(void);
static int sd_scsi_probe_with_cache(struct scsi_device *devp, int (*fn)());

/*
 * Attached luns on target for parallel scsi
 */
static void sd_scsi_target_lun_init(void);
static void sd_scsi_target_lun_fini(void);
static int sd_scsi_get_target_lun_count(dev_info_t *dip, int target);
static void sd_scsi_update_lun_on_target(dev_info_t *dip, int target, int flag);

static int sd_spin_up_unit(sd_ssc_t *ssc);

/*
 * Using sd_ssc_init to establish sd_ssc_t struct
 * Using sd_ssc_send to send uscsi internal command
 * Using sd_ssc_fini to free sd_ssc_t struct
 */
static sd_ssc_t *sd_ssc_init(struct sd_lun *un);
static int sd_ssc_send(sd_ssc_t *ssc, struct uscsi_cmd *incmd,
    int flag, enum uio_seg dataspace, int path_flag);
static void sd_ssc_fini(sd_ssc_t *ssc);

/*
 * Using sd_ssc_assessment to set correct type-of-assessment
 * Using sd_ssc_post to post ereport & system log
 *       sd_ssc_post will call sd_ssc_print to print system log
 *       sd_ssc_post will call sd_ssc_ereport_post to post ereport
 */
static void sd_ssc_assessment(sd_ssc_t *ssc,
    enum sd_type_assessment tp_assess);

static void sd_ssc_post(sd_ssc_t *ssc, enum sd_driver_assessment sd_assess);
static void sd_ssc_print(sd_ssc_t *ssc, int sd_severity);
static void sd_ssc_ereport_post(sd_ssc_t *ssc,
    enum sd_driver_assessment drv_assess);

/*
 * Using sd_ssc_set_info to mark an un-decodable-data error.
 * Using sd_ssc_extract_info to transfer information from internal
 * data structures to sd_ssc_t.
 */
static void sd_ssc_set_info(sd_ssc_t *ssc, int ssc_flags,
    const char *fmt, ...);
static void sd_ssc_extract_info(sd_ssc_t *ssc, struct sd_lun *un,
    struct scsi_pkt *pktp, struct buf *bp, struct sd_xbuf *xp);
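
/*
 * Illustrative sketch only (not part of the driver): the sd_ssc_t life
 * cycle described above -- init, send, assess, fini.  The uscsi_cmd is
 * assumed to have been filled in by the caller; the flag of 0 and the
 * assessment values shown (SD_FMT_STANDARD, SD_FMT_IGNORE, assumed to
 * be defined in sddef.h) are illustrative, not mandatory.
 */
static int
sd_ssc_usage_sketch(struct sd_lun *un, struct uscsi_cmd *ucmd)
{
	sd_ssc_t	*ssc = sd_ssc_init(un);
	int		rval;

	/* real callers typically pass FKIOCTL as the mode flag */
	rval = sd_ssc_send(ssc, ucmd, 0, UIO_SYSSPACE, SD_PATH_STANDARD);
	if (rval == 0)
		sd_ssc_assessment(ssc, SD_FMT_STANDARD);
	else
		sd_ssc_assessment(ssc, SD_FMT_IGNORE);
	sd_ssc_fini(ssc);
	return (rval);
}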

static int sd_send_scsi_cmd(dev_t dev, struct uscsi_cmd *incmd, int flag,
    enum uio_seg dataspace, int path_flag);

#ifdef _LP64
static void sd_enable_descr_sense(sd_ssc_t *ssc);
static void sd_reenable_dsense_task(void *arg);
#endif /* _LP64 */

static void sd_set_mmc_caps(sd_ssc_t *ssc);

static void sd_read_unit_properties(struct sd_lun *un);
static int sd_process_sdconf_file(struct sd_lun *un);
static void sd_nvpair_str_decode(struct sd_lun *un, char *nvpair_str);
static char *sd_strtok_r(char *string, const char *sepset, char **lasts);
static void sd_set_properties(struct sd_lun *un, char *name, char *value);
static void sd_get_tunables_from_conf(struct sd_lun *un, int flags,
    int *data_list, sd_tunables *values);
static void sd_process_sdconf_table(struct sd_lun *un);
static int sd_sdconf_id_match(struct sd_lun *un, char *id, int idlen);
static int sd_blank_cmp(struct sd_lun *un, char *id, int idlen);
static int sd_chk_vers1_data(struct sd_lun *un, int flags, int *prop_list,
    int list_len, char *dataname_ptr);
static void sd_set_vers1_properties(struct sd_lun *un, int flags,
    sd_tunables *prop_list);

static void sd_register_devid(sd_ssc_t *ssc, dev_info_t *devi,
    int reservation_flag);
static int sd_get_devid(sd_ssc_t *ssc);
static ddi_devid_t sd_create_devid(sd_ssc_t *ssc);
static int sd_write_deviceid(sd_ssc_t *ssc);
static int sd_get_devid_page(struct sd_lun *un, uchar_t *wwn, int *len);
static int sd_check_vpd_page_support(sd_ssc_t *ssc);

static void sd_setup_pm(sd_ssc_t *ssc, dev_info_t *devi);
static void sd_create_pm_components(dev_info_t *devi, struct sd_lun *un);

static int sd_ddi_suspend(dev_info_t *devi);
static int sd_ddi_pm_suspend(struct sd_lun *un);
static int sd_ddi_resume(dev_info_t *devi);
static int sd_ddi_pm_resume(struct sd_lun *un);
static int sdpower(dev_info_t *devi, int component, int level);

static int sdattach(dev_info_t *devi, ddi_attach_cmd_t cmd);
static int sddetach(dev_info_t *devi, ddi_detach_cmd_t cmd);
static int sd_unit_attach(dev_info_t *devi);
static int sd_unit_detach(dev_info_t *devi);

static void sd_set_unit_attributes(struct sd_lun *un, dev_info_t *devi);
static void sd_create_errstats(struct sd_lun *un, int instance);
static void sd_set_errstats(struct sd_lun *un);
static void sd_set_pstats(struct sd_lun *un);

static int sddump(dev_t dev, caddr_t addr, daddr_t blkno, int nblk);
static int sd_scsi_poll(struct sd_lun *un, struct scsi_pkt *pkt);
static int sd_send_polled_RQS(struct sd_lun *un);
static int sd_ddi_scsi_poll(struct scsi_pkt *pkt);

#if (defined(__fibre))
/*
 * Event callbacks (photon)
 */
static void sd_init_event_callbacks(struct sd_lun *un);
static void  sd_event_callback(dev_info_t *, ddi_eventcookie_t, void *, void *);
#endif

/*
 * Defines for sd_cache_control
 */

#define	SD_CACHE_ENABLE		1
#define	SD_CACHE_DISABLE	0
#define	SD_CACHE_NOCHANGE	-1

static int sd_cache_control(sd_ssc_t *ssc, int rcd_flag, int wce_flag);
static int sd_get_write_cache_enabled(sd_ssc_t *ssc, int *is_enabled);
static void sd_get_nv_sup(sd_ssc_t *ssc);
static dev_t sd_make_device(dev_info_t *devi);
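
/*
 * Illustrative sketch only (not part of the driver): using the flags
 * above to enable the write cache while leaving read caching alone.
 */
static int
sd_wce_enable_sketch(sd_ssc_t *ssc)
{
	/* rcd_flag untouched, wce_flag enabled */
	return (sd_cache_control(ssc, SD_CACHE_NOCHANGE, SD_CACHE_ENABLE));
}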

static void sd_update_block_info(struct sd_lun *un, uint32_t lbasize,
    uint64_t capacity);

/*
 * Driver entry point functions.
 */
static int sdopen(dev_t *dev_p, int flag, int otyp, cred_t *cred_p);
static int sdclose(dev_t dev, int flag, int otyp, cred_t *cred_p);
static int sd_ready_and_valid(sd_ssc_t *ssc, int part);

static void sdmin(struct buf *bp);
static int sdread(dev_t dev, struct uio *uio, cred_t *cred_p);
static int sdwrite(dev_t dev, struct uio *uio, cred_t *cred_p);
static int sdaread(dev_t dev, struct aio_req *aio, cred_t *cred_p);
static int sdawrite(dev_t dev, struct aio_req *aio, cred_t *cred_p);

static int sdstrategy(struct buf *bp);
static int sdioctl(dev_t, int, intptr_t, int, cred_t *, int *);

/*
 * Function prototypes for layering functions in the iostart chain.
 */
static void sd_mapblockaddr_iostart(int index, struct sd_lun *un,
    struct buf *bp);
static void sd_mapblocksize_iostart(int index, struct sd_lun *un,
    struct buf *bp);
static void sd_checksum_iostart(int index, struct sd_lun *un, struct buf *bp);
static void sd_checksum_uscsi_iostart(int index, struct sd_lun *un,
    struct buf *bp);
static void sd_pm_iostart(int index, struct sd_lun *un, struct buf *bp);
static void sd_core_iostart(int index, struct sd_lun *un, struct buf *bp);

/*
 * Function prototypes for layering functions in the iodone chain.
 */
static void sd_buf_iodone(int index, struct sd_lun *un, struct buf *bp);
static void sd_uscsi_iodone(int index, struct sd_lun *un, struct buf *bp);
static void sd_mapblockaddr_iodone(int index, struct sd_lun *un,
    struct buf *bp);
static void sd_mapblocksize_iodone(int index, struct sd_lun *un,
    struct buf *bp);
static void sd_checksum_iodone(int index, struct sd_lun *un, struct buf *bp);
static void sd_checksum_uscsi_iodone(int index, struct sd_lun *un,
    struct buf *bp);
static void sd_pm_iodone(int index, struct sd_lun *un, struct buf *bp);

/*
 * Prototypes for functions to support buf(9S) based IO.
 */
static void sd_xbuf_strategy(struct buf *bp, ddi_xbuf_t xp, void *arg);
static int sd_initpkt_for_buf(struct buf *, struct scsi_pkt **);
static void sd_destroypkt_for_buf(struct buf *);
static int sd_setup_rw_pkt(struct sd_lun *un, struct scsi_pkt **pktpp,
    struct buf *bp, int flags,
    int (*callback)(caddr_t), caddr_t callback_arg,
    diskaddr_t lba, uint32_t blockcount);
static int sd_setup_next_rw_pkt(struct sd_lun *un, struct scsi_pkt *pktp,
    struct buf *bp, diskaddr_t lba, uint32_t blockcount);

/*
 * Prototypes for functions to support USCSI IO.
 */
static int sd_uscsi_strategy(struct buf *bp);
static int sd_initpkt_for_uscsi(struct buf *, struct scsi_pkt **);
static void sd_destroypkt_for_uscsi(struct buf *);

static void sd_xbuf_init(struct sd_lun *un, struct buf *bp, struct sd_xbuf *xp,
    uchar_t chain_type, void *pktinfop);

static int sd_pm_entry(struct sd_lun *un);
static void sd_pm_exit(struct sd_lun *un);

static void sd_pm_idletimeout_handler(void *arg);

/*
 * sd_core internal functions (used at the sd_core_io layer).
 */
static void sd_add_buf_to_waitq(struct sd_lun *un, struct buf *bp);
static void sdintr(struct scsi_pkt *pktp);
static void sd_start_cmds(struct sd_lun *un, struct buf *immed_bp);

static int sd_send_scsi_cmd(dev_t dev, struct uscsi_cmd *incmd, int flag,
    enum uio_seg dataspace, int path_flag);

static struct buf *sd_bioclone_alloc(struct buf *bp, size_t datalen,
    daddr_t blkno, int (*func)(struct buf *));
static struct buf *sd_shadow_buf_alloc(struct buf *bp, size_t datalen,
    uint_t bflags, daddr_t blkno, int (*func)(struct buf *));
static void sd_bioclone_free(struct buf *bp);
static void sd_shadow_buf_free(struct buf *bp);

static void sd_print_transport_rejected_message(struct sd_lun *un,
    struct sd_xbuf *xp, int code);
static void sd_print_incomplete_msg(struct sd_lun *un, struct buf *bp,
    void *arg, int code);
static void sd_print_sense_failed_msg(struct sd_lun *un, struct buf *bp,
    void *arg, int code);
static void sd_print_cmd_incomplete_msg(struct sd_lun *un, struct buf *bp,
    void *arg, int code);

static void sd_retry_command(struct sd_lun *un, struct buf *bp,
    int retry_check_flag,
    void (*user_funcp)(struct sd_lun *un, struct buf *bp, void *argp,
	int c),
    void *user_arg, int failure_code, clock_t retry_delay,
    void (*statp)(kstat_io_t *));

static void sd_set_retry_bp(struct sd_lun *un, struct buf *bp,
    clock_t retry_delay, void (*statp)(kstat_io_t *));

static void sd_send_request_sense_command(struct sd_lun *un, struct buf *bp,
    struct scsi_pkt *pktp);
static void sd_start_retry_command(void *arg);
static void sd_start_direct_priority_command(void *arg);
static void sd_return_failed_command(struct sd_lun *un, struct buf *bp,
    int errcode);
static void sd_return_failed_command_no_restart(struct sd_lun *un,
    struct buf *bp, int errcode);
static void sd_return_command(struct sd_lun *un, struct buf *bp);
static void sd_sync_with_callback(struct sd_lun *un);
static int sdrunout(caddr_t arg);

static void sd_mark_rqs_busy(struct sd_lun *un, struct buf *bp);
static struct buf *sd_mark_rqs_idle(struct sd_lun *un, struct sd_xbuf *xp);

static void sd_reduce_throttle(struct sd_lun *un, int throttle_type);
static void sd_restore_throttle(void *arg);

static void sd_init_cdb_limits(struct sd_lun *un);

static void sd_pkt_status_good(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);

/*
 * Error handling functions
 */
static void sd_pkt_status_check_condition(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_pkt_status_busy(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_pkt_status_reservation_conflict(struct sd_lun *un,
    struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_pkt_status_qfull(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);

static void sd_handle_request_sense(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_handle_auto_request_sense(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);
static int sd_validate_sense_data(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, size_t actual_len);
static void sd_decode_sense(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);

static void sd_print_sense_msg(struct sd_lun *un, struct buf *bp,
    void *arg, int code);

static void sd_sense_key_no_sense(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_sense_key_recoverable_error(struct sd_lun *un,
    uint8_t *sense_datap,
    struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_sense_key_not_ready(struct sd_lun *un,
    uint8_t *sense_datap,
    struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_sense_key_medium_or_hardware_error(struct sd_lun *un,
    uint8_t *sense_datap,
    struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_sense_key_illegal_request(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_sense_key_unit_attention(struct sd_lun *un,
    uint8_t *sense_datap,
    struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_sense_key_fail_command(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_sense_key_blank_check(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_sense_key_aborted_command(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_sense_key_default(struct sd_lun *un,
    uint8_t *sense_datap,
    struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp);

static void sd_print_retry_msg(struct sd_lun *un, struct buf *bp,
    void *arg, int flag);

static void sd_pkt_reason_cmd_incomplete(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_pkt_reason_cmd_tran_err(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_pkt_reason_cmd_reset(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_pkt_reason_cmd_aborted(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_pkt_reason_cmd_timeout(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_pkt_reason_cmd_unx_bus_free(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_pkt_reason_cmd_tag_reject(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_pkt_reason_default(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);

static void sd_reset_target(struct sd_lun *un, struct scsi_pkt *pktp);

static void sd_start_stop_unit_callback(void *arg);
static void sd_start_stop_unit_task(void *arg);

static void sd_taskq_create(void);
static void sd_taskq_delete(void);
static void sd_target_change_task(void *arg);
static void sd_log_lun_expansion_event(struct sd_lun *un, int km_flag);
static void sd_media_change_task(void *arg);

static int sd_handle_mchange(struct sd_lun *un);
static int sd_send_scsi_DOORLOCK(sd_ssc_t *ssc, int flag, int path_flag);
static int sd_send_scsi_READ_CAPACITY(sd_ssc_t *ssc, uint64_t *capp,
    uint32_t *lbap, int path_flag);
*capp, 1463 uint32_t *lbap, int path_flag); 1464 static int sd_send_scsi_START_STOP_UNIT(sd_ssc_t *ssc, int flag, 1465 int path_flag); 1466 static int sd_send_scsi_INQUIRY(sd_ssc_t *ssc, uchar_t *bufaddr, 1467 size_t buflen, uchar_t evpd, uchar_t page_code, size_t *residp); 1468 static int sd_send_scsi_TEST_UNIT_READY(sd_ssc_t *ssc, int flag); 1469 static int sd_send_scsi_PERSISTENT_RESERVE_IN(sd_ssc_t *ssc, 1470 uchar_t usr_cmd, uint16_t data_len, uchar_t *data_bufp); 1471 static int sd_send_scsi_PERSISTENT_RESERVE_OUT(sd_ssc_t *ssc, 1472 uchar_t usr_cmd, uchar_t *usr_bufp); 1473 static int sd_send_scsi_SYNCHRONIZE_CACHE(struct sd_lun *un, 1474 struct dk_callback *dkc); 1475 static int sd_send_scsi_SYNCHRONIZE_CACHE_biodone(struct buf *bp); 1476 static int sd_send_scsi_GET_CONFIGURATION(sd_ssc_t *ssc, 1477 struct uscsi_cmd *ucmdbuf, uchar_t *rqbuf, uint_t rqbuflen, 1478 uchar_t *bufaddr, uint_t buflen, int path_flag); 1479 static int sd_send_scsi_feature_GET_CONFIGURATION(sd_ssc_t *ssc, 1480 struct uscsi_cmd *ucmdbuf, uchar_t *rqbuf, uint_t rqbuflen, 1481 uchar_t *bufaddr, uint_t buflen, char feature, int path_flag); 1482 static int sd_send_scsi_MODE_SENSE(sd_ssc_t *ssc, int cdbsize, 1483 uchar_t *bufaddr, size_t buflen, uchar_t page_code, int path_flag); 1484 static int sd_send_scsi_MODE_SELECT(sd_ssc_t *ssc, int cdbsize, 1485 uchar_t *bufaddr, size_t buflen, uchar_t save_page, int path_flag); 1486 static int sd_send_scsi_RDWR(sd_ssc_t *ssc, uchar_t cmd, void *bufaddr, 1487 size_t buflen, daddr_t start_block, int path_flag); 1488 #define sd_send_scsi_READ(ssc, bufaddr, buflen, start_block, path_flag) \ 1489 sd_send_scsi_RDWR(ssc, SCMD_READ, bufaddr, buflen, start_block, \ 1490 path_flag) 1491 #define sd_send_scsi_WRITE(ssc, bufaddr, buflen, start_block, path_flag)\ 1492 sd_send_scsi_RDWR(ssc, SCMD_WRITE, bufaddr, buflen, start_block,\ 1493 path_flag) 1494 1495 static int sd_send_scsi_LOG_SENSE(sd_ssc_t *ssc, uchar_t *bufaddr, 1496 uint16_t buflen, uchar_t page_code, uchar_t page_control, 1497 uint16_t param_ptr, int path_flag); 1498 1499 static int sd_alloc_rqs(struct scsi_device *devp, struct sd_lun *un); 1500 static void sd_free_rqs(struct sd_lun *un); 1501 1502 static void sd_dump_memory(struct sd_lun *un, uint_t comp, char *title, 1503 uchar_t *data, int len, int fmt); 1504 static void sd_panic_for_res_conflict(struct sd_lun *un); 1505 1506 /* 1507 * Disk Ioctl Function Prototypes 1508 */ 1509 static int sd_get_media_info(dev_t dev, caddr_t arg, int flag); 1510 static int sd_dkio_ctrl_info(dev_t dev, caddr_t arg, int flag); 1511 static int sd_dkio_get_temp(dev_t dev, caddr_t arg, int flag); 1512 1513 /* 1514 * Multi-host Ioctl Prototypes 1515 */ 1516 static int sd_check_mhd(dev_t dev, int interval); 1517 static int sd_mhd_watch_cb(caddr_t arg, struct scsi_watch_result *resultp); 1518 static void sd_mhd_watch_incomplete(struct sd_lun *un, struct scsi_pkt *pkt); 1519 static char *sd_sname(uchar_t status); 1520 static void sd_mhd_resvd_recover(void *arg); 1521 static void sd_resv_reclaim_thread(); 1522 static int sd_take_ownership(dev_t dev, struct mhioctkown *p); 1523 static int sd_reserve_release(dev_t dev, int cmd); 1524 static void sd_rmv_resv_reclaim_req(dev_t dev); 1525 static void sd_mhd_reset_notify_cb(caddr_t arg); 1526 static int sd_persistent_reservation_in_read_keys(struct sd_lun *un, 1527 mhioc_inkeys_t *usrp, int flag); 1528 static int sd_persistent_reservation_in_read_resv(struct sd_lun *un, 1529 mhioc_inresvs_t *usrp, int flag); 1530 static int sd_mhdioc_takeown(dev_t 
dev, caddr_t arg, int flag); 1531 static int sd_mhdioc_failfast(dev_t dev, caddr_t arg, int flag); 1532 static int sd_mhdioc_release(dev_t dev); 1533 static int sd_mhdioc_register_devid(dev_t dev); 1534 static int sd_mhdioc_inkeys(dev_t dev, caddr_t arg, int flag); 1535 static int sd_mhdioc_inresv(dev_t dev, caddr_t arg, int flag); 1536 1537 /* 1538 * SCSI removable prototypes 1539 */ 1540 static int sr_change_blkmode(dev_t dev, int cmd, intptr_t data, int flag); 1541 static int sr_change_speed(dev_t dev, int cmd, intptr_t data, int flag); 1542 static int sr_atapi_change_speed(dev_t dev, int cmd, intptr_t data, int flag); 1543 static int sr_pause_resume(dev_t dev, int mode); 1544 static int sr_play_msf(dev_t dev, caddr_t data, int flag); 1545 static int sr_play_trkind(dev_t dev, caddr_t data, int flag); 1546 static int sr_read_all_subcodes(dev_t dev, caddr_t data, int flag); 1547 static int sr_read_subchannel(dev_t dev, caddr_t data, int flag); 1548 static int sr_read_tocentry(dev_t dev, caddr_t data, int flag); 1549 static int sr_read_tochdr(dev_t dev, caddr_t data, int flag); 1550 static int sr_read_cdda(dev_t dev, caddr_t data, int flag); 1551 static int sr_read_cdxa(dev_t dev, caddr_t data, int flag); 1552 static int sr_read_mode1(dev_t dev, caddr_t data, int flag); 1553 static int sr_read_mode2(dev_t dev, caddr_t data, int flag); 1554 static int sr_read_cd_mode2(dev_t dev, caddr_t data, int flag); 1555 static int sr_sector_mode(dev_t dev, uint32_t blksize); 1556 static int sr_eject(dev_t dev); 1557 static void sr_ejected(register struct sd_lun *un); 1558 static int sr_check_wp(dev_t dev); 1559 static int sd_check_media(dev_t dev, enum dkio_state state); 1560 static int sd_media_watch_cb(caddr_t arg, struct scsi_watch_result *resultp); 1561 static void sd_delayed_cv_broadcast(void *arg); 1562 static int sr_volume_ctrl(dev_t dev, caddr_t data, int flag); 1563 static int sr_read_sony_session_offset(dev_t dev, caddr_t data, int flag); 1564 1565 static int sd_log_page_supported(sd_ssc_t *ssc, int log_page); 1566 1567 /* 1568 * Function Prototype for the non-512 support (DVDRAM, MO etc.) functions. 1569 */ 1570 static void sd_check_for_writable_cd(sd_ssc_t *ssc, int path_flag); 1571 static int sd_wm_cache_constructor(void *wm, void *un, int flags); 1572 static void sd_wm_cache_destructor(void *wm, void *un); 1573 static struct sd_w_map *sd_range_lock(struct sd_lun *un, daddr_t startb, 1574 daddr_t endb, ushort_t typ); 1575 static struct sd_w_map *sd_get_range(struct sd_lun *un, daddr_t startb, 1576 daddr_t endb); 1577 static void sd_free_inlist_wmap(struct sd_lun *un, struct sd_w_map *wmp); 1578 static void sd_range_unlock(struct sd_lun *un, struct sd_w_map *wm); 1579 static void sd_read_modify_write_task(void * arg); 1580 static int 1581 sddump_do_read_of_rmw(struct sd_lun *un, uint64_t blkno, uint64_t nblk, 1582 struct buf **bpp); 1583 1584 1585 /* 1586 * Function prototypes for failfast support. 
1587 */ 1588 static void sd_failfast_flushq(struct sd_lun *un); 1589 static int sd_failfast_flushq_callback(struct buf *bp); 1590 1591 /* 1592 * Function prototypes to check for lsi devices 1593 */ 1594 static void sd_is_lsi(struct sd_lun *un); 1595 1596 /* 1597 * Function prototypes for partial DMA support 1598 */ 1599 static int sd_setup_next_xfer(struct sd_lun *un, struct buf *bp, 1600 struct scsi_pkt *pkt, struct sd_xbuf *xp); 1601 1602 1603 /* Function prototypes for cmlb */ 1604 static int sd_tg_rdwr(dev_info_t *devi, uchar_t cmd, void *bufaddr, 1605 diskaddr_t start_block, size_t reqlength, void *tg_cookie); 1606 1607 static int sd_tg_getinfo(dev_info_t *devi, int cmd, void *arg, void *tg_cookie); 1608 1609 /* 1610 * Constants for failfast support: 1611 * 1612 * SD_FAILFAST_INACTIVE: Instance is currently in a normal state, with NO 1613 * failfast processing being performed. 1614 * 1615 * SD_FAILFAST_ACTIVE: Instance is in the failfast state and is performing 1616 * failfast processing on all bufs with B_FAILFAST set. 1617 */ 1618 1619 #define SD_FAILFAST_INACTIVE 0 1620 #define SD_FAILFAST_ACTIVE 1 1621 1622 /* 1623 * Bitmask to control behavior of buf(9S) flushes when a transition to 1624 * the failfast state occurs. Optional bits include: 1625 * 1626 * SD_FAILFAST_FLUSH_ALL_BUFS: When set, flush ALL bufs including those that 1627 * do NOT have B_FAILFAST set. When clear, only bufs with B_FAILFAST will 1628 * be flushed. 1629 * 1630 * SD_FAILFAST_FLUSH_ALL_QUEUES: When set, flush any/all other queues in the 1631 * driver, in addition to the regular wait queue. This includes the xbuf 1632 * queues. When clear, only the driver's wait queue will be flushed. 1633 */ 1634 #define SD_FAILFAST_FLUSH_ALL_BUFS 0x01 1635 #define SD_FAILFAST_FLUSH_ALL_QUEUES 0x02 1636 1637 /* 1638 * The default behavior is to only flush bufs that have B_FAILFAST set, but 1639 * to flush all queues within the driver. 1640 */ 1641 static int sd_failfast_flushctl = SD_FAILFAST_FLUSH_ALL_QUEUES; 1642 1643 1644 /* 1645 * SD Testing Fault Injection 1646 */ 1647 #ifdef SD_FAULT_INJECTION 1648 static void sd_faultinjection_ioctl(int cmd, intptr_t arg, struct sd_lun *un); 1649 static void sd_faultinjection(struct scsi_pkt *pktp); 1650 static void sd_injection_log(char *buf, struct sd_lun *un); 1651 #endif 1652 1653 /* 1654 * Device driver ops vector 1655 */ 1656 static struct cb_ops sd_cb_ops = { 1657 sdopen, /* open */ 1658 sdclose, /* close */ 1659 sdstrategy, /* strategy */ 1660 nodev, /* print */ 1661 sddump, /* dump */ 1662 sdread, /* read */ 1663 sdwrite, /* write */ 1664 sdioctl, /* ioctl */ 1665 nodev, /* devmap */ 1666 nodev, /* mmap */ 1667 nodev, /* segmap */ 1668 nochpoll, /* poll */ 1669 sd_prop_op, /* cb_prop_op */ 1670 0, /* streamtab */ 1671 D_64BIT | D_MP | D_NEW | D_HOTPLUG, /* Driver compatibility flags */ 1672 CB_REV, /* cb_rev */ 1673 sdaread, /* async I/O read entry point */ 1674 sdawrite /* async I/O write entry point */ 1675 }; 1676 1677 static struct dev_ops sd_ops = { 1678 DEVO_REV, /* devo_rev, */ 1679 0, /* refcnt */ 1680 sdinfo, /* info */ 1681 nulldev, /* identify */ 1682 sdprobe, /* probe */ 1683 sdattach, /* attach */ 1684 sddetach, /* detach */ 1685 nodev, /* reset */ 1686 &sd_cb_ops, /* driver operations */ 1687 NULL, /* bus operations */ 1688 sdpower /* power */ 1689 }; 1690 1691 1692 /* 1693 * This is the loadable module wrapper. 1694 */ 1695 #include <sys/modctl.h> 1696 1697 static struct modldrv modldrv = { 1698 &mod_driverops, /* Type of module. 
This one is a driver */
	SD_MODULE_NAME,		/* Module name. */
	&sd_ops			/* driver ops */
};

static struct modlinkage modlinkage = {
	MODREV_1,
	&modldrv,
	NULL
};

static cmlb_tg_ops_t sd_tgops = {
	TG_DK_OPS_VERSION_1,
	sd_tg_rdwr,
	sd_tg_getinfo
};

static struct scsi_asq_key_strings sd_additional_codes[] = {
	0x81, 0, "Logical Unit is Reserved",
	0x85, 0, "Audio Address Not Valid",
	0xb6, 0, "Media Load Mechanism Failed",
	0xB9, 0, "Audio Play Operation Aborted",
	0xbf, 0, "Buffer Overflow for Read All Subcodes Command",
	0x53, 2, "Medium removal prevented",
	0x6f, 0, "Authentication failed during key exchange",
	0x6f, 1, "Key not present",
	0x6f, 2, "Key not established",
	0x6f, 3, "Read without proper authentication",
	0x6f, 4, "Mismatched region to this logical unit",
	0x6f, 5, "Region reset count error",
	0xffff, 0x0, NULL
};


/*
 * Struct for passing printing information for sense data messages
 */
struct sd_sense_info {
	int	ssi_severity;
	int	ssi_pfa_flag;
};

/*
 * Table of function pointers for iostart-side routines. Separate "chains"
 * of layered function calls are formed by placing the function pointers
 * sequentially in the desired order. Functions are called according to an
 * incrementing table index ordering. The last function in each chain must
 * be sd_core_iostart(). The corresponding iodone-side routines are expected
 * in the sd_iodone_chain[] array.
 *
 * Note: It may seem more natural to organize both the iostart and iodone
 * functions together, into an array of structures (or some similar
 * organization) with a common index, rather than two separate arrays which
 * must be maintained in synchronization. The purpose of this division is
 * to achieve improved performance: individual arrays allow for more
 * effective cache-line utilization on certain platforms.
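 *
 * For illustration only (a sketch based on the description above, not
 * code copied from a call site): an IO entering the disk chain
 * (PM enabled) at index 0 is handed down the chain in increasing index
 * order, each layer invoking its successor via the SD_NEXT_IOSTART()
 * macro defined later in this file:
 *
 *	sd_mapblockaddr_iostart(0, un, bp);	(maps the partition offset)
 *	sd_pm_iostart(1, un, bp);		(power management)
 *	sd_core_iostart(2, un, bp);		(hands the pkt to the transport)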
1755 */ 1756 1757 typedef void (*sd_chain_t)(int index, struct sd_lun *un, struct buf *bp); 1758 1759 1760 static sd_chain_t sd_iostart_chain[] = { 1761 1762 /* Chain for buf IO for disk drive targets (PM enabled) */ 1763 sd_mapblockaddr_iostart, /* Index: 0 */ 1764 sd_pm_iostart, /* Index: 1 */ 1765 sd_core_iostart, /* Index: 2 */ 1766 1767 /* Chain for buf IO for disk drive targets (PM disabled) */ 1768 sd_mapblockaddr_iostart, /* Index: 3 */ 1769 sd_core_iostart, /* Index: 4 */ 1770 1771 /* Chain for buf IO for removable-media targets (PM enabled) */ 1772 sd_mapblockaddr_iostart, /* Index: 5 */ 1773 sd_mapblocksize_iostart, /* Index: 6 */ 1774 sd_pm_iostart, /* Index: 7 */ 1775 sd_core_iostart, /* Index: 8 */ 1776 1777 /* Chain for buf IO for removable-media targets (PM disabled) */ 1778 sd_mapblockaddr_iostart, /* Index: 9 */ 1779 sd_mapblocksize_iostart, /* Index: 10 */ 1780 sd_core_iostart, /* Index: 11 */ 1781 1782 /* Chain for buf IO for disk drives with checksumming (PM enabled) */ 1783 sd_mapblockaddr_iostart, /* Index: 12 */ 1784 sd_checksum_iostart, /* Index: 13 */ 1785 sd_pm_iostart, /* Index: 14 */ 1786 sd_core_iostart, /* Index: 15 */ 1787 1788 /* Chain for buf IO for disk drives with checksumming (PM disabled) */ 1789 sd_mapblockaddr_iostart, /* Index: 16 */ 1790 sd_checksum_iostart, /* Index: 17 */ 1791 sd_core_iostart, /* Index: 18 */ 1792 1793 /* Chain for USCSI commands (all targets) */ 1794 sd_pm_iostart, /* Index: 19 */ 1795 sd_core_iostart, /* Index: 20 */ 1796 1797 /* Chain for checksumming USCSI commands (all targets) */ 1798 sd_checksum_uscsi_iostart, /* Index: 21 */ 1799 sd_pm_iostart, /* Index: 22 */ 1800 sd_core_iostart, /* Index: 23 */ 1801 1802 /* Chain for "direct" USCSI commands (all targets) */ 1803 sd_core_iostart, /* Index: 24 */ 1804 1805 /* Chain for "direct priority" USCSI commands (all targets) */ 1806 sd_core_iostart, /* Index: 25 */ 1807 }; 1808 1809 /* 1810 * Macros to locate the first function of each iostart chain in the 1811 * sd_iostart_chain[] array. These are located by the index in the array. 1812 */ 1813 #define SD_CHAIN_DISK_IOSTART 0 1814 #define SD_CHAIN_DISK_IOSTART_NO_PM 3 1815 #define SD_CHAIN_RMMEDIA_IOSTART 5 1816 #define SD_CHAIN_RMMEDIA_IOSTART_NO_PM 9 1817 #define SD_CHAIN_CHKSUM_IOSTART 12 1818 #define SD_CHAIN_CHKSUM_IOSTART_NO_PM 16 1819 #define SD_CHAIN_USCSI_CMD_IOSTART 19 1820 #define SD_CHAIN_USCSI_CHKSUM_IOSTART 21 1821 #define SD_CHAIN_DIRECT_CMD_IOSTART 24 1822 #define SD_CHAIN_PRIORITY_CMD_IOSTART 25 1823 1824 1825 /* 1826 * Table of function pointers for the iodone-side routines for the driver- 1827 * internal layering mechanism. The calling sequence for iodone routines 1828 * uses a decrementing table index, so the last routine called in a chain 1829 * must be at the lowest array index location for that chain. The last 1830 * routine for each chain must be either sd_buf_iodone() (for buf(9S) IOs) 1831 * or sd_uscsi_iodone() (for uscsi IOs). Other than this, the ordering 1832 * of the functions in an iodone side chain must correspond to the ordering 1833 * of the iostart routines for that chain. Note that there is no iodone 1834 * side routine that corresponds to sd_core_iostart(), so there is no 1835 * entry in the table for this. 
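 *
 * For illustration only: completion processing for the same disk chain
 * (PM enabled) unwinds in decreasing index order via SD_NEXT_IODONE(),
 * starting at SD_CHAIN_DISK_IODONE (index 2, defined below):
 *
 *	sd_pm_iodone(2, un, bp);
 *	sd_mapblockaddr_iodone(1, un, bp);
 *	sd_buf_iodone(0, un, bp);		(completes the buf(9S) IO)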
*/

static sd_chain_t sd_iodone_chain[] = {

	/* Chain for buf IO for disk drive targets (PM enabled) */
	sd_buf_iodone,			/* Index: 0 */
	sd_mapblockaddr_iodone,		/* Index: 1 */
	sd_pm_iodone,			/* Index: 2 */

	/* Chain for buf IO for disk drive targets (PM disabled) */
	sd_buf_iodone,			/* Index: 3 */
	sd_mapblockaddr_iodone,		/* Index: 4 */

	/* Chain for buf IO for removable-media targets (PM enabled) */
	sd_buf_iodone,			/* Index: 5 */
	sd_mapblockaddr_iodone,		/* Index: 6 */
	sd_mapblocksize_iodone,		/* Index: 7 */
	sd_pm_iodone,			/* Index: 8 */

	/* Chain for buf IO for removable-media targets (PM disabled) */
	sd_buf_iodone,			/* Index: 9 */
	sd_mapblockaddr_iodone,		/* Index: 10 */
	sd_mapblocksize_iodone,		/* Index: 11 */

	/* Chain for buf IO for disk drives with checksumming (PM enabled) */
	sd_buf_iodone,			/* Index: 12 */
	sd_mapblockaddr_iodone,		/* Index: 13 */
	sd_checksum_iodone,		/* Index: 14 */
	sd_pm_iodone,			/* Index: 15 */

	/* Chain for buf IO for disk drives with checksumming (PM disabled) */
	sd_buf_iodone,			/* Index: 16 */
	sd_mapblockaddr_iodone,		/* Index: 17 */
	sd_checksum_iodone,		/* Index: 18 */

	/* Chain for USCSI commands (non-checksum targets) */
	sd_uscsi_iodone,		/* Index: 19 */
	sd_pm_iodone,			/* Index: 20 */

	/* Chain for USCSI commands (checksum targets) */
	sd_uscsi_iodone,		/* Index: 21 */
	sd_checksum_uscsi_iodone,	/* Index: 22 */
	sd_pm_iodone,			/* Index: 23 */

	/* Chain for "direct" USCSI commands (all targets) */
	sd_uscsi_iodone,		/* Index: 24 */

	/* Chain for "direct priority" USCSI commands (all targets) */
	sd_uscsi_iodone,		/* Index: 25 */
};


/*
 * Macros to locate the "first" function in the sd_iodone_chain[] array for
 * each iodone-side chain. These are located by the array index, but as the
 * iodone side functions are called in a decrementing-index order, the
 * highest index number in each chain must be specified (as these correspond
 * to the first function in the iodone chain that will be called by the core
 * at IO completion time).
 */

#define	SD_CHAIN_DISK_IODONE		2
#define	SD_CHAIN_DISK_IODONE_NO_PM	4
#define	SD_CHAIN_RMMEDIA_IODONE		8
#define	SD_CHAIN_RMMEDIA_IODONE_NO_PM	11
#define	SD_CHAIN_CHKSUM_IODONE		15
#define	SD_CHAIN_CHKSUM_IODONE_NO_PM	18
#define	SD_CHAIN_USCSI_CMD_IODONE	20
#define	SD_CHAIN_USCSI_CHKSUM_IODONE	23
#define	SD_CHAIN_DIRECT_CMD_IODONE	24
#define	SD_CHAIN_PRIORITY_CMD_IODONE	25




/*
 * Array to map a layering chain index to the appropriate initpkt routine.
 * The redundant entries are present so that the index used for accessing
 * the above sd_iostart_chain and sd_iodone_chain tables can be used directly
 * with this table as well.
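 *
 * Sketch of the intended use (an assumed call-site shape, for
 * illustration only; the actual caller lives elsewhere in this file):
 *
 *	if (sd_initpkt_map[xp->xb_chain_iostart](bp, &pktp) !=
 *	    SD_PKT_ALLOC_SUCCESS) {
 *		(defer or fail the request)
 *	}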
*/
typedef int (*sd_initpkt_t)(struct buf *, struct scsi_pkt **);

static sd_initpkt_t	sd_initpkt_map[] = {

	/* Chain for buf IO for disk drive targets (PM enabled) */
	sd_initpkt_for_buf,		/* Index: 0 */
	sd_initpkt_for_buf,		/* Index: 1 */
	sd_initpkt_for_buf,		/* Index: 2 */

	/* Chain for buf IO for disk drive targets (PM disabled) */
	sd_initpkt_for_buf,		/* Index: 3 */
	sd_initpkt_for_buf,		/* Index: 4 */

	/* Chain for buf IO for removable-media targets (PM enabled) */
	sd_initpkt_for_buf,		/* Index: 5 */
	sd_initpkt_for_buf,		/* Index: 6 */
	sd_initpkt_for_buf,		/* Index: 7 */
	sd_initpkt_for_buf,		/* Index: 8 */

	/* Chain for buf IO for removable-media targets (PM disabled) */
	sd_initpkt_for_buf,		/* Index: 9 */
	sd_initpkt_for_buf,		/* Index: 10 */
	sd_initpkt_for_buf,		/* Index: 11 */

	/* Chain for buf IO for disk drives with checksumming (PM enabled) */
	sd_initpkt_for_buf,		/* Index: 12 */
	sd_initpkt_for_buf,		/* Index: 13 */
	sd_initpkt_for_buf,		/* Index: 14 */
	sd_initpkt_for_buf,		/* Index: 15 */

	/* Chain for buf IO for disk drives with checksumming (PM disabled) */
	sd_initpkt_for_buf,		/* Index: 16 */
	sd_initpkt_for_buf,		/* Index: 17 */
	sd_initpkt_for_buf,		/* Index: 18 */

	/* Chain for USCSI commands (non-checksum targets) */
	sd_initpkt_for_uscsi,		/* Index: 19 */
	sd_initpkt_for_uscsi,		/* Index: 20 */

	/* Chain for USCSI commands (checksum targets) */
	sd_initpkt_for_uscsi,		/* Index: 21 */
	sd_initpkt_for_uscsi,		/* Index: 22 */
	sd_initpkt_for_uscsi,		/* Index: 23 */

	/* Chain for "direct" USCSI commands (all targets) */
	sd_initpkt_for_uscsi,		/* Index: 24 */

	/* Chain for "direct priority" USCSI commands (all targets) */
	sd_initpkt_for_uscsi,		/* Index: 25 */

};


/*
 * Array to map a layering chain index to the appropriate destroypkt routine.
 * The redundant entries are present so that the index used for accessing
 * the above sd_iostart_chain and sd_iodone_chain tables can be used directly
 * with this table as well.
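 *
 * Sketch (an assumed call-site shape, for illustration only): the
 * completed command's packet resources are released by indexing with
 * the xbuf's iodone chain index:
 *
 *	sd_destroypkt_map[xp->xb_chain_iodone](bp);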
*/
typedef void (*sd_destroypkt_t)(struct buf *);

static sd_destroypkt_t	sd_destroypkt_map[] = {

	/* Chain for buf IO for disk drive targets (PM enabled) */
	sd_destroypkt_for_buf,		/* Index: 0 */
	sd_destroypkt_for_buf,		/* Index: 1 */
	sd_destroypkt_for_buf,		/* Index: 2 */

	/* Chain for buf IO for disk drive targets (PM disabled) */
	sd_destroypkt_for_buf,		/* Index: 3 */
	sd_destroypkt_for_buf,		/* Index: 4 */

	/* Chain for buf IO for removable-media targets (PM enabled) */
	sd_destroypkt_for_buf,		/* Index: 5 */
	sd_destroypkt_for_buf,		/* Index: 6 */
	sd_destroypkt_for_buf,		/* Index: 7 */
	sd_destroypkt_for_buf,		/* Index: 8 */

	/* Chain for buf IO for removable-media targets (PM disabled) */
	sd_destroypkt_for_buf,		/* Index: 9 */
	sd_destroypkt_for_buf,		/* Index: 10 */
	sd_destroypkt_for_buf,		/* Index: 11 */

	/* Chain for buf IO for disk drives with checksumming (PM enabled) */
	sd_destroypkt_for_buf,		/* Index: 12 */
	sd_destroypkt_for_buf,		/* Index: 13 */
	sd_destroypkt_for_buf,		/* Index: 14 */
	sd_destroypkt_for_buf,		/* Index: 15 */

	/* Chain for buf IO for disk drives with checksumming (PM disabled) */
	sd_destroypkt_for_buf,		/* Index: 16 */
	sd_destroypkt_for_buf,		/* Index: 17 */
	sd_destroypkt_for_buf,		/* Index: 18 */

	/* Chain for USCSI commands (non-checksum targets) */
	sd_destroypkt_for_uscsi,	/* Index: 19 */
	sd_destroypkt_for_uscsi,	/* Index: 20 */

	/* Chain for USCSI commands (checksum targets) */
	sd_destroypkt_for_uscsi,	/* Index: 21 */
	sd_destroypkt_for_uscsi,	/* Index: 22 */
	sd_destroypkt_for_uscsi,	/* Index: 23 */

	/* Chain for "direct" USCSI commands (all targets) */
	sd_destroypkt_for_uscsi,	/* Index: 24 */

	/* Chain for "direct priority" USCSI commands (all targets) */
	sd_destroypkt_for_uscsi,	/* Index: 25 */

};



/*
 * Array to map a layering chain index to the appropriate chain "type".
 * The chain type indicates a specific property/usage of the chain.
 * The redundant entries are present so that the index used for accessing
 * the above sd_iostart_chain and sd_iodone_chain tables can be used directly
 * with this table as well.
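 *
 * Sketch (for illustration): the chain type is normally consulted via
 * the SD_IS_BUFIO()/SD_IS_DIRECT_PRIORITY() macros defined below, e.g.:
 *
 *	if (SD_IS_BUFIO(xp)) {
 *		(buf(9S)-specific completion handling)
 *	}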
*/

#define	SD_CHAIN_NULL			0	/* for the special RQS cmd */
#define	SD_CHAIN_BUFIO			1	/* regular buf IO */
#define	SD_CHAIN_USCSI			2	/* regular USCSI commands */
#define	SD_CHAIN_DIRECT			3	/* uscsi, w/ bypass power mgt */
#define	SD_CHAIN_DIRECT_PRIORITY	4	/* uscsi, w/ bypass power mgt */
						/* (for error recovery) */

static int sd_chain_type_map[] = {

	/* Chain for buf IO for disk drive targets (PM enabled) */
	SD_CHAIN_BUFIO,			/* Index: 0 */
	SD_CHAIN_BUFIO,			/* Index: 1 */
	SD_CHAIN_BUFIO,			/* Index: 2 */

	/* Chain for buf IO for disk drive targets (PM disabled) */
	SD_CHAIN_BUFIO,			/* Index: 3 */
	SD_CHAIN_BUFIO,			/* Index: 4 */

	/* Chain for buf IO for removable-media targets (PM enabled) */
	SD_CHAIN_BUFIO,			/* Index: 5 */
	SD_CHAIN_BUFIO,			/* Index: 6 */
	SD_CHAIN_BUFIO,			/* Index: 7 */
	SD_CHAIN_BUFIO,			/* Index: 8 */

	/* Chain for buf IO for removable-media targets (PM disabled) */
	SD_CHAIN_BUFIO,			/* Index: 9 */
	SD_CHAIN_BUFIO,			/* Index: 10 */
	SD_CHAIN_BUFIO,			/* Index: 11 */

	/* Chain for buf IO for disk drives with checksumming (PM enabled) */
	SD_CHAIN_BUFIO,			/* Index: 12 */
	SD_CHAIN_BUFIO,			/* Index: 13 */
	SD_CHAIN_BUFIO,			/* Index: 14 */
	SD_CHAIN_BUFIO,			/* Index: 15 */

	/* Chain for buf IO for disk drives with checksumming (PM disabled) */
	SD_CHAIN_BUFIO,			/* Index: 16 */
	SD_CHAIN_BUFIO,			/* Index: 17 */
	SD_CHAIN_BUFIO,			/* Index: 18 */

	/* Chain for USCSI commands (non-checksum targets) */
	SD_CHAIN_USCSI,			/* Index: 19 */
	SD_CHAIN_USCSI,			/* Index: 20 */

	/* Chain for USCSI commands (checksum targets) */
	SD_CHAIN_USCSI,			/* Index: 21 */
	SD_CHAIN_USCSI,			/* Index: 22 */
	SD_CHAIN_USCSI,			/* Index: 23 */

	/* Chain for "direct" USCSI commands (all targets) */
	SD_CHAIN_DIRECT,		/* Index: 24 */

	/* Chain for "direct priority" USCSI commands (all targets) */
	SD_CHAIN_DIRECT_PRIORITY,	/* Index: 25 */
};


/* Macro to return TRUE if the IO has come from the sd_buf_iostart() chain. */
#define	SD_IS_BUFIO(xp)			\
	(sd_chain_type_map[(xp)->xb_chain_iostart] == SD_CHAIN_BUFIO)

/* Macro to return TRUE if the IO has come from the "direct priority" chain. */
#define	SD_IS_DIRECT_PRIORITY(xp)	\
	(sd_chain_type_map[(xp)->xb_chain_iostart] == SD_CHAIN_DIRECT_PRIORITY)



/*
 * Struct, array, and macros to map a specific chain to the appropriate
 * layering indexes in the sd_iostart_chain[] and sd_iodone_chain[] arrays.
 *
 * The sd_chain_index_map[] array is used at attach time to set the various
 * un_xxx_chain type members of the sd_lun softstate to the specific layering
 * chain to be used with the instance. This allows different instances to use
 * different chains for buf IO, uscsi IO, etc. Also, since the xb_chain_iostart
 * and xb_chain_iodone index values in the sd_xbuf are initialized to these
 * values at sd_xbuf init time, this allows (1) layering chains to be changed
 * dynamically and without locking, and (2) a layer to update the
 * xb_chain_io[start|done] member in a given xbuf with its current index value,
 * to allow for deferred processing of an IO within the same chain from a
 * different execution context.
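 *
 * Sketch of that flow (an assumed shape, for illustration only; the
 * actual code lives in the attach and xbuf initialization paths):
 *
 *	un->un_buf_chain_type = SD_CHAIN_INFO_DISK;	(at attach time)
 *	...
 *	index = un->un_buf_chain_type;			(at xbuf init time)
 *	xp->xb_chain_iostart = sd_chain_index_map[index].sci_iostart_index;
 *	xp->xb_chain_iodone  = sd_chain_index_map[index].sci_iodone_index;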
2119 */ 2120 2121 struct sd_chain_index { 2122 int sci_iostart_index; 2123 int sci_iodone_index; 2124 }; 2125 2126 static struct sd_chain_index sd_chain_index_map[] = { 2127 { SD_CHAIN_DISK_IOSTART, SD_CHAIN_DISK_IODONE }, 2128 { SD_CHAIN_DISK_IOSTART_NO_PM, SD_CHAIN_DISK_IODONE_NO_PM }, 2129 { SD_CHAIN_RMMEDIA_IOSTART, SD_CHAIN_RMMEDIA_IODONE }, 2130 { SD_CHAIN_RMMEDIA_IOSTART_NO_PM, SD_CHAIN_RMMEDIA_IODONE_NO_PM }, 2131 { SD_CHAIN_CHKSUM_IOSTART, SD_CHAIN_CHKSUM_IODONE }, 2132 { SD_CHAIN_CHKSUM_IOSTART_NO_PM, SD_CHAIN_CHKSUM_IODONE_NO_PM }, 2133 { SD_CHAIN_USCSI_CMD_IOSTART, SD_CHAIN_USCSI_CMD_IODONE }, 2134 { SD_CHAIN_USCSI_CHKSUM_IOSTART, SD_CHAIN_USCSI_CHKSUM_IODONE }, 2135 { SD_CHAIN_DIRECT_CMD_IOSTART, SD_CHAIN_DIRECT_CMD_IODONE }, 2136 { SD_CHAIN_PRIORITY_CMD_IOSTART, SD_CHAIN_PRIORITY_CMD_IODONE }, 2137 }; 2138 2139 2140 /* 2141 * The following are indexes into the sd_chain_index_map[] array. 2142 */ 2143 2144 /* un->un_buf_chain_type must be set to one of these */ 2145 #define SD_CHAIN_INFO_DISK 0 2146 #define SD_CHAIN_INFO_DISK_NO_PM 1 2147 #define SD_CHAIN_INFO_RMMEDIA 2 2148 #define SD_CHAIN_INFO_RMMEDIA_NO_PM 3 2149 #define SD_CHAIN_INFO_CHKSUM 4 2150 #define SD_CHAIN_INFO_CHKSUM_NO_PM 5 2151 2152 /* un->un_uscsi_chain_type must be set to one of these */ 2153 #define SD_CHAIN_INFO_USCSI_CMD 6 2154 /* USCSI with PM disabled is the same as DIRECT */ 2155 #define SD_CHAIN_INFO_USCSI_CMD_NO_PM 8 2156 #define SD_CHAIN_INFO_USCSI_CHKSUM 7 2157 2158 /* un->un_direct_chain_type must be set to one of these */ 2159 #define SD_CHAIN_INFO_DIRECT_CMD 8 2160 2161 /* un->un_priority_chain_type must be set to one of these */ 2162 #define SD_CHAIN_INFO_PRIORITY_CMD 9 2163 2164 /* size for devid inquiries */ 2165 #define MAX_INQUIRY_SIZE 0xF0 2166 2167 /* 2168 * Macros used by functions to pass a given buf(9S) struct along to the 2169 * next function in the layering chain for further processing. 2170 * 2171 * In the following macros, passing more than three arguments to the called 2172 * routines causes the optimizer for the SPARC compiler to stop doing tail 2173 * call elimination which results in significant performance degradation. 2174 */ 2175 #define SD_BEGIN_IOSTART(index, un, bp) \ 2176 ((*(sd_iostart_chain[index]))(index, un, bp)) 2177 2178 #define SD_BEGIN_IODONE(index, un, bp) \ 2179 ((*(sd_iodone_chain[index]))(index, un, bp)) 2180 2181 #define SD_NEXT_IOSTART(index, un, bp) \ 2182 ((*(sd_iostart_chain[(index) + 1]))((index) + 1, un, bp)) 2183 2184 #define SD_NEXT_IODONE(index, un, bp) \ 2185 ((*(sd_iodone_chain[(index) - 1]))((index) - 1, un, bp)) 2186 2187 /* 2188 * Function: _init 2189 * 2190 * Description: This is the driver _init(9E) entry point. 2191 * 2192 * Return Code: Returns the value from mod_install(9F) or 2193 * ddi_soft_state_init(9F) as appropriate. 2194 * 2195 * Context: Called when driver module loaded. 
2196 */ 2197 2198 int 2199 _init(void) 2200 { 2201 int err; 2202 2203 /* establish driver name from module name */ 2204 sd_label = (char *)mod_modname(&modlinkage); 2205 2206 err = ddi_soft_state_init(&sd_state, sizeof (struct sd_lun), 2207 SD_MAXUNIT); 2208 2209 if (err != 0) { 2210 return (err); 2211 } 2212 2213 mutex_init(&sd_detach_mutex, NULL, MUTEX_DRIVER, NULL); 2214 mutex_init(&sd_log_mutex, NULL, MUTEX_DRIVER, NULL); 2215 mutex_init(&sd_label_mutex, NULL, MUTEX_DRIVER, NULL); 2216 2217 mutex_init(&sd_tr.srq_resv_reclaim_mutex, NULL, MUTEX_DRIVER, NULL); 2218 cv_init(&sd_tr.srq_resv_reclaim_cv, NULL, CV_DRIVER, NULL); 2219 cv_init(&sd_tr.srq_inprocess_cv, NULL, CV_DRIVER, NULL); 2220 2221 /* 2222 * it's ok to init here even for fibre device 2223 */ 2224 sd_scsi_probe_cache_init(); 2225 2226 sd_scsi_target_lun_init(); 2227 2228 /* 2229 * Creating taskq before mod_install ensures that all callers (threads) 2230 * that enter the module after a successful mod_install encounter 2231 * a valid taskq. 2232 */ 2233 sd_taskq_create(); 2234 2235 err = mod_install(&modlinkage); 2236 if (err != 0) { 2237 /* delete taskq if install fails */ 2238 sd_taskq_delete(); 2239 2240 mutex_destroy(&sd_detach_mutex); 2241 mutex_destroy(&sd_log_mutex); 2242 mutex_destroy(&sd_label_mutex); 2243 2244 mutex_destroy(&sd_tr.srq_resv_reclaim_mutex); 2245 cv_destroy(&sd_tr.srq_resv_reclaim_cv); 2246 cv_destroy(&sd_tr.srq_inprocess_cv); 2247 2248 sd_scsi_probe_cache_fini(); 2249 2250 sd_scsi_target_lun_fini(); 2251 2252 ddi_soft_state_fini(&sd_state); 2253 return (err); 2254 } 2255 2256 return (err); 2257 } 2258 2259 2260 /* 2261 * Function: _fini 2262 * 2263 * Description: This is the driver _fini(9E) entry point. 2264 * 2265 * Return Code: Returns the value from mod_remove(9F) 2266 * 2267 * Context: Called when driver module is unloaded. 2268 */ 2269 2270 int 2271 _fini(void) 2272 { 2273 int err; 2274 2275 if ((err = mod_remove(&modlinkage)) != 0) { 2276 return (err); 2277 } 2278 2279 sd_taskq_delete(); 2280 2281 mutex_destroy(&sd_detach_mutex); 2282 mutex_destroy(&sd_log_mutex); 2283 mutex_destroy(&sd_label_mutex); 2284 mutex_destroy(&sd_tr.srq_resv_reclaim_mutex); 2285 2286 sd_scsi_probe_cache_fini(); 2287 2288 sd_scsi_target_lun_fini(); 2289 2290 cv_destroy(&sd_tr.srq_resv_reclaim_cv); 2291 cv_destroy(&sd_tr.srq_inprocess_cv); 2292 2293 ddi_soft_state_fini(&sd_state); 2294 2295 return (err); 2296 } 2297 2298 2299 /* 2300 * Function: _info 2301 * 2302 * Description: This is the driver _info(9E) entry point. 2303 * 2304 * Arguments: modinfop - pointer to the driver modinfo structure 2305 * 2306 * Return Code: Returns the value from mod_info(9F). 2307 * 2308 * Context: Kernel thread context 2309 */ 2310 2311 int 2312 _info(struct modinfo *modinfop) 2313 { 2314 return (mod_info(&modlinkage, modinfop)); 2315 } 2316 2317 2318 /* 2319 * The following routines implement the driver message logging facility. 2320 * They provide component- and level- based debug output filtering. 2321 * Output may also be restricted to messages for a single instance by 2322 * specifying a soft state pointer in sd_debug_un. If sd_debug_un is set 2323 * to NULL, then messages for all instances are printed. 2324 * 2325 * These routines have been cloned from each other due to the language 2326 * constraints of macros and variable argument list processing. 2327 */ 2328 2329 2330 /* 2331 * Function: sd_log_err 2332 * 2333 * Description: This routine is called by the SD_ERROR macro for debug 2334 * logging of error conditions. 
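 *		A typical invocation via the macro looks like this
 *		(illustrative only; "sd_foo" is a made-up caller):
 *
 *		SD_ERROR(SD_LOG_COMMON, un, "sd_foo: cmd failed (%d)\n", rv);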
 *
 * Arguments: comp - driver component being logged
 *		un   - pointer to driver soft state (unit) structure
 *		fmt  - error string and format to be logged
 */

static void
sd_log_err(uint_t comp, struct sd_lun *un, const char *fmt, ...)
{
	va_list		ap;
	dev_info_t	*dev;

	ASSERT(un != NULL);
	dev = SD_DEVINFO(un);
	ASSERT(dev != NULL);

	/*
	 * Filter messages based on the global component and level masks.
	 * Also print if un matches the value of sd_debug_un, or if
	 * sd_debug_un is set to NULL.
	 */
	if ((sd_component_mask & comp) && (sd_level_mask & SD_LOGMASK_ERROR) &&
	    ((sd_debug_un == NULL) || (sd_debug_un == un))) {
		mutex_enter(&sd_log_mutex);
		va_start(ap, fmt);
		(void) vsprintf(sd_log_buf, fmt, ap);
		va_end(ap);
		scsi_log(dev, sd_label, CE_CONT, "%s", sd_log_buf);
		mutex_exit(&sd_log_mutex);
	}
#ifdef SD_FAULT_INJECTION
	_NOTE(DATA_READABLE_WITHOUT_LOCK(sd_lun::sd_injection_mask));
	if (un->sd_injection_mask & comp) {
		mutex_enter(&sd_log_mutex);
		va_start(ap, fmt);
		(void) vsprintf(sd_log_buf, fmt, ap);
		va_end(ap);
		sd_injection_log(sd_log_buf, un);
		mutex_exit(&sd_log_mutex);
	}
#endif
}


/*
 * Function: sd_log_info
 *
 * Description: This routine is called by the SD_INFO macro for debug
 *		logging of general purpose informational conditions.
 *
 * Arguments: component - driver component being logged
 *		un   - pointer to driver soft state (unit) structure
 *		fmt  - info string and format to be logged
 */

static void
sd_log_info(uint_t component, struct sd_lun *un, const char *fmt, ...)
{
	va_list		ap;
	dev_info_t	*dev;

	ASSERT(un != NULL);
	dev = SD_DEVINFO(un);
	ASSERT(dev != NULL);

	/*
	 * Filter messages based on the global component and level masks.
	 * Also print if un matches the value of sd_debug_un, or if
	 * sd_debug_un is set to NULL.
	 */
	if ((sd_component_mask & component) &&
	    (sd_level_mask & SD_LOGMASK_INFO) &&
	    ((sd_debug_un == NULL) || (sd_debug_un == un))) {
		mutex_enter(&sd_log_mutex);
		va_start(ap, fmt);
		(void) vsprintf(sd_log_buf, fmt, ap);
		va_end(ap);
		scsi_log(dev, sd_label, CE_CONT, "%s", sd_log_buf);
		mutex_exit(&sd_log_mutex);
	}
#ifdef SD_FAULT_INJECTION
	_NOTE(DATA_READABLE_WITHOUT_LOCK(sd_lun::sd_injection_mask));
	if (un->sd_injection_mask & component) {
		mutex_enter(&sd_log_mutex);
		va_start(ap, fmt);
		(void) vsprintf(sd_log_buf, fmt, ap);
		va_end(ap);
		sd_injection_log(sd_log_buf, un);
		mutex_exit(&sd_log_mutex);
	}
#endif
}


/*
 * Function: sd_log_trace
 *
 * Description: This routine is called by the SD_TRACE macro for debug
 *		logging of trace conditions (i.e. function entry/exit).
 *
 * Arguments: component - driver component being logged
 *		un   - pointer to driver soft state (unit) structure
 *		fmt  - trace string and format to be logged
 */

static void
sd_log_trace(uint_t component, struct sd_lun *un, const char *fmt, ...)
{
	va_list		ap;
	dev_info_t	*dev;

	ASSERT(un != NULL);
	dev = SD_DEVINFO(un);
	ASSERT(dev != NULL);

	/*
	 * Filter messages based on the global component and level masks.
	 * Also print if un matches the value of sd_debug_un, or if
	 * sd_debug_un is set to NULL.
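	 *
	 * For example (illustrative only), full tracing for a single
	 * instance could be enabled by patching these globals at runtime
	 * with mdb(1):
	 *
	 *	sd_component_mask = 0xffffffff;
	 *	sd_level_mask = SD_LOGMASK_ERROR | SD_LOGMASK_INFO |
	 *	    SD_LOGMASK_TRACE;
	 *	sd_debug_un = <soft state pointer of the instance of interest>;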
2454 */ 2455 if ((sd_component_mask & component) && 2456 (sd_level_mask & SD_LOGMASK_TRACE) && 2457 ((sd_debug_un == NULL) || (sd_debug_un == un))) { 2458 mutex_enter(&sd_log_mutex); 2459 va_start(ap, fmt); 2460 (void) vsprintf(sd_log_buf, fmt, ap); 2461 va_end(ap); 2462 scsi_log(dev, sd_label, CE_CONT, "%s", sd_log_buf); 2463 mutex_exit(&sd_log_mutex); 2464 } 2465 #ifdef SD_FAULT_INJECTION 2466 _NOTE(DATA_READABLE_WITHOUT_LOCK(sd_lun::sd_injection_mask)); 2467 if (un->sd_injection_mask & component) { 2468 mutex_enter(&sd_log_mutex); 2469 va_start(ap, fmt); 2470 (void) vsprintf(sd_log_buf, fmt, ap); 2471 va_end(ap); 2472 sd_injection_log(sd_log_buf, un); 2473 mutex_exit(&sd_log_mutex); 2474 } 2475 #endif 2476 } 2477 2478 2479 /* 2480 * Function: sdprobe 2481 * 2482 * Description: This is the driver probe(9e) entry point function. 2483 * 2484 * Arguments: devi - opaque device info handle 2485 * 2486 * Return Code: DDI_PROBE_SUCCESS: If the probe was successful. 2487 * DDI_PROBE_FAILURE: If the probe failed. 2488 * DDI_PROBE_PARTIAL: If the instance is not present now, 2489 * but may be present in the future. 2490 */ 2491 2492 static int 2493 sdprobe(dev_info_t *devi) 2494 { 2495 struct scsi_device *devp; 2496 int rval; 2497 int instance; 2498 2499 /* 2500 * if it wasn't for pln, sdprobe could actually be nulldev 2501 * in the "__fibre" case. 2502 */ 2503 if (ddi_dev_is_sid(devi) == DDI_SUCCESS) { 2504 return (DDI_PROBE_DONTCARE); 2505 } 2506 2507 devp = ddi_get_driver_private(devi); 2508 2509 if (devp == NULL) { 2510 /* Ooops... nexus driver is mis-configured... */ 2511 return (DDI_PROBE_FAILURE); 2512 } 2513 2514 instance = ddi_get_instance(devi); 2515 2516 if (ddi_get_soft_state(sd_state, instance) != NULL) { 2517 return (DDI_PROBE_PARTIAL); 2518 } 2519 2520 /* 2521 * Call the SCSA utility probe routine to see if we actually 2522 * have a target at this SCSI nexus. 2523 */ 2524 switch (sd_scsi_probe_with_cache(devp, NULL_FUNC)) { 2525 case SCSIPROBE_EXISTS: 2526 switch (devp->sd_inq->inq_dtype) { 2527 case DTYPE_DIRECT: 2528 rval = DDI_PROBE_SUCCESS; 2529 break; 2530 case DTYPE_RODIRECT: 2531 /* CDs etc. Can be removable media */ 2532 rval = DDI_PROBE_SUCCESS; 2533 break; 2534 case DTYPE_OPTICAL: 2535 /* 2536 * Rewritable optical driver HP115AA 2537 * Can also be removable media 2538 */ 2539 2540 /* 2541 * Do not attempt to bind to DTYPE_OPTICAL if 2542 * pre solaris 9 sparc sd behavior is required 2543 * 2544 * If first time through and sd_dtype_optical_bind 2545 * has not been set in /etc/system check properties 2546 */ 2547 2548 if (sd_dtype_optical_bind < 0) { 2549 sd_dtype_optical_bind = ddi_prop_get_int 2550 (DDI_DEV_T_ANY, devi, 0, 2551 "optical-device-bind", 1); 2552 } 2553 2554 if (sd_dtype_optical_bind == 0) { 2555 rval = DDI_PROBE_FAILURE; 2556 } else { 2557 rval = DDI_PROBE_SUCCESS; 2558 } 2559 break; 2560 2561 case DTYPE_NOTPRESENT: 2562 default: 2563 rval = DDI_PROBE_FAILURE; 2564 break; 2565 } 2566 break; 2567 default: 2568 rval = DDI_PROBE_PARTIAL; 2569 break; 2570 } 2571 2572 /* 2573 * This routine checks for resource allocation prior to freeing, 2574 * so it will take care of the "smart probing" case where a 2575 * scsi_probe() may or may not have been issued and will *not* 2576 * free previously-freed resources. 2577 */ 2578 scsi_unprobe(devp); 2579 return (rval); 2580 } 2581 2582 2583 /* 2584 * Function: sdinfo 2585 * 2586 * Description: This is the driver getinfo(9e) entry point function. 
2587 * Given the device number, return the devinfo pointer from 2588 * the scsi_device structure or the instance number 2589 * associated with the dev_t. 2590 * 2591 * Arguments: dip - pointer to device info structure 2592 * infocmd - command argument (DDI_INFO_DEVT2DEVINFO, 2593 * DDI_INFO_DEVT2INSTANCE) 2594 * arg - driver dev_t 2595 * resultp - user buffer for request response 2596 * 2597 * Return Code: DDI_SUCCESS 2598 * DDI_FAILURE 2599 */ 2600 /* ARGSUSED */ 2601 static int 2602 sdinfo(dev_info_t *dip, ddi_info_cmd_t infocmd, void *arg, void **result) 2603 { 2604 struct sd_lun *un; 2605 dev_t dev; 2606 int instance; 2607 int error; 2608 2609 switch (infocmd) { 2610 case DDI_INFO_DEVT2DEVINFO: 2611 dev = (dev_t)arg; 2612 instance = SDUNIT(dev); 2613 if ((un = ddi_get_soft_state(sd_state, instance)) == NULL) { 2614 return (DDI_FAILURE); 2615 } 2616 *result = (void *) SD_DEVINFO(un); 2617 error = DDI_SUCCESS; 2618 break; 2619 case DDI_INFO_DEVT2INSTANCE: 2620 dev = (dev_t)arg; 2621 instance = SDUNIT(dev); 2622 *result = (void *)(uintptr_t)instance; 2623 error = DDI_SUCCESS; 2624 break; 2625 default: 2626 error = DDI_FAILURE; 2627 } 2628 return (error); 2629 } 2630 2631 /* 2632 * Function: sd_prop_op 2633 * 2634 * Description: This is the driver prop_op(9e) entry point function. 2635 * Return the number of blocks for the partition in question 2636 * or forward the request to the property facilities. 2637 * 2638 * Arguments: dev - device number 2639 * dip - pointer to device info structure 2640 * prop_op - property operator 2641 * mod_flags - DDI_PROP_DONTPASS, don't pass to parent 2642 * name - pointer to property name 2643 * valuep - pointer or address of the user buffer 2644 * lengthp - property length 2645 * 2646 * Return Code: DDI_PROP_SUCCESS 2647 * DDI_PROP_NOT_FOUND 2648 * DDI_PROP_UNDEFINED 2649 * DDI_PROP_NO_MEMORY 2650 * DDI_PROP_BUF_TOO_SMALL 2651 */ 2652 2653 static int 2654 sd_prop_op(dev_t dev, dev_info_t *dip, ddi_prop_op_t prop_op, int mod_flags, 2655 char *name, caddr_t valuep, int *lengthp) 2656 { 2657 struct sd_lun *un; 2658 2659 if ((un = ddi_get_soft_state(sd_state, ddi_get_instance(dip))) == NULL) 2660 return (ddi_prop_op(dev, dip, prop_op, mod_flags, 2661 name, valuep, lengthp)); 2662 2663 return (cmlb_prop_op(un->un_cmlbhandle, 2664 dev, dip, prop_op, mod_flags, name, valuep, lengthp, 2665 SDPART(dev), (void *)SD_PATH_DIRECT)); 2666 } 2667 2668 /* 2669 * The following functions are for smart probing: 2670 * sd_scsi_probe_cache_init() 2671 * sd_scsi_probe_cache_fini() 2672 * sd_scsi_clear_probe_cache() 2673 * sd_scsi_probe_with_cache() 2674 */ 2675 2676 /* 2677 * Function: sd_scsi_probe_cache_init 2678 * 2679 * Description: Initializes the probe response cache mutex and head pointer. 2680 * 2681 * Context: Kernel thread context 2682 */ 2683 2684 static void 2685 sd_scsi_probe_cache_init(void) 2686 { 2687 mutex_init(&sd_scsi_probe_cache_mutex, NULL, MUTEX_DRIVER, NULL); 2688 sd_scsi_probe_cache_head = NULL; 2689 } 2690 2691 2692 /* 2693 * Function: sd_scsi_probe_cache_fini 2694 * 2695 * Description: Frees all resources associated with the probe response cache. 
2696 * 2697 * Context: Kernel thread context 2698 */ 2699 2700 static void 2701 sd_scsi_probe_cache_fini(void) 2702 { 2703 struct sd_scsi_probe_cache *cp; 2704 struct sd_scsi_probe_cache *ncp; 2705 2706 /* Clean up our smart probing linked list */ 2707 for (cp = sd_scsi_probe_cache_head; cp != NULL; cp = ncp) { 2708 ncp = cp->next; 2709 kmem_free(cp, sizeof (struct sd_scsi_probe_cache)); 2710 } 2711 sd_scsi_probe_cache_head = NULL; 2712 mutex_destroy(&sd_scsi_probe_cache_mutex); 2713 } 2714 2715 2716 /* 2717 * Function: sd_scsi_clear_probe_cache 2718 * 2719 * Description: This routine clears the probe response cache. This is 2720 * done when open() returns ENXIO so that when deferred 2721 * attach is attempted (possibly after a device has been 2722 * turned on) we will retry the probe. Since we don't know 2723 * which target we failed to open, we just clear the 2724 * entire cache. 2725 * 2726 * Context: Kernel thread context 2727 */ 2728 2729 static void 2730 sd_scsi_clear_probe_cache(void) 2731 { 2732 struct sd_scsi_probe_cache *cp; 2733 int i; 2734 2735 mutex_enter(&sd_scsi_probe_cache_mutex); 2736 for (cp = sd_scsi_probe_cache_head; cp != NULL; cp = cp->next) { 2737 /* 2738 * Reset all entries to SCSIPROBE_EXISTS. This will 2739 * force probing to be performed the next time 2740 * sd_scsi_probe_with_cache is called. 2741 */ 2742 for (i = 0; i < NTARGETS_WIDE; i++) { 2743 cp->cache[i] = SCSIPROBE_EXISTS; 2744 } 2745 } 2746 mutex_exit(&sd_scsi_probe_cache_mutex); 2747 } 2748 2749 2750 /* 2751 * Function: sd_scsi_probe_with_cache 2752 * 2753 * Description: This routine implements support for a scsi device probe 2754 * with cache. The driver maintains a cache of the target 2755 * responses to scsi probes. If we get no response from a 2756 * target during a probe inquiry, we remember that, and we 2757 * avoid additional calls to scsi_probe on non-zero LUNs 2758 * on the same target until the cache is cleared. By doing 2759 * so we avoid the 1/4 sec selection timeout for nonzero 2760 * LUNs. lun0 of a target is always probed. 2761 * 2762 * Arguments: devp - Pointer to a scsi_device(9S) structure 2763 * waitfunc - indicates what the allocator routines should 2764 * do when resources are not available. This value 2765 * is passed on to scsi_probe() when that routine 2766 * is called. 2767 * 2768 * Return Code: SCSIPROBE_NORESP if a NORESP in probe response cache; 2769 * otherwise the value returned by scsi_probe(9F). 
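 *
 *		Example (illustrative): if the lun0 probe of a target
 *		returns SCSIPROBE_NORESP, that result is cached, and
 *		subsequent probes of nonzero luns on the same target
 *		return SCSIPROBE_NORESP immediately (no selection
 *		timeout) until sd_scsi_clear_probe_cache() resets the
 *		entry.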
2770 * 2771 * Context: Kernel thread context 2772 */ 2773 2774 static int 2775 sd_scsi_probe_with_cache(struct scsi_device *devp, int (*waitfn)()) 2776 { 2777 struct sd_scsi_probe_cache *cp; 2778 dev_info_t *pdip = ddi_get_parent(devp->sd_dev); 2779 int lun, tgt; 2780 2781 lun = ddi_prop_get_int(DDI_DEV_T_ANY, devp->sd_dev, DDI_PROP_DONTPASS, 2782 SCSI_ADDR_PROP_LUN, 0); 2783 tgt = ddi_prop_get_int(DDI_DEV_T_ANY, devp->sd_dev, DDI_PROP_DONTPASS, 2784 SCSI_ADDR_PROP_TARGET, -1); 2785 2786 /* Make sure caching enabled and target in range */ 2787 if ((tgt < 0) || (tgt >= NTARGETS_WIDE)) { 2788 /* do it the old way (no cache) */ 2789 return (scsi_probe(devp, waitfn)); 2790 } 2791 2792 mutex_enter(&sd_scsi_probe_cache_mutex); 2793 2794 /* Find the cache for this scsi bus instance */ 2795 for (cp = sd_scsi_probe_cache_head; cp != NULL; cp = cp->next) { 2796 if (cp->pdip == pdip) { 2797 break; 2798 } 2799 } 2800 2801 /* If we can't find a cache for this pdip, create one */ 2802 if (cp == NULL) { 2803 int i; 2804 2805 cp = kmem_zalloc(sizeof (struct sd_scsi_probe_cache), 2806 KM_SLEEP); 2807 cp->pdip = pdip; 2808 cp->next = sd_scsi_probe_cache_head; 2809 sd_scsi_probe_cache_head = cp; 2810 for (i = 0; i < NTARGETS_WIDE; i++) { 2811 cp->cache[i] = SCSIPROBE_EXISTS; 2812 } 2813 } 2814 2815 mutex_exit(&sd_scsi_probe_cache_mutex); 2816 2817 /* Recompute the cache for this target if LUN zero */ 2818 if (lun == 0) { 2819 cp->cache[tgt] = SCSIPROBE_EXISTS; 2820 } 2821 2822 /* Don't probe if cache remembers a NORESP from a previous LUN. */ 2823 if (cp->cache[tgt] != SCSIPROBE_EXISTS) { 2824 return (SCSIPROBE_NORESP); 2825 } 2826 2827 /* Do the actual probe; save & return the result */ 2828 return (cp->cache[tgt] = scsi_probe(devp, waitfn)); 2829 } 2830 2831 2832 /* 2833 * Function: sd_scsi_target_lun_init 2834 * 2835 * Description: Initializes the attached lun chain mutex and head pointer. 2836 * 2837 * Context: Kernel thread context 2838 */ 2839 2840 static void 2841 sd_scsi_target_lun_init(void) 2842 { 2843 mutex_init(&sd_scsi_target_lun_mutex, NULL, MUTEX_DRIVER, NULL); 2844 sd_scsi_target_lun_head = NULL; 2845 } 2846 2847 2848 /* 2849 * Function: sd_scsi_target_lun_fini 2850 * 2851 * Description: Frees all resources associated with the attached lun 2852 * chain 2853 * 2854 * Context: Kernel thread context 2855 */ 2856 2857 static void 2858 sd_scsi_target_lun_fini(void) 2859 { 2860 struct sd_scsi_hba_tgt_lun *cp; 2861 struct sd_scsi_hba_tgt_lun *ncp; 2862 2863 for (cp = sd_scsi_target_lun_head; cp != NULL; cp = ncp) { 2864 ncp = cp->next; 2865 kmem_free(cp, sizeof (struct sd_scsi_hba_tgt_lun)); 2866 } 2867 sd_scsi_target_lun_head = NULL; 2868 mutex_destroy(&sd_scsi_target_lun_mutex); 2869 } 2870 2871 2872 /* 2873 * Function: sd_scsi_get_target_lun_count 2874 * 2875 * Description: This routine will check in the attached lun chain to see 2876 * how many luns are attached on the required SCSI controller 2877 * and target. Currently, some capabilities like tagged queue 2878 * are supported per target based by HBA. So all luns in a 2879 * target have the same capabilities. Based on this assumption, 2880 * sd should only set these capabilities once per target. This 2881 * function is called when sd needs to decide how many luns 2882 * already attached on a target. 2883 * 2884 * Arguments: dip - Pointer to the system's dev_info_t for the SCSI 2885 * controller device. 2886 * target - The target ID on the controller's SCSI bus. 
2887 * 2888 * Return Code: The number of luns attached on the required target and 2889 * controller. 2890 * -1 if target ID is not in parallel SCSI scope or the given 2891 * dip is not in the chain. 2892 * 2893 * Context: Kernel thread context 2894 */ 2895 2896 static int 2897 sd_scsi_get_target_lun_count(dev_info_t *dip, int target) 2898 { 2899 struct sd_scsi_hba_tgt_lun *cp; 2900 2901 if ((target < 0) || (target >= NTARGETS_WIDE)) { 2902 return (-1); 2903 } 2904 2905 mutex_enter(&sd_scsi_target_lun_mutex); 2906 2907 for (cp = sd_scsi_target_lun_head; cp != NULL; cp = cp->next) { 2908 if (cp->pdip == dip) { 2909 break; 2910 } 2911 } 2912 2913 mutex_exit(&sd_scsi_target_lun_mutex); 2914 2915 if (cp == NULL) { 2916 return (-1); 2917 } 2918 2919 return (cp->nlun[target]); 2920 } 2921 2922 2923 /* 2924 * Function: sd_scsi_update_lun_on_target 2925 * 2926 * Description: This routine is used to update the attached lun chain when a 2927 * lun is attached or detached on a target. 2928 * 2929 * Arguments: dip - Pointer to the system's dev_info_t for the SCSI 2930 * controller device. 2931 * target - The target ID on the controller's SCSI bus. 2932 * flag - Indicate the lun is attached or detached. 2933 * 2934 * Context: Kernel thread context 2935 */ 2936 2937 static void 2938 sd_scsi_update_lun_on_target(dev_info_t *dip, int target, int flag) 2939 { 2940 struct sd_scsi_hba_tgt_lun *cp; 2941 2942 mutex_enter(&sd_scsi_target_lun_mutex); 2943 2944 for (cp = sd_scsi_target_lun_head; cp != NULL; cp = cp->next) { 2945 if (cp->pdip == dip) { 2946 break; 2947 } 2948 } 2949 2950 if ((cp == NULL) && (flag == SD_SCSI_LUN_ATTACH)) { 2951 cp = kmem_zalloc(sizeof (struct sd_scsi_hba_tgt_lun), 2952 KM_SLEEP); 2953 cp->pdip = dip; 2954 cp->next = sd_scsi_target_lun_head; 2955 sd_scsi_target_lun_head = cp; 2956 } 2957 2958 mutex_exit(&sd_scsi_target_lun_mutex); 2959 2960 if (cp != NULL) { 2961 if (flag == SD_SCSI_LUN_ATTACH) { 2962 cp->nlun[target] ++; 2963 } else { 2964 cp->nlun[target] --; 2965 } 2966 } 2967 } 2968 2969 2970 /* 2971 * Function: sd_spin_up_unit 2972 * 2973 * Description: Issues the following commands to spin-up the device: 2974 * START STOP UNIT, and INQUIRY. 2975 * 2976 * Arguments: ssc - ssc contains pointer to driver soft state (unit) 2977 * structure for this target. 2978 * 2979 * Return Code: 0 - success 2980 * EIO - failure 2981 * EACCES - reservation conflict 2982 * 2983 * Context: Kernel thread context 2984 */ 2985 2986 static int 2987 sd_spin_up_unit(sd_ssc_t *ssc) 2988 { 2989 size_t resid = 0; 2990 int has_conflict = FALSE; 2991 uchar_t *bufaddr; 2992 int status; 2993 struct sd_lun *un; 2994 2995 ASSERT(ssc != NULL); 2996 un = ssc->ssc_un; 2997 ASSERT(un != NULL); 2998 2999 /* 3000 * Send a throwaway START UNIT command. 3001 * 3002 * If we fail on this, we don't care presently what precisely 3003 * is wrong. EMC's arrays will also fail this with a check 3004 * condition (0x2/0x4/0x3) if the device is "inactive," but 3005 * we don't want to fail the attach because it may become 3006 * "active" later. 3007 */ 3008 status = sd_send_scsi_START_STOP_UNIT(ssc, SD_TARGET_START, 3009 SD_PATH_DIRECT); 3010 3011 if (status != 0) { 3012 if (status == EACCES) 3013 has_conflict = TRUE; 3014 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 3015 } 3016 3017 /* 3018 * Send another INQUIRY command to the target. This is necessary for 3019 * non-removable media direct access devices because their INQUIRY data 3020 * may not be fully qualified until they are spun up (perhaps via the 3021 * START command above). 
Note: This seems to be needed for some
	 * legacy devices only. The INQUIRY command should succeed even if a
	 * Reservation Conflict is present.
	 */
	bufaddr = kmem_zalloc(SUN_INQSIZE, KM_SLEEP);

	if (sd_send_scsi_INQUIRY(ssc, bufaddr, SUN_INQSIZE, 0, 0, &resid)
	    != 0) {
		kmem_free(bufaddr, SUN_INQSIZE);
		sd_ssc_assessment(ssc, SD_FMT_STATUS_CHECK);
		return (EIO);
	}

	/*
	 * If we got enough INQUIRY data, copy it over the old INQUIRY data.
	 * Note that this routine does not return a failure here even if the
	 * INQUIRY command did not return any data. This is a legacy behavior.
	 */
	if ((SUN_INQSIZE - resid) >= SUN_MIN_INQLEN) {
		bcopy(bufaddr, SD_INQUIRY(un), SUN_INQSIZE);
	}

	kmem_free(bufaddr, SUN_INQSIZE);

	/* If we hit a reservation conflict above, tell the caller. */
	if (has_conflict == TRUE) {
		return (EACCES);
	}

	return (0);
}

#ifdef _LP64
/*
 * Function: sd_enable_descr_sense
 *
 * Description: This routine attempts to select descriptor sense format
 *		using the Control mode page. Devices that support 64 bit
 *		LBAs (for >2TB luns) should also implement descriptor
 *		sense data, so we will call this function whenever we see
 *		a lun larger than 2TB. If for some reason the device
 *		supports 64 bit LBAs but doesn't support descriptor sense,
 *		presumably the mode select will fail. Everything will
 *		continue to work normally except that we will not get
 *		complete sense data for commands that fail with an LBA
 *		larger than 32 bits.
 *
 * Arguments: ssc - ssc contains pointer to driver soft state (unit)
 *		structure for this target.
 *
 * Context: Kernel thread context only
 */

static void
sd_enable_descr_sense(sd_ssc_t *ssc)
{
	uchar_t			*header;
	struct mode_control_scsi3 *ctrl_bufp;
	size_t			buflen;
	size_t			bd_len;
	int			status;
	struct sd_lun		*un;

	ASSERT(ssc != NULL);
	un = ssc->ssc_un;
	ASSERT(un != NULL);

	/*
	 * Read MODE SENSE page 0xA, Control Mode Page
	 */
	buflen = MODE_HEADER_LENGTH + MODE_BLK_DESC_LENGTH +
	    sizeof (struct mode_control_scsi3);
	header = kmem_zalloc(buflen, KM_SLEEP);

	status = sd_send_scsi_MODE_SENSE(ssc, CDB_GROUP0, header, buflen,
	    MODEPAGE_CTRL_MODE, SD_PATH_DIRECT);

	if (status != 0) {
		SD_ERROR(SD_LOG_COMMON, un,
		    "sd_enable_descr_sense: mode sense ctrl page failed\n");
		goto eds_exit;
	}

	/*
	 * Determine size of Block Descriptors in order to locate
	 * the mode page data. ATAPI devices return 0, SCSI devices
	 * should return MODE_BLK_DESC_LENGTH.
	 */
	bd_len = ((struct mode_header *)header)->bdesc_length;

	/* Clear the mode data length field for MODE SELECT */
	((struct mode_header *)header)->length = 0;

	ctrl_bufp = (struct mode_control_scsi3 *)
	    (header + MODE_HEADER_LENGTH + bd_len);

	/*
	 * If the page length is smaller than the expected value,
	 * the target device doesn't support D_SENSE. Bail out here.
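	 * (The page length field counts only the bytes that follow the
	 * two-byte page header, which is why the expected minimum below
	 * is sizeof (struct mode_control_scsi3) - 2.)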
3120 */ 3121 if (ctrl_bufp->mode_page.length < 3122 sizeof (struct mode_control_scsi3) - 2) { 3123 SD_ERROR(SD_LOG_COMMON, un, 3124 "sd_enable_descr_sense: enable D_SENSE failed\n"); 3125 goto eds_exit; 3126 } 3127 3128 /* 3129 * Clear PS bit for MODE SELECT 3130 */ 3131 ctrl_bufp->mode_page.ps = 0; 3132 3133 /* 3134 * Set D_SENSE to enable descriptor sense format. 3135 */ 3136 ctrl_bufp->d_sense = 1; 3137 3138 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 3139 3140 /* 3141 * Use MODE SELECT to commit the change to the D_SENSE bit 3142 */ 3143 status = sd_send_scsi_MODE_SELECT(ssc, CDB_GROUP0, header, 3144 buflen, SD_DONTSAVE_PAGE, SD_PATH_DIRECT); 3145 3146 if (status != 0) { 3147 SD_INFO(SD_LOG_COMMON, un, 3148 "sd_enable_descr_sense: mode select ctrl page failed\n"); 3149 } else { 3150 kmem_free(header, buflen); 3151 return; 3152 } 3153 3154 eds_exit: 3155 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 3156 kmem_free(header, buflen); 3157 } 3158 3159 /* 3160 * Function: sd_reenable_dsense_task 3161 * 3162 * Description: Re-enable descriptor sense after device or bus reset 3163 * 3164 * Context: Executes in a taskq() thread context 3165 */ 3166 static void 3167 sd_reenable_dsense_task(void *arg) 3168 { 3169 struct sd_lun *un = arg; 3170 sd_ssc_t *ssc; 3171 3172 ASSERT(un != NULL); 3173 3174 ssc = sd_ssc_init(un); 3175 sd_enable_descr_sense(ssc); 3176 sd_ssc_fini(ssc); 3177 } 3178 #endif /* _LP64 */ 3179 3180 /* 3181 * Function: sd_set_mmc_caps 3182 * 3183 * Description: This routine determines if the device is MMC compliant and if 3184 * the device supports CDDA via a mode sense of the CDVD 3185 * capabilities mode page. Also checks if the device is a 3186 * dvdram writable device. 3187 * 3188 * Arguments: ssc - ssc contains pointer to driver soft state (unit) 3189 * structure for this target. 3190 * 3191 * Context: Kernel thread context only 3192 */ 3193 3194 static void 3195 sd_set_mmc_caps(sd_ssc_t *ssc) 3196 { 3197 struct mode_header_grp2 *sense_mhp; 3198 uchar_t *sense_page; 3199 caddr_t buf; 3200 int bd_len; 3201 int status; 3202 struct uscsi_cmd com; 3203 int rtn; 3204 uchar_t *out_data_rw, *out_data_hd; 3205 uchar_t *rqbuf_rw, *rqbuf_hd; 3206 struct sd_lun *un; 3207 3208 ASSERT(ssc != NULL); 3209 un = ssc->ssc_un; 3210 ASSERT(un != NULL); 3211 3212 /* 3213 * The flags which will be set in this function are - mmc compliant, 3214 * dvdram writable device, cdda support. Initialize them to FALSE 3215 * and if a capability is detected - it will be set to TRUE. 3216 */ 3217 un->un_f_mmc_cap = FALSE; 3218 un->un_f_dvdram_writable_device = FALSE; 3219 un->un_f_cfg_cdda = FALSE; 3220 3221 buf = kmem_zalloc(BUFLEN_MODE_CDROM_CAP, KM_SLEEP); 3222 status = sd_send_scsi_MODE_SENSE(ssc, CDB_GROUP1, (uchar_t *)buf, 3223 BUFLEN_MODE_CDROM_CAP, MODEPAGE_CDROM_CAP, SD_PATH_DIRECT); 3224 3225 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 3226 3227 if (status != 0) { 3228 /* command failed; just return */ 3229 kmem_free(buf, BUFLEN_MODE_CDROM_CAP); 3230 return; 3231 } 3232 /* 3233 * If the mode sense request for the CDROM CAPABILITIES 3234 * page (0x2A) succeeds the device is assumed to be MMC. 3235 */ 3236 un->un_f_mmc_cap = TRUE; 3237 3238 /* Get to the page data */ 3239 sense_mhp = (struct mode_header_grp2 *)buf; 3240 bd_len = (sense_mhp->bdesc_length_hi << 8) | 3241 sense_mhp->bdesc_length_lo; 3242 if (bd_len > MODE_BLK_DESC_LENGTH) { 3243 /* 3244 * We did not get back the expected block descriptor 3245 * length so we cannot determine if the device supports 3246 * CDDA. 
However, we still indicate the device is MMC 3247 * according to the successful response to the page 3248 * 0x2A mode sense request. 3249 */ 3250 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 3251 "sd_set_mmc_caps: Mode Sense returned " 3252 "invalid block descriptor length\n"); 3253 kmem_free(buf, BUFLEN_MODE_CDROM_CAP); 3254 return; 3255 } 3256 3257 /* See if read CDDA is supported */ 3258 sense_page = (uchar_t *)(buf + MODE_HEADER_LENGTH_GRP2 + 3259 bd_len); 3260 un->un_f_cfg_cdda = (sense_page[5] & 0x01) ? TRUE : FALSE; 3261 3262 /* See if writing DVD RAM is supported. */ 3263 un->un_f_dvdram_writable_device = (sense_page[3] & 0x20) ? TRUE : FALSE; 3264 if (un->un_f_dvdram_writable_device == TRUE) { 3265 kmem_free(buf, BUFLEN_MODE_CDROM_CAP); 3266 return; 3267 } 3268 3269 /* 3270 * If the device presents DVD or CD capabilities in the mode 3271 * page, we can return here since a RRD will not have 3272 * these capabilities. 3273 */ 3274 if ((sense_page[2] & 0x3f) || (sense_page[3] & 0x3f)) { 3275 kmem_free(buf, BUFLEN_MODE_CDROM_CAP); 3276 return; 3277 } 3278 kmem_free(buf, BUFLEN_MODE_CDROM_CAP); 3279 3280 /* 3281 * If un->un_f_dvdram_writable_device is still FALSE, 3282 * check for a Removable Rigid Disk (RRD). A RRD 3283 * device is identified by the features RANDOM_WRITABLE and 3284 * HARDWARE_DEFECT_MANAGEMENT. 3285 */ 3286 out_data_rw = kmem_zalloc(SD_CURRENT_FEATURE_LEN, KM_SLEEP); 3287 rqbuf_rw = kmem_zalloc(SENSE_LENGTH, KM_SLEEP); 3288 3289 rtn = sd_send_scsi_feature_GET_CONFIGURATION(ssc, &com, rqbuf_rw, 3290 SENSE_LENGTH, out_data_rw, SD_CURRENT_FEATURE_LEN, 3291 RANDOM_WRITABLE, SD_PATH_STANDARD); 3292 3293 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 3294 3295 if (rtn != 0) { 3296 kmem_free(out_data_rw, SD_CURRENT_FEATURE_LEN); 3297 kmem_free(rqbuf_rw, SENSE_LENGTH); 3298 return; 3299 } 3300 3301 out_data_hd = kmem_zalloc(SD_CURRENT_FEATURE_LEN, KM_SLEEP); 3302 rqbuf_hd = kmem_zalloc(SENSE_LENGTH, KM_SLEEP); 3303 3304 rtn = sd_send_scsi_feature_GET_CONFIGURATION(ssc, &com, rqbuf_hd, 3305 SENSE_LENGTH, out_data_hd, SD_CURRENT_FEATURE_LEN, 3306 HARDWARE_DEFECT_MANAGEMENT, SD_PATH_STANDARD); 3307 3308 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 3309 3310 if (rtn == 0) { 3311 /* 3312 * We have good information, check for random writable 3313 * and hardware defect features. 3314 */ 3315 if ((out_data_rw[9] & RANDOM_WRITABLE) && 3316 (out_data_hd[9] & HARDWARE_DEFECT_MANAGEMENT)) { 3317 un->un_f_dvdram_writable_device = TRUE; 3318 } 3319 } 3320 3321 kmem_free(out_data_rw, SD_CURRENT_FEATURE_LEN); 3322 kmem_free(rqbuf_rw, SENSE_LENGTH); 3323 kmem_free(out_data_hd, SD_CURRENT_FEATURE_LEN); 3324 kmem_free(rqbuf_hd, SENSE_LENGTH); 3325 } 3326 3327 /* 3328 * Function: sd_check_for_writable_cd 3329 * 3330 * Description: This routine determines if the media in the device is 3331 * writable or not. It uses the get configuration command (0x46) 3332 * to determine if the media is writable 3333 * 3334 * Arguments: un - driver soft state (unit) structure 3335 * path_flag - SD_PATH_DIRECT to use the USCSI "direct" 3336 * chain and the normal command waitq, or 3337 * SD_PATH_DIRECT_PRIORITY to use the USCSI 3338 * "direct" chain and bypass the normal command 3339 * waitq. 3340 * 3341 * Context: Never called at interrupt context. 
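 *
 * Note: the writable-DVD check below relies on bytes 6-7 of the
 * GET CONFIGURATION response header, which per MMC hold the
 * current profile; a current profile of 0x0012 (DVD-RAM) is
 * treated as writable media.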
3342 */
3343
3344 static void
3345 sd_check_for_writable_cd(sd_ssc_t *ssc, int path_flag)
3346 {
3347 struct uscsi_cmd com;
3348 uchar_t *out_data;
3349 uchar_t *rqbuf;
3350 int rtn;
3351 uchar_t *out_data_rw, *out_data_hd;
3352 uchar_t *rqbuf_rw, *rqbuf_hd;
3353 struct mode_header_grp2 *sense_mhp;
3354 uchar_t *sense_page;
3355 caddr_t buf;
3356 int bd_len;
3357 int status;
3358 struct sd_lun *un;
3359
3360 ASSERT(ssc != NULL);
3361 un = ssc->ssc_un;
3362 ASSERT(un != NULL);
3363 ASSERT(mutex_owned(SD_MUTEX(un)));
3364
3365 /*
3366 * Initialize the writable media flag to FALSE; set it to TRUE
3367 * only if the configuration info tells us otherwise.
3368 */
3369 un->un_f_mmc_writable_media = FALSE;
3370 mutex_exit(SD_MUTEX(un));
3371
3372 out_data = kmem_zalloc(SD_PROFILE_HEADER_LEN, KM_SLEEP);
3373 rqbuf = kmem_zalloc(SENSE_LENGTH, KM_SLEEP);
3374
3375 rtn = sd_send_scsi_GET_CONFIGURATION(ssc, &com, rqbuf, SENSE_LENGTH,
3376 out_data, SD_PROFILE_HEADER_LEN, path_flag);
3377
3378 if (rtn != 0)
3379 sd_ssc_assessment(ssc, SD_FMT_IGNORE);
3380
3381 mutex_enter(SD_MUTEX(un));
3382 if (rtn == 0) {
3383 /*
3384 * We have good information, check for writable DVD.
3385 */
3386 if ((out_data[6] == 0) && (out_data[7] == 0x12)) {
3387 un->un_f_mmc_writable_media = TRUE;
3388 kmem_free(out_data, SD_PROFILE_HEADER_LEN);
3389 kmem_free(rqbuf, SENSE_LENGTH);
3390 return;
3391 }
3392 }
3393
3394 kmem_free(out_data, SD_PROFILE_HEADER_LEN);
3395 kmem_free(rqbuf, SENSE_LENGTH);
3396
3397 /*
3398 * Determine if this is a RRD type device.
3399 */
3400 mutex_exit(SD_MUTEX(un));
3401 buf = kmem_zalloc(BUFLEN_MODE_CDROM_CAP, KM_SLEEP);
3402 status = sd_send_scsi_MODE_SENSE(ssc, CDB_GROUP1, (uchar_t *)buf,
3403 BUFLEN_MODE_CDROM_CAP, MODEPAGE_CDROM_CAP, path_flag);
3404
3405 sd_ssc_assessment(ssc, SD_FMT_IGNORE);
3406
3407 mutex_enter(SD_MUTEX(un));
3408 if (status != 0) {
3409 /* command failed; just return */
3410 kmem_free(buf, BUFLEN_MODE_CDROM_CAP);
3411 return;
3412 }
3413
3414 /* Get to the page data */
3415 sense_mhp = (struct mode_header_grp2 *)buf;
3416 bd_len = (sense_mhp->bdesc_length_hi << 8) | sense_mhp->bdesc_length_lo;
3417 if (bd_len > MODE_BLK_DESC_LENGTH) {
3418 /*
3419 * We did not get back the expected block descriptor length so
3420 * we cannot check the mode page.
3421 */
3422 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
3423 "sd_check_for_writable_cd: Mode Sense returned "
3424 "invalid block descriptor length\n");
3425 kmem_free(buf, BUFLEN_MODE_CDROM_CAP);
3426 return;
3427 }
3428
3429 /*
3430 * If the device presents DVD or CD capabilities in the mode
3431 * page, we can return here since a RRD device will not have
3432 * these capabilities.
3433 */
3434 sense_page = (uchar_t *)(buf + MODE_HEADER_LENGTH_GRP2 + bd_len);
3435 if ((sense_page[2] & 0x3f) || (sense_page[3] & 0x3f)) {
3436 kmem_free(buf, BUFLEN_MODE_CDROM_CAP);
3437 return;
3438 }
3439 kmem_free(buf, BUFLEN_MODE_CDROM_CAP);
3440
3441 /*
3442 * If un->un_f_mmc_writable_media is still FALSE,
3443 * check for RRD type media. A RRD device is identified
3444 * by the features RANDOM_WRITABLE and HARDWARE_DEFECT_MANAGEMENT.
3445 */ 3446 mutex_exit(SD_MUTEX(un)); 3447 out_data_rw = kmem_zalloc(SD_CURRENT_FEATURE_LEN, KM_SLEEP); 3448 rqbuf_rw = kmem_zalloc(SENSE_LENGTH, KM_SLEEP); 3449 3450 rtn = sd_send_scsi_feature_GET_CONFIGURATION(ssc, &com, rqbuf_rw, 3451 SENSE_LENGTH, out_data_rw, SD_CURRENT_FEATURE_LEN, 3452 RANDOM_WRITABLE, path_flag); 3453 3454 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 3455 if (rtn != 0) { 3456 kmem_free(out_data_rw, SD_CURRENT_FEATURE_LEN); 3457 kmem_free(rqbuf_rw, SENSE_LENGTH); 3458 mutex_enter(SD_MUTEX(un)); 3459 return; 3460 } 3461 3462 out_data_hd = kmem_zalloc(SD_CURRENT_FEATURE_LEN, KM_SLEEP); 3463 rqbuf_hd = kmem_zalloc(SENSE_LENGTH, KM_SLEEP); 3464 3465 rtn = sd_send_scsi_feature_GET_CONFIGURATION(ssc, &com, rqbuf_hd, 3466 SENSE_LENGTH, out_data_hd, SD_CURRENT_FEATURE_LEN, 3467 HARDWARE_DEFECT_MANAGEMENT, path_flag); 3468 3469 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 3470 mutex_enter(SD_MUTEX(un)); 3471 if (rtn == 0) { 3472 /* 3473 * We have good information, check for random writable 3474 * and hardware defect features as current. 3475 */ 3476 if ((out_data_rw[9] & RANDOM_WRITABLE) && 3477 (out_data_rw[10] & 0x1) && 3478 (out_data_hd[9] & HARDWARE_DEFECT_MANAGEMENT) && 3479 (out_data_hd[10] & 0x1)) { 3480 un->un_f_mmc_writable_media = TRUE; 3481 } 3482 } 3483 3484 kmem_free(out_data_rw, SD_CURRENT_FEATURE_LEN); 3485 kmem_free(rqbuf_rw, SENSE_LENGTH); 3486 kmem_free(out_data_hd, SD_CURRENT_FEATURE_LEN); 3487 kmem_free(rqbuf_hd, SENSE_LENGTH); 3488 } 3489 3490 /* 3491 * Function: sd_read_unit_properties 3492 * 3493 * Description: The following implements a property lookup mechanism. 3494 * Properties for particular disks (keyed on vendor, model 3495 * and rev numbers) are sought in the sd.conf file via 3496 * sd_process_sdconf_file(), and if not found there, are 3497 * looked for in a list hardcoded in this driver via 3498 * sd_process_sdconf_table() Once located the properties 3499 * are used to update the driver unit structure. 3500 * 3501 * Arguments: un - driver soft state (unit) structure 3502 */ 3503 3504 static void 3505 sd_read_unit_properties(struct sd_lun *un) 3506 { 3507 /* 3508 * sd_process_sdconf_file returns SD_FAILURE if it cannot find 3509 * the "sd-config-list" property (from the sd.conf file) or if 3510 * there was not a match for the inquiry vid/pid. If this event 3511 * occurs the static driver configuration table is searched for 3512 * a match. 3513 */ 3514 ASSERT(un != NULL); 3515 if (sd_process_sdconf_file(un) == SD_FAILURE) { 3516 sd_process_sdconf_table(un); 3517 } 3518 3519 /* check for LSI device */ 3520 sd_is_lsi(un); 3521 3522 3523 } 3524 3525 3526 /* 3527 * Function: sd_process_sdconf_file 3528 * 3529 * Description: Use ddi_prop_lookup(9F) to obtain the properties from the 3530 * driver's config file (ie, sd.conf) and update the driver 3531 * soft state structure accordingly. 3532 * 3533 * Arguments: un - driver soft state (unit) structure 3534 * 3535 * Return Code: SD_SUCCESS - The properties were successfully set according 3536 * to the driver configuration file. 3537 * SD_FAILURE - The driver config list was not obtained or 3538 * there was no vid/pid match. This indicates that 3539 * the static config table should be used. 3540 * 3541 * The config file has a property, "sd-config-list". Currently we support 3542 * two kinds of formats. 
For both formats, the value of this property 3543 * is a list of duplets: 3544 * 3545 * sd-config-list= 3546 * <duplet>, 3547 * [,<duplet>]*; 3548 * 3549 * For the improved format, where 3550 * 3551 * <duplet>:= "<vid+pid>","<tunable-list>" 3552 * 3553 * and 3554 * 3555 * <tunable-list>:= <tunable> [, <tunable> ]*; 3556 * <tunable> = <name> : <value> 3557 * 3558 * The <vid+pid> is the string that is returned by the target device on a 3559 * SCSI inquiry command, the <tunable-list> contains one or more tunables 3560 * to apply to all target devices with the specified <vid+pid>. 3561 * 3562 * Each <tunable> is a "<name> : <value>" pair. 3563 * 3564 * For the old format, the structure of each duplet is as follows: 3565 * 3566 * <duplet>:= "<vid+pid>","<data-property-name_list>" 3567 * 3568 * The first entry of the duplet is the device ID string (the concatenated 3569 * vid & pid; not to be confused with a device_id). This is defined in 3570 * the same way as in the sd_disk_table. 3571 * 3572 * The second part of the duplet is a string that identifies a 3573 * data-property-name-list. The data-property-name-list is defined as 3574 * follows: 3575 * 3576 * <data-property-name-list>:=<data-property-name> [<data-property-name>] 3577 * 3578 * The syntax of <data-property-name> depends on the <version> field. 3579 * 3580 * If version = SD_CONF_VERSION_1 we have the following syntax: 3581 * 3582 * <data-property-name>:=<version>,<flags>,<prop0>,<prop1>,.....<propN> 3583 * 3584 * where the prop0 value will be used to set prop0 if bit0 set in the 3585 * flags, prop1 if bit1 set, etc. and N = SD_CONF_MAX_ITEMS -1 3586 * 3587 */ 3588 3589 static int 3590 sd_process_sdconf_file(struct sd_lun *un) 3591 { 3592 char **config_list = NULL; 3593 uint_t nelements; 3594 char *vidptr; 3595 int vidlen; 3596 char *dnlist_ptr; 3597 char *dataname_ptr; 3598 char *dataname_lasts; 3599 int *data_list = NULL; 3600 uint_t data_list_len; 3601 int rval = SD_FAILURE; 3602 int i; 3603 3604 ASSERT(un != NULL); 3605 3606 /* Obtain the configuration list associated with the .conf file */ 3607 if (ddi_prop_lookup_string_array(DDI_DEV_T_ANY, SD_DEVINFO(un), 3608 DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, sd_config_list, 3609 &config_list, &nelements) != DDI_PROP_SUCCESS) { 3610 return (SD_FAILURE); 3611 } 3612 3613 /* 3614 * Compare vids in each duplet to the inquiry vid - if a match is 3615 * made, get the data value and update the soft state structure 3616 * accordingly. 3617 * 3618 * Each duplet should show as a pair of strings, return SD_FAILURE 3619 * otherwise. 3620 */ 3621 if (nelements & 1) { 3622 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 3623 "sd-config-list should show as pairs of strings.\n"); 3624 if (config_list) 3625 ddi_prop_free(config_list); 3626 return (SD_FAILURE); 3627 } 3628 3629 for (i = 0; i < nelements; i += 2) { 3630 /* 3631 * Note: The assumption here is that each vid entry is on 3632 * a unique line from its associated duplet. 3633 */ 3634 vidptr = config_list[i]; 3635 vidlen = (int)strlen(vidptr); 3636 if ((vidlen == 0) || 3637 (sd_sdconf_id_match(un, vidptr, vidlen) != SD_SUCCESS)) { 3638 continue; 3639 } 3640 3641 /* 3642 * dnlist contains 1 or more blank separated 3643 * data-property-name entries 3644 */ 3645 dnlist_ptr = config_list[i + 1]; 3646 3647 if (strchr(dnlist_ptr, ':') != NULL) { 3648 /* 3649 * Decode the improved format sd-config-list. 
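 * A hypothetical entry of this form, for illustration only:
 *   "SUN     T300","throttle-max:32, disksort:false";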
3650 */ 3651 sd_nvpair_str_decode(un, dnlist_ptr); 3652 } else { 3653 /* 3654 * The old format sd-config-list, loop through all 3655 * data-property-name entries in the 3656 * data-property-name-list 3657 * setting the properties for each. 3658 */ 3659 for (dataname_ptr = sd_strtok_r(dnlist_ptr, " \t", 3660 &dataname_lasts); dataname_ptr != NULL; 3661 dataname_ptr = sd_strtok_r(NULL, " \t", 3662 &dataname_lasts)) { 3663 int version; 3664 3665 SD_INFO(SD_LOG_ATTACH_DETACH, un, 3666 "sd_process_sdconf_file: disk:%s, " 3667 "data:%s\n", vidptr, dataname_ptr); 3668 3669 /* Get the data list */ 3670 if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, 3671 SD_DEVINFO(un), 0, dataname_ptr, &data_list, 3672 &data_list_len) != DDI_PROP_SUCCESS) { 3673 SD_INFO(SD_LOG_ATTACH_DETACH, un, 3674 "sd_process_sdconf_file: data " 3675 "property (%s) has no value\n", 3676 dataname_ptr); 3677 continue; 3678 } 3679 3680 version = data_list[0]; 3681 3682 if (version == SD_CONF_VERSION_1) { 3683 sd_tunables values; 3684 3685 /* Set the properties */ 3686 if (sd_chk_vers1_data(un, data_list[1], 3687 &data_list[2], data_list_len, 3688 dataname_ptr) == SD_SUCCESS) { 3689 sd_get_tunables_from_conf(un, 3690 data_list[1], &data_list[2], 3691 &values); 3692 sd_set_vers1_properties(un, 3693 data_list[1], &values); 3694 rval = SD_SUCCESS; 3695 } else { 3696 rval = SD_FAILURE; 3697 } 3698 } else { 3699 scsi_log(SD_DEVINFO(un), sd_label, 3700 CE_WARN, "data property %s version " 3701 "0x%x is invalid.", 3702 dataname_ptr, version); 3703 rval = SD_FAILURE; 3704 } 3705 if (data_list) 3706 ddi_prop_free(data_list); 3707 } 3708 } 3709 } 3710 3711 /* free up the memory allocated by ddi_prop_lookup_string_array(). */ 3712 if (config_list) { 3713 ddi_prop_free(config_list); 3714 } 3715 3716 return (rval); 3717 } 3718 3719 /* 3720 * Function: sd_nvpair_str_decode() 3721 * 3722 * Description: Parse the improved format sd-config-list to get 3723 * each entry of tunable, which includes a name-value pair. 3724 * Then call sd_set_properties() to set the property. 3725 * 3726 * Arguments: un - driver soft state (unit) structure 3727 * nvpair_str - the tunable list 3728 */ 3729 static void 3730 sd_nvpair_str_decode(struct sd_lun *un, char *nvpair_str) 3731 { 3732 char *nv, *name, *value, *token; 3733 char *nv_lasts, *v_lasts, *x_lasts; 3734 3735 for (nv = sd_strtok_r(nvpair_str, ",", &nv_lasts); nv != NULL; 3736 nv = sd_strtok_r(NULL, ",", &nv_lasts)) { 3737 token = sd_strtok_r(nv, ":", &v_lasts); 3738 name = sd_strtok_r(token, " \t", &x_lasts); 3739 token = sd_strtok_r(NULL, ":", &v_lasts); 3740 value = sd_strtok_r(token, " \t", &x_lasts); 3741 if (name == NULL || value == NULL) { 3742 SD_INFO(SD_LOG_ATTACH_DETACH, un, 3743 "sd_nvpair_str_decode: " 3744 "name or value is not valid!\n"); 3745 } else { 3746 sd_set_properties(un, name, value); 3747 } 3748 } 3749 } 3750 3751 /* 3752 * Function: sd_strtok_r() 3753 * 3754 * Description: This function uses strpbrk and strspn to break 3755 * string into tokens on sequentially subsequent calls. Return 3756 * NULL when no non-separator characters remain. The first 3757 * argument is NULL for subsequent calls. 
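 *
 * A minimal usage sketch (hypothetical input):
 *
 *	char buf[] = "throttle-max : 32, disksort : false";
 *	char *lasts;
 *	char *nv;
 *
 *	nv = sd_strtok_r(buf, ",", &lasts);	first call, nv is
 *						"throttle-max : 32"
 *	nv = sd_strtok_r(NULL, ",", &lasts);	next call, nv is
 *						" disksort : false"
 *	nv = sd_strtok_r(NULL, ",", &lasts);	returns NULL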
3758 */ 3759 static char * 3760 sd_strtok_r(char *string, const char *sepset, char **lasts) 3761 { 3762 char *q, *r; 3763 3764 /* First or subsequent call */ 3765 if (string == NULL) 3766 string = *lasts; 3767 3768 if (string == NULL) 3769 return (NULL); 3770 3771 /* Skip leading separators */ 3772 q = string + strspn(string, sepset); 3773 3774 if (*q == '\0') 3775 return (NULL); 3776 3777 if ((r = strpbrk(q, sepset)) == NULL) 3778 *lasts = NULL; 3779 else { 3780 *r = '\0'; 3781 *lasts = r + 1; 3782 } 3783 return (q); 3784 } 3785 3786 /* 3787 * Function: sd_set_properties() 3788 * 3789 * Description: Set device properties based on the improved 3790 * format sd-config-list. 3791 * 3792 * Arguments: un - driver soft state (unit) structure 3793 * name - supported tunable name 3794 * value - tunable value 3795 */ 3796 static void 3797 sd_set_properties(struct sd_lun *un, char *name, char *value) 3798 { 3799 char *endptr = NULL; 3800 long val = 0; 3801 3802 if (strcasecmp(name, "cache-nonvolatile") == 0) { 3803 if (strcasecmp(value, "true") == 0) { 3804 un->un_f_suppress_cache_flush = TRUE; 3805 } else if (strcasecmp(value, "false") == 0) { 3806 un->un_f_suppress_cache_flush = FALSE; 3807 } else { 3808 goto value_invalid; 3809 } 3810 SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_set_properties: " 3811 "suppress_cache_flush flag set to %d\n", 3812 un->un_f_suppress_cache_flush); 3813 return; 3814 } 3815 3816 if (strcasecmp(name, "controller-type") == 0) { 3817 if (ddi_strtol(value, &endptr, 0, &val) == 0) { 3818 un->un_ctype = val; 3819 } else { 3820 goto value_invalid; 3821 } 3822 SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_set_properties: " 3823 "ctype set to %d\n", un->un_ctype); 3824 return; 3825 } 3826 3827 if (strcasecmp(name, "delay-busy") == 0) { 3828 if (ddi_strtol(value, &endptr, 0, &val) == 0) { 3829 un->un_busy_timeout = drv_usectohz(val / 1000); 3830 } else { 3831 goto value_invalid; 3832 } 3833 SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_set_properties: " 3834 "busy_timeout set to %d\n", un->un_busy_timeout); 3835 return; 3836 } 3837 3838 if (strcasecmp(name, "disksort") == 0) { 3839 if (strcasecmp(value, "true") == 0) { 3840 un->un_f_disksort_disabled = FALSE; 3841 } else if (strcasecmp(value, "false") == 0) { 3842 un->un_f_disksort_disabled = TRUE; 3843 } else { 3844 goto value_invalid; 3845 } 3846 SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_set_properties: " 3847 "disksort disabled flag set to %d\n", 3848 un->un_f_disksort_disabled); 3849 return; 3850 } 3851 3852 if (strcasecmp(name, "timeout-releasereservation") == 0) { 3853 if (ddi_strtol(value, &endptr, 0, &val) == 0) { 3854 un->un_reserve_release_time = val; 3855 } else { 3856 goto value_invalid; 3857 } 3858 SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_set_properties: " 3859 "reservation release timeout set to %d\n", 3860 un->un_reserve_release_time); 3861 return; 3862 } 3863 3864 if (strcasecmp(name, "reset-lun") == 0) { 3865 if (strcasecmp(value, "true") == 0) { 3866 un->un_f_lun_reset_enabled = TRUE; 3867 } else if (strcasecmp(value, "false") == 0) { 3868 un->un_f_lun_reset_enabled = FALSE; 3869 } else { 3870 goto value_invalid; 3871 } 3872 SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_set_properties: " 3873 "lun reset enabled flag set to %d\n", 3874 un->un_f_lun_reset_enabled); 3875 return; 3876 } 3877 3878 if (strcasecmp(name, "retries-busy") == 0) { 3879 if (ddi_strtol(value, &endptr, 0, &val) == 0) { 3880 un->un_busy_retry_count = val; 3881 } else { 3882 goto value_invalid; 3883 } 3884 SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_set_properties: " 3885 "busy retry 
count set to %d\n", un->un_busy_retry_count);
3886 return;
3887 }
3888
3889 if (strcasecmp(name, "retries-timeout") == 0) {
3890 if (ddi_strtol(value, &endptr, 0, &val) == 0) {
3891 un->un_retry_count = val;
3892 } else {
3893 goto value_invalid;
3894 }
3895 SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_set_properties: "
3896 "timeout retry count set to %d\n", un->un_retry_count);
3897 return;
3898 }
3899
3900 if (strcasecmp(name, "retries-notready") == 0) {
3901 if (ddi_strtol(value, &endptr, 0, &val) == 0) {
3902 un->un_notready_retry_count = val;
3903 } else {
3904 goto value_invalid;
3905 }
3906 SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_set_properties: "
3907 "notready retry count set to %d\n",
3908 un->un_notready_retry_count);
3909 return;
3910 }
3911
3912 if (strcasecmp(name, "retries-reset") == 0) {
3913 if (ddi_strtol(value, &endptr, 0, &val) == 0) {
3914 un->un_reset_retry_count = val;
3915 } else {
3916 goto value_invalid;
3917 }
3918 SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_set_properties: "
3919 "reset retry count set to %d\n",
3920 un->un_reset_retry_count);
3921 return;
3922 }
3923
3924 if (strcasecmp(name, "throttle-max") == 0) {
3925 if (ddi_strtol(value, &endptr, 0, &val) == 0) {
3926 un->un_saved_throttle = un->un_throttle = val;
3927 } else {
3928 goto value_invalid;
3929 }
3930 SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_set_properties: "
3931 "throttle set to %d\n", un->un_throttle);
3932 }
3933
3934 if (strcasecmp(name, "throttle-min") == 0) {
3935 if (ddi_strtol(value, &endptr, 0, &val) == 0) {
3936 un->un_min_throttle = val;
3937 } else {
3938 goto value_invalid;
3939 }
3940 SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_set_properties: "
3941 "min throttle set to %d\n", un->un_min_throttle);
3942 }
3943
3944 /*
3945 * Validate the throttle values.
3946 * If any of the numbers are invalid, set everything to defaults.
3947 */
3948 if ((un->un_throttle < SD_LOWEST_VALID_THROTTLE) ||
3949 (un->un_min_throttle < SD_LOWEST_VALID_THROTTLE) ||
3950 (un->un_min_throttle > un->un_throttle)) {
3951 un->un_saved_throttle = un->un_throttle = sd_max_throttle;
3952 un->un_min_throttle = sd_min_throttle;
3953 }
3954 return;
3955
3956 value_invalid:
3957 SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_set_properties: "
3958 "value of prop %s is invalid\n", name);
3959 }
3960
3961 /*
3962 * Function: sd_get_tunables_from_conf()
3963 *
3964 *
3965 * Description: This function reads the data list from the sd.conf
3966 * file, pulls the values that take numeric arguments, and
3967 * places them in the appropriate sd_tunables member.
3968 * Since the order of the data list members varies across
3969 * platforms, this function reads them from the data list
3970 * in a platform-specific order and places them into the
3971 * sd_tunables member that is consistent across all platforms.
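 *
 * For example, assuming SD_CONF_BSET_THROTTLE is bit 0 (as the
 * version-1 layout described earlier implies), a hypothetical
 * data list of 1,0x1,0x20 carries flags of 0x1, so data_list[0]
 * here (prop0, after the caller strips the version and flags
 * words) supplies sdt_throttle = 0x20.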
3972 */ 3973 static void 3974 sd_get_tunables_from_conf(struct sd_lun *un, int flags, int *data_list, 3975 sd_tunables *values) 3976 { 3977 int i; 3978 int mask; 3979 3980 bzero(values, sizeof (sd_tunables)); 3981 3982 for (i = 0; i < SD_CONF_MAX_ITEMS; i++) { 3983 3984 mask = 1 << i; 3985 if (mask > flags) { 3986 break; 3987 } 3988 3989 switch (mask & flags) { 3990 case 0: /* This mask bit not set in flags */ 3991 continue; 3992 case SD_CONF_BSET_THROTTLE: 3993 values->sdt_throttle = data_list[i]; 3994 SD_INFO(SD_LOG_ATTACH_DETACH, un, 3995 "sd_get_tunables_from_conf: throttle = %d\n", 3996 values->sdt_throttle); 3997 break; 3998 case SD_CONF_BSET_CTYPE: 3999 values->sdt_ctype = data_list[i]; 4000 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4001 "sd_get_tunables_from_conf: ctype = %d\n", 4002 values->sdt_ctype); 4003 break; 4004 case SD_CONF_BSET_NRR_COUNT: 4005 values->sdt_not_rdy_retries = data_list[i]; 4006 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4007 "sd_get_tunables_from_conf: not_rdy_retries = %d\n", 4008 values->sdt_not_rdy_retries); 4009 break; 4010 case SD_CONF_BSET_BSY_RETRY_COUNT: 4011 values->sdt_busy_retries = data_list[i]; 4012 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4013 "sd_get_tunables_from_conf: busy_retries = %d\n", 4014 values->sdt_busy_retries); 4015 break; 4016 case SD_CONF_BSET_RST_RETRIES: 4017 values->sdt_reset_retries = data_list[i]; 4018 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4019 "sd_get_tunables_from_conf: reset_retries = %d\n", 4020 values->sdt_reset_retries); 4021 break; 4022 case SD_CONF_BSET_RSV_REL_TIME: 4023 values->sdt_reserv_rel_time = data_list[i]; 4024 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4025 "sd_get_tunables_from_conf: reserv_rel_time = %d\n", 4026 values->sdt_reserv_rel_time); 4027 break; 4028 case SD_CONF_BSET_MIN_THROTTLE: 4029 values->sdt_min_throttle = data_list[i]; 4030 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4031 "sd_get_tunables_from_conf: min_throttle = %d\n", 4032 values->sdt_min_throttle); 4033 break; 4034 case SD_CONF_BSET_DISKSORT_DISABLED: 4035 values->sdt_disk_sort_dis = data_list[i]; 4036 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4037 "sd_get_tunables_from_conf: disk_sort_dis = %d\n", 4038 values->sdt_disk_sort_dis); 4039 break; 4040 case SD_CONF_BSET_LUN_RESET_ENABLED: 4041 values->sdt_lun_reset_enable = data_list[i]; 4042 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4043 "sd_get_tunables_from_conf: lun_reset_enable = %d" 4044 "\n", values->sdt_lun_reset_enable); 4045 break; 4046 case SD_CONF_BSET_CACHE_IS_NV: 4047 values->sdt_suppress_cache_flush = data_list[i]; 4048 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4049 "sd_get_tunables_from_conf: \ 4050 suppress_cache_flush = %d" 4051 "\n", values->sdt_suppress_cache_flush); 4052 break; 4053 } 4054 } 4055 } 4056 4057 /* 4058 * Function: sd_process_sdconf_table 4059 * 4060 * Description: Search the static configuration table for a match on the 4061 * inquiry vid/pid and update the driver soft state structure 4062 * according to the table property values for the device. 
4063 *
4064 * The form of a configuration table entry is:
4065 * <vid+pid>,<flags>,<property-data>
4066 * "SEAGATE ST42400N",1,0x40000,
4067 * 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1;
4068 *
4069 * Arguments: un - driver soft state (unit) structure
4070 */
4071
4072 static void
4073 sd_process_sdconf_table(struct sd_lun *un)
4074 {
4075 char *id = NULL;
4076 int table_index;
4077 int idlen;
4078
4079 ASSERT(un != NULL);
4080 for (table_index = 0; table_index < sd_disk_table_size;
4081 table_index++) {
4082 id = sd_disk_table[table_index].device_id;
4083 idlen = strlen(id);
4084 if (idlen == 0) {
4085 continue;
4086 }
4087
4088 /*
4089 * The static configuration table currently does not
4090 * implement version 10 properties. Additionally,
4091 * multiple data-property-name entries are not
4092 * implemented in the static configuration table.
4093 */
4094 if (sd_sdconf_id_match(un, id, idlen) == SD_SUCCESS) {
4095 SD_INFO(SD_LOG_ATTACH_DETACH, un,
4096 "sd_process_sdconf_table: disk %s\n", id);
4097 sd_set_vers1_properties(un,
4098 sd_disk_table[table_index].flags,
4099 sd_disk_table[table_index].properties);
4100 break;
4101 }
4102 }
4103 }
4104
4105
4106 /*
4107 * Function: sd_sdconf_id_match
4108 *
4109 * Description: This local function implements a case-insensitive vid/pid
4110 * comparison (note the strncasecmp() below) as well as the
4111 * boundary cases of wildcards and multiple blanks.
4112 *
4113 * Note: An implicit assumption made here is that the scsi
4114 * inquiry structure will always keep the vid, pid and
4115 * revision strings in consecutive sequence, so they can be
4116 * read as a single string. If this assumption is not the
4117 * case, a separate string, to be used for the check, needs
4118 * to be built with these strings concatenated.
4119 *
4120 * Arguments: un - driver soft state (unit) structure
4121 * id - table or config file vid/pid
4122 * idlen - length of the vid/pid (bytes)
4123 *
4124 * Return Code: SD_SUCCESS - Indicates a match with the inquiry vid/pid
4125 * SD_FAILURE - Indicates no match with the inquiry vid/pid
4126 */
4127
4128 static int
4129 sd_sdconf_id_match(struct sd_lun *un, char *id, int idlen)
4130 {
4131 struct scsi_inquiry *sd_inq;
4132 int rval = SD_SUCCESS;
4133
4134 ASSERT(un != NULL);
4135 sd_inq = un->un_sd->sd_inq;
4136 ASSERT(id != NULL);
4137
4138 /*
4139 * We use the inq_vid as a pointer to a buffer containing the
4140 * vid and pid and use the entire vid/pid length of the table
4141 * entry for the comparison. This works because the inq_pid
4142 * data member follows inq_vid in the scsi_inquiry structure.
4143 */
4144 if (strncasecmp(sd_inq->inq_vid, id, idlen) != 0) {
4145 /*
4146 * The user id string is compared to the inquiry vid/pid
4147 * using a case insensitive comparison and ignoring
4148 * multiple spaces.
4149 */
4150 rval = sd_blank_cmp(un, id, idlen);
4151 if (rval != SD_SUCCESS) {
4152 /*
4153 * User id strings that start and end with a "*"
4154 * are a special case. These do not have a
4155 * specific vendor, and the product string can
4156 * appear anywhere in the 16 byte PID portion of
4157 * the inquiry data. This is a simple strstr()
4158 * type search for the user id in the inquiry data.
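 * For example, a hypothetical table id of "*ST31200N*" matches
 * any inquiry whose 16-byte pid contains "ST31200N" at any
 * offset.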
4159 */ 4160 if ((id[0] == '*') && (id[idlen - 1] == '*')) { 4161 char *pidptr = &id[1]; 4162 int i; 4163 int j; 4164 int pidstrlen = idlen - 2; 4165 j = sizeof (SD_INQUIRY(un)->inq_pid) - 4166 pidstrlen; 4167 4168 if (j < 0) { 4169 return (SD_FAILURE); 4170 } 4171 for (i = 0; i < j; i++) { 4172 if (bcmp(&SD_INQUIRY(un)->inq_pid[i], 4173 pidptr, pidstrlen) == 0) { 4174 rval = SD_SUCCESS; 4175 break; 4176 } 4177 } 4178 } 4179 } 4180 } 4181 return (rval); 4182 } 4183 4184 4185 /* 4186 * Function: sd_blank_cmp 4187 * 4188 * Description: If the id string starts and ends with a space, treat 4189 * multiple consecutive spaces as equivalent to a single 4190 * space. For example, this causes a sd_disk_table entry 4191 * of " NEC CDROM " to match a device's id string of 4192 * "NEC CDROM". 4193 * 4194 * Note: The success exit condition for this routine is if 4195 * the pointer to the table entry is '\0' and the cnt of 4196 * the inquiry length is zero. This will happen if the inquiry 4197 * string returned by the device is padded with spaces to be 4198 * exactly 24 bytes in length (8 byte vid + 16 byte pid). The 4199 * SCSI spec states that the inquiry string is to be padded with 4200 * spaces. 4201 * 4202 * Arguments: un - driver soft state (unit) structure 4203 * id - table or config file vid/pid 4204 * idlen - length of the vid/pid (bytes) 4205 * 4206 * Return Code: SD_SUCCESS - Indicates a match with the inquiry vid/pid 4207 * SD_FAILURE - Indicates no match with the inquiry vid/pid 4208 */ 4209 4210 static int 4211 sd_blank_cmp(struct sd_lun *un, char *id, int idlen) 4212 { 4213 char *p1; 4214 char *p2; 4215 int cnt; 4216 cnt = sizeof (SD_INQUIRY(un)->inq_vid) + 4217 sizeof (SD_INQUIRY(un)->inq_pid); 4218 4219 ASSERT(un != NULL); 4220 p2 = un->un_sd->sd_inq->inq_vid; 4221 ASSERT(id != NULL); 4222 p1 = id; 4223 4224 if ((id[0] == ' ') && (id[idlen - 1] == ' ')) { 4225 /* 4226 * Note: string p1 is terminated by a NUL but string p2 4227 * isn't. The end of p2 is determined by cnt. 4228 */ 4229 for (;;) { 4230 /* skip over any extra blanks in both strings */ 4231 while ((*p1 != '\0') && (*p1 == ' ')) { 4232 p1++; 4233 } 4234 while ((cnt != 0) && (*p2 == ' ')) { 4235 p2++; 4236 cnt--; 4237 } 4238 4239 /* compare the two strings */ 4240 if ((cnt == 0) || 4241 (SD_TOUPPER(*p1) != SD_TOUPPER(*p2))) { 4242 break; 4243 } 4244 while ((cnt > 0) && 4245 (SD_TOUPPER(*p1) == SD_TOUPPER(*p2))) { 4246 p1++; 4247 p2++; 4248 cnt--; 4249 } 4250 } 4251 } 4252 4253 /* return SD_SUCCESS if both strings match */ 4254 return (((*p1 == '\0') && (cnt == 0)) ? 
SD_SUCCESS : SD_FAILURE); 4255 } 4256 4257 4258 /* 4259 * Function: sd_chk_vers1_data 4260 * 4261 * Description: Verify the version 1 device properties provided by the 4262 * user via the configuration file 4263 * 4264 * Arguments: un - driver soft state (unit) structure 4265 * flags - integer mask indicating properties to be set 4266 * prop_list - integer list of property values 4267 * list_len - number of the elements 4268 * 4269 * Return Code: SD_SUCCESS - Indicates the user provided data is valid 4270 * SD_FAILURE - Indicates the user provided data is invalid 4271 */ 4272 4273 static int 4274 sd_chk_vers1_data(struct sd_lun *un, int flags, int *prop_list, 4275 int list_len, char *dataname_ptr) 4276 { 4277 int i; 4278 int mask = 1; 4279 int index = 0; 4280 4281 ASSERT(un != NULL); 4282 4283 /* Check for a NULL property name and list */ 4284 if (dataname_ptr == NULL) { 4285 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 4286 "sd_chk_vers1_data: NULL data property name."); 4287 return (SD_FAILURE); 4288 } 4289 if (prop_list == NULL) { 4290 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 4291 "sd_chk_vers1_data: %s NULL data property list.", 4292 dataname_ptr); 4293 return (SD_FAILURE); 4294 } 4295 4296 /* Display a warning if undefined bits are set in the flags */ 4297 if (flags & ~SD_CONF_BIT_MASK) { 4298 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 4299 "sd_chk_vers1_data: invalid bits 0x%x in data list %s. " 4300 "Properties not set.", 4301 (flags & ~SD_CONF_BIT_MASK), dataname_ptr); 4302 return (SD_FAILURE); 4303 } 4304 4305 /* 4306 * Verify the length of the list by identifying the highest bit set 4307 * in the flags and validating that the property list has a length 4308 * up to the index of this bit. 4309 */ 4310 for (i = 0; i < SD_CONF_MAX_ITEMS; i++) { 4311 if (flags & mask) { 4312 index++; 4313 } 4314 mask = 1 << i; 4315 } 4316 if (list_len < (index + 2)) { 4317 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 4318 "sd_chk_vers1_data: " 4319 "Data property list %s size is incorrect. " 4320 "Properties not set.", dataname_ptr); 4321 scsi_log(SD_DEVINFO(un), sd_label, CE_CONT, "Size expected: " 4322 "version + 1 flagword + %d properties", SD_CONF_MAX_ITEMS); 4323 return (SD_FAILURE); 4324 } 4325 return (SD_SUCCESS); 4326 } 4327 4328 4329 /* 4330 * Function: sd_set_vers1_properties 4331 * 4332 * Description: Set version 1 device properties based on a property list 4333 * retrieved from the driver configuration file or static 4334 * configuration table. Version 1 properties have the format: 4335 * 4336 * <data-property-name>:=<version>,<flags>,<prop0>,<prop1>,.....<propN> 4337 * 4338 * where the prop0 value will be used to set prop0 if bit0 4339 * is set in the flags 4340 * 4341 * Arguments: un - driver soft state (unit) structure 4342 * flags - integer mask indicating properties to be set 4343 * prop_list - integer list of property values 4344 */ 4345 4346 static void 4347 sd_set_vers1_properties(struct sd_lun *un, int flags, sd_tunables *prop_list) 4348 { 4349 ASSERT(un != NULL); 4350 4351 /* 4352 * Set the flag to indicate cache is to be disabled. An attempt 4353 * to disable the cache via sd_cache_control() will be made 4354 * later during attach once the basic initialization is complete. 
4355 */ 4356 if (flags & SD_CONF_BSET_NOCACHE) { 4357 un->un_f_opt_disable_cache = TRUE; 4358 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4359 "sd_set_vers1_properties: caching disabled flag set\n"); 4360 } 4361 4362 /* CD-specific configuration parameters */ 4363 if (flags & SD_CONF_BSET_PLAYMSF_BCD) { 4364 un->un_f_cfg_playmsf_bcd = TRUE; 4365 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4366 "sd_set_vers1_properties: playmsf_bcd set\n"); 4367 } 4368 if (flags & SD_CONF_BSET_READSUB_BCD) { 4369 un->un_f_cfg_readsub_bcd = TRUE; 4370 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4371 "sd_set_vers1_properties: readsub_bcd set\n"); 4372 } 4373 if (flags & SD_CONF_BSET_READ_TOC_TRK_BCD) { 4374 un->un_f_cfg_read_toc_trk_bcd = TRUE; 4375 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4376 "sd_set_vers1_properties: read_toc_trk_bcd set\n"); 4377 } 4378 if (flags & SD_CONF_BSET_READ_TOC_ADDR_BCD) { 4379 un->un_f_cfg_read_toc_addr_bcd = TRUE; 4380 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4381 "sd_set_vers1_properties: read_toc_addr_bcd set\n"); 4382 } 4383 if (flags & SD_CONF_BSET_NO_READ_HEADER) { 4384 un->un_f_cfg_no_read_header = TRUE; 4385 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4386 "sd_set_vers1_properties: no_read_header set\n"); 4387 } 4388 if (flags & SD_CONF_BSET_READ_CD_XD4) { 4389 un->un_f_cfg_read_cd_xd4 = TRUE; 4390 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4391 "sd_set_vers1_properties: read_cd_xd4 set\n"); 4392 } 4393 4394 /* Support for devices which do not have valid/unique serial numbers */ 4395 if (flags & SD_CONF_BSET_FAB_DEVID) { 4396 un->un_f_opt_fab_devid = TRUE; 4397 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4398 "sd_set_vers1_properties: fab_devid bit set\n"); 4399 } 4400 4401 /* Support for user throttle configuration */ 4402 if (flags & SD_CONF_BSET_THROTTLE) { 4403 ASSERT(prop_list != NULL); 4404 un->un_saved_throttle = un->un_throttle = 4405 prop_list->sdt_throttle; 4406 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4407 "sd_set_vers1_properties: throttle set to %d\n", 4408 prop_list->sdt_throttle); 4409 } 4410 4411 /* Set the per disk retry count according to the conf file or table. 
*/ 4412 if (flags & SD_CONF_BSET_NRR_COUNT) { 4413 ASSERT(prop_list != NULL); 4414 if (prop_list->sdt_not_rdy_retries) { 4415 un->un_notready_retry_count = 4416 prop_list->sdt_not_rdy_retries; 4417 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4418 "sd_set_vers1_properties: not ready retry count" 4419 " set to %d\n", un->un_notready_retry_count); 4420 } 4421 } 4422 4423 /* The controller type is reported for generic disk driver ioctls */ 4424 if (flags & SD_CONF_BSET_CTYPE) { 4425 ASSERT(prop_list != NULL); 4426 switch (prop_list->sdt_ctype) { 4427 case CTYPE_CDROM: 4428 un->un_ctype = prop_list->sdt_ctype; 4429 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4430 "sd_set_vers1_properties: ctype set to " 4431 "CTYPE_CDROM\n"); 4432 break; 4433 case CTYPE_CCS: 4434 un->un_ctype = prop_list->sdt_ctype; 4435 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4436 "sd_set_vers1_properties: ctype set to " 4437 "CTYPE_CCS\n"); 4438 break; 4439 case CTYPE_ROD: /* RW optical */ 4440 un->un_ctype = prop_list->sdt_ctype; 4441 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4442 "sd_set_vers1_properties: ctype set to " 4443 "CTYPE_ROD\n"); 4444 break; 4445 default: 4446 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 4447 "sd_set_vers1_properties: Could not set " 4448 "invalid ctype value (%d)", 4449 prop_list->sdt_ctype); 4450 } 4451 } 4452 4453 /* Purple failover timeout */ 4454 if (flags & SD_CONF_BSET_BSY_RETRY_COUNT) { 4455 ASSERT(prop_list != NULL); 4456 un->un_busy_retry_count = 4457 prop_list->sdt_busy_retries; 4458 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4459 "sd_set_vers1_properties: " 4460 "busy retry count set to %d\n", 4461 un->un_busy_retry_count); 4462 } 4463 4464 /* Purple reset retry count */ 4465 if (flags & SD_CONF_BSET_RST_RETRIES) { 4466 ASSERT(prop_list != NULL); 4467 un->un_reset_retry_count = 4468 prop_list->sdt_reset_retries; 4469 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4470 "sd_set_vers1_properties: " 4471 "reset retry count set to %d\n", 4472 un->un_reset_retry_count); 4473 } 4474 4475 /* Purple reservation release timeout */ 4476 if (flags & SD_CONF_BSET_RSV_REL_TIME) { 4477 ASSERT(prop_list != NULL); 4478 un->un_reserve_release_time = 4479 prop_list->sdt_reserv_rel_time; 4480 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4481 "sd_set_vers1_properties: " 4482 "reservation release timeout set to %d\n", 4483 un->un_reserve_release_time); 4484 } 4485 4486 /* 4487 * Driver flag telling the driver to verify that no commands are pending 4488 * for a device before issuing a Test Unit Ready. This is a workaround 4489 * for a firmware bug in some Seagate eliteI drives. 4490 */ 4491 if (flags & SD_CONF_BSET_TUR_CHECK) { 4492 un->un_f_cfg_tur_check = TRUE; 4493 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4494 "sd_set_vers1_properties: tur queue check set\n"); 4495 } 4496 4497 if (flags & SD_CONF_BSET_MIN_THROTTLE) { 4498 un->un_min_throttle = prop_list->sdt_min_throttle; 4499 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4500 "sd_set_vers1_properties: min throttle set to %d\n", 4501 un->un_min_throttle); 4502 } 4503 4504 if (flags & SD_CONF_BSET_DISKSORT_DISABLED) { 4505 un->un_f_disksort_disabled = 4506 (prop_list->sdt_disk_sort_dis != 0) ? 4507 TRUE : FALSE; 4508 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4509 "sd_set_vers1_properties: disksort disabled " 4510 "flag set to %d\n", 4511 prop_list->sdt_disk_sort_dis); 4512 } 4513 4514 if (flags & SD_CONF_BSET_LUN_RESET_ENABLED) { 4515 un->un_f_lun_reset_enabled = 4516 (prop_list->sdt_lun_reset_enable != 0) ? 
4517 TRUE : FALSE; 4518 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4519 "sd_set_vers1_properties: lun reset enabled " 4520 "flag set to %d\n", 4521 prop_list->sdt_lun_reset_enable); 4522 } 4523 4524 if (flags & SD_CONF_BSET_CACHE_IS_NV) { 4525 un->un_f_suppress_cache_flush = 4526 (prop_list->sdt_suppress_cache_flush != 0) ? 4527 TRUE : FALSE; 4528 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4529 "sd_set_vers1_properties: suppress_cache_flush " 4530 "flag set to %d\n", 4531 prop_list->sdt_suppress_cache_flush); 4532 } 4533 4534 /* 4535 * Validate the throttle values. 4536 * If any of the numbers are invalid, set everything to defaults. 4537 */ 4538 if ((un->un_throttle < SD_LOWEST_VALID_THROTTLE) || 4539 (un->un_min_throttle < SD_LOWEST_VALID_THROTTLE) || 4540 (un->un_min_throttle > un->un_throttle)) { 4541 un->un_saved_throttle = un->un_throttle = sd_max_throttle; 4542 un->un_min_throttle = sd_min_throttle; 4543 } 4544 } 4545 4546 /* 4547 * Function: sd_is_lsi() 4548 * 4549 * Description: Check for lsi devices, step through the static device 4550 * table to match vid/pid. 4551 * 4552 * Args: un - ptr to sd_lun 4553 * 4554 * Notes: When creating new LSI property, need to add the new LSI property 4555 * to this function. 4556 */ 4557 static void 4558 sd_is_lsi(struct sd_lun *un) 4559 { 4560 char *id = NULL; 4561 int table_index; 4562 int idlen; 4563 void *prop; 4564 4565 ASSERT(un != NULL); 4566 for (table_index = 0; table_index < sd_disk_table_size; 4567 table_index++) { 4568 id = sd_disk_table[table_index].device_id; 4569 idlen = strlen(id); 4570 if (idlen == 0) { 4571 continue; 4572 } 4573 4574 if (sd_sdconf_id_match(un, id, idlen) == SD_SUCCESS) { 4575 prop = sd_disk_table[table_index].properties; 4576 if (prop == &lsi_properties || 4577 prop == &lsi_oem_properties || 4578 prop == &lsi_properties_scsi || 4579 prop == &symbios_properties) { 4580 un->un_f_cfg_is_lsi = TRUE; 4581 } 4582 break; 4583 } 4584 } 4585 } 4586 4587 /* 4588 * Function: sd_get_physical_geometry 4589 * 4590 * Description: Retrieve the MODE SENSE page 3 (Format Device Page) and 4591 * MODE SENSE page 4 (Rigid Disk Drive Geometry Page) from the 4592 * target, and use this information to initialize the physical 4593 * geometry cache specified by pgeom_p. 4594 * 4595 * MODE SENSE is an optional command, so failure in this case 4596 * does not necessarily denote an error. We want to use the 4597 * MODE SENSE commands to derive the physical geometry of the 4598 * device, but if either command fails, the logical geometry is 4599 * used as the fallback for disk label geometry in cmlb. 4600 * 4601 * This requires that un->un_blockcount and un->un_tgt_blocksize 4602 * have already been initialized for the current target and 4603 * that the current values be passed as args so that we don't 4604 * end up ever trying to use -1 as a valid value. This could 4605 * happen if either value is reset while we're not holding 4606 * the mutex. 4607 * 4608 * Arguments: un - driver soft state (unit) structure 4609 * path_flag - SD_PATH_DIRECT to use the USCSI "direct" chain and 4610 * the normal command waitq, or SD_PATH_DIRECT_PRIORITY 4611 * to use the USCSI "direct" chain and bypass the normal 4612 * command waitq. 4613 * 4614 * Context: Kernel thread only (can sleep). 
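 *
 * For example (hypothetical values): if page 3 reports 63
 * sectors/track and page 4 reports 255 heads and 1024 cylinders,
 * the mode sense capacity computed below is 255 * 63 * 1024 =
 * 16,450,560 blocks; this is compared against the READ CAPACITY
 * value to derive the number of alternate cylinders.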
4615 */ 4616 4617 static int 4618 sd_get_physical_geometry(struct sd_lun *un, cmlb_geom_t *pgeom_p, 4619 diskaddr_t capacity, int lbasize, int path_flag) 4620 { 4621 struct mode_format *page3p; 4622 struct mode_geometry *page4p; 4623 struct mode_header *headerp; 4624 int sector_size; 4625 int nsect; 4626 int nhead; 4627 int ncyl; 4628 int intrlv; 4629 int spc; 4630 diskaddr_t modesense_capacity; 4631 int rpm; 4632 int bd_len; 4633 int mode_header_length; 4634 uchar_t *p3bufp; 4635 uchar_t *p4bufp; 4636 int cdbsize; 4637 int ret = EIO; 4638 sd_ssc_t *ssc; 4639 int status; 4640 4641 ASSERT(un != NULL); 4642 4643 if (lbasize == 0) { 4644 if (ISCD(un)) { 4645 lbasize = 2048; 4646 } else { 4647 lbasize = un->un_sys_blocksize; 4648 } 4649 } 4650 pgeom_p->g_secsize = (unsigned short)lbasize; 4651 4652 /* 4653 * If the unit is a cd/dvd drive MODE SENSE page three 4654 * and MODE SENSE page four are reserved (see SBC spec 4655 * and MMC spec). To prevent soft errors just return 4656 * using the default LBA size. 4657 */ 4658 if (ISCD(un)) 4659 return (ret); 4660 4661 cdbsize = (un->un_f_cfg_is_atapi == TRUE) ? CDB_GROUP2 : CDB_GROUP0; 4662 4663 /* 4664 * Retrieve MODE SENSE page 3 - Format Device Page 4665 */ 4666 p3bufp = kmem_zalloc(SD_MODE_SENSE_PAGE3_LENGTH, KM_SLEEP); 4667 ssc = sd_ssc_init(un); 4668 status = sd_send_scsi_MODE_SENSE(ssc, cdbsize, p3bufp, 4669 SD_MODE_SENSE_PAGE3_LENGTH, SD_MODE_SENSE_PAGE3_CODE, path_flag); 4670 if (status != 0) { 4671 SD_ERROR(SD_LOG_COMMON, un, 4672 "sd_get_physical_geometry: mode sense page 3 failed\n"); 4673 goto page3_exit; 4674 } 4675 4676 /* 4677 * Determine size of Block Descriptors in order to locate the mode 4678 * page data. ATAPI devices return 0, SCSI devices should return 4679 * MODE_BLK_DESC_LENGTH. 4680 */ 4681 headerp = (struct mode_header *)p3bufp; 4682 if (un->un_f_cfg_is_atapi == TRUE) { 4683 struct mode_header_grp2 *mhp = 4684 (struct mode_header_grp2 *)headerp; 4685 mode_header_length = MODE_HEADER_LENGTH_GRP2; 4686 bd_len = (mhp->bdesc_length_hi << 8) | mhp->bdesc_length_lo; 4687 } else { 4688 mode_header_length = MODE_HEADER_LENGTH; 4689 bd_len = ((struct mode_header *)headerp)->bdesc_length; 4690 } 4691 4692 if (bd_len > MODE_BLK_DESC_LENGTH) { 4693 SD_ERROR(SD_LOG_COMMON, un, "sd_get_physical_geometry: " 4694 "received unexpected bd_len of %d, page3\n", bd_len); 4695 sd_ssc_set_info(ssc, SSC_FLAGS_INVALID_DATA, 4696 "sd_get_physical_geometry: received unexpected " 4697 "bd_len of %d, page3", bd_len); 4698 status = EIO; 4699 goto page3_exit; 4700 } 4701 4702 page3p = (struct mode_format *) 4703 ((caddr_t)headerp + mode_header_length + bd_len); 4704 4705 if (page3p->mode_page.code != SD_MODE_SENSE_PAGE3_CODE) { 4706 SD_ERROR(SD_LOG_COMMON, un, "sd_get_physical_geometry: " 4707 "mode sense pg3 code mismatch %d\n", 4708 page3p->mode_page.code); 4709 sd_ssc_set_info(ssc, SSC_FLAGS_INVALID_DATA, 4710 "sd_get_physical_geometry: mode sense pg3 code " 4711 "mismatch %d", page3p->mode_page.code); 4712 status = EIO; 4713 goto page3_exit; 4714 } 4715 4716 /* 4717 * Use this physical geometry data only if BOTH MODE SENSE commands 4718 * complete successfully; otherwise, revert to the logical geometry. 4719 * So, we need to save everything in temporary variables. 
4720 */ 4721 sector_size = BE_16(page3p->data_bytes_sect); 4722 4723 /* 4724 * 1243403: The NEC D38x7 drives do not support MODE SENSE sector size 4725 */ 4726 if (sector_size == 0) { 4727 sector_size = un->un_sys_blocksize; 4728 } else { 4729 sector_size &= ~(un->un_sys_blocksize - 1); 4730 } 4731 4732 nsect = BE_16(page3p->sect_track); 4733 intrlv = BE_16(page3p->interleave); 4734 4735 SD_INFO(SD_LOG_COMMON, un, 4736 "sd_get_physical_geometry: Format Parameters (page 3)\n"); 4737 SD_INFO(SD_LOG_COMMON, un, 4738 " mode page: %d; nsect: %d; sector size: %d;\n", 4739 page3p->mode_page.code, nsect, sector_size); 4740 SD_INFO(SD_LOG_COMMON, un, 4741 " interleave: %d; track skew: %d; cylinder skew: %d;\n", intrlv, 4742 BE_16(page3p->track_skew), 4743 BE_16(page3p->cylinder_skew)); 4744 4745 sd_ssc_assessment(ssc, SD_FMT_STANDARD); 4746 4747 /* 4748 * Retrieve MODE SENSE page 4 - Rigid Disk Drive Geometry Page 4749 */ 4750 p4bufp = kmem_zalloc(SD_MODE_SENSE_PAGE4_LENGTH, KM_SLEEP); 4751 status = sd_send_scsi_MODE_SENSE(ssc, cdbsize, p4bufp, 4752 SD_MODE_SENSE_PAGE4_LENGTH, SD_MODE_SENSE_PAGE4_CODE, path_flag); 4753 if (status != 0) { 4754 SD_ERROR(SD_LOG_COMMON, un, 4755 "sd_get_physical_geometry: mode sense page 4 failed\n"); 4756 goto page4_exit; 4757 } 4758 4759 /* 4760 * Determine size of Block Descriptors in order to locate the mode 4761 * page data. ATAPI devices return 0, SCSI devices should return 4762 * MODE_BLK_DESC_LENGTH. 4763 */ 4764 headerp = (struct mode_header *)p4bufp; 4765 if (un->un_f_cfg_is_atapi == TRUE) { 4766 struct mode_header_grp2 *mhp = 4767 (struct mode_header_grp2 *)headerp; 4768 bd_len = (mhp->bdesc_length_hi << 8) | mhp->bdesc_length_lo; 4769 } else { 4770 bd_len = ((struct mode_header *)headerp)->bdesc_length; 4771 } 4772 4773 if (bd_len > MODE_BLK_DESC_LENGTH) { 4774 SD_ERROR(SD_LOG_COMMON, un, "sd_get_physical_geometry: " 4775 "received unexpected bd_len of %d, page4\n", bd_len); 4776 sd_ssc_set_info(ssc, SSC_FLAGS_INVALID_DATA, 4777 "sd_get_physical_geometry: received unexpected " 4778 "bd_len of %d, page4", bd_len); 4779 status = EIO; 4780 goto page4_exit; 4781 } 4782 4783 page4p = (struct mode_geometry *) 4784 ((caddr_t)headerp + mode_header_length + bd_len); 4785 4786 if (page4p->mode_page.code != SD_MODE_SENSE_PAGE4_CODE) { 4787 SD_ERROR(SD_LOG_COMMON, un, "sd_get_physical_geometry: " 4788 "mode sense pg4 code mismatch %d\n", 4789 page4p->mode_page.code); 4790 sd_ssc_set_info(ssc, SSC_FLAGS_INVALID_DATA, 4791 "sd_get_physical_geometry: mode sense pg4 code " 4792 "mismatch %d", page4p->mode_page.code); 4793 status = EIO; 4794 goto page4_exit; 4795 } 4796 4797 /* 4798 * Stash the data now, after we know that both commands completed. 4799 */ 4800 4801 4802 nhead = (int)page4p->heads; /* uchar, so no conversion needed */ 4803 spc = nhead * nsect; 4804 ncyl = (page4p->cyl_ub << 16) + (page4p->cyl_mb << 8) + page4p->cyl_lb; 4805 rpm = BE_16(page4p->rpm); 4806 4807 modesense_capacity = spc * ncyl; 4808 4809 SD_INFO(SD_LOG_COMMON, un, 4810 "sd_get_physical_geometry: Geometry Parameters (page 4)\n"); 4811 SD_INFO(SD_LOG_COMMON, un, 4812 " cylinders: %d; heads: %d; rpm: %d;\n", ncyl, nhead, rpm); 4813 SD_INFO(SD_LOG_COMMON, un, 4814 " computed capacity(h*s*c): %d;\n", modesense_capacity); 4815 SD_INFO(SD_LOG_COMMON, un, " pgeom_p: %p; read cap: %d\n", 4816 (void *)pgeom_p, capacity); 4817 4818 /* 4819 * Compensate if the drive's geometry is not rectangular, i.e., 4820 * the product of C * H * S returned by MODE SENSE >= that returned 4821 * by read capacity. 
This is an idiosyncrasy of the original x86
4822 * disk subsystem.
4823 */
4824 if (modesense_capacity >= capacity) {
4825 SD_INFO(SD_LOG_COMMON, un,
4826 "sd_get_physical_geometry: adjusting acyl; "
4827 "old: %d; new: %d\n", pgeom_p->g_acyl,
4828 (modesense_capacity - capacity + spc - 1) / spc);
4829 if (sector_size != 0) {
4830 /* 1243403: NEC D38x7 drives don't support sec size */
4831 pgeom_p->g_secsize = (unsigned short)sector_size;
4832 }
4833 pgeom_p->g_nsect = (unsigned short)nsect;
4834 pgeom_p->g_nhead = (unsigned short)nhead;
4835 pgeom_p->g_capacity = capacity;
4836 pgeom_p->g_acyl =
4837 (modesense_capacity - pgeom_p->g_capacity + spc - 1) / spc;
4838 pgeom_p->g_ncyl = ncyl - pgeom_p->g_acyl;
4839 }
4840
4841 pgeom_p->g_rpm = (unsigned short)rpm;
4842 pgeom_p->g_intrlv = (unsigned short)intrlv;
4843 ret = 0;
4844
4845 SD_INFO(SD_LOG_COMMON, un,
4846 "sd_get_physical_geometry: mode sense geometry:\n");
4847 SD_INFO(SD_LOG_COMMON, un,
4848 " nsect: %d; sector size: %d; interlv: %d\n",
4849 nsect, sector_size, intrlv);
4850 SD_INFO(SD_LOG_COMMON, un,
4851 " nhead: %d; ncyl: %d; rpm: %d; capacity(ms): %d\n",
4852 nhead, ncyl, rpm, modesense_capacity);
4853 SD_INFO(SD_LOG_COMMON, un,
4854 "sd_get_physical_geometry: (cached)\n");
4855 SD_INFO(SD_LOG_COMMON, un,
4856 " ncyl: %ld; acyl: %d; nhead: %d; nsect: %d\n",
4857 pgeom_p->g_ncyl, pgeom_p->g_acyl,
4858 pgeom_p->g_nhead, pgeom_p->g_nsect);
4859 SD_INFO(SD_LOG_COMMON, un,
4860 " lbasize: %d; capacity: %ld; intrlv: %d; rpm: %d\n",
4861 pgeom_p->g_secsize, pgeom_p->g_capacity,
4862 pgeom_p->g_intrlv, pgeom_p->g_rpm);
4863 sd_ssc_assessment(ssc, SD_FMT_STANDARD);
4864
4865 page4_exit:
4866 kmem_free(p4bufp, SD_MODE_SENSE_PAGE4_LENGTH);
4867
4868 page3_exit:
4869 kmem_free(p3bufp, SD_MODE_SENSE_PAGE3_LENGTH);
4870
4871 if (status != 0) {
4872 if (status == EIO) {
4873 /*
4874 * Some disks do not support MODE SENSE(6); this
4875 * kind of error (sense key 0x5, Illegal Request)
4876 * should be ignored.
4877 */
4878 uint8_t *sensep;
4879 int senlen;
4880
4881 sensep = (uint8_t *)ssc->ssc_uscsi_cmd->uscsi_rqbuf;
4882 senlen = (int)(ssc->ssc_uscsi_cmd->uscsi_rqlen -
4883 ssc->ssc_uscsi_cmd->uscsi_rqresid);
4884
4885 if (senlen > 0 &&
4886 scsi_sense_key(sensep) == KEY_ILLEGAL_REQUEST) {
4887 sd_ssc_assessment(ssc,
4888 SD_FMT_IGNORE_COMPROMISE);
4889 } else {
4890 sd_ssc_assessment(ssc, SD_FMT_STATUS_CHECK);
4891 }
4892 } else {
4893 sd_ssc_assessment(ssc, SD_FMT_IGNORE);
4894 }
4895 }
4896 sd_ssc_fini(ssc);
4897 return (ret);
4898 }
4899
4900 /*
4901 * Function: sd_get_virtual_geometry
4902 *
4903 * Description: Ask the controller to tell us about the target device.
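 *
 * The "geometry" capability queried below is assumed to come
 * back packed as (nhead << 16) | nsect; e.g. a hypothetical
 * return of 0x00ff003f decodes to 255 heads and 63 sectors
 * per track.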
4904 * 4905 * Arguments: un - pointer to softstate 4906 * capacity - disk capacity in #blocks 4907 * lbasize - disk block size in bytes 4908 * 4909 * Context: Kernel thread only 4910 */ 4911 4912 static int 4913 sd_get_virtual_geometry(struct sd_lun *un, cmlb_geom_t *lgeom_p, 4914 diskaddr_t capacity, int lbasize) 4915 { 4916 uint_t geombuf; 4917 int spc; 4918 4919 ASSERT(un != NULL); 4920 4921 /* Set sector size, and total number of sectors */ 4922 (void) scsi_ifsetcap(SD_ADDRESS(un), "sector-size", lbasize, 1); 4923 (void) scsi_ifsetcap(SD_ADDRESS(un), "total-sectors", capacity, 1); 4924 4925 /* Let the HBA tell us its geometry */ 4926 geombuf = (uint_t)scsi_ifgetcap(SD_ADDRESS(un), "geometry", 1); 4927 4928 /* A value of -1 indicates an undefined "geometry" property */ 4929 if (geombuf == (-1)) { 4930 return (EINVAL); 4931 } 4932 4933 /* Initialize the logical geometry cache. */ 4934 lgeom_p->g_nhead = (geombuf >> 16) & 0xffff; 4935 lgeom_p->g_nsect = geombuf & 0xffff; 4936 lgeom_p->g_secsize = un->un_sys_blocksize; 4937 4938 spc = lgeom_p->g_nhead * lgeom_p->g_nsect; 4939 4940 /* 4941 * Note: The driver originally converted the capacity value from 4942 * target blocks to system blocks. However, the capacity value passed 4943 * to this routine is already in terms of system blocks (this scaling 4944 * is done when the READ CAPACITY command is issued and processed). 4945 * This 'error' may have gone undetected because the usage of g_ncyl 4946 * (which is based upon g_capacity) is very limited within the driver 4947 */ 4948 lgeom_p->g_capacity = capacity; 4949 4950 /* 4951 * Set ncyl to zero if the hba returned a zero nhead or nsect value. The 4952 * hba may return zero values if the device has been removed. 4953 */ 4954 if (spc == 0) { 4955 lgeom_p->g_ncyl = 0; 4956 } else { 4957 lgeom_p->g_ncyl = lgeom_p->g_capacity / spc; 4958 } 4959 lgeom_p->g_acyl = 0; 4960 4961 SD_INFO(SD_LOG_COMMON, un, "sd_get_virtual_geometry: (cached)\n"); 4962 return (0); 4963 4964 } 4965 /* 4966 * Function: sd_update_block_info 4967 * 4968 * Description: Calculate a byte count to sector count bitshift value 4969 * from sector size. 4970 * 4971 * Arguments: un: unit struct. 4972 * lbasize: new target sector size 4973 * capacity: new target capacity, ie. block count 4974 * 4975 * Context: Kernel thread context 4976 */ 4977 4978 static void 4979 sd_update_block_info(struct sd_lun *un, uint32_t lbasize, uint64_t capacity) 4980 { 4981 if (lbasize != 0) { 4982 un->un_tgt_blocksize = lbasize; 4983 un->un_f_tgt_blocksize_is_valid = TRUE; 4984 } 4985 4986 if (capacity != 0) { 4987 un->un_blockcount = capacity; 4988 un->un_f_blockcount_is_valid = TRUE; 4989 } 4990 } 4991 4992 4993 /* 4994 * Function: sd_register_devid 4995 * 4996 * Description: This routine will obtain the device id information from the 4997 * target, obtain the serial number, and register the device 4998 * id with the ddi framework. 4999 * 5000 * Arguments: devi - the system's dev_info_t for the device. 
5001 * un - driver soft state (unit) structure 5002 * reservation_flag - indicates if a reservation conflict 5003 * occurred during attach 5004 * 5005 * Context: Kernel Thread 5006 */ 5007 static void 5008 sd_register_devid(sd_ssc_t *ssc, dev_info_t *devi, int reservation_flag) 5009 { 5010 int rval = 0; 5011 uchar_t *inq80 = NULL; 5012 size_t inq80_len = MAX_INQUIRY_SIZE; 5013 size_t inq80_resid = 0; 5014 uchar_t *inq83 = NULL; 5015 size_t inq83_len = MAX_INQUIRY_SIZE; 5016 size_t inq83_resid = 0; 5017 int dlen, len; 5018 char *sn; 5019 struct sd_lun *un; 5020 5021 ASSERT(ssc != NULL); 5022 un = ssc->ssc_un; 5023 ASSERT(un != NULL); 5024 ASSERT(mutex_owned(SD_MUTEX(un))); 5025 ASSERT((SD_DEVINFO(un)) == devi); 5026 5027 /* 5028 * If transport has already registered a devid for this target 5029 * then that takes precedence over the driver's determination 5030 * of the devid. 5031 */ 5032 if (ddi_devid_get(SD_DEVINFO(un), &un->un_devid) == DDI_SUCCESS) { 5033 ASSERT(un->un_devid); 5034 return; /* use devid registered by the transport */ 5035 } 5036 5037 /* 5038 * This is the case of antiquated Sun disk drives that have the 5039 * FAB_DEVID property set in the disk_table. These drives 5040 * manage the devid's by storing them in last 2 available sectors 5041 * on the drive and have them fabricated by the ddi layer by calling 5042 * ddi_devid_init and passing the DEVID_FAB flag. 5043 */ 5044 if (un->un_f_opt_fab_devid == TRUE) { 5045 /* 5046 * Depending on EINVAL isn't reliable, since a reserved disk 5047 * may result in invalid geometry, so check to make sure a 5048 * reservation conflict did not occur during attach. 5049 */ 5050 if ((sd_get_devid(ssc) == EINVAL) && 5051 (reservation_flag != SD_TARGET_IS_RESERVED)) { 5052 /* 5053 * The devid is invalid AND there is no reservation 5054 * conflict. Fabricate a new devid. 5055 */ 5056 (void) sd_create_devid(ssc); 5057 } 5058 5059 /* Register the devid if it exists */ 5060 if (un->un_devid != NULL) { 5061 (void) ddi_devid_register(SD_DEVINFO(un), 5062 un->un_devid); 5063 SD_INFO(SD_LOG_ATTACH_DETACH, un, 5064 "sd_register_devid: Devid Fabricated\n"); 5065 } 5066 return; 5067 } 5068 5069 /* 5070 * We check the availability of the World Wide Name (0x83) and Unit 5071 * Serial Number (0x80) pages in sd_check_vpd_page_support(), and using 5072 * un_vpd_page_mask from them, we decide which way to get the WWN. If 5073 * 0x83 is available, that is the best choice. Our next choice is 5074 * 0x80. If neither are available, we munge the devid from the device 5075 * vid/pid/serial # for Sun qualified disks, or use the ddi framework 5076 * to fabricate a devid for non-Sun qualified disks. 5077 */ 5078 if (sd_check_vpd_page_support(ssc) == 0) { 5079 /* collect page 80 data if available */ 5080 if (un->un_vpd_page_mask & SD_VPD_UNIT_SERIAL_PG) { 5081 5082 mutex_exit(SD_MUTEX(un)); 5083 inq80 = kmem_zalloc(inq80_len, KM_SLEEP); 5084 5085 rval = sd_send_scsi_INQUIRY(ssc, inq80, inq80_len, 5086 0x01, 0x80, &inq80_resid); 5087 5088 if (rval != 0) { 5089 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 5090 kmem_free(inq80, inq80_len); 5091 inq80 = NULL; 5092 inq80_len = 0; 5093 } else if (ddi_prop_exists( 5094 DDI_DEV_T_NONE, SD_DEVINFO(un), 5095 DDI_PROP_NOTPROM | DDI_PROP_DONTPASS, 5096 INQUIRY_SERIAL_NO) == 0) { 5097 /* 5098 * If we don't already have a serial number 5099 * property, do quick verify of data returned 5100 * and define property. 
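 * (Per SPC, byte 3 of the unit serial number VPD page holds
 * the page length and the ASCII serial number begins at byte
 * 4, which is what the dlen/len checks below assume.)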
5101 */ 5102 dlen = inq80_len - inq80_resid; 5103 len = (size_t)inq80[3]; 5104 if ((dlen >= 4) && ((len + 4) <= dlen)) { 5105 /* 5106 * Ensure sn termination, skip leading 5107 * blanks, and create property 5108 * 'inquiry-serial-no'. 5109 */ 5110 sn = (char *)&inq80[4]; 5111 sn[len] = 0; 5112 while (*sn && (*sn == ' ')) 5113 sn++; 5114 if (*sn) { 5115 (void) ddi_prop_update_string( 5116 DDI_DEV_T_NONE, 5117 SD_DEVINFO(un), 5118 INQUIRY_SERIAL_NO, sn); 5119 } 5120 } 5121 } 5122 mutex_enter(SD_MUTEX(un)); 5123 } 5124 5125 /* collect page 83 data if available */ 5126 if (un->un_vpd_page_mask & SD_VPD_DEVID_WWN_PG) { 5127 mutex_exit(SD_MUTEX(un)); 5128 inq83 = kmem_zalloc(inq83_len, KM_SLEEP); 5129 5130 rval = sd_send_scsi_INQUIRY(ssc, inq83, inq83_len, 5131 0x01, 0x83, &inq83_resid); 5132 5133 if (rval != 0) { 5134 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 5135 kmem_free(inq83, inq83_len); 5136 inq83 = NULL; 5137 inq83_len = 0; 5138 } 5139 mutex_enter(SD_MUTEX(un)); 5140 } 5141 } 5142 5143 /* encode best devid possible based on data available */ 5144 if (ddi_devid_scsi_encode(DEVID_SCSI_ENCODE_VERSION_LATEST, 5145 (char *)ddi_driver_name(SD_DEVINFO(un)), 5146 (uchar_t *)SD_INQUIRY(un), sizeof (*SD_INQUIRY(un)), 5147 inq80, inq80_len - inq80_resid, inq83, inq83_len - 5148 inq83_resid, &un->un_devid) == DDI_SUCCESS) { 5149 5150 /* devid successfully encoded, register devid */ 5151 (void) ddi_devid_register(SD_DEVINFO(un), un->un_devid); 5152 5153 } else { 5154 /* 5155 * Unable to encode a devid based on data available. 5156 * This is not a Sun qualified disk. Older Sun disk 5157 * drives that have the SD_FAB_DEVID property 5158 * set in the disk_table and non-Sun qualified 5159 * disks are treated in the same manner. These 5160 * drives manage the devids by storing them in the 5161 * last 2 available sectors on the drive and 5162 * have them fabricated by the ddi layer by 5163 * calling ddi_devid_init and passing the 5164 * DEVID_FAB flag. 5165 * Create a fabricated devid only if no 5166 * fabricated devid already exists. 5167 */ 5168 if (sd_get_devid(ssc) == EINVAL) { 5169 (void) sd_create_devid(ssc); 5170 } 5171 un->un_f_opt_fab_devid = TRUE; 5172 5173 /* Register the devid if it exists */ 5174 if (un->un_devid != NULL) { 5175 (void) ddi_devid_register(SD_DEVINFO(un), 5176 un->un_devid); 5177 SD_INFO(SD_LOG_ATTACH_DETACH, un, 5178 "sd_register_devid: devid fabricated using " 5179 "ddi framework\n"); 5180 } 5181 } 5182 5183 /* clean up resources */ 5184 if (inq80 != NULL) { 5185 kmem_free(inq80, inq80_len); 5186 } 5187 if (inq83 != NULL) { 5188 kmem_free(inq83, inq83_len); 5189 } 5190 } 5191 5192 5193 5194 /* 5195 * Function: sd_get_devid 5196 * 5197 * Description: This routine will return 0 if a valid device id has been 5198 * obtained from the target and stored in the soft state. If a 5199 * valid device id has not been previously read and stored, a 5200 * read attempt will be made.
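* As a sketch of the on-disk format this routine consumes (see the
* dk_devid definition in dklabel.h): the block starts with a two-byte
* revision (dkd_rev_hi/dkd_rev_lo), followed by the encoded devid
* itself, and ends with a four-byte checksum that is the XOR of all
* preceding 32-bit words in the block; the code below recomputes that
* XOR and compares it against DKD_GETCHKSUM().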
5201 * 5202 * Arguments: un - driver soft state (unit) structure 5203 * 5204 * Return Code: 0 if we successfully get the device id 5205 * 5206 * Context: Kernel Thread 5207 */ 5208 5209 static int 5210 sd_get_devid(sd_ssc_t *ssc) 5211 { 5212 struct dk_devid *dkdevid; 5213 ddi_devid_t tmpid; 5214 uint_t *ip; 5215 size_t sz; 5216 diskaddr_t blk; 5217 int status; 5218 int chksum; 5219 int i; 5220 size_t buffer_size; 5221 struct sd_lun *un; 5222 5223 ASSERT(ssc != NULL); 5224 un = ssc->ssc_un; 5225 ASSERT(un != NULL); 5226 ASSERT(mutex_owned(SD_MUTEX(un))); 5227 5228 SD_TRACE(SD_LOG_ATTACH_DETACH, un, "sd_get_devid: entry: un: 0x%p\n", 5229 un); 5230 5231 if (un->un_devid != NULL) { 5232 return (0); 5233 } 5234 5235 mutex_exit(SD_MUTEX(un)); 5236 if (cmlb_get_devid_block(un->un_cmlbhandle, &blk, 5237 (void *)SD_PATH_DIRECT) != 0) { 5238 mutex_enter(SD_MUTEX(un)); 5239 return (EINVAL); 5240 } 5241 5242 /* 5243 * Read and verify device id, stored in the reserved cylinders at the 5244 * end of the disk. Backup label is on the odd sectors of the last 5245 * track of the last cylinder. Device id will be on track of the next 5246 * to last cylinder. 5247 */ 5248 mutex_enter(SD_MUTEX(un)); 5249 buffer_size = SD_REQBYTES2TGTBYTES(un, sizeof (struct dk_devid)); 5250 mutex_exit(SD_MUTEX(un)); 5251 dkdevid = kmem_alloc(buffer_size, KM_SLEEP); 5252 status = sd_send_scsi_READ(ssc, dkdevid, buffer_size, blk, 5253 SD_PATH_DIRECT); 5254 5255 if (status != 0) { 5256 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 5257 goto error; 5258 } 5259 5260 /* Validate the revision */ 5261 if ((dkdevid->dkd_rev_hi != DK_DEVID_REV_MSB) || 5262 (dkdevid->dkd_rev_lo != DK_DEVID_REV_LSB)) { 5263 status = EINVAL; 5264 goto error; 5265 } 5266 5267 /* Calculate the checksum */ 5268 chksum = 0; 5269 ip = (uint_t *)dkdevid; 5270 for (i = 0; i < ((un->un_sys_blocksize - sizeof (int))/sizeof (int)); 5271 i++) { 5272 chksum ^= ip[i]; 5273 } 5274 5275 /* Compare the checksums */ 5276 if (DKD_GETCHKSUM(dkdevid) != chksum) { 5277 status = EINVAL; 5278 goto error; 5279 } 5280 5281 /* Validate the device id */ 5282 if (ddi_devid_valid((ddi_devid_t)&dkdevid->dkd_devid) != DDI_SUCCESS) { 5283 status = EINVAL; 5284 goto error; 5285 } 5286 5287 /* 5288 * Store the device id in the driver soft state 5289 */ 5290 sz = ddi_devid_sizeof((ddi_devid_t)&dkdevid->dkd_devid); 5291 tmpid = kmem_alloc(sz, KM_SLEEP); 5292 5293 mutex_enter(SD_MUTEX(un)); 5294 5295 un->un_devid = tmpid; 5296 bcopy(&dkdevid->dkd_devid, un->un_devid, sz); 5297 5298 kmem_free(dkdevid, buffer_size); 5299 5300 SD_TRACE(SD_LOG_ATTACH_DETACH, un, "sd_get_devid: exit: un:0x%p\n", un); 5301 5302 return (status); 5303 error: 5304 mutex_enter(SD_MUTEX(un)); 5305 kmem_free(dkdevid, buffer_size); 5306 return (status); 5307 } 5308 5309 5310 /* 5311 * Function: sd_create_devid 5312 * 5313 * Description: This routine will fabricate the device id and write it 5314 * to the disk. 
5315 * 5316 * Arguments: un - driver soft state (unit) structure 5317 * 5318 * Return Code: the fabricated device id, or NULL on failure 5319 * 5320 * Context: Kernel Thread 5321 */ 5322 5323 static ddi_devid_t 5324 sd_create_devid(sd_ssc_t *ssc) 5325 { 5326 struct sd_lun *un; 5327 5328 ASSERT(ssc != NULL); 5329 un = ssc->ssc_un; 5330 ASSERT(un != NULL); 5331 5332 /* Fabricate the devid */ 5333 if (ddi_devid_init(SD_DEVINFO(un), DEVID_FAB, 0, NULL, &un->un_devid) 5334 == DDI_FAILURE) { 5335 return (NULL); 5336 } 5337 5338 /* Write the devid to disk */ 5339 if (sd_write_deviceid(ssc) != 0) { 5340 ddi_devid_free(un->un_devid); 5341 un->un_devid = NULL; 5342 } 5343 5344 return (un->un_devid); 5345 } 5346 5347 5348 /* 5349 * Function: sd_write_deviceid 5350 * 5351 * Description: This routine will write the device id to the disk 5352 * reserved sector. 5353 * 5354 * Arguments: un - driver soft state (unit) structure 5355 * 5356 * Return Code: -1 if the devid block cannot be located 5357 * otherwise the value returned by sd_send_scsi_WRITE 5358 * 5359 * Context: Kernel Thread 5360 */ 5361 5362 static int 5363 sd_write_deviceid(sd_ssc_t *ssc) 5364 { 5365 struct dk_devid *dkdevid; 5366 diskaddr_t blk; 5367 uint_t *ip, chksum; 5368 int status; 5369 int i; 5370 struct sd_lun *un; 5371 5372 ASSERT(ssc != NULL); 5373 un = ssc->ssc_un; 5374 ASSERT(un != NULL); 5375 ASSERT(mutex_owned(SD_MUTEX(un))); 5376 5377 mutex_exit(SD_MUTEX(un)); 5378 if (cmlb_get_devid_block(un->un_cmlbhandle, &blk, 5379 (void *)SD_PATH_DIRECT) != 0) { 5380 mutex_enter(SD_MUTEX(un)); 5381 return (-1); 5382 } 5383 5384 5385 /* Allocate the buffer */ 5386 dkdevid = kmem_zalloc(un->un_sys_blocksize, KM_SLEEP); 5387 5388 /* Fill in the revision */ 5389 dkdevid->dkd_rev_hi = DK_DEVID_REV_MSB; 5390 dkdevid->dkd_rev_lo = DK_DEVID_REV_LSB; 5391 5392 /* Copy in the device id */ 5393 mutex_enter(SD_MUTEX(un)); 5394 bcopy(un->un_devid, &dkdevid->dkd_devid, 5395 ddi_devid_sizeof(un->un_devid)); 5396 mutex_exit(SD_MUTEX(un)); 5397 5398 /* Calculate the checksum */ 5399 chksum = 0; 5400 ip = (uint_t *)dkdevid; 5401 for (i = 0; i < ((un->un_sys_blocksize - sizeof (int))/sizeof (int)); 5402 i++) { 5403 chksum ^= ip[i]; 5404 } 5405 5406 /* Fill-in checksum */ 5407 DKD_FORMCHKSUM(chksum, dkdevid); 5408 5409 /* Write the reserved sector */ 5410 status = sd_send_scsi_WRITE(ssc, dkdevid, un->un_sys_blocksize, blk, 5411 SD_PATH_DIRECT); 5412 if (status != 0) 5413 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 5414 5415 kmem_free(dkdevid, un->un_sys_blocksize); 5416 5417 mutex_enter(SD_MUTEX(un)); 5418 return (status); 5419 } 5420 5421 5422 /* 5423 * Function: sd_check_vpd_page_support 5424 * 5425 * Description: This routine sends an inquiry command with the EVPD bit set and 5426 * a page code of 0x00 to the device. It is used to determine which 5427 * vital product pages are available to find the devid. We are 5428 * looking for pages 0x83 or 0x80. If we return -1, the 5429 * device does not support VPD pages. 5430 * 5431 * Arguments: un - driver soft state (unit) structure 5432 * 5433 * Return Code: 0 - success 5434 * -1 - VPD pages not supported 5435 * 5436 * Context: This routine can sleep.
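* For reference, the scan below assumes the standard SPC layout of
* the Supported VPD Pages page: byte 1 of the response is the page
* code (0x00), byte 3 is the length of the page list, and the list
* of supported page codes starts at byte 4 in ascending order. That
* is why the loop starts at offset 4 and stops once the codes pass
* 0x86, the highest page this driver cares about.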
5437 */ 5438 5439 static int 5440 sd_check_vpd_page_support(sd_ssc_t *ssc) 5441 { 5442 uchar_t *page_list = NULL; 5443 uchar_t page_length = 0xff; /* Use max possible length */ 5444 uchar_t evpd = 0x01; /* Set the EVPD bit */ 5445 uchar_t page_code = 0x00; /* Supported VPD Pages */ 5446 int rval = 0; 5447 int counter; 5448 struct sd_lun *un; 5449 5450 ASSERT(ssc != NULL); 5451 un = ssc->ssc_un; 5452 ASSERT(un != NULL); 5453 ASSERT(mutex_owned(SD_MUTEX(un))); 5454 5455 mutex_exit(SD_MUTEX(un)); 5456 5457 /* 5458 * We'll set the page length to the maximum to save figuring it out 5459 * with an additional call. 5460 */ 5461 page_list = kmem_zalloc(page_length, KM_SLEEP); 5462 5463 rval = sd_send_scsi_INQUIRY(ssc, page_list, page_length, evpd, 5464 page_code, NULL); 5465 5466 if (rval != 0) 5467 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 5468 5469 mutex_enter(SD_MUTEX(un)); 5470 5471 /* 5472 * Now we must validate that the device accepted the command, as some 5473 * drives do not support it. If the drive does support it, we will 5474 * return 0, and the supported pages will be in un_vpd_page_mask. If 5475 * not, we return -1. 5476 */ 5477 if ((rval == 0) && (page_list[VPD_MODE_PAGE] == 0x00)) { 5478 /* Loop to find one of the 2 pages we need */ 5479 counter = 4; /* Supported pages start at byte 4, with 0x00 */ 5480 5481 /* 5482 * Pages are returned in ascending order, and 0x83 is what we 5483 * are hoping for. 5484 */ 5485 while ((page_list[counter] <= 0x86) && 5486 (counter <= (page_list[VPD_PAGE_LENGTH] + 5487 VPD_HEAD_OFFSET))) { 5488 /* 5489 * Add 3 because page_list[3] is the number of 5490 * pages minus 3 5491 */ 5492 5493 switch (page_list[counter]) { 5494 case 0x00: 5495 un->un_vpd_page_mask |= SD_VPD_SUPPORTED_PG; 5496 break; 5497 case 0x80: 5498 un->un_vpd_page_mask |= SD_VPD_UNIT_SERIAL_PG; 5499 break; 5500 case 0x81: 5501 un->un_vpd_page_mask |= SD_VPD_OPERATING_PG; 5502 break; 5503 case 0x82: 5504 un->un_vpd_page_mask |= SD_VPD_ASCII_OP_PG; 5505 break; 5506 case 0x83: 5507 un->un_vpd_page_mask |= SD_VPD_DEVID_WWN_PG; 5508 break; 5509 case 0x86: 5510 un->un_vpd_page_mask |= SD_VPD_EXTENDED_DATA_PG; 5511 break; 5512 } 5513 counter++; 5514 } 5515 5516 } else { 5517 rval = -1; 5518 5519 SD_INFO(SD_LOG_ATTACH_DETACH, un, 5520 "sd_check_vpd_page_support: This drive does not implement " 5521 "VPD pages.\n"); 5522 } 5523 5524 kmem_free(page_list, page_length); 5525 5526 return (rval); 5527 } 5528 5529 5530 /* 5531 * Function: sd_setup_pm 5532 * 5533 * Description: Initialize Power Management on the device 5534 * 5535 * Context: Kernel Thread 5536 */ 5537 5538 static void 5539 sd_setup_pm(sd_ssc_t *ssc, dev_info_t *devi) 5540 { 5541 uint_t log_page_size; 5542 uchar_t *log_page_data; 5543 int rval = 0; 5544 struct sd_lun *un; 5545 5546 ASSERT(ssc != NULL); 5547 un = ssc->ssc_un; 5548 ASSERT(un != NULL); 5549 5550 /* 5551 * Since we are called from attach, holding a mutex for 5552 * un is unnecessary. Because some of the routines called 5553 * from here require SD_MUTEX to not be held, assert this 5554 * right up front. 5555 */ 5556 ASSERT(!mutex_owned(SD_MUTEX(un))); 5557 /* 5558 * Since the sd device does not have the 'reg' property, 5559 * cpr will not call its DDI_SUSPEND/DDI_RESUME entries. 5560 * The following code is to tell cpr that this device 5561 * DOES need to be suspended and resumed. 
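* (Once this property is in place, the instance should show up in
* "prtconf -v" output with a pm-hardware-state string property whose
* value is "needs-suspend-resume", and cpr will then invoke the
* DDI_SUSPEND/DDI_RESUME entry points for it.)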
5562 */ 5563 (void) ddi_prop_update_string(DDI_DEV_T_NONE, devi, 5564 "pm-hardware-state", "needs-suspend-resume"); 5565 5566 /* 5567 * This complies with the new power management framework 5568 * for certain desktop machines. Create the pm_components 5569 * property as a string array property. 5570 */ 5571 if (un->un_f_pm_supported) { 5572 /* 5573 * Not all devices have a motor, so try it first; 5574 * some devices may return ILLEGAL REQUEST and some 5575 * will hang. 5576 * The following START_STOP_UNIT is used to check if the target 5577 * device has a motor. 5578 */ 5579 un->un_f_start_stop_supported = TRUE; 5580 rval = sd_send_scsi_START_STOP_UNIT(ssc, SD_TARGET_START, 5581 SD_PATH_DIRECT); 5582 5583 if (rval != 0) { 5584 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 5585 un->un_f_start_stop_supported = FALSE; 5586 } 5587 5588 /* 5589 * Create pm properties anyway, otherwise the parent can't 5590 * go to sleep. 5591 */ 5592 (void) sd_create_pm_components(devi, un); 5593 un->un_f_pm_is_enabled = TRUE; 5594 return; 5595 } 5596 5597 if (!un->un_f_log_sense_supported) { 5598 un->un_power_level = SD_SPINDLE_ON; 5599 un->un_f_pm_is_enabled = FALSE; 5600 return; 5601 } 5602 5603 rval = sd_log_page_supported(ssc, START_STOP_CYCLE_PAGE); 5604 5605 #ifdef SDDEBUG 5606 if (sd_force_pm_supported) { 5607 /* Force a successful result */ 5608 rval = 1; 5609 } 5610 #endif 5611 5612 /* 5613 * If the start-stop cycle counter log page is not supported 5614 * or if the pm-capable property is SD_PM_CAPABLE_FALSE (0) 5615 * then we should not create the pm_components property. 5616 */ 5617 if (rval == -1) { 5618 /* 5619 * Error. 5620 * Reading log sense failed, most likely this is 5621 * an older drive that does not support log sense. 5622 * If this fails auto-pm is not supported. 5623 */ 5624 un->un_power_level = SD_SPINDLE_ON; 5625 un->un_f_pm_is_enabled = FALSE; 5626 5627 } else if (rval == 0) { 5628 /* 5629 * Page not found. 5630 * The start stop cycle counter is implemented as page 5631 * START_STOP_CYCLE_VU_PAGE (0x31) in older disks. For 5632 * newer disks it is implemented as START_STOP_CYCLE_PAGE (0xE). 5633 */ 5634 if (sd_log_page_supported(ssc, START_STOP_CYCLE_VU_PAGE) == 1) { 5635 /* 5636 * Page found, use this one. 5637 */ 5638 un->un_start_stop_cycle_page = START_STOP_CYCLE_VU_PAGE; 5639 un->un_f_pm_is_enabled = TRUE; 5640 } else { 5641 /* 5642 * Error or page not found. 5643 * auto-pm is not supported for this device. 5644 */ 5645 un->un_power_level = SD_SPINDLE_ON; 5646 un->un_f_pm_is_enabled = FALSE; 5647 } 5648 } else { 5649 /* 5650 * Page found, use it. 5651 */ 5652 un->un_start_stop_cycle_page = START_STOP_CYCLE_PAGE; 5653 un->un_f_pm_is_enabled = TRUE; 5654 } 5655 5656 5657 if (un->un_f_pm_is_enabled == TRUE) { 5658 log_page_size = START_STOP_CYCLE_COUNTER_PAGE_SIZE; 5659 log_page_data = kmem_zalloc(log_page_size, KM_SLEEP); 5660 5661 rval = sd_send_scsi_LOG_SENSE(ssc, log_page_data, 5662 log_page_size, un->un_start_stop_cycle_page, 5663 0x01, 0, SD_PATH_DIRECT); 5664 5665 if (rval != 0) { 5666 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 5667 } 5668 5669 #ifdef SDDEBUG 5670 if (sd_force_pm_supported) { 5671 /* Force a successful result */ 5672 rval = 0; 5673 } 5674 #endif 5675 5676 /* 5677 * If the LOG SENSE for the Start/Stop Cycle Counter page 5678 * succeeds, then power management is supported and we can 5679 * enable auto-pm.
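* (The layout assumed elsewhere in this driver -- see the parsing in
* sdpower() -- is that this log page carries the manufacturer's
* recommended lifetime maximum and the accumulated start/stop cycle
* count as big-endian 32-bit parameter values.)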
5680 */ 5681 if (rval == 0) { 5682 (void) sd_create_pm_components(devi, un); 5683 } else { 5684 un->un_power_level = SD_SPINDLE_ON; 5685 un->un_f_pm_is_enabled = FALSE; 5686 } 5687 5688 kmem_free(log_page_data, log_page_size); 5689 } 5690 } 5691 5692 5693 /* 5694 * Function: sd_create_pm_components 5695 * 5696 * Description: Initialize PM property. 5697 * 5698 * Context: Kernel thread context 5699 */ 5700 5701 static void 5702 sd_create_pm_components(dev_info_t *devi, struct sd_lun *un) 5703 { 5704 char *pm_comp[] = { "NAME=spindle-motor", "0=off", "1=on", NULL }; 5705 5706 ASSERT(!mutex_owned(SD_MUTEX(un))); 5707 5708 if (ddi_prop_update_string_array(DDI_DEV_T_NONE, devi, 5709 "pm-components", pm_comp, 3) == DDI_PROP_SUCCESS) { 5710 /* 5711 * When components are initially created they are idle, 5712 * power up any non-removables. 5713 * Note: the return value of pm_raise_power can't be used 5714 * for determining if PM should be enabled for this device. 5715 * Even if you check the return values and remove this 5716 * property created above, the PM framework will not honor the 5717 * change after the first call to pm_raise_power. Hence, 5718 * removal of that property does not help if pm_raise_power 5719 * fails. In the case of removable media, the start/stop 5720 * will fail if the media is not present. 5721 */ 5722 if (un->un_f_attach_spinup && (pm_raise_power(SD_DEVINFO(un), 0, 5723 SD_SPINDLE_ON) == DDI_SUCCESS)) { 5724 mutex_enter(SD_MUTEX(un)); 5725 un->un_power_level = SD_SPINDLE_ON; 5726 mutex_enter(&un->un_pm_mutex); 5727 /* Set to on and not busy. */ 5728 un->un_pm_count = 0; 5729 } else { 5730 mutex_enter(SD_MUTEX(un)); 5731 un->un_power_level = SD_SPINDLE_OFF; 5732 mutex_enter(&un->un_pm_mutex); 5733 /* Set to off. */ 5734 un->un_pm_count = -1; 5735 } 5736 mutex_exit(&un->un_pm_mutex); 5737 mutex_exit(SD_MUTEX(un)); 5738 } else { 5739 un->un_power_level = SD_SPINDLE_ON; 5740 un->un_f_pm_is_enabled = FALSE; 5741 } 5742 } 5743 5744 5745 /* 5746 * Function: sd_ddi_suspend 5747 * 5748 * Description: Performs system power-down operations. This includes 5749 * setting the drive state to indicate it is suspended so 5750 * that no new commands will be accepted. Also, wait for 5751 * all commands that are in transport or queued to a timer 5752 * for retry to complete. All timeout threads are cancelled. 5753 * 5754 * Return Code: DDI_FAILURE or DDI_SUCCESS 5755 * 5756 * Context: Kernel thread context 5757 */ 5758 5759 static int 5760 sd_ddi_suspend(dev_info_t *devi) 5761 { 5762 struct sd_lun *un; 5763 clock_t wait_cmds_complete; 5764 5765 un = ddi_get_soft_state(sd_state, ddi_get_instance(devi)); 5766 if (un == NULL) { 5767 return (DDI_FAILURE); 5768 } 5769 5770 SD_TRACE(SD_LOG_IO_PM, un, "sd_ddi_suspend: entry\n"); 5771 5772 mutex_enter(SD_MUTEX(un)); 5773 5774 /* Return success if the device is already suspended. */ 5775 if (un->un_state == SD_STATE_SUSPENDED) { 5776 mutex_exit(SD_MUTEX(un)); 5777 SD_TRACE(SD_LOG_IO_PM, un, "sd_ddi_suspend: " 5778 "device already suspended, exiting\n"); 5779 return (DDI_SUCCESS); 5780 } 5781 5782 /* Return failure if the device is being used by HA */ 5783 if (un->un_resvd_status & 5784 (SD_RESERVE | SD_WANT_RESERVE | SD_LOST_RESERVE)) { 5785 mutex_exit(SD_MUTEX(un)); 5786 SD_TRACE(SD_LOG_IO_PM, un, "sd_ddi_suspend: " 5787 "device in use by HA, exiting\n"); 5788 return (DDI_FAILURE); 5789 } 5790 5791 /* 5792 * Return failure if the device is in a resource wait 5793 * or power changing state.
5794 */ 5795 if ((un->un_state == SD_STATE_RWAIT) || 5796 (un->un_state == SD_STATE_PM_CHANGING)) { 5797 mutex_exit(SD_MUTEX(un)); 5798 SD_TRACE(SD_LOG_IO_PM, un, "sd_ddi_suspend: " 5799 "device in resource wait state, exiting\n"); 5800 return (DDI_FAILURE); 5801 } 5802 5803 5804 un->un_save_state = un->un_last_state; 5805 New_state(un, SD_STATE_SUSPENDED); 5806 5807 /* 5808 * Wait for all commands that are in transport or queued to a timer 5809 * for retry to complete. 5810 * 5811 * While waiting, no new commands will be accepted or sent because of 5812 * the new state we set above. 5813 * 5814 * Wait until the current operation has completed. If we are in the 5815 * resource wait state (with an intr outstanding) then we need to wait 5816 * till the intr completes and starts the next cmd. We want to wait for 5817 * SD_WAIT_CMDS_COMPLETE seconds before failing the DDI_SUSPEND. 5818 */ 5819 wait_cmds_complete = ddi_get_lbolt() + 5820 (sd_wait_cmds_complete * drv_usectohz(1000000)); 5821 5822 while (un->un_ncmds_in_transport != 0) { 5823 /* 5824 * Fail if commands do not finish in the specified time. 5825 */ 5826 if (cv_timedwait(&un->un_disk_busy_cv, SD_MUTEX(un), 5827 wait_cmds_complete) == -1) { 5828 /* 5829 * Undo the state changes made above. Everything 5830 * must go back to its original value. 5831 */ 5832 Restore_state(un); 5833 un->un_last_state = un->un_save_state; 5834 /* Wake up any threads that might be waiting. */ 5835 cv_broadcast(&un->un_suspend_cv); 5836 mutex_exit(SD_MUTEX(un)); 5837 SD_ERROR(SD_LOG_IO_PM, un, 5838 "sd_ddi_suspend: failed due to outstanding cmds\n"); 5839 SD_TRACE(SD_LOG_IO_PM, un, "sd_ddi_suspend: exiting\n"); 5840 return (DDI_FAILURE); 5841 } 5842 } 5843 5844 /* 5845 * Cancel SCSI watch thread and timeouts, if any are active 5846 */ 5847 5848 if (SD_OK_TO_SUSPEND_SCSI_WATCHER(un)) { 5849 opaque_t temp_token = un->un_swr_token; 5850 mutex_exit(SD_MUTEX(un)); 5851 scsi_watch_suspend(temp_token); 5852 mutex_enter(SD_MUTEX(un)); 5853 } 5854 5855 if (un->un_reset_throttle_timeid != NULL) { 5856 timeout_id_t temp_id = un->un_reset_throttle_timeid; 5857 un->un_reset_throttle_timeid = NULL; 5858 mutex_exit(SD_MUTEX(un)); 5859 (void) untimeout(temp_id); 5860 mutex_enter(SD_MUTEX(un)); 5861 } 5862 5863 if (un->un_dcvb_timeid != NULL) { 5864 timeout_id_t temp_id = un->un_dcvb_timeid; 5865 un->un_dcvb_timeid = NULL; 5866 mutex_exit(SD_MUTEX(un)); 5867 (void) untimeout(temp_id); 5868 mutex_enter(SD_MUTEX(un)); 5869 } 5870 5871 mutex_enter(&un->un_pm_mutex); 5872 if (un->un_pm_timeid != NULL) { 5873 timeout_id_t temp_id = un->un_pm_timeid; 5874 un->un_pm_timeid = NULL; 5875 mutex_exit(&un->un_pm_mutex); 5876 mutex_exit(SD_MUTEX(un)); 5877 (void) untimeout(temp_id); 5878 mutex_enter(SD_MUTEX(un)); 5879 } else { 5880 mutex_exit(&un->un_pm_mutex); 5881 } 5882 5883 if (un->un_retry_timeid != NULL) { 5884 timeout_id_t temp_id = un->un_retry_timeid; 5885 un->un_retry_timeid = NULL; 5886 mutex_exit(SD_MUTEX(un)); 5887 (void) untimeout(temp_id); 5888 mutex_enter(SD_MUTEX(un)); 5889 5890 if (un->un_retry_bp != NULL) { 5891 un->un_retry_bp->av_forw = un->un_waitq_headp; 5892 un->un_waitq_headp = un->un_retry_bp; 5893 if (un->un_waitq_tailp == NULL) { 5894 un->un_waitq_tailp = un->un_retry_bp; 5895 } 5896 un->un_retry_bp = NULL; 5897 un->un_retry_statp = NULL; 5898 } 5899 } 5900 5901 if (un->un_direct_priority_timeid != NULL) { 5902 timeout_id_t temp_id = un->un_direct_priority_timeid; 5903 un->un_direct_priority_timeid = NULL; 5904 mutex_exit(SD_MUTEX(un)); 5905 (void)
untimeout(temp_id); 5906 mutex_enter(SD_MUTEX(un)); 5907 } 5908 5909 if (un->un_f_is_fibre == TRUE) { 5910 /* 5911 * Remove callbacks for insert and remove events 5912 */ 5913 if (un->un_insert_event != NULL) { 5914 mutex_exit(SD_MUTEX(un)); 5915 (void) ddi_remove_event_handler(un->un_insert_cb_id); 5916 mutex_enter(SD_MUTEX(un)); 5917 un->un_insert_event = NULL; 5918 } 5919 5920 if (un->un_remove_event != NULL) { 5921 mutex_exit(SD_MUTEX(un)); 5922 (void) ddi_remove_event_handler(un->un_remove_cb_id); 5923 mutex_enter(SD_MUTEX(un)); 5924 un->un_remove_event = NULL; 5925 } 5926 } 5927 5928 mutex_exit(SD_MUTEX(un)); 5929 5930 SD_TRACE(SD_LOG_IO_PM, un, "sd_ddi_suspend: exit\n"); 5931 5932 return (DDI_SUCCESS); 5933 } 5934 5935 5936 /* 5937 * Function: sd_ddi_pm_suspend 5938 * 5939 * Description: Set the drive state to low power. 5940 * Someone else is required to actually change the drive 5941 * power level. 5942 * 5943 * Arguments: un - driver soft state (unit) structure 5944 * 5945 * Return Code: DDI_FAILURE or DDI_SUCCESS 5946 * 5947 * Context: Kernel thread context 5948 */ 5949 5950 static int 5951 sd_ddi_pm_suspend(struct sd_lun *un) 5952 { 5953 ASSERT(un != NULL); 5954 SD_TRACE(SD_LOG_POWER, un, "sd_ddi_pm_suspend: entry\n"); 5955 5956 ASSERT(!mutex_owned(SD_MUTEX(un))); 5957 mutex_enter(SD_MUTEX(un)); 5958 5959 /* 5960 * Exit if power management is not enabled for this device, or if 5961 * the device is being used by HA. 5962 */ 5963 if ((un->un_f_pm_is_enabled == FALSE) || (un->un_resvd_status & 5964 (SD_RESERVE | SD_WANT_RESERVE | SD_LOST_RESERVE))) { 5965 mutex_exit(SD_MUTEX(un)); 5966 SD_TRACE(SD_LOG_POWER, un, "sd_ddi_pm_suspend: exiting\n"); 5967 return (DDI_SUCCESS); 5968 } 5969 5970 SD_INFO(SD_LOG_POWER, un, "sd_ddi_pm_suspend: un_ncmds_in_driver=%ld\n", 5971 un->un_ncmds_in_driver); 5972 5973 /* 5974 * See if the device is not busy, ie.: 5975 * - we have no commands in the driver for this device 5976 * - not waiting for resources 5977 */ 5978 if ((un->un_ncmds_in_driver == 0) && 5979 (un->un_state != SD_STATE_RWAIT)) { 5980 /* 5981 * The device is not busy, so it is OK to go to low power state. 5982 * Indicate low power, but rely on someone else to actually 5983 * change it. 5984 */ 5985 mutex_enter(&un->un_pm_mutex); 5986 un->un_pm_count = -1; 5987 mutex_exit(&un->un_pm_mutex); 5988 un->un_power_level = SD_SPINDLE_OFF; 5989 } 5990 5991 mutex_exit(SD_MUTEX(un)); 5992 5993 SD_TRACE(SD_LOG_POWER, un, "sd_ddi_pm_suspend: exit\n"); 5994 5995 return (DDI_SUCCESS); 5996 } 5997 5998 5999 /* 6000 * Function: sd_ddi_resume 6001 * 6002 * Description: Performs system power-up operations. 6003 * 6004 * Return Code: DDI_SUCCESS 6005 * DDI_FAILURE 6006 * 6007 * Context: Kernel thread context 6008 */ 6009 6010 static int 6011 sd_ddi_resume(dev_info_t *devi) 6012 { 6013 struct sd_lun *un; 6014 6015 un = ddi_get_soft_state(sd_state, ddi_get_instance(devi)); 6016 if (un == NULL) { 6017 return (DDI_FAILURE); 6018 } 6019 6020 SD_TRACE(SD_LOG_IO_PM, un, "sd_ddi_resume: entry\n"); 6021 6022 mutex_enter(SD_MUTEX(un)); 6023 Restore_state(un); 6024 6025 /* 6026 * Restore the state which was saved to give the 6027 * right state in un_last_state 6028 */ 6029 un->un_last_state = un->un_save_state; 6030 /* 6031 * Note: throttle comes back at full. 6032 * Also note: this MUST be done before calling pm_raise_power 6033 * otherwise the system can get hung in biowait. The scenario where 6034 * this'll happen is under cpr suspend.
Writing of the system 6035 * state goes through sddump, which writes 0 to un_throttle. If 6036 * writing the system state then fails, for example if the partition is 6037 * too small, then cpr attempts a resume. If throttle isn't restored 6038 * from the saved value until after calling pm_raise_power then 6039 * cmds sent in sdpower are not transported and sd_send_scsi_cmd hangs 6040 * in biowait. 6041 */ 6042 un->un_throttle = un->un_saved_throttle; 6043 6044 /* 6045 * The chance of failure is very rare as the only command done in the 6046 * power entry point is the START command when you transition from 0->1 6047 * or unknown->1. Put it to SPINDLE ON state irrespective of the state 6048 * at which suspend was done. Ignore the return value as the resume 6049 * should not fail. In the case of removable media the media need not 6050 * be inserted and hence there is a chance that raise power will fail 6051 * with media not present. 6052 */ 6053 if (un->un_f_attach_spinup) { 6054 mutex_exit(SD_MUTEX(un)); 6055 (void) pm_raise_power(SD_DEVINFO(un), 0, SD_SPINDLE_ON); 6056 mutex_enter(SD_MUTEX(un)); 6057 } 6058 6059 /* 6060 * Don't broadcast to the suspend cv and therefore possibly 6061 * start I/O until after power has been restored. 6062 */ 6063 cv_broadcast(&un->un_suspend_cv); 6064 cv_broadcast(&un->un_state_cv); 6065 6066 /* restart thread */ 6067 if (SD_OK_TO_RESUME_SCSI_WATCHER(un)) { 6068 scsi_watch_resume(un->un_swr_token); 6069 } 6070 6071 #if (defined(__fibre)) 6072 if (un->un_f_is_fibre == TRUE) { 6073 /* 6074 * Add callbacks for insert and remove events 6075 */ 6076 if (strcmp(un->un_node_type, DDI_NT_BLOCK_CHAN)) { 6077 sd_init_event_callbacks(un); 6078 } 6079 } 6080 #endif 6081 6082 /* 6083 * Transport any pending commands to the target. 6084 * 6085 * If this is a low-activity device, commands in the queue will have to 6086 * wait until new commands come in, which may take awhile. Also, we 6087 * specifically don't check un_ncmds_in_transport because we know that 6088 * there really are no commands in progress after the unit was 6089 * suspended and we could have reached the throttle level, been 6090 * suspended, and have no new commands coming in for awhile. Highly 6091 * unlikely, but so is the low-activity disk scenario. 6092 */ 6093 ddi_xbuf_dispatch(un->un_xbuf_attr); 6094 6095 sd_start_cmds(un, NULL); 6096 mutex_exit(SD_MUTEX(un)); 6097 6098 SD_TRACE(SD_LOG_IO_PM, un, "sd_ddi_resume: exit\n"); 6099 6100 return (DDI_SUCCESS); 6101 } 6102 6103 6104 /* 6105 * Function: sd_ddi_pm_resume 6106 * 6107 * Description: Set the drive state to powered on. 6108 * Someone else is required to actually change the drive 6109 * power level. 6110 * 6111 * Arguments: un - driver soft state (unit) structure 6112 * 6113 * Return Code: DDI_SUCCESS 6114 * 6115 * Context: Kernel thread context 6116 */ 6117 6118 static int 6119 sd_ddi_pm_resume(struct sd_lun *un) 6120 { 6121 ASSERT(un != NULL); 6122 6123 ASSERT(!mutex_owned(SD_MUTEX(un))); 6124 mutex_enter(SD_MUTEX(un)); 6125 un->un_power_level = SD_SPINDLE_ON; 6126 6127 ASSERT(!mutex_owned(&un->un_pm_mutex)); 6128 mutex_enter(&un->un_pm_mutex); 6129 if (SD_DEVICE_IS_IN_LOW_POWER(un)) { 6130 un->un_pm_count++; 6131 ASSERT(un->un_pm_count == 0); 6132 /* 6133 * Note: no longer do the cv_broadcast on un_suspend_cv. The 6134 * un_suspend_cv is for a system resume, not a power management 6135 * device resume.
(4297749) 6136 * cv_broadcast(&un->un_suspend_cv); 6137 */ 6138 } 6139 mutex_exit(&un->un_pm_mutex); 6140 mutex_exit(SD_MUTEX(un)); 6141 6142 return (DDI_SUCCESS); 6143 } 6144 6145 6146 /* 6147 * Function: sd_pm_idletimeout_handler 6148 * 6149 * Description: A timer routine that's active only while a device is busy. 6150 * The purpose is to extend slightly the pm framework's busy 6151 * view of the device to prevent busy/idle thrashing for 6152 * back-to-back commands. Do this by comparing the current time 6153 * to the time at which the last command completed and when the 6154 * difference is greater than sd_pm_idletime, call 6155 * pm_idle_component. In addition to indicating idle to the pm 6156 * framework, update the chain type to again use the internal pm 6157 * layers of the driver. 6158 * 6159 * Arguments: arg - driver soft state (unit) structure 6160 * 6161 * Context: Executes in a timeout(9F) thread context 6162 */ 6163 6164 static void 6165 sd_pm_idletimeout_handler(void *arg) 6166 { 6167 struct sd_lun *un = arg; 6168 6169 time_t now; 6170 6171 mutex_enter(&sd_detach_mutex); 6172 if (un->un_detach_count != 0) { 6173 /* Abort if the instance is detaching */ 6174 mutex_exit(&sd_detach_mutex); 6175 return; 6176 } 6177 mutex_exit(&sd_detach_mutex); 6178 6179 now = ddi_get_time(); 6180 /* 6181 * Grab both mutexes, in the proper order, since we're accessing 6182 * both PM and softstate variables. 6183 */ 6184 mutex_enter(SD_MUTEX(un)); 6185 mutex_enter(&un->un_pm_mutex); 6186 if (((now - un->un_pm_idle_time) > sd_pm_idletime) && 6187 (un->un_ncmds_in_driver == 0) && (un->un_pm_count == 0)) { 6188 /* 6189 * Update the chain types. 6190 * This takes effect on the next new command received. 6191 */ 6192 if (un->un_f_non_devbsize_supported) { 6193 un->un_buf_chain_type = SD_CHAIN_INFO_RMMEDIA; 6194 } else { 6195 un->un_buf_chain_type = SD_CHAIN_INFO_DISK; 6196 } 6197 un->un_uscsi_chain_type = SD_CHAIN_INFO_USCSI_CMD; 6198 6199 SD_TRACE(SD_LOG_IO_PM, un, 6200 "sd_pm_idletimeout_handler: idling device\n"); 6201 (void) pm_idle_component(SD_DEVINFO(un), 0); 6202 un->un_pm_idle_timeid = NULL; 6203 } else { 6204 un->un_pm_idle_timeid = 6205 timeout(sd_pm_idletimeout_handler, un, 6206 (drv_usectohz((clock_t)300000))); /* 300 ms. */ 6207 } 6208 mutex_exit(&un->un_pm_mutex); 6209 mutex_exit(SD_MUTEX(un)); 6210 } 6211 6212 6213 /* 6214 * Function: sd_pm_timeout_handler 6215 * 6216 * Description: Callback to tell framework we are idle. 6217 * 6218 * Context: timeout(9f) thread context. 6219 */ 6220 6221 static void 6222 sd_pm_timeout_handler(void *arg) 6223 { 6224 struct sd_lun *un = arg; 6225 6226 (void) pm_idle_component(SD_DEVINFO(un), 0); 6227 mutex_enter(&un->un_pm_mutex); 6228 un->un_pm_timeid = NULL; 6229 mutex_exit(&un->un_pm_mutex); 6230 } 6231 6232 6233 /* 6234 * Function: sdpower 6235 * 6236 * Description: PM entry point.
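* Called by the PM framework to change the power level of
* component 0 (the spindle motor, per the pm-components
* property created in sd_create_pm_components()) between
* SD_SPINDLE_OFF (0) and SD_SPINDLE_ON (1).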
6237 * 6238 * Return Code: DDI_SUCCESS 6239 * DDI_FAILURE 6240 * 6241 * Context: Kernel thread context 6242 */ 6243 6244 static int 6245 sdpower(dev_info_t *devi, int component, int level) 6246 { 6247 struct sd_lun *un; 6248 int instance; 6249 int rval = DDI_SUCCESS; 6250 uint_t i, log_page_size, maxcycles, ncycles; 6251 uchar_t *log_page_data; 6252 int log_sense_page; 6253 int medium_present; 6254 time_t intvlp; 6255 dev_t dev; 6256 struct pm_trans_data sd_pm_tran_data; 6257 uchar_t save_state; 6258 int sval; 6259 uchar_t state_before_pm; 6260 int got_semaphore_here; 6261 sd_ssc_t *ssc; 6262 6263 instance = ddi_get_instance(devi); 6264 6265 if (((un = ddi_get_soft_state(sd_state, instance)) == NULL) || 6266 (SD_SPINDLE_OFF > level) || (level > SD_SPINDLE_ON) || 6267 component != 0) { 6268 return (DDI_FAILURE); 6269 } 6270 6271 dev = sd_make_device(SD_DEVINFO(un)); 6272 ssc = sd_ssc_init(un); 6273 6274 SD_TRACE(SD_LOG_IO_PM, un, "sdpower: entry, level = %d\n", level); 6275 6276 /* 6277 * Must synchronize power down with close. 6278 * Attempt to decrement/acquire the open/close semaphore, 6279 * but do NOT wait on it. If it's not greater than zero, 6280 * ie. it can't be decremented without waiting, then 6281 * someone else, either open or close, already has it 6282 * and the try returns 0. Use that knowledge here to determine 6283 * if it's OK to change the device power level. 6284 * Also, only increment it on exit if it was decremented, ie. gotten, 6285 * here. 6286 */ 6287 got_semaphore_here = sema_tryp(&un->un_semoclose); 6288 6289 mutex_enter(SD_MUTEX(un)); 6290 6291 SD_INFO(SD_LOG_POWER, un, "sdpower: un_ncmds_in_driver = %ld\n", 6292 un->un_ncmds_in_driver); 6293 6294 /* 6295 * If un_ncmds_in_driver is non-zero it indicates commands are 6296 * already being processed in the driver, or if the semaphore was 6297 * not gotten here it indicates an open or close is being processed. 6298 * At the same time somebody is requesting to go to low power, which 6299 * can't happen; therefore we need to return failure. 6300 */ 6301 if ((level == SD_SPINDLE_OFF) && 6302 ((un->un_ncmds_in_driver != 0) || (got_semaphore_here == 0))) { 6303 mutex_exit(SD_MUTEX(un)); 6304 6305 if (got_semaphore_here != 0) { 6306 sema_v(&un->un_semoclose); 6307 } 6308 SD_TRACE(SD_LOG_IO_PM, un, 6309 "sdpower: exit, device has queued cmds.\n"); 6310 6311 goto sdpower_failed; 6312 } 6313 6314 /* 6315 * If the device is OFFLINE, that means it is completely dead; 6316 * in that case we would have to bring the disk on or off line by 6317 * sending commands, which of course would fail anyway, so return here. 6318 * 6319 * Power changes to a device that's OFFLINE or SUSPENDED 6320 * are not allowed. 6321 */ 6322 if ((un->un_state == SD_STATE_OFFLINE) || 6323 (un->un_state == SD_STATE_SUSPENDED)) { 6324 mutex_exit(SD_MUTEX(un)); 6325 6326 if (got_semaphore_here != 0) { 6327 sema_v(&un->un_semoclose); 6328 } 6329 SD_TRACE(SD_LOG_IO_PM, un, 6330 "sdpower: exit, device is off-line.\n"); 6331 6332 goto sdpower_failed; 6333 } 6334 6335 /* 6336 * Change the device's state to indicate its power level 6337 * is being changed. Do this to prevent a power off in the 6338 * middle of commands, which is especially bad on devices 6339 * that are really powered off instead of just spun down.
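* Every exit path below undoes this by restoring un_state to
* state_before_pm and broadcasting on un_suspend_cv, so that any
* thread waiting out the SD_STATE_PM_CHANGING transition can proceed.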
6340 */ 6341 state_before_pm = un->un_state; 6342 un->un_state = SD_STATE_PM_CHANGING; 6343 6344 mutex_exit(SD_MUTEX(un)); 6345 6346 /* 6347 * If the "pm-capable" property is set to TRUE by HBA drivers, 6348 * bypass the following checking; otherwise, check the log 6349 * sense information for this device. 6350 */ 6351 if ((level == SD_SPINDLE_OFF) && un->un_f_log_sense_supported) { 6352 /* 6353 * Get the log sense information to understand whether the 6354 * power cycle counts have gone beyond the threshold. 6355 */ 6356 log_page_size = START_STOP_CYCLE_COUNTER_PAGE_SIZE; 6357 log_page_data = kmem_zalloc(log_page_size, KM_SLEEP); 6358 6359 mutex_enter(SD_MUTEX(un)); 6360 log_sense_page = un->un_start_stop_cycle_page; 6361 mutex_exit(SD_MUTEX(un)); 6362 6363 rval = sd_send_scsi_LOG_SENSE(ssc, log_page_data, 6364 log_page_size, log_sense_page, 0x01, 0, SD_PATH_DIRECT); 6365 6366 if (rval != 0) { 6367 if (rval == EIO) 6368 sd_ssc_assessment(ssc, SD_FMT_STATUS_CHECK); 6369 else 6370 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 6371 } 6372 6373 #ifdef SDDEBUG 6374 if (sd_force_pm_supported) { 6375 /* Force a successful result */ 6376 rval = 0; 6377 } 6378 #endif 6379 if (rval != 0) { 6380 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 6381 "Log Sense Failed\n"); 6382 6383 kmem_free(log_page_data, log_page_size); 6384 /* Cannot support power management on those drives */ 6385 6386 if (got_semaphore_here != 0) { 6387 sema_v(&un->un_semoclose); 6388 } 6389 /* 6390 * On exit put the state back to its original value 6391 * and broadcast to anyone waiting for the power 6392 * change completion. 6393 */ 6394 mutex_enter(SD_MUTEX(un)); 6395 un->un_state = state_before_pm; 6396 cv_broadcast(&un->un_suspend_cv); 6397 mutex_exit(SD_MUTEX(un)); 6398 SD_TRACE(SD_LOG_IO_PM, un, 6399 "sdpower: exit, Log Sense Failed.\n"); 6400 6401 goto sdpower_failed; 6402 } 6403 6404 /* 6405 * From the page data - Convert the essential information to 6406 * pm_trans_data 6407 */ 6408 maxcycles = 6409 (log_page_data[0x1c] << 24) | (log_page_data[0x1d] << 16) | 6410 (log_page_data[0x1E] << 8) | log_page_data[0x1F]; 6411 6412 sd_pm_tran_data.un.scsi_cycles.lifemax = maxcycles; 6413 6414 ncycles = 6415 (log_page_data[0x24] << 24) | (log_page_data[0x25] << 16) | 6416 (log_page_data[0x26] << 8) | log_page_data[0x27]; 6417 6418 sd_pm_tran_data.un.scsi_cycles.ncycles = ncycles; 6419 6420 for (i = 0; i < DC_SCSI_MFR_LEN; i++) { 6421 sd_pm_tran_data.un.scsi_cycles.svc_date[i] = 6422 log_page_data[8+i]; 6423 } 6424 6425 kmem_free(log_page_data, log_page_size); 6426 6427 /* 6428 * Call pm_trans_check routine to get the Ok from 6429 * the global policy 6430 */ 6431 6432 sd_pm_tran_data.format = DC_SCSI_FORMAT; 6433 sd_pm_tran_data.un.scsi_cycles.flag = 0; 6434 6435 rval = pm_trans_check(&sd_pm_tran_data, &intvlp); 6436 #ifdef SDDEBUG 6437 if (sd_force_pm_supported) { 6438 /* Force a successful result */ 6439 rval = 1; 6440 } 6441 #endif 6442 switch (rval) { 6443 case 0: 6444 /* 6445 * Not OK to power cycle, or error in the parameters passed; 6446 * pm_trans_check has given the advised time to wait before 6447 * considering a power cycle. Based on the new intvlp parameter we are 6448 * supposed to pretend we are busy so that the pm framework 6449 * will never call our power entry point. Because of 6450 * that, install a timeout handler and wait for the 6451 * recommended time to elapse so that power management 6452 * can be effective again. 6453 * 6454 * To effect this behavior, call pm_busy_component to 6455 * indicate to the framework this device is busy.
6456 * By not adjusting un_pm_count, the rest of PM in 6457 * the driver will function normally, independent 6458 * of this; but because the framework is told the device 6459 * is busy it won't attempt powering down until it gets 6460 * a matching idle. The timeout handler sends this. 6461 * Note: sd_pm_entry can't be called here to do this 6462 * because sdpower may have been called as a result 6463 * of a call to pm_raise_power from within sd_pm_entry. 6464 * 6465 * If a timeout handler is already active then 6466 * don't install another. 6467 */ 6468 mutex_enter(&un->un_pm_mutex); 6469 if (un->un_pm_timeid == NULL) { 6470 un->un_pm_timeid = 6471 timeout(sd_pm_timeout_handler, 6472 un, intvlp * drv_usectohz(1000000)); 6473 mutex_exit(&un->un_pm_mutex); 6474 (void) pm_busy_component(SD_DEVINFO(un), 0); 6475 } else { 6476 mutex_exit(&un->un_pm_mutex); 6477 } 6478 if (got_semaphore_here != 0) { 6479 sema_v(&un->un_semoclose); 6480 } 6481 /* 6482 * On exit put the state back to its original value 6483 * and broadcast to anyone waiting for the power 6484 * change completion. 6485 */ 6486 mutex_enter(SD_MUTEX(un)); 6487 un->un_state = state_before_pm; 6488 cv_broadcast(&un->un_suspend_cv); 6489 mutex_exit(SD_MUTEX(un)); 6490 6491 SD_TRACE(SD_LOG_IO_PM, un, "sdpower: exit, " 6492 "trans check Failed, not ok to power cycle.\n"); 6493 6494 goto sdpower_failed; 6495 case -1: 6496 if (got_semaphore_here != 0) { 6497 sema_v(&un->un_semoclose); 6498 } 6499 /* 6500 * On exit put the state back to its original value 6501 * and broadcast to anyone waiting for the power 6502 * change completion. 6503 */ 6504 mutex_enter(SD_MUTEX(un)); 6505 un->un_state = state_before_pm; 6506 cv_broadcast(&un->un_suspend_cv); 6507 mutex_exit(SD_MUTEX(un)); 6508 SD_TRACE(SD_LOG_IO_PM, un, 6509 "sdpower: exit, trans check command Failed.\n"); 6510 6511 goto sdpower_failed; 6512 } 6513 } 6514 6515 if (level == SD_SPINDLE_OFF) { 6516 /* 6517 * Save the last state... if the STOP FAILS we need it 6518 * for restoring 6519 */ 6520 mutex_enter(SD_MUTEX(un)); 6521 save_state = un->un_last_state; 6522 /* 6523 * There must not be any cmds getting processed 6524 * in the driver when we get here. Power to the 6525 * device is potentially going off. 6526 */ 6527 ASSERT(un->un_ncmds_in_driver == 0); 6528 mutex_exit(SD_MUTEX(un)); 6529 6530 /* 6531 * For now suspend the device completely before the spindle is 6532 * turned off 6533 */ 6534 if ((rval = sd_ddi_pm_suspend(un)) == DDI_FAILURE) { 6535 if (got_semaphore_here != 0) { 6536 sema_v(&un->un_semoclose); 6537 } 6538 /* 6539 * On exit put the state back to its original value 6540 * and broadcast to anyone waiting for the power 6541 * change completion. 6542 */ 6543 mutex_enter(SD_MUTEX(un)); 6544 un->un_state = state_before_pm; 6545 cv_broadcast(&un->un_suspend_cv); 6546 mutex_exit(SD_MUTEX(un)); 6547 SD_TRACE(SD_LOG_IO_PM, un, 6548 "sdpower: exit, PM suspend Failed.\n"); 6549 6550 goto sdpower_failed; 6551 } 6552 } 6553 6554 /* 6555 * The transition from SPINDLE_OFF to SPINDLE_ON can happen in open, 6556 * close, or strategy. Dump no longer uses this routine; it uses its 6557 * own code so it can be done in polled mode. 6558 */ 6559 6560 medium_present = TRUE; 6561 6562 /* 6563 * When powering up, issue a TUR in case the device is at unit 6564 * attention. Don't do retries. Bypass the PM layer, otherwise 6565 * a deadlock on un_pm_busy_cv will occur.
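* (In outline, the deadlock being avoided: without SD_BYPASS_PM the
* TUR would be routed through the driver's PM entry logic, which
* waits on un_pm_busy_cv for any in-progress power transition to
* finish -- and the in-progress transition here is this very
* sdpower() call, so the command could never be transported.)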
6566 */ 6567 if (level == SD_SPINDLE_ON) { 6568 sval = sd_send_scsi_TEST_UNIT_READY(ssc, 6569 SD_DONT_RETRY_TUR | SD_BYPASS_PM); 6570 if (sval != 0) 6571 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 6572 } 6573 6574 SD_TRACE(SD_LOG_IO_PM, un, "sdpower: sending \'%s\' unit\n", 6575 ((level == SD_SPINDLE_ON) ? "START" : "STOP")); 6576 6577 sval = sd_send_scsi_START_STOP_UNIT(ssc, 6578 ((level == SD_SPINDLE_ON) ? SD_TARGET_START : SD_TARGET_STOP), 6579 SD_PATH_DIRECT); 6580 if (sval != 0) { 6581 if (sval == EIO) 6582 sd_ssc_assessment(ssc, SD_FMT_STATUS_CHECK); 6583 else 6584 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 6585 } 6586 6587 /* Command failed, check for media present. */ 6588 if ((sval == ENXIO) && un->un_f_has_removable_media) { 6589 medium_present = FALSE; 6590 } 6591 6592 /* 6593 * The conditions of interest here are: 6594 * if a spindle off with media present fails, 6595 * then restore the state and return an error. 6596 * else if a spindle on fails, 6597 * then return an error (there's no state to restore). 6598 * In all other cases we set up for the new state 6599 * and return success. 6600 */ 6601 switch (level) { 6602 case SD_SPINDLE_OFF: 6603 if ((medium_present == TRUE) && (sval != 0)) { 6604 /* The stop command from above failed */ 6605 rval = DDI_FAILURE; 6606 /* 6607 * The stop command failed, and we have media 6608 * present. Put the level back by calling 6609 * sd_ddi_pm_resume() and set the state back to 6610 * its previous value. 6611 */ 6612 (void) sd_ddi_pm_resume(un); 6613 mutex_enter(SD_MUTEX(un)); 6614 un->un_last_state = save_state; 6615 mutex_exit(SD_MUTEX(un)); 6616 break; 6617 } 6618 /* 6619 * The stop command from above succeeded. 6620 */ 6621 if (un->un_f_monitor_media_state) { 6622 /* 6623 * Terminate the watch thread in case of removable 6624 * media devices going into low power state. This is 6625 * as per the requirements of the pm framework, 6626 * otherwise commands will be generated for the device 6627 * (through the watch thread), even when the device is 6628 * in low power state. 6629 */ 6630 mutex_enter(SD_MUTEX(un)); 6631 un->un_f_watcht_stopped = FALSE; 6632 if (un->un_swr_token != NULL) { 6633 opaque_t temp_token = un->un_swr_token; 6634 un->un_f_watcht_stopped = TRUE; 6635 un->un_swr_token = NULL; 6636 mutex_exit(SD_MUTEX(un)); 6637 (void) scsi_watch_request_terminate(temp_token, 6638 SCSI_WATCH_TERMINATE_ALL_WAIT); 6639 } else { 6640 mutex_exit(SD_MUTEX(un)); 6641 } 6642 } 6643 break; 6644 6645 default: /* The level requested is spindle on... */ 6646 /* 6647 * Legacy behavior: return success on a failed spinup 6648 * if there is no media in the drive. 6649 * Do this by looking at medium_present here. 6650 */ 6651 if ((sval != 0) && medium_present) { 6652 /* The start command from above failed */ 6653 rval = DDI_FAILURE; 6654 break; 6655 } 6656 /* 6657 * The start command from above succeeded. 6658 * Resume the devices now that we have 6659 * started the disks. 6660 */ 6661 (void) sd_ddi_pm_resume(un); 6662 6663 /* 6664 * Resume the watch thread since it was suspended 6665 * when the device went into low power mode.
6665 */ 6666 if (un->un_f_monitor_media_state) { 6667 mutex_enter(SD_MUTEX(un)); 6668 if (un->un_f_watcht_stopped == TRUE) { 6669 opaque_t temp_token; 6670 6671 un->un_f_watcht_stopped = FALSE; 6672 mutex_exit(SD_MUTEX(un)); 6673 temp_token = scsi_watch_request_submit( 6674 SD_SCSI_DEVP(un), 6675 sd_check_media_time, 6676 SENSE_LENGTH, sd_media_watch_cb, 6677 (caddr_t)dev); 6678 mutex_enter(SD_MUTEX(un)); 6679 un->un_swr_token = temp_token; 6680 } 6681 mutex_exit(SD_MUTEX(un)); 6682 } 6683 } 6684 if (got_semaphore_here != 0) { 6685 sema_v(&un->un_semoclose); 6686 } 6687 /* 6688 * On exit put the state back to its original value 6689 * and broadcast to anyone waiting for the power 6690 * change completion. 6691 */ 6692 mutex_enter(SD_MUTEX(un)); 6693 un->un_state = state_before_pm; 6694 cv_broadcast(&un->un_suspend_cv); 6695 mutex_exit(SD_MUTEX(un)); 6696 6697 SD_TRACE(SD_LOG_IO_PM, un, "sdpower: exit, status = 0x%x\n", rval); 6698 6699 sd_ssc_fini(ssc); 6700 return (rval); 6701 6702 sdpower_failed: 6703 6704 sd_ssc_fini(ssc); 6705 return (DDI_FAILURE); 6706 } 6707 6708 6709 6710 /* 6711 * Function: sdattach 6712 * 6713 * Description: Driver's attach(9e) entry point function. 6714 * 6715 * Arguments: devi - opaque device info handle 6716 * cmd - attach type 6717 * 6718 * Return Code: DDI_SUCCESS 6719 * DDI_FAILURE 6720 * 6721 * Context: Kernel thread context 6722 */ 6723 6724 static int 6725 sdattach(dev_info_t *devi, ddi_attach_cmd_t cmd) 6726 { 6727 switch (cmd) { 6728 case DDI_ATTACH: 6729 return (sd_unit_attach(devi)); 6730 case DDI_RESUME: 6731 return (sd_ddi_resume(devi)); 6732 default: 6733 break; 6734 } 6735 return (DDI_FAILURE); 6736 } 6737 6738 6739 /* 6740 * Function: sddetach 6741 * 6742 * Description: Driver's detach(9E) entry point function. 6743 * 6744 * Arguments: devi - opaque device info handle 6745 * cmd - detach type 6746 * 6747 * Return Code: DDI_SUCCESS 6748 * DDI_FAILURE 6749 * 6750 * Context: Kernel thread context 6751 */ 6752 6753 static int 6754 sddetach(dev_info_t *devi, ddi_detach_cmd_t cmd) 6755 { 6756 switch (cmd) { 6757 case DDI_DETACH: 6758 return (sd_unit_detach(devi)); 6759 case DDI_SUSPEND: 6760 return (sd_ddi_suspend(devi)); 6761 default: 6762 break; 6763 } 6764 return (DDI_FAILURE); 6765 } 6766 6767 6768 /* 6769 * Function: sd_sync_with_callback 6770 * 6771 * Description: Prevents sd_unit_attach or sd_unit_detach from freeing the soft 6772 * state while the callback routine is active. 6773 * 6774 * Arguments: un: softstate structure for the instance 6775 * 6776 * Context: Kernel thread context 6777 */ 6778 6779 static void 6780 sd_sync_with_callback(struct sd_lun *un) 6781 { 6782 ASSERT(un != NULL); 6783 6784 mutex_enter(SD_MUTEX(un)); 6785 6786 ASSERT(un->un_in_callback >= 0); 6787 6788 while (un->un_in_callback > 0) { 6789 mutex_exit(SD_MUTEX(un)); 6790 delay(2); 6791 mutex_enter(SD_MUTEX(un)); 6792 } 6793 6794 mutex_exit(SD_MUTEX(un)); 6795 } 6796 6797 /* 6798 * Function: sd_unit_attach 6799 * 6800 * Description: Performs DDI_ATTACH processing for sdattach(). Allocates 6801 * the soft state structure for the device and performs 6802 * all necessary structure and device initializations. 6803 * 6804 * Arguments: devi: the system's dev_info_t for the device. 6805 * 6806 * Return Code: DDI_SUCCESS if attach is successful. 6807 * DDI_FAILURE if any part of the attach fails. 6808 * 6809 * Context: Called at attach(9e) time for the DDI_ATTACH flag. 6810 * Kernel thread context only. Can sleep.
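* In outline, the code below: resets selected transport
* capabilities (wide-xfer and friends) ahead of the probe,
* issues the INQUIRY via scsi_probe(), claims supported
* device types, allocates and links up the per-LUN soft
* state, determines the interconnect type, and then fills
* in retry counts, throttling, and power management defaults.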
6811 */ 6812 6813 static int 6814 sd_unit_attach(dev_info_t *devi) 6815 { 6816 struct scsi_device *devp; 6817 struct sd_lun *un; 6818 char *variantp; 6819 int reservation_flag = SD_TARGET_IS_UNRESERVED; 6820 int instance; 6821 int rval; 6822 int wc_enabled; 6823 int tgt; 6824 uint64_t capacity; 6825 uint_t lbasize = 0; 6826 dev_info_t *pdip = ddi_get_parent(devi); 6827 int offbyone = 0; 6828 int geom_label_valid = 0; 6829 sd_ssc_t *ssc; 6830 int status; 6831 struct sd_fm_internal *sfip = NULL; 6832 #if defined(__sparc) 6833 int max_xfer_size; 6834 #endif 6835 6836 /* 6837 * Retrieve the target driver's private data area. This was set 6838 * up by the HBA. 6839 */ 6840 devp = ddi_get_driver_private(devi); 6841 6842 /* 6843 * Retrieve the target ID of the device. 6844 */ 6845 tgt = ddi_prop_get_int(DDI_DEV_T_ANY, devi, DDI_PROP_DONTPASS, 6846 SCSI_ADDR_PROP_TARGET, -1); 6847 6848 /* 6849 * Since we have no idea what state things were left in by the last 6850 * user of the device, set up some 'default' settings, ie. turn 'em 6851 * off. The scsi_ifsetcap calls force re-negotiations with the drive. 6852 * Do this before the scsi_probe, which sends an inquiry. 6853 * This is a fix for bug (4430280). 6854 * Of special importance is wide-xfer. The drive could have been left 6855 * in wide transfer mode by the last driver to communicate with it; 6856 * this includes us. If that's the case, and if the following is not 6857 * set up properly or we don't re-negotiate with the drive prior to 6858 * transferring data to/from the drive, it causes bus parity errors, 6859 * data overruns, and unexpected interrupts. This first occurred when 6860 * the fix for bug (4378686) was made. 6861 */ 6862 (void) scsi_ifsetcap(&devp->sd_address, "lun-reset", 0, 1); 6863 (void) scsi_ifsetcap(&devp->sd_address, "wide-xfer", 0, 1); 6864 (void) scsi_ifsetcap(&devp->sd_address, "auto-rqsense", 0, 1); 6865 6866 /* 6867 * Currently, scsi_ifsetcap sets tagged-qing capability for all LUNs 6868 * on a target. Setting it per lun instance actually sets the 6869 * capability of this target, which affects those luns already 6870 * attached on the same target. So during attach, we can disable 6871 * this capability only when no other lun has been attached on this 6872 * target. By doing this, we assume a target has the same tagged-qing 6873 * capability for every lun. The condition can be removed when HBA 6874 * is changed to support per lun based tagged-qing capability. 6875 */ 6876 if (sd_scsi_get_target_lun_count(pdip, tgt) < 1) { 6877 (void) scsi_ifsetcap(&devp->sd_address, "tagged-qing", 0, 1); 6878 } 6879 6880 /* 6881 * Use scsi_probe() to issue an INQUIRY command to the device. 6882 * This call will allocate and fill in the scsi_inquiry structure 6883 * and point the sd_inq member of the scsi_device structure to it. 6884 * If the attach succeeds, then this memory will not be de-allocated 6885 * (via scsi_unprobe()) until the instance is detached. 6886 */ 6887 if (scsi_probe(devp, SLEEP_FUNC) != SCSIPROBE_EXISTS) { 6888 goto probe_failed; 6889 } 6890 6891 /* 6892 * Check the device type as specified in the inquiry data and 6893 * claim it if it is of a type that we support. 6894 */ 6895 switch (devp->sd_inq->inq_dtype) { 6896 case DTYPE_DIRECT: 6897 break; 6898 case DTYPE_RODIRECT: 6899 break; 6900 case DTYPE_OPTICAL: 6901 break; 6902 case DTYPE_NOTPRESENT: 6903 default: 6904 /* Unsupported device type; fail the attach. */ 6905 goto probe_failed; 6906 } 6907 6908 /* 6909 * Allocate the soft state structure for this unit.
6910 * 6911 * We rely upon this memory being set to all zeroes by 6912 * ddi_soft_state_zalloc(). We assume that any member of the 6913 * soft state structure that is not explicitly initialized by 6914 * this routine will have a value of zero. 6915 */ 6916 instance = ddi_get_instance(devp->sd_dev); 6917 if (ddi_soft_state_zalloc(sd_state, instance) != DDI_SUCCESS) { 6918 goto probe_failed; 6919 } 6920 6921 /* 6922 * Retrieve a pointer to the newly-allocated soft state. 6923 * 6924 * This should NEVER fail if the ddi_soft_state_zalloc() call above 6925 * was successful, unless something has gone horribly wrong and the 6926 * ddi's soft state internals are corrupt (in which case it is 6927 * probably better to halt here than just fail the attach....) 6928 */ 6929 if ((un = ddi_get_soft_state(sd_state, instance)) == NULL) { 6930 panic("sd_unit_attach: NULL soft state on instance:0x%x", 6931 instance); 6932 /*NOTREACHED*/ 6933 } 6934 6935 /* 6936 * Link the back ptr of the driver soft state to the scsi_device 6937 * struct for this lun. 6938 * Save a pointer to the softstate in the driver-private area of 6939 * the scsi_device struct. 6940 * Note: We cannot call SD_INFO, SD_TRACE, SD_ERROR, or SD_DIAG until 6941 * we first set un->un_sd below. 6942 */ 6943 un->un_sd = devp; 6944 devp->sd_private = (opaque_t)un; 6945 6946 /* 6947 * The following must be after devp is stored in the soft state struct. 6948 */ 6949 #ifdef SDDEBUG 6950 SD_TRACE(SD_LOG_ATTACH_DETACH, un, 6951 "%s_unit_attach: un:0x%p instance:%d\n", 6952 ddi_driver_name(devi), un, instance); 6953 #endif 6954 6955 /* 6956 * Set up the device type and node type (for the minor nodes). 6957 * By default we assume that the device can at least support the 6958 * Common Command Set. Call it a CD-ROM if it reports itself 6959 * as a RODIRECT device. 6960 */ 6961 switch (devp->sd_inq->inq_dtype) { 6962 case DTYPE_RODIRECT: 6963 un->un_node_type = DDI_NT_CD_CHAN; 6964 un->un_ctype = CTYPE_CDROM; 6965 break; 6966 case DTYPE_OPTICAL: 6967 un->un_node_type = DDI_NT_BLOCK_CHAN; 6968 un->un_ctype = CTYPE_ROD; 6969 break; 6970 default: 6971 un->un_node_type = DDI_NT_BLOCK_CHAN; 6972 un->un_ctype = CTYPE_CCS; 6973 break; 6974 } 6975 6976 /* 6977 * Try to read the interconnect type from the HBA. 6978 * 6979 * Note: This driver is currently compiled as two binaries, a parallel 6980 * scsi version (sd) and a fibre channel version (ssd). All functional 6981 * differences are determined at compile time. In the future a single 6982 * binary will be provided and the interconnect type will be used to 6983 * differentiate between fibre and parallel scsi behaviors. At that time 6984 * it will be necessary for all fibre channel HBAs to support this 6985 * property. 
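* For example, an FC HBA that implements this capability returns
* INTERCONNECT_FIBRE from the scsi_ifgetcap(SD_ADDRESS(un),
* "interconnect-type", -1) query below, which is mapped to
* SD_INTERCONNECT_FIBRE (and, for SCSI-3 targets, to the
* DDI_NT_BLOCK_WWN node type).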
6986 * 6987 * set un_f_is_fibre to TRUE (default fibre) 6988 */ 6989 un->un_f_is_fibre = TRUE; 6990 switch (scsi_ifgetcap(SD_ADDRESS(un), "interconnect-type", -1)) { 6991 case INTERCONNECT_SSA: 6992 un->un_interconnect_type = SD_INTERCONNECT_SSA; 6993 SD_INFO(SD_LOG_ATTACH_DETACH, un, 6994 "sd_unit_attach: un:0x%p SD_INTERCONNECT_SSA\n", un); 6995 break; 6996 case INTERCONNECT_PARALLEL: 6997 un->un_f_is_fibre = FALSE; 6998 un->un_interconnect_type = SD_INTERCONNECT_PARALLEL; 6999 SD_INFO(SD_LOG_ATTACH_DETACH, un, 7000 "sd_unit_attach: un:0x%p SD_INTERCONNECT_PARALLEL\n", un); 7001 break; 7002 case INTERCONNECT_SATA: 7003 un->un_f_is_fibre = FALSE; 7004 un->un_interconnect_type = SD_INTERCONNECT_SATA; 7005 SD_INFO(SD_LOG_ATTACH_DETACH, un, 7006 "sd_unit_attach: un:0x%p SD_INTERCONNECT_SATA\n", un); 7007 break; 7008 case INTERCONNECT_FIBRE: 7009 un->un_interconnect_type = SD_INTERCONNECT_FIBRE; 7010 SD_INFO(SD_LOG_ATTACH_DETACH, un, 7011 "sd_unit_attach: un:0x%p SD_INTERCONNECT_FIBRE\n", un); 7012 break; 7013 case INTERCONNECT_FABRIC: 7014 un->un_interconnect_type = SD_INTERCONNECT_FABRIC; 7015 un->un_node_type = DDI_NT_BLOCK_FABRIC; 7016 SD_INFO(SD_LOG_ATTACH_DETACH, un, 7017 "sd_unit_attach: un:0x%p SD_INTERCONNECT_FABRIC\n", un); 7018 break; 7019 default: 7020 #ifdef SD_DEFAULT_INTERCONNECT_TYPE 7021 /* 7022 * The HBA does not support the "interconnect-type" property 7023 * (or did not provide a recognized type). 7024 * 7025 * Note: This will be obsoleted when a single fibre channel 7026 * and parallel scsi driver is delivered. In the meantime the 7027 * interconnect type will be set to the platform default. If that 7028 * type is not parallel SCSI, it means that we should be 7029 * assuming "ssd" semantics. However, here this also means that 7030 * the FC HBA is not supporting the "interconnect-type" property 7031 * like we expect it to, so log this occurrence. 7032 */ 7033 un->un_interconnect_type = SD_DEFAULT_INTERCONNECT_TYPE; 7034 if (!SD_IS_PARALLEL_SCSI(un)) { 7035 SD_INFO(SD_LOG_ATTACH_DETACH, un, 7036 "sd_unit_attach: un:0x%p Assuming " 7037 "INTERCONNECT_FIBRE\n", un); 7038 } else { 7039 SD_INFO(SD_LOG_ATTACH_DETACH, un, 7040 "sd_unit_attach: un:0x%p Assuming " 7041 "INTERCONNECT_PARALLEL\n", un); 7042 un->un_f_is_fibre = FALSE; 7043 } 7044 #else 7045 /* 7046 * Note: This source will be implemented when a single fibre 7047 * channel and parallel scsi driver is delivered. The default 7048 * will be to assume that if a device does not support the 7049 * "interconnect-type" property it is a parallel SCSI HBA and 7050 * we will set the interconnect type for parallel scsi. 7051 */ 7052 un->un_interconnect_type = SD_INTERCONNECT_PARALLEL; 7053 un->un_f_is_fibre = FALSE; 7054 #endif 7055 break; 7056 } 7057 7058 if (un->un_f_is_fibre == TRUE) { 7059 if (scsi_ifgetcap(SD_ADDRESS(un), "scsi-version", 1) == 7060 SCSI_VERSION_3) { 7061 switch (un->un_interconnect_type) { 7062 case SD_INTERCONNECT_FIBRE: 7063 case SD_INTERCONNECT_SSA: 7064 un->un_node_type = DDI_NT_BLOCK_WWN; 7065 break; 7066 default: 7067 break; 7068 } 7069 } 7070 } 7071 7072 /* 7073 * Initialize the Request Sense command for the target 7074 */ 7075 if (sd_alloc_rqs(devp, un) != DDI_SUCCESS) { 7076 goto alloc_rqs_failed; 7077 } 7078 7079 /* 7080 * Set un_retry_count with SD_RETRY_COUNT; this is OK for Sparc 7081 * with separate binaries for sd and ssd. 7082 * 7083 * x86 has 1 binary, and un_retry_count is set based on connection type. 7084 * The hardcoded values will go away when Sparc uses 1 binary 7085 * for sd and ssd.
	/*
	 * Set un_retry_count with SD_RETRY_COUNT; this is ok for Sparc
	 * with separate binaries for sd and ssd.
	 *
	 * x86 has one binary, so un_retry_count is set based on the
	 * interconnect type. The hardcoded values (fibre = 3, parallel = 5)
	 * need to match SD_RETRY_COUNT in sddef.h and will go away when
	 * Sparc also uses one binary for sd and ssd.
	 */
#if defined(__i386) || defined(__amd64)
	un->un_retry_count = un->un_f_is_fibre ? 3 : 5;
#else
	un->un_retry_count = SD_RETRY_COUNT;
#endif

	/*
	 * Set the per disk retry count to the default number of retries
	 * for disks and CDROMs. This value can be overridden by the
	 * disk property list or an entry in sd.conf.
	 */
	un->un_notready_retry_count =
	    ISCD(un) ? CD_NOT_READY_RETRY_COUNT(un)
	    : DISK_NOT_READY_RETRY_COUNT(un);

	/*
	 * Set the busy retry count to the default value of un_retry_count.
	 * This can be overridden by entries in sd.conf or the device
	 * config table.
	 */
	un->un_busy_retry_count = un->un_retry_count;

	/*
	 * Init the reset threshold for retries.  This number determines
	 * how many retries must be performed before a reset can be issued
	 * (for certain error conditions). This can be overridden by entries
	 * in sd.conf or the device config table.
	 */
	un->un_reset_retry_count = (un->un_retry_count / 2);

	/*
	 * Set the victim_retry_count to twice the default un_retry_count.
	 */
	un->un_victim_retry_count = (2 * un->un_retry_count);
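	/*
	 * For example, with the parallel SCSI default of un_retry_count = 5,
	 * the derived values above work out to a reset threshold of 2 and a
	 * victim retry count of 10 (before any sd.conf overrides).
	 */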
	/*
	 * Set the reservation release timeout to the default value of
	 * 5 seconds. This can be overridden by entries in ssd.conf or the
	 * device config table.
	 */
	un->un_reserve_release_time = 5;

	/*
	 * Set up the default maximum transfer size. Note that this may
	 * get updated later in the attach, when setting up default wide
	 * operations for disks.
	 */
#if defined(__i386) || defined(__amd64)
	un->un_max_xfer_size = (uint_t)SD_DEFAULT_MAX_XFER_SIZE;
	un->un_partial_dma_supported = 1;
#else
	un->un_max_xfer_size = (uint_t)maxphys;
#endif

	/*
	 * Get "allow bus device reset" property (defaults to "enabled" if
	 * the property was not defined). This is to disable bus resets for
	 * certain kinds of error recovery. Note: In the future when a
	 * run-time fibre check is available the soft state flag should
	 * default to enabled.
	 */
	if (un->un_f_is_fibre == TRUE) {
		un->un_f_allow_bus_device_reset = TRUE;
	} else {
		if (ddi_getprop(DDI_DEV_T_ANY, devi, DDI_PROP_DONTPASS,
		    "allow-bus-device-reset", 1) != 0) {
			un->un_f_allow_bus_device_reset = TRUE;
			SD_INFO(SD_LOG_ATTACH_DETACH, un,
			    "sd_unit_attach: un:0x%p Bus device reset "
			    "enabled\n", un);
		} else {
			un->un_f_allow_bus_device_reset = FALSE;
			SD_INFO(SD_LOG_ATTACH_DETACH, un,
			    "sd_unit_attach: un:0x%p Bus device reset "
			    "disabled\n", un);
		}
	}

	/*
	 * Check if this is an ATAPI device. ATAPI devices use Group 1
	 * Read/Write commands and Group 2 Mode Sense/Select commands.
	 *
	 * Note: The "obsolete" way of doing this is to check for the "atapi"
	 * property. The new "variant" property with a value of "atapi" has
	 * been introduced so that future 'variants' of standard SCSI behavior
	 * (like atapi) could be specified by the underlying HBA drivers by
	 * supplying a new value for the "variant" property, instead of having
	 * to define a new property.
	 */
	if (ddi_prop_get_int(DDI_DEV_T_ANY, devi, 0, "atapi", -1) != -1) {
		un->un_f_cfg_is_atapi = TRUE;
		SD_INFO(SD_LOG_ATTACH_DETACH, un,
		    "sd_unit_attach: un:0x%p Atapi device\n", un);
	}
	if (ddi_prop_lookup_string(DDI_DEV_T_ANY, devi, 0, "variant",
	    &variantp) == DDI_PROP_SUCCESS) {
		if (strcmp(variantp, "atapi") == 0) {
			un->un_f_cfg_is_atapi = TRUE;
			SD_INFO(SD_LOG_ATTACH_DETACH, un,
			    "sd_unit_attach: un:0x%p Atapi device\n", un);
		}
		ddi_prop_free(variantp);
	}

	un->un_cmd_timeout = SD_IO_TIME;

	un->un_busy_timeout = SD_BSY_TIMEOUT;

	/* Info on current states, statuses, etc. (Updated frequently) */
	un->un_state = SD_STATE_NORMAL;
	un->un_last_state = SD_STATE_NORMAL;

	/* Control & status info for command throttling */
	un->un_throttle = sd_max_throttle;
	un->un_saved_throttle = sd_max_throttle;
	un->un_min_throttle = sd_min_throttle;

	if (un->un_f_is_fibre == TRUE) {
		un->un_f_use_adaptive_throttle = TRUE;
	} else {
		un->un_f_use_adaptive_throttle = FALSE;
	}

	/* Removable media support. */
	cv_init(&un->un_state_cv, NULL, CV_DRIVER, NULL);
	un->un_mediastate = DKIO_NONE;
	un->un_specified_mediastate = DKIO_NONE;

	/* CVs for suspend/resume (PM or DR) */
	cv_init(&un->un_suspend_cv, NULL, CV_DRIVER, NULL);
	cv_init(&un->un_disk_busy_cv, NULL, CV_DRIVER, NULL);

	/* Power management support. */
	un->un_power_level = SD_SPINDLE_UNINIT;

	cv_init(&un->un_wcc_cv, NULL, CV_DRIVER, NULL);
	un->un_f_wcc_inprog = 0;

	/*
	 * The open/close semaphore is used to serialize threads executing
	 * in the driver's open & close entry point routines for a given
	 * instance.
	 */
	(void) sema_init(&un->un_semoclose, 1, NULL, SEMA_DRIVER, NULL);

	/*
	 * The conf file entry and softstate variable are a forceful override,
	 * meaning a non-zero value must be entered to change the default.
	 */
	un->un_f_disksort_disabled = FALSE;

	/*
	 * Retrieve the properties from the static driver table or the driver
	 * configuration file (.conf) for this unit and update the soft state
	 * for the device as needed for the indicated properties.
	 * Note: the property configuration needs to occur here as some of the
	 * following routines may have dependencies on soft state flags set
	 * as part of the driver property configuration.
	 */
	sd_read_unit_properties(un);
	SD_TRACE(SD_LOG_ATTACH_DETACH, un,
	    "sd_unit_attach: un:0x%p property configuration complete.\n", un);
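	/*
	 * The per-device overrides come from sd.conf entries that pair an
	 * INQUIRY vendor/product string with a named data list, e.g. (purely
	 * illustrative; the vendor/product IDs are hypothetical and the exact
	 * list format is defined by the property parsing code, not here):
	 *
	 *	sd-config-list = "ACME    SuperDisk", "acme-disk-data";
	 *
	 * sd_read_unit_properties() above is what consumes such entries.
	 */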
	/*
	 * A device is treated as hotpluggable only if it has the
	 * "hotpluggable" property; otherwise it is regarded as
	 * non-hotpluggable.
	 */
	if (ddi_prop_get_int(DDI_DEV_T_ANY, devi, 0, "hotpluggable",
	    -1) != -1) {
		un->un_f_is_hotpluggable = TRUE;
	}

	/*
	 * Set the unit's attributes (flags) according to "hotpluggable" and
	 * the RMB bit in the INQUIRY data.
	 */
	sd_set_unit_attributes(un, devi);

	/*
	 * By default, we mark the capacity, lbasize, and geometry
	 * as invalid. Only if we successfully read a valid capacity
	 * will we update un_blockcount and un_tgt_blocksize with the
	 * valid values (the geometry will be validated later).
	 */
	un->un_f_blockcount_is_valid = FALSE;
	un->un_f_tgt_blocksize_is_valid = FALSE;

	/*
	 * Use DEV_BSIZE and DEV_BSHIFT as defaults, until we can determine
	 * otherwise.
	 */
	un->un_tgt_blocksize = un->un_sys_blocksize = DEV_BSIZE;
	un->un_blockcount = 0;

	/*
	 * Set up the per-instance info needed to determine the correct
	 * CDBs and other info for issuing commands to the target.
	 */
	sd_init_cdb_limits(un);

	/*
	 * Set up the IO chains to use, based upon the target type.
	 */
	if (un->un_f_non_devbsize_supported) {
		un->un_buf_chain_type = SD_CHAIN_INFO_RMMEDIA;
	} else {
		un->un_buf_chain_type = SD_CHAIN_INFO_DISK;
	}
	un->un_uscsi_chain_type = SD_CHAIN_INFO_USCSI_CMD;
	un->un_direct_chain_type = SD_CHAIN_INFO_DIRECT_CMD;
	un->un_priority_chain_type = SD_CHAIN_INFO_PRIORITY_CMD;

	un->un_xbuf_attr = ddi_xbuf_attr_create(sizeof (struct sd_xbuf),
	    sd_xbuf_strategy, un, sd_xbuf_active_limit, sd_xbuf_reserve_limit,
	    ddi_driver_major(devi), DDI_XBUF_QTHREAD_DRIVER);
	ddi_xbuf_attr_register_devinfo(un->un_xbuf_attr, devi);


	if (ISCD(un)) {
		un->un_additional_codes = sd_additional_codes;
	} else {
		un->un_additional_codes = NULL;
	}

	/*
	 * Create the kstats here so they can be available for attach-time
	 * routines that send commands to the unit (either polled or via
	 * sd_send_scsi_cmd).
	 *
	 * Note: This is a critical sequence that needs to be maintained:
	 *	1) Instantiate the kstats here, before any routines using the
	 *	   iopath (i.e. sd_send_scsi_cmd).
	 *	2) Instantiate and initialize the partition stats
	 *	   (sd_set_pstats).
	 *	3) Initialize the error stats (sd_set_errstats), following
	 *	   sd_validate_geometry(), sd_register_devid(), and
	 *	   sd_cache_control().
	 */

	un->un_stats = kstat_create(sd_label, instance,
	    NULL, "disk", KSTAT_TYPE_IO, 1, KSTAT_FLAG_PERSISTENT);
	if (un->un_stats != NULL) {
		un->un_stats->ks_lock = SD_MUTEX(un);
		kstat_install(un->un_stats);
	}
	SD_TRACE(SD_LOG_ATTACH_DETACH, un,
	    "sd_unit_attach: un:0x%p un_stats created\n", un);

	sd_create_errstats(un, instance);
	if (un->un_errstats == NULL) {
		goto create_errstats_failed;
	}
	SD_TRACE(SD_LOG_ATTACH_DETACH, un,
	    "sd_unit_attach: un:0x%p errstats created\n", un);

	/*
	 * The following if/else code was relocated here from below as part
	 * of the fix for bug (4430280). However with the default setup added
	 * on entry to this routine, it's no longer absolutely necessary for
	 * this to be before the call to sd_spin_up_unit.
	 */
	if (SD_IS_PARALLEL_SCSI(un) || SD_IS_SERIAL(un)) {
		int tq_trigger_flag = (((devp->sd_inq->inq_ansi == 4) ||
		    (devp->sd_inq->inq_ansi == 5)) &&
		    devp->sd_inq->inq_bque) || devp->sd_inq->inq_cmdque;

		/*
		 * If tagged queueing is supported by the target
		 * and by the host adapter then we will enable it.
		 */
		un->un_tagflags = 0;
		if ((devp->sd_inq->inq_rdf == RDF_SCSI2) && tq_trigger_flag &&
		    (un->un_f_arq_enabled == TRUE)) {
			if (scsi_ifsetcap(SD_ADDRESS(un), "tagged-qing",
			    1, 1) == 1) {
				un->un_tagflags = FLAG_STAG;
				SD_INFO(SD_LOG_ATTACH_DETACH, un,
				    "sd_unit_attach: un:0x%p tag queueing "
				    "enabled\n", un);
			} else if (scsi_ifgetcap(SD_ADDRESS(un),
			    "untagged-qing", 0) == 1) {
				un->un_f_opt_queueing = TRUE;
				un->un_saved_throttle = un->un_throttle =
				    min(un->un_throttle, 3);
			} else {
				un->un_f_opt_queueing = FALSE;
				un->un_saved_throttle = un->un_throttle = 1;
			}
		} else if ((scsi_ifgetcap(SD_ADDRESS(un), "untagged-qing", 0)
		    == 1) && (un->un_f_arq_enabled == TRUE)) {
			/* The Host Adapter supports internal queueing. */
			un->un_f_opt_queueing = TRUE;
			un->un_saved_throttle = un->un_throttle =
			    min(un->un_throttle, 3);
		} else {
			un->un_f_opt_queueing = FALSE;
			un->un_saved_throttle = un->un_throttle = 1;
			SD_INFO(SD_LOG_ATTACH_DETACH, un,
			    "sd_unit_attach: un:0x%p no tag queueing\n", un);
		}

		/*
		 * Enable large transfers for SATA/SAS drives
		 */
		if (SD_IS_SERIAL(un)) {
			un->un_max_xfer_size =
			    ddi_getprop(DDI_DEV_T_ANY, devi, 0,
			    sd_max_xfer_size, SD_MAX_XFER_SIZE);
			SD_INFO(SD_LOG_ATTACH_DETACH, un,
			    "sd_unit_attach: un:0x%p max transfer "
			    "size=0x%x\n", un, un->un_max_xfer_size);
		}

		/* Set up or tear down default wide operations for disks */

		/*
		 * Note: Legacy: it may be possible for both "sd_max_xfer_size"
		 * and "ssd_max_xfer_size" to exist simultaneously on the same
		 * system and be set to different values. In the future this
		 * code may need to be updated when the ssd module is
		 * obsoleted and removed from the system. (4299588)
		 */
		if (SD_IS_PARALLEL_SCSI(un) &&
		    (devp->sd_inq->inq_rdf == RDF_SCSI2) &&
		    (devp->sd_inq->inq_wbus16 || devp->sd_inq->inq_wbus32)) {
			if (scsi_ifsetcap(SD_ADDRESS(un), "wide-xfer",
			    1, 1) == 1) {
				SD_INFO(SD_LOG_ATTACH_DETACH, un,
				    "sd_unit_attach: un:0x%p Wide Transfer "
				    "enabled\n", un);
			}

			/*
			 * If tagged queuing has also been enabled, then
			 * enable large xfers.
			 */
			if (un->un_saved_throttle == sd_max_throttle) {
				un->un_max_xfer_size =
				    ddi_getprop(DDI_DEV_T_ANY, devi, 0,
				    sd_max_xfer_size, SD_MAX_XFER_SIZE);
				SD_INFO(SD_LOG_ATTACH_DETACH, un,
				    "sd_unit_attach: un:0x%p max transfer "
				    "size=0x%x\n", un, un->un_max_xfer_size);
			}
		} else {
			if (scsi_ifsetcap(SD_ADDRESS(un), "wide-xfer",
			    0, 1) == 1) {
				SD_INFO(SD_LOG_ATTACH_DETACH, un,
				    "sd_unit_attach: un:0x%p "
				    "Wide Transfer disabled\n", un);
			}
		}
	} else {
		un->un_tagflags = FLAG_STAG;
		un->un_max_xfer_size = ddi_getprop(DDI_DEV_T_ANY,
		    devi, 0, sd_max_xfer_size, SD_MAX_XFER_SIZE);
	}
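	/*
	 * To summarize the throttle handling above: with tagged queueing the
	 * throttle keeps its configured maximum; with only untagged host
	 * adapter queueing it is capped at 3; and with no queueing at all it
	 * drops to 1 (one outstanding command at a time).
	 */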
	/*
	 * If this target supports LUN reset, try to enable it.
	 */
	if (un->un_f_lun_reset_enabled) {
		if (scsi_ifsetcap(SD_ADDRESS(un), "lun-reset", 1, 1) == 1) {
			SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_unit_attach: "
			    "un:0x%p lun_reset capability set\n", un);
		} else {
			SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_unit_attach: "
			    "un:0x%p lun-reset capability not set\n", un);
		}
	}

	/*
	 * Adjust the maximum transfer size. This is to fix
	 * the problem of partial DMA support on SPARC. Some
	 * HBA drivers, like aac, have a very small dma_attr_maxxfer
	 * size, which requires partial DMA support on SPARC.
	 * In the future the SPARC pci nexus driver may solve
	 * the problem instead of this fix.
	 */
#if defined(__sparc)
	max_xfer_size = scsi_ifgetcap(SD_ADDRESS(un), "dma-max", 1);
	if ((max_xfer_size > 0) && (max_xfer_size < un->un_max_xfer_size)) {
		un->un_max_xfer_size = max_xfer_size;
		un->un_partial_dma_supported = 1;
	}
#endif

	/*
	 * Set PKT_DMA_PARTIAL flag.
	 */
	if (un->un_partial_dma_supported == 1) {
		un->un_pkt_flags = PKT_DMA_PARTIAL;
	} else {
		un->un_pkt_flags = 0;
	}

	/* Initialize sd_ssc_t for internal uscsi commands */
	ssc = sd_ssc_init(un);
	scsi_fm_init(devp);

	/*
	 * Allocate memory for the SCSI FMA data structures.
	 */
	un->un_fm_private =
	    kmem_zalloc(sizeof (struct sd_fm_internal), KM_SLEEP);
	sfip = (struct sd_fm_internal *)un->un_fm_private;
	sfip->fm_ssc.ssc_uscsi_cmd = &sfip->fm_ucmd;
	sfip->fm_ssc.ssc_uscsi_info = &sfip->fm_uinfo;
	sfip->fm_ssc.ssc_un = un;

	/*
	 * At this point in the attach, we have enough info in the
	 * soft state to be able to issue commands to the target.
	 *
	 * All command paths used below MUST issue their commands as
	 * SD_PATH_DIRECT. This is important as intermediate layers
	 * are not all initialized yet (such as PM).
	 */

	/*
	 * Send a TEST UNIT READY command to the device. This should clear
	 * any outstanding UNIT ATTENTION that may be present.
	 *
	 * Note: Don't check for success, just track if there is a reservation,
	 * this is a throw away command to clear any unit attentions.
	 *
	 * Note: This MUST be the first command issued to the target during
	 * attach to ensure power on UNIT ATTENTIONS are cleared.
	 * Pass in flag SD_DONT_RETRY_TUR to prevent the long delays associated
	 * with attempts at spinning up a device with no media.
	 */
	status = sd_send_scsi_TEST_UNIT_READY(ssc, SD_DONT_RETRY_TUR);
	if (status != 0) {
		if (status == EACCES)
			reservation_flag = SD_TARGET_IS_RESERVED;
		sd_ssc_assessment(ssc, SD_FMT_IGNORE);
	}
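	/*
	 * EACCES here indicates the target returned RESERVATION CONFLICT
	 * status, i.e. another host holds a reservation on the device. The
	 * reservation_flag noted above is passed to sd_register_devid()
	 * later so the devid code can avoid issuing writes to a disk that
	 * is reserved by another host.
	 */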
	/*
	 * If the device is NOT a removable media device, attempt to spin
	 * it up (using the START_STOP_UNIT command) and read its capacity
	 * (using the READ CAPACITY command). Note, however, that either
	 * of these could fail and in some cases we would continue with
	 * the attach despite the failure (see below).
	 */
	if (un->un_f_descr_format_supported) {

		switch (sd_spin_up_unit(ssc)) {
		case 0:
			/*
			 * Spin-up was successful; now try to read the
			 * capacity. If successful then save the results
			 * and mark the capacity & lbasize as valid.
			 */
			SD_TRACE(SD_LOG_ATTACH_DETACH, un,
			    "sd_unit_attach: un:0x%p spin-up successful\n",
			    un);

			status = sd_send_scsi_READ_CAPACITY(ssc, &capacity,
			    &lbasize, SD_PATH_DIRECT);

			switch (status) {
			case 0: {
				if (capacity > DK_MAX_BLOCKS) {
#ifdef _LP64
					if ((capacity + 1) >
					    SD_GROUP1_MAX_ADDRESS) {
						/*
						 * Enable descriptor format
						 * sense data so that we can
						 * get 64 bit sense data
						 * fields.
						 */
						sd_enable_descr_sense(ssc);
					}
#else
					/* 32-bit kernels can't handle this */
					scsi_log(SD_DEVINFO(un),
					    sd_label, CE_WARN,
					    "disk has %llu blocks, which "
					    "is too large for a 32-bit "
					    "kernel", capacity);

#if defined(__i386) || defined(__amd64)
					/*
					 * A 1TB disk was treated as
					 * (1T - 512)B in the past, so that it
					 * might have a valid VTOC and solaris
					 * partitions; we have to allow it to
					 * continue to work.
					 */
					if (capacity - 1 > DK_MAX_BLOCKS)
#endif
						goto spinup_failed;
#endif
				}

				/*
				 * Here it's not necessary to check the case:
				 * the capacity of the device is bigger than
				 * what the max hba cdb can support. Because
				 * sd_send_scsi_READ_CAPACITY will retrieve
				 * the capacity by sending USCSI command, which
				 * is constrained by the max hba cdb. Actually,
				 * sd_send_scsi_READ_CAPACITY will return
				 * EINVAL when using a bigger cdb than the
				 * required cdb length. That case is handled
				 * in "case EINVAL" below.
				 */

				/*
				 * The following relies on
				 * sd_send_scsi_READ_CAPACITY never
				 * returning 0 for capacity and/or lbasize.
				 */
				sd_update_block_info(un, lbasize, capacity);

				SD_INFO(SD_LOG_ATTACH_DETACH, un,
				    "sd_unit_attach: un:0x%p capacity = %ld "
				    "blocks; lbasize= %ld.\n", un,
				    un->un_blockcount, un->un_tgt_blocksize);

				break;
			}
			case EINVAL:
				/*
				 * In the case where the max-cdb-length property
				 * is smaller than the required CDB length for
				 * a SCSI device, a target driver can fail to
				 * attach to that device.
				 */
				scsi_log(SD_DEVINFO(un),
				    sd_label, CE_WARN,
				    "disk capacity is too large "
				    "for current cdb length");
				sd_ssc_assessment(ssc, SD_FMT_IGNORE);

				goto spinup_failed;
			case EACCES:
				/*
				 * Should never get here if the spin-up
				 * succeeded, but code it in anyway.
				 * From here, just continue with the attach...
				 */
				SD_INFO(SD_LOG_ATTACH_DETACH, un,
				    "sd_unit_attach: un:0x%p "
				    "sd_send_scsi_READ_CAPACITY "
				    "returned reservation conflict\n", un);
				reservation_flag = SD_TARGET_IS_RESERVED;
				sd_ssc_assessment(ssc, SD_FMT_IGNORE);
				break;
			default:
				/*
				 * Likewise, should never get here if the
				 * spin-up succeeded. Just continue with
				 * the attach...
				 */
				if (status == EIO)
					sd_ssc_assessment(ssc,
					    SD_FMT_STATUS_CHECK);
				else
					sd_ssc_assessment(ssc,
					    SD_FMT_IGNORE);
				break;
			}
			break;
		case EACCES:
			/*
			 * Device is reserved by another host. In this case
			 * we could not spin it up or read the capacity, but
			 * we continue with the attach anyway.
			 */
			SD_INFO(SD_LOG_ATTACH_DETACH, un,
			    "sd_unit_attach: un:0x%p spin-up reservation "
			    "conflict.\n", un);
			reservation_flag = SD_TARGET_IS_RESERVED;
			break;
		default:
			/* Fail the attach if the spin-up failed. */
			SD_INFO(SD_LOG_ATTACH_DETACH, un,
			    "sd_unit_attach: un:0x%p spin-up failed.", un);
			goto spinup_failed;
		}

	}

	/*
	 * Check to see if this is an MMC drive.
	 */
	if (ISCD(un)) {
		sd_set_mmc_caps(ssc);
	}


	/*
	 * Add a zero-length attribute to tell the world we support
	 * kernel ioctls (for layered drivers).
	 */
	(void) ddi_prop_create(DDI_DEV_T_NONE, devi, DDI_PROP_CANSLEEP,
	    DDI_KERNEL_IOCTL, NULL, 0);

	/*
	 * Add a boolean property to tell the world we support
	 * the B_FAILFAST flag (for layered drivers).
	 */
	(void) ddi_prop_create(DDI_DEV_T_NONE, devi, DDI_PROP_CANSLEEP,
	    "ddi-failfast-supported", NULL, 0);

	/*
	 * Initialize power management.
	 */
	mutex_init(&un->un_pm_mutex, NULL, MUTEX_DRIVER, NULL);
	cv_init(&un->un_pm_busy_cv, NULL, CV_DRIVER, NULL);
	sd_setup_pm(ssc, devi);
	if (un->un_f_pm_is_enabled == FALSE) {
		/*
		 * For performance, point to a jump table that does
		 * not include pm.
		 * The direct and priority chains don't change with PM.
		 *
		 * Note: this is currently done based on individual device
		 * capabilities. When an interface for determining system
		 * power enabled state becomes available, or when additional
		 * layers are added to the command chain, these values will
		 * have to be re-evaluated for correctness.
		 */
		if (un->un_f_non_devbsize_supported) {
			un->un_buf_chain_type = SD_CHAIN_INFO_RMMEDIA_NO_PM;
		} else {
			un->un_buf_chain_type = SD_CHAIN_INFO_DISK_NO_PM;
		}
		un->un_uscsi_chain_type = SD_CHAIN_INFO_USCSI_CMD_NO_PM;
	}

	/*
	 * This property is set to 0 by HA software to avoid retries
	 * on a reserved disk. (The preferred property name is
	 * "retry-on-reservation-conflict") (1189689)
	 *
	 * Note: The use of a global here can have unintended consequences. A
	 * per instance variable is preferable to match the capabilities of
	 * different underlying HBAs. (4402600)
	 */
	sd_retry_on_reservation_conflict = ddi_getprop(DDI_DEV_T_ANY, devi,
	    DDI_PROP_DONTPASS, "retry-on-reservation-conflict",
	    sd_retry_on_reservation_conflict);
	if (sd_retry_on_reservation_conflict != 0) {
		sd_retry_on_reservation_conflict = ddi_getprop(DDI_DEV_T_ANY,
		    devi, DDI_PROP_DONTPASS, sd_resv_conflict_name,
		    sd_retry_on_reservation_conflict);
	}

	/* Set up options for QFULL handling. */
	if ((rval = ddi_getprop(DDI_DEV_T_ANY, devi, 0,
	    "qfull-retries", -1)) != -1) {
		(void) scsi_ifsetcap(SD_ADDRESS(un), "qfull-retries",
		    rval, 1);
	}
	if ((rval = ddi_getprop(DDI_DEV_T_ANY, devi, 0,
	    "qfull-retry-interval", -1)) != -1) {
		(void) scsi_ifsetcap(SD_ADDRESS(un), "qfull-retry-interval",
		    rval, 1);
	}

	/*
	 * This just prints a message that announces the existence of the
	 * device. The message is always printed in the system logfile, but
	 * only appears on the console if the system is booted with the
	 * -v (verbose) argument.
	 */
	ddi_report_dev(devi);

	un->un_mediastate = DKIO_NONE;

	cmlb_alloc_handle(&un->un_cmlbhandle);

#if defined(__i386) || defined(__amd64)
	/*
	 * On x86, compensate for the off-by-1 legacy error.
	 */
	if (!un->un_f_has_removable_media && !un->un_f_is_hotpluggable &&
	    (lbasize == un->un_sys_blocksize))
		offbyone = CMLB_OFF_BY_ONE;
#endif

	if (cmlb_attach(devi, &sd_tgops, (int)devp->sd_inq->inq_dtype,
	    un->un_f_has_removable_media, un->un_f_is_hotpluggable,
	    un->un_node_type, offbyone, un->un_cmlbhandle,
	    (void *)SD_PATH_DIRECT) != 0) {
		goto cmlb_attach_failed;
	}


	/*
	 * Read and validate the device's geometry (ie, disk label).
	 * A new unformatted drive will not have a valid geometry, but
	 * the driver needs to successfully attach to this device so
	 * the drive can be formatted via ioctls.
	 */
	geom_label_valid = (cmlb_validate(un->un_cmlbhandle, 0,
	    (void *)SD_PATH_DIRECT) == 0) ? 1 : 0;

	mutex_enter(SD_MUTEX(un));

	/*
	 * Read and initialize the devid for the unit.
	 */
	if (un->un_f_devid_supported) {
		sd_register_devid(ssc, devi, reservation_flag);
	}
	mutex_exit(SD_MUTEX(un));

#if (defined(__fibre))
	/*
	 * Register callbacks for fibre only. You can't do this solely
	 * on the basis of the devid_type because this is hba specific.
	 * We need to query our hba capabilities to find out whether to
	 * register or not.
	 */
	if (un->un_f_is_fibre) {
		if (strcmp(un->un_node_type, DDI_NT_BLOCK_CHAN)) {
			sd_init_event_callbacks(un);
			SD_TRACE(SD_LOG_ATTACH_DETACH, un,
			    "sd_unit_attach: un:0x%p event callbacks inserted",
			    un);
		}
	}
#endif

	if (un->un_f_opt_disable_cache == TRUE) {
		/*
		 * Disable both read cache and write cache. This is
		 * the historic behavior of the keywords in the config file.
		 */
		if (sd_cache_control(ssc, SD_CACHE_DISABLE, SD_CACHE_DISABLE) !=
		    0) {
			SD_ERROR(SD_LOG_ATTACH_DETACH, un,
			    "sd_unit_attach: un:0x%p Could not disable "
			    "caching", un);
			goto devid_failed;
		}
	}

	/*
	 * Check the value of the WCE bit now and
	 * set un_f_write_cache_enabled accordingly.
	 */
	(void) sd_get_write_cache_enabled(ssc, &wc_enabled);
	mutex_enter(SD_MUTEX(un));
	un->un_f_write_cache_enabled = (wc_enabled != 0);
	mutex_exit(SD_MUTEX(un));

	/*
	 * Check the value of the NV_SUP bit and set
	 * un_f_suppress_cache_flush accordingly.
	 */
	sd_get_nv_sup(ssc);

	/*
	 * Find out what type of reservation this disk supports.
	 */
	status = sd_send_scsi_PERSISTENT_RESERVE_IN(ssc, SD_READ_KEYS, 0, NULL);

	switch (status) {
	case 0:
		/*
		 * SCSI-3 reservations are supported.
		 */
		un->un_reservation_type = SD_SCSI3_RESERVATION;
		SD_INFO(SD_LOG_ATTACH_DETACH, un,
		    "sd_unit_attach: un:0x%p SCSI-3 reservations\n", un);
		break;
	case ENOTSUP:
		/*
		 * The PERSISTENT RESERVE IN command would not be recognized
		 * by a SCSI-2 device, so assume the reservation type is
		 * SCSI-2.
		 */
		SD_INFO(SD_LOG_ATTACH_DETACH, un,
		    "sd_unit_attach: un:0x%p SCSI-2 reservations\n", un);
		un->un_reservation_type = SD_SCSI2_RESERVATION;

		sd_ssc_assessment(ssc, SD_FMT_IGNORE);
		break;
	default:
		/*
		 * Default to SCSI-3 reservations.
		 */
		SD_INFO(SD_LOG_ATTACH_DETACH, un,
		    "sd_unit_attach: un:0x%p default SCSI3 reservations\n",
		    un);
		un->un_reservation_type = SD_SCSI3_RESERVATION;

		sd_ssc_assessment(ssc, SD_FMT_IGNORE);
		break;
	}

	/*
	 * Set the pstat and error stat values here, so data obtained during
	 * the previous attach-time routines is available.
	 *
	 * Note: This is a critical sequence that needs to be maintained:
	 *	1) Instantiate the kstats before any routines using the iopath
	 *	   (i.e. sd_send_scsi_cmd).
	 *	2) Initialize the error stats (sd_set_errstats) and partition
	 *	   stats (sd_set_pstats) here, following
	 *	   cmlb_validate_geometry(), sd_register_devid(), and
	 *	   sd_cache_control().
	 */

	if (un->un_f_pkstats_enabled && geom_label_valid) {
		sd_set_pstats(un);
		SD_TRACE(SD_LOG_IO_PARTITION, un,
		    "sd_unit_attach: un:0x%p pstats created and set\n", un);
	}

	sd_set_errstats(un);
	SD_TRACE(SD_LOG_ATTACH_DETACH, un,
	    "sd_unit_attach: un:0x%p errstats set\n", un);


	/*
	 * After successfully attaching an instance, we record the information
	 * of how many luns have been attached on the relative target and
	 * controller for parallel SCSI. This information is used when sd tries
	 * to set the tagged queuing capability in HBA.
	 */
	if (SD_IS_PARALLEL_SCSI(un) && (tgt >= 0) && (tgt < NTARGETS_WIDE)) {
		sd_scsi_update_lun_on_target(pdip, tgt, SD_SCSI_LUN_ATTACH);
	}

	SD_TRACE(SD_LOG_ATTACH_DETACH, un,
	    "sd_unit_attach: un:0x%p exit success\n", un);

	/* Uninitialize sd_ssc_t pointer */
	sd_ssc_fini(ssc);

	return (DDI_SUCCESS);

	/*
	 * An error occurred during the attach; clean up & return failure.
	 */

devid_failed:

setup_pm_failed:
	ddi_remove_minor_node(devi, NULL);

cmlb_attach_failed:
	/*
	 * Cleanup from the scsi_ifsetcap() calls (437868)
	 */
	(void) scsi_ifsetcap(SD_ADDRESS(un), "lun-reset", 0, 1);
	(void) scsi_ifsetcap(SD_ADDRESS(un), "wide-xfer", 0, 1);

	/*
	 * Refer to the comments on setting tagged-qing near the beginning of
	 * sd_unit_attach. We can only disable tagged queuing when there is
	 * no lun attached on the target.
	 */
	if (sd_scsi_get_target_lun_count(pdip, tgt) < 1) {
		(void) scsi_ifsetcap(SD_ADDRESS(un), "tagged-qing", 0, 1);
	}

	if (un->un_f_is_fibre == FALSE) {
		(void) scsi_ifsetcap(SD_ADDRESS(un), "auto-rqsense", 0, 1);
	}

spinup_failed:

	/* Uninitialize sd_ssc_t pointer */
	sd_ssc_fini(ssc);

	mutex_enter(SD_MUTEX(un));

	/* Deallocate SCSI FMA memory spaces */
	kmem_free(un->un_fm_private, sizeof (struct sd_fm_internal));
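	/*
	 * In each of the cancellation blocks below the timeout id is copied
	 * and cleared while holding SD_MUTEX, but SD_MUTEX is dropped around
	 * the untimeout() call itself: untimeout() may have to wait for a
	 * timeout handler that is already running, and those handlers take
	 * SD_MUTEX, so calling untimeout() with the mutex held could
	 * deadlock.
	 */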
	/* Cancel callback for SD_PATH_DIRECT_PRIORITY cmd restart */
	if (un->un_direct_priority_timeid != NULL) {
		timeout_id_t temp_id = un->un_direct_priority_timeid;
		un->un_direct_priority_timeid = NULL;
		mutex_exit(SD_MUTEX(un));
		(void) untimeout(temp_id);
		mutex_enter(SD_MUTEX(un));
	}

	/* Cancel any pending start/stop timeouts */
	if (un->un_startstop_timeid != NULL) {
		timeout_id_t temp_id = un->un_startstop_timeid;
		un->un_startstop_timeid = NULL;
		mutex_exit(SD_MUTEX(un));
		(void) untimeout(temp_id);
		mutex_enter(SD_MUTEX(un));
	}

	/* Cancel any pending reset-throttle timeouts */
	if (un->un_reset_throttle_timeid != NULL) {
		timeout_id_t temp_id = un->un_reset_throttle_timeid;
		un->un_reset_throttle_timeid = NULL;
		mutex_exit(SD_MUTEX(un));
		(void) untimeout(temp_id);
		mutex_enter(SD_MUTEX(un));
	}

	/* Cancel any pending retry timeouts */
	if (un->un_retry_timeid != NULL) {
		timeout_id_t temp_id = un->un_retry_timeid;
		un->un_retry_timeid = NULL;
		mutex_exit(SD_MUTEX(un));
		(void) untimeout(temp_id);
		mutex_enter(SD_MUTEX(un));
	}

	/* Cancel any pending delayed cv broadcast timeouts */
	if (un->un_dcvb_timeid != NULL) {
		timeout_id_t temp_id = un->un_dcvb_timeid;
		un->un_dcvb_timeid = NULL;
		mutex_exit(SD_MUTEX(un));
		(void) untimeout(temp_id);
		mutex_enter(SD_MUTEX(un));
	}

	mutex_exit(SD_MUTEX(un));

	/* There should not be any in-progress I/O so ASSERT this check */
	ASSERT(un->un_ncmds_in_transport == 0);
	ASSERT(un->un_ncmds_in_driver == 0);

	/* Do not free the softstate if the callback routine is active */
	sd_sync_with_callback(un);

	/*
	 * Partition stats apparently are not used with removables. These would
	 * not have been created during attach, so no need to clean them up...
	 */
	if (un->un_errstats != NULL) {
		kstat_delete(un->un_errstats);
		un->un_errstats = NULL;
	}

create_errstats_failed:

	if (un->un_stats != NULL) {
		kstat_delete(un->un_stats);
		un->un_stats = NULL;
	}

	ddi_xbuf_attr_unregister_devinfo(un->un_xbuf_attr, devi);
	ddi_xbuf_attr_destroy(un->un_xbuf_attr);

	ddi_prop_remove_all(devi);
	sema_destroy(&un->un_semoclose);
	cv_destroy(&un->un_state_cv);

getrbuf_failed:

	sd_free_rqs(un);

alloc_rqs_failed:

	devp->sd_private = NULL;
	bzero(un, sizeof (struct sd_lun));	/* Clear any stale data! */

get_softstate_failed:
	/*
	 * Note: the man pages are unclear as to whether or not doing a
	 * ddi_soft_state_free(sd_state, instance) is the right way to
	 * clean up after the ddi_soft_state_zalloc() if the subsequent
	 * ddi_get_soft_state() fails. The implication seems to be
	 * that the get_soft_state cannot fail if the zalloc succeeds.
	 */
	ddi_soft_state_free(sd_state, instance);

probe_failed:
	scsi_unprobe(devp);

	return (DDI_FAILURE);
}


/*
 * Function: sd_unit_detach
 *
 * Description: Performs DDI_DETACH processing for sddetach().
 *
 * Return Code: DDI_SUCCESS
 *		DDI_FAILURE
 *
 * Context: Kernel thread context
 */

static int
sd_unit_detach(dev_info_t *devi)
{
	struct scsi_device	*devp;
	struct sd_lun		*un;
	int			i;
	int			tgt;
	dev_t			dev;
	dev_info_t		*pdip = ddi_get_parent(devi);
	int			instance = ddi_get_instance(devi);

	mutex_enter(&sd_detach_mutex);

	/*
	 * Fail the detach for any of the following:
	 *  - Unable to get the sd_lun struct for the instance
	 *  - A layered driver has an outstanding open on the instance
	 *  - Another thread is already detaching this instance
	 *  - Another thread is currently performing an open
	 */
	devp = ddi_get_driver_private(devi);
	if ((devp == NULL) ||
	    ((un = (struct sd_lun *)devp->sd_private) == NULL) ||
	    (un->un_ncmds_in_driver != 0) || (un->un_layer_count != 0) ||
	    (un->un_detach_count != 0) || (un->un_opens_in_progress != 0)) {
		mutex_exit(&sd_detach_mutex);
		return (DDI_FAILURE);
	}

	SD_TRACE(SD_LOG_ATTACH_DETACH, un, "sd_unit_detach: entry 0x%p\n", un);

	/*
	 * Mark this instance as currently in a detach, to inhibit any
	 * opens from a layered driver.
	 */
	un->un_detach_count++;
	mutex_exit(&sd_detach_mutex);

	tgt = ddi_prop_get_int(DDI_DEV_T_ANY, devi, DDI_PROP_DONTPASS,
	    SCSI_ADDR_PROP_TARGET, -1);

	dev = sd_make_device(SD_DEVINFO(un));

#ifndef lint
	_NOTE(COMPETING_THREADS_NOW);
#endif

	mutex_enter(SD_MUTEX(un));

	/*
	 * Fail the detach if there are any outstanding layered
	 * opens on this device.
	 */
	for (i = 0; i < NDKMAP; i++) {
		if (un->un_ocmap.lyropen[i] != 0) {
			goto err_notclosed;
		}
	}

	/*
	 * Verify there are NO outstanding commands issued to this device,
	 * i.e., un_ncmds_in_transport == 0.
	 * It's possible to have outstanding commands through the physio
	 * code path, even though everything's closed.
	 */
	if ((un->un_ncmds_in_transport != 0) || (un->un_retry_timeid != NULL) ||
	    (un->un_direct_priority_timeid != NULL) ||
	    (un->un_state == SD_STATE_RWAIT)) {
		mutex_exit(SD_MUTEX(un));
		SD_ERROR(SD_LOG_ATTACH_DETACH, un,
		    "sd_dr_detach: Detach failure due to outstanding cmds\n");
		goto err_stillbusy;
	}

	/*
	 * If we have the device reserved, release the reservation.
	 */
	if ((un->un_resvd_status & SD_RESERVE) &&
	    !(un->un_resvd_status & SD_LOST_RESERVE)) {
		mutex_exit(SD_MUTEX(un));
		/*
		 * Note: sd_reserve_release sends a command to the device
		 * via the sd_ioctlcmd() path, and can sleep.
		 */
		if (sd_reserve_release(dev, SD_RELEASE) != 0) {
			SD_ERROR(SD_LOG_ATTACH_DETACH, un,
			    "sd_dr_detach: Cannot release reservation \n");
		}
	} else {
		mutex_exit(SD_MUTEX(un));
	}

	/*
	 * Untimeout any reserve recover, throttle reset, restart unit
	 * and delayed broadcast timeout threads. Protect the timeout pointers
	 * from getting nulled by their callback functions.
	 */
	mutex_enter(SD_MUTEX(un));
	if (un->un_resvd_timeid != NULL) {
		timeout_id_t temp_id = un->un_resvd_timeid;
		un->un_resvd_timeid = NULL;
		mutex_exit(SD_MUTEX(un));
		(void) untimeout(temp_id);
		mutex_enter(SD_MUTEX(un));
	}

	if (un->un_reset_throttle_timeid != NULL) {
		timeout_id_t temp_id = un->un_reset_throttle_timeid;
		un->un_reset_throttle_timeid = NULL;
		mutex_exit(SD_MUTEX(un));
		(void) untimeout(temp_id);
		mutex_enter(SD_MUTEX(un));
	}

	if (un->un_startstop_timeid != NULL) {
		timeout_id_t temp_id = un->un_startstop_timeid;
		un->un_startstop_timeid = NULL;
		mutex_exit(SD_MUTEX(un));
		(void) untimeout(temp_id);
		mutex_enter(SD_MUTEX(un));
	}

	if (un->un_dcvb_timeid != NULL) {
		timeout_id_t temp_id = un->un_dcvb_timeid;
		un->un_dcvb_timeid = NULL;
		mutex_exit(SD_MUTEX(un));
		(void) untimeout(temp_id);
	} else {
		mutex_exit(SD_MUTEX(un));
	}

	/* Remove any pending reservation reclaim requests for this device */
	sd_rmv_resv_reclaim_req(dev);

	mutex_enter(SD_MUTEX(un));

	/* Cancel any pending callbacks for SD_PATH_DIRECT_PRIORITY cmd. */
	if (un->un_direct_priority_timeid != NULL) {
		timeout_id_t temp_id = un->un_direct_priority_timeid;
		un->un_direct_priority_timeid = NULL;
		mutex_exit(SD_MUTEX(un));
		(void) untimeout(temp_id);
		mutex_enter(SD_MUTEX(un));
	}

	/* Cancel any active multi-host disk watch thread requests */
	if (un->un_mhd_token != NULL) {
		mutex_exit(SD_MUTEX(un));
		_NOTE(DATA_READABLE_WITHOUT_LOCK(sd_lun::un_mhd_token));
		if (scsi_watch_request_terminate(un->un_mhd_token,
		    SCSI_WATCH_TERMINATE_NOWAIT)) {
			SD_ERROR(SD_LOG_ATTACH_DETACH, un,
			    "sd_dr_detach: Cannot cancel mhd watch request\n");
			/*
			 * Note: We are returning here after having removed
			 * some driver timeouts above. This is consistent with
			 * the legacy implementation but perhaps the watch
			 * terminate call should be made with the wait flag set.
			 */
			goto err_stillbusy;
		}
		mutex_enter(SD_MUTEX(un));
		un->un_mhd_token = NULL;
	}

	if (un->un_swr_token != NULL) {
		mutex_exit(SD_MUTEX(un));
		_NOTE(DATA_READABLE_WITHOUT_LOCK(sd_lun::un_swr_token));
		if (scsi_watch_request_terminate(un->un_swr_token,
		    SCSI_WATCH_TERMINATE_NOWAIT)) {
			SD_ERROR(SD_LOG_ATTACH_DETACH, un,
			    "sd_dr_detach: Cannot cancel swr watch request\n");
			/*
			 * Note: We are returning here after having removed
			 * some driver timeouts above. This is consistent with
			 * the legacy implementation but perhaps the watch
			 * terminate call should be made with the wait flag set.
			 */
			goto err_stillbusy;
		}
		mutex_enter(SD_MUTEX(un));
		un->un_swr_token = NULL;
	}

	mutex_exit(SD_MUTEX(un));

	/*
	 * Clear any scsi_reset_notifies. We clear the reset notifies
	 * if we have not registered one.
	 * Note: The sd_mhd_reset_notify_cb() fn tries to acquire SD_MUTEX!
	 */
	(void) scsi_reset_notify(SD_ADDRESS(un), SCSI_RESET_CANCEL,
	    sd_mhd_reset_notify_cb, (caddr_t)un);

	/*
	 * Protect the timeout pointers from getting nulled by
	 * their callback functions during the cancellation process.
	 * In such a scenario untimeout can be invoked with a null value.
	 */
	_NOTE(NO_COMPETING_THREADS_NOW);

	mutex_enter(&un->un_pm_mutex);
	if (un->un_pm_idle_timeid != NULL) {
		timeout_id_t temp_id = un->un_pm_idle_timeid;
		un->un_pm_idle_timeid = NULL;
		mutex_exit(&un->un_pm_mutex);

		/*
		 * Timeout is active; cancel it.
		 * Note that it'll never be active on a device
		 * that does not support PM therefore we don't
		 * have to check before calling pm_idle_component.
		 */
		(void) untimeout(temp_id);
		(void) pm_idle_component(SD_DEVINFO(un), 0);
		mutex_enter(&un->un_pm_mutex);
	}

	/*
	 * Check whether there is already a timeout scheduled for power
	 * management. If yes then don't lower the power here, that's
	 * the timeout handler's job.
	 */
	if (un->un_pm_timeid != NULL) {
		timeout_id_t temp_id = un->un_pm_timeid;
		un->un_pm_timeid = NULL;
		mutex_exit(&un->un_pm_mutex);
		/*
		 * Timeout is active; cancel it.
		 * Note that it'll never be active on a device
		 * that does not support PM therefore we don't
		 * have to check before calling pm_idle_component.
		 */
		(void) untimeout(temp_id);
		(void) pm_idle_component(SD_DEVINFO(un), 0);

	} else {
		mutex_exit(&un->un_pm_mutex);
		if ((un->un_f_pm_is_enabled == TRUE) &&
		    (pm_lower_power(SD_DEVINFO(un), 0, SD_SPINDLE_OFF) !=
		    DDI_SUCCESS)) {
			SD_ERROR(SD_LOG_ATTACH_DETACH, un,
			    "sd_dr_detach: Lower power request failed, ignoring.\n");
			/*
			 * Fix for bug: 4297749, item # 13
			 * The above test now includes a check to see if PM is
			 * supported by this device before calling
			 * pm_lower_power().
			 * Note, the following is not dead code. The call to
			 * pm_lower_power above will generate a call back into
			 * our sdpower routine which might result in a timeout
			 * handler getting activated. Therefore the following
			 * code is valid and necessary.
			 */
			mutex_enter(&un->un_pm_mutex);
			if (un->un_pm_timeid != NULL) {
				timeout_id_t temp_id = un->un_pm_timeid;
				un->un_pm_timeid = NULL;
				mutex_exit(&un->un_pm_mutex);
				(void) untimeout(temp_id);
				(void) pm_idle_component(SD_DEVINFO(un), 0);
			} else {
				mutex_exit(&un->un_pm_mutex);
			}
		}
	}

	/*
	 * Cleanup from the scsi_ifsetcap() calls (437868)
	 * Relocated here from above to be after the call to
	 * pm_lower_power, which was getting errors.
	 */
	(void) scsi_ifsetcap(SD_ADDRESS(un), "lun-reset", 0, 1);
	(void) scsi_ifsetcap(SD_ADDRESS(un), "wide-xfer", 0, 1);

	/*
	 * Currently, tagged queuing is supported per target by the HBA.
	 * Setting this per lun instance actually sets the capability of this
	 * target in the HBA, which affects those luns already attached on the
	 * same target. So during detach, we can only disable this capability
	 * when this is the only lun left on the target. By doing this, we
	 * assume a target has the same tagged queuing capability for every
	 * lun. The condition can be removed when the HBA is changed to
	 * support a per lun based tagged queuing capability.
	 */
	if (sd_scsi_get_target_lun_count(pdip, tgt) <= 1) {
		(void) scsi_ifsetcap(SD_ADDRESS(un), "tagged-qing", 0, 1);
	}

	if (un->un_f_is_fibre == FALSE) {
		(void) scsi_ifsetcap(SD_ADDRESS(un), "auto-rqsense", 0, 1);
	}

	/*
	 * Remove any event callbacks, fibre only.
	 */
	if (un->un_f_is_fibre == TRUE) {
		if ((un->un_insert_event != NULL) &&
		    (ddi_remove_event_handler(un->un_insert_cb_id) !=
		    DDI_SUCCESS)) {
			/*
			 * Note: We are returning here after having done
			 * substantial cleanup above. This is consistent
			 * with the legacy implementation but this may not
			 * be the right thing to do.
			 */
			SD_ERROR(SD_LOG_ATTACH_DETACH, un,
			    "sd_dr_detach: Cannot cancel insert event\n");
			goto err_remove_event;
		}
		un->un_insert_event = NULL;

		if ((un->un_remove_event != NULL) &&
		    (ddi_remove_event_handler(un->un_remove_cb_id) !=
		    DDI_SUCCESS)) {
			/*
			 * Note: We are returning here after having done
			 * substantial cleanup above. This is consistent
			 * with the legacy implementation but this may not
			 * be the right thing to do.
			 */
			SD_ERROR(SD_LOG_ATTACH_DETACH, un,
			    "sd_dr_detach: Cannot cancel remove event\n");
			goto err_remove_event;
		}
		un->un_remove_event = NULL;
	}

	/* Do not free the softstate if the callback routine is active */
	sd_sync_with_callback(un);

	cmlb_detach(un->un_cmlbhandle, (void *)SD_PATH_DIRECT);
	cmlb_free_handle(&un->un_cmlbhandle);

	/*
	 * Hold the detach mutex here, to make sure that no other threads ever
	 * can access a (partially) freed soft state structure.
	 */
	mutex_enter(&sd_detach_mutex);

	/*
	 * Clean up the soft state struct.
	 * Cleanup is done in reverse order of allocs/inits.
	 * At this point there should be no competing threads anymore.
	 */

	scsi_fm_fini(devp);

	/*
	 * Deallocate memory for SCSI FMA.
	 */
	kmem_free(un->un_fm_private, sizeof (struct sd_fm_internal));

	/* Unregister and free device id. */
	ddi_devid_unregister(devi);
	if (un->un_devid) {
		ddi_devid_free(un->un_devid);
		un->un_devid = NULL;
	}

	/*
	 * Destroy wmap cache if it exists.
	 */
	if (un->un_wm_cache != NULL) {
		kmem_cache_destroy(un->un_wm_cache);
		un->un_wm_cache = NULL;
	}

	/*
	 * kstat cleanup is done in detach for all device types (4363169).
	 * We do not want to fail detach if the device kstats are not deleted
	 * since there is confusion about the devo_refcnt for the device.
	 * We just delete the kstats and let detach complete successfully.
	 */
	if (un->un_stats != NULL) {
		kstat_delete(un->un_stats);
		un->un_stats = NULL;
	}
	if (un->un_errstats != NULL) {
		kstat_delete(un->un_errstats);
		un->un_errstats = NULL;
	}

	/* Remove partition stats */
	if (un->un_f_pkstats_enabled) {
		for (i = 0; i < NSDMAP; i++) {
			if (un->un_pstats[i] != NULL) {
				kstat_delete(un->un_pstats[i]);
				un->un_pstats[i] = NULL;
			}
		}
	}

	/* Remove xbuf registration */
	ddi_xbuf_attr_unregister_devinfo(un->un_xbuf_attr, devi);
	ddi_xbuf_attr_destroy(un->un_xbuf_attr);

	/* Remove driver properties */
	ddi_prop_remove_all(devi);

	mutex_destroy(&un->un_pm_mutex);
	cv_destroy(&un->un_pm_busy_cv);

	cv_destroy(&un->un_wcc_cv);

	/* Open/close semaphore */
	sema_destroy(&un->un_semoclose);

	/* Removable media condvar. */
	cv_destroy(&un->un_state_cv);

	/* Suspend/resume condvar. */
	cv_destroy(&un->un_suspend_cv);
	cv_destroy(&un->un_disk_busy_cv);

	sd_free_rqs(un);

	/* Free up soft state */
	devp->sd_private = NULL;

	bzero(un, sizeof (struct sd_lun));
	ddi_soft_state_free(sd_state, instance);

	mutex_exit(&sd_detach_mutex);

	/* This frees up the INQUIRY data associated with the device. */
	scsi_unprobe(devp);

	/*
	 * After successfully detaching an instance, we update the information
	 * of how many luns have been attached in the relative target and
	 * controller for parallel SCSI. This information is used when sd tries
	 * to set the tagged queuing capability in HBA.
	 * Since un has been released, we can't use SD_IS_PARALLEL_SCSI(un) to
	 * check if the device is parallel SCSI. However, we don't need to
	 * check here because we've already checked during attach. No device
	 * that is not parallel SCSI is in the chain.
	 */
	if ((tgt >= 0) && (tgt < NTARGETS_WIDE)) {
		sd_scsi_update_lun_on_target(pdip, tgt, SD_SCSI_LUN_DETACH);
	}

	return (DDI_SUCCESS);

err_notclosed:
	mutex_exit(SD_MUTEX(un));

err_stillbusy:
	_NOTE(NO_COMPETING_THREADS_NOW);

err_remove_event:
	mutex_enter(&sd_detach_mutex);
	un->un_detach_count--;
	mutex_exit(&sd_detach_mutex);

	SD_TRACE(SD_LOG_ATTACH_DETACH, un, "sd_unit_detach: exit failure\n");
	return (DDI_FAILURE);
}


/*
 * Function: sd_create_errstats
 *
 * Description: This routine instantiates the device error stats.
 *
 *	Note: During attach the stats are instantiated first so they are
 *	available for attach-time routines that utilize the driver
 *	iopath to send commands to the device. The stats are initialized
 *	separately so data obtained during some attach-time routines is
 *	available. (4362483)
 *
 * Arguments: un - driver soft state (unit) structure
 *	instance - driver instance
 *
 * Context: Kernel thread context
 */

static void
sd_create_errstats(struct sd_lun *un, int instance)
{
	struct	sd_errstats	*stp;
	char	kstatmodule_err[KSTAT_STRLEN];
	char	kstatname[KSTAT_STRLEN];
	int	ndata = (sizeof (struct sd_errstats) / sizeof (kstat_named_t));

	ASSERT(un != NULL);

	if (un->un_errstats != NULL) {
		return;
	}

	(void) snprintf(kstatmodule_err, sizeof (kstatmodule_err),
	    "%serr", sd_label);
	(void) snprintf(kstatname, sizeof (kstatname),
	    "%s%d,err", sd_label, instance);

	un->un_errstats = kstat_create(kstatmodule_err, instance, kstatname,
	    "device_error", KSTAT_TYPE_NAMED, ndata, KSTAT_FLAG_PERSISTENT);

	if (un->un_errstats == NULL) {
		SD_ERROR(SD_LOG_ATTACH_DETACH, un,
		    "sd_create_errstats: Failed kstat_create\n");
		return;
	}

	stp = (struct sd_errstats *)un->un_errstats->ks_data;
	kstat_named_init(&stp->sd_softerrs, "Soft Errors",
	    KSTAT_DATA_UINT32);
	kstat_named_init(&stp->sd_harderrs, "Hard Errors",
	    KSTAT_DATA_UINT32);
	kstat_named_init(&stp->sd_transerrs, "Transport Errors",
	    KSTAT_DATA_UINT32);
	kstat_named_init(&stp->sd_vid, "Vendor",
	    KSTAT_DATA_CHAR);
	kstat_named_init(&stp->sd_pid, "Product",
	    KSTAT_DATA_CHAR);
	kstat_named_init(&stp->sd_revision, "Revision",
	    KSTAT_DATA_CHAR);
	kstat_named_init(&stp->sd_serial, "Serial No",
	    KSTAT_DATA_CHAR);
	kstat_named_init(&stp->sd_capacity, "Size",
	    KSTAT_DATA_ULONGLONG);
	kstat_named_init(&stp->sd_rq_media_err, "Media Error",
	    KSTAT_DATA_UINT32);
	kstat_named_init(&stp->sd_rq_ntrdy_err, "Device Not Ready",
	    KSTAT_DATA_UINT32);
	kstat_named_init(&stp->sd_rq_nodev_err, "No Device",
	    KSTAT_DATA_UINT32);
	kstat_named_init(&stp->sd_rq_recov_err, "Recoverable",
	    KSTAT_DATA_UINT32);
	kstat_named_init(&stp->sd_rq_illrq_err, "Illegal Request",
	    KSTAT_DATA_UINT32);
	kstat_named_init(&stp->sd_rq_pfa_err, "Predictive Failure Analysis",
	    KSTAT_DATA_UINT32);

	un->un_errstats->ks_private = un;
	un->un_errstats->ks_update = nulldev;

	kstat_install(un->un_errstats);
}
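/*
 * The named kstats created above are the per-device counters reported by
 * "iostat -E" (Soft Errors, Hard Errors, Transport Errors, and the request
 * sense derived counters), together with the identity strings and capacity
 * filled in by sd_set_errstats() below.
 */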
/*
 * Function: sd_set_errstats
 *
 * Description: This routine sets the value of the vendor id, product id,
 *	revision, serial number, and capacity device error stats.
 *
 *	Note: During attach the stats are instantiated first so they are
 *	available for attach-time routines that utilize the driver
 *	iopath to send commands to the device. The stats are initialized
 *	separately so data obtained during some attach-time routines is
 *	available. (4362483)
 *
 * Arguments: un - driver soft state (unit) structure
 *
 * Context: Kernel thread context
 */

static void
sd_set_errstats(struct sd_lun *un)
{
	struct	sd_errstats	*stp;

	ASSERT(un != NULL);
	ASSERT(un->un_errstats != NULL);
	stp = (struct sd_errstats *)un->un_errstats->ks_data;
	ASSERT(stp != NULL);
	(void) strncpy(stp->sd_vid.value.c, un->un_sd->sd_inq->inq_vid, 8);
	(void) strncpy(stp->sd_pid.value.c, un->un_sd->sd_inq->inq_pid, 16);
	(void) strncpy(stp->sd_revision.value.c,
	    un->un_sd->sd_inq->inq_revision, 4);

	/*
	 * All the errstats are persistent across detach/attach,
	 * so reset all the errstats here in case of the hot
	 * replacement of disk drives, except for unchanged
	 * Sun-qualified drives.
	 */
	if ((bcmp(&SD_INQUIRY(un)->inq_pid[9], "SUN", 3) != 0) ||
	    (bcmp(&SD_INQUIRY(un)->inq_serial, stp->sd_serial.value.c,
	    sizeof (SD_INQUIRY(un)->inq_serial)) != 0)) {
		stp->sd_softerrs.value.ui32 = 0;
		stp->sd_harderrs.value.ui32 = 0;
		stp->sd_transerrs.value.ui32 = 0;
		stp->sd_rq_media_err.value.ui32 = 0;
		stp->sd_rq_ntrdy_err.value.ui32 = 0;
		stp->sd_rq_nodev_err.value.ui32 = 0;
		stp->sd_rq_recov_err.value.ui32 = 0;
		stp->sd_rq_illrq_err.value.ui32 = 0;
		stp->sd_rq_pfa_err.value.ui32 = 0;
	}

	/*
	 * Set the "Serial No" kstat for Sun qualified drives (indicated by
	 * "SUN" in bytes 25-27 of the inquiry data (bytes 9-11 of the pid)
	 * (4376302))
	 */
	if (bcmp(&SD_INQUIRY(un)->inq_pid[9], "SUN", 3) == 0) {
		bcopy(&SD_INQUIRY(un)->inq_serial, stp->sd_serial.value.c,
		    sizeof (SD_INQUIRY(un)->inq_serial));
	}

	if (un->un_f_blockcount_is_valid != TRUE) {
		/*
		 * Set capacity error stat to 0 for no media. This ensures
		 * a valid capacity is displayed in response to 'iostat -E'
		 * when no media is present in the device.
		 */
		stp->sd_capacity.value.ui64 = 0;
	} else {
		/*
		 * Multiply un_blockcount by un->un_sys_blocksize to get
		 * capacity.
		 *
		 * Note: for non-512 blocksize devices "un_blockcount" has been
		 * "scaled" in sd_send_scsi_READ_CAPACITY by multiplying by
		 * (un_tgt_blocksize / un->un_sys_blocksize).
		 */
		stp->sd_capacity.value.ui64 = (uint64_t)
		    ((uint64_t)un->un_blockcount * un->un_sys_blocksize);
	}
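	/*
	 * For example, a disk with un_blockcount = 143374744 system blocks
	 * of 512 bytes reports Size as 73407868928 bytes (~73GB) here.
	 */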
}


/*
 * Function: sd_set_pstats
 *
 * Description: This routine instantiates and initializes the partition
 *	stats for each partition with more than zero blocks.
 *	(4363169)
 *
 * Arguments: un - driver soft state (unit) structure
 *
 * Context: Kernel thread context
 */

static void
sd_set_pstats(struct sd_lun *un)
{
	char	kstatname[KSTAT_STRLEN];
	int	instance;
	int	i;
	diskaddr_t	nblks = 0;
	char	*partname = NULL;

	ASSERT(un != NULL);

	instance = ddi_get_instance(SD_DEVINFO(un));

	/* Note:x86: is this a VTOC8/VTOC16 difference? */
	for (i = 0; i < NSDMAP; i++) {

		if (cmlb_partinfo(un->un_cmlbhandle, i,
		    &nblks, NULL, &partname, NULL, (void *)SD_PATH_DIRECT) != 0)
			continue;
		mutex_enter(SD_MUTEX(un));

		if ((un->un_pstats[i] == NULL) &&
		    (nblks != 0)) {

			(void) snprintf(kstatname, sizeof (kstatname),
			    "%s%d,%s", sd_label, instance,
			    partname);

			un->un_pstats[i] = kstat_create(sd_label,
			    instance, kstatname, "partition", KSTAT_TYPE_IO,
			    1, KSTAT_FLAG_PERSISTENT);
			if (un->un_pstats[i] != NULL) {
				un->un_pstats[i]->ks_lock = SD_MUTEX(un);
				kstat_install(un->un_pstats[i]);
			}
		}
		mutex_exit(SD_MUTEX(un));
	}
}


#if (defined(__fibre))
/*
 * Function: sd_init_event_callbacks
 *
 * Description: This routine initializes the insertion and removal event
 *	callbacks. (fibre only)
 *
 * Arguments: un - driver soft state (unit) structure
 *
 * Context: Kernel thread context
 */

static void
sd_init_event_callbacks(struct sd_lun *un)
{
	ASSERT(un != NULL);

	if ((un->un_insert_event == NULL) &&
	    (ddi_get_eventcookie(SD_DEVINFO(un), FCAL_INSERT_EVENT,
	    &un->un_insert_event) == DDI_SUCCESS)) {
		/*
		 * Add the callback for an insertion event.
		 */
		(void) ddi_add_event_handler(SD_DEVINFO(un),
		    un->un_insert_event, sd_event_callback, (void *)un,
		    &(un->un_insert_cb_id));
	}

	if ((un->un_remove_event == NULL) &&
	    (ddi_get_eventcookie(SD_DEVINFO(un), FCAL_REMOVE_EVENT,
	    &un->un_remove_event) == DDI_SUCCESS)) {
		/*
		 * Add the callback for a removal event.
		 */
		(void) ddi_add_event_handler(SD_DEVINFO(un),
		    un->un_remove_event, sd_event_callback, (void *)un,
		    &(un->un_remove_cb_id));
	}
}


/*
 * Function: sd_event_callback
 *
 * Description: This routine handles insert/remove events (photon). The
 *	state is changed to OFFLINE, which can be used to suppress
 *	error messages. (fibre only)
 *
 * Arguments: un - driver soft state (unit) structure
 *
 * Context: Callout thread context
 */
/* ARGSUSED */
static void
sd_event_callback(dev_info_t *dip, ddi_eventcookie_t event, void *arg,
    void *bus_impldata)
{
	struct sd_lun *un = (struct sd_lun *)arg;

	_NOTE(DATA_READABLE_WITHOUT_LOCK(sd_lun::un_insert_event));
	if (event == un->un_insert_event) {
		SD_TRACE(SD_LOG_COMMON, un, "sd_event_callback: insert event");
		mutex_enter(SD_MUTEX(un));
		if (un->un_state == SD_STATE_OFFLINE) {
			if (un->un_last_state != SD_STATE_SUSPENDED) {
				un->un_state = un->un_last_state;
			} else {
				/*
				 * We have gone through SUSPEND/RESUME while
				 * we were offline. Restore the last state.
				 */
				un->un_state = un->un_save_state;
			}
		}
		mutex_exit(SD_MUTEX(un));

		_NOTE(DATA_READABLE_WITHOUT_LOCK(sd_lun::un_remove_event));
	} else if (event == un->un_remove_event) {
		SD_TRACE(SD_LOG_COMMON, un, "sd_event_callback: remove event");
		mutex_enter(SD_MUTEX(un));
		/*
		 * We need to handle an event callback that occurs during
		 * the suspend operation, since we don't prevent it.
		 */
8842 		 */
8843 		if (un->un_state != SD_STATE_OFFLINE) {
8844 			if (un->un_state != SD_STATE_SUSPENDED) {
8845 				New_state(un, SD_STATE_OFFLINE);
8846 			} else {
8847 				un->un_last_state = SD_STATE_OFFLINE;
8848 			}
8849 		}
8850 		mutex_exit(SD_MUTEX(un));
8851 	} else {
8852 		scsi_log(SD_DEVINFO(un), sd_label, CE_NOTE,
8853 		    "!Unknown event\n");
8854 	}
8855 
8856 }
8857 #endif
8858 
8859 /*
8860  * Function: sd_cache_control()
8861  *
8862  * Description: This routine is the driver entry point for setting
8863  *		read and write caching by modifying the WCE (write cache
8864  *		enable) and RCD (read cache disable) bits of mode
8865  *		page 8 (MODEPAGE_CACHING).
8866  *
8867  * Arguments: ssc - ssc contains pointer to driver soft state (unit)
8868  *		      structure for this target.
8869  *		rcd_flag - flag for controlling the read cache
8870  *		wce_flag - flag for controlling the write cache
8871  *
8872  * Return Code: EIO
8873  *		code returned by sd_send_scsi_MODE_SENSE and
8874  *		sd_send_scsi_MODE_SELECT
8875  *
8876  * Context: Kernel Thread
8877  */
8878 
8879 static int
8880 sd_cache_control(sd_ssc_t *ssc, int rcd_flag, int wce_flag)
8881 {
8882 	struct mode_caching *mode_caching_page;
8883 	uchar_t *header;
8884 	size_t buflen;
8885 	int hdrlen;
8886 	int bd_len;
8887 	int rval = 0;
8888 	struct mode_header_grp2 *mhp;
8889 	struct sd_lun *un;
8890 	int status;
8891 
8892 	ASSERT(ssc != NULL);
8893 	un = ssc->ssc_un;
8894 	ASSERT(un != NULL);
8895 
8896 	/*
8897 	 * Do a test unit ready, otherwise a mode sense may not work if this
8898 	 * is the first command sent to the device after boot.
8899 	 */
8900 	status = sd_send_scsi_TEST_UNIT_READY(ssc, 0);
8901 	if (status != 0)
8902 		sd_ssc_assessment(ssc, SD_FMT_IGNORE);
8903 
8904 	if (un->un_f_cfg_is_atapi == TRUE) {
8905 		hdrlen = MODE_HEADER_LENGTH_GRP2;
8906 	} else {
8907 		hdrlen = MODE_HEADER_LENGTH;
8908 	}
8909 
8910 	/*
8911 	 * Allocate memory for the retrieved mode page and its headers. Set
8912 	 * a pointer to the page itself. Use mode_cache_scsi3 to ensure
8913 	 * we get all of the mode sense data; otherwise, the mode select
8914 	 * will fail. mode_cache_scsi3 is a superset of mode_caching.
8915 	 */
8916 	buflen = hdrlen + MODE_BLK_DESC_LENGTH +
8917 	    sizeof (struct mode_cache_scsi3);
8918 
8919 	header = kmem_zalloc(buflen, KM_SLEEP);
8920 
8921 	/* Get the information from the device. */
8922 	if (un->un_f_cfg_is_atapi == TRUE) {
8923 		rval = sd_send_scsi_MODE_SENSE(ssc, CDB_GROUP1, header, buflen,
8924 		    MODEPAGE_CACHING, SD_PATH_DIRECT);
8925 	} else {
8926 		rval = sd_send_scsi_MODE_SENSE(ssc, CDB_GROUP0, header, buflen,
8927 		    MODEPAGE_CACHING, SD_PATH_DIRECT);
8928 	}
8929 
8930 	if (rval != 0) {
8931 		SD_ERROR(SD_LOG_IOCTL_RMMEDIA, un,
8932 		    "sd_cache_control: Mode Sense Failed\n");
8933 		goto mode_sense_failed;
8934 	}
8935 
8936 	/*
8937 	 * Determine size of Block Descriptors in order to locate
8938 	 * the mode page data. ATAPI devices return 0, SCSI devices
8939 	 * should return MODE_BLK_DESC_LENGTH.
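	 *
	 * As an illustrative (hypothetical) case of the computation done
	 * just below: a GRP2 header whose bdesc_length_hi is 0 and whose
	 * bdesc_length_lo is 8 yields bd_len = (0 << 8) | 8 = 8.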
8940 	 */
8941 	if (un->un_f_cfg_is_atapi == TRUE) {
8942 		mhp = (struct mode_header_grp2 *)header;
8943 		bd_len = (mhp->bdesc_length_hi << 8) | mhp->bdesc_length_lo;
8944 	} else {
8945 		bd_len = ((struct mode_header *)header)->bdesc_length;
8946 	}
8947 
8948 	if (bd_len > MODE_BLK_DESC_LENGTH) {
8949 		scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
8950 		    "sd_cache_control: Mode Sense returned invalid "
8951 		    "block descriptor length\n");
8952 		sd_ssc_set_info(ssc, SSC_FLAGS_INVALID_DATA,
8953 		    "sd_cache_control: Mode Sense returned invalid "
8954 		    "block descriptor length");
8955 		rval = EIO;
8956 		goto mode_sense_failed;
8957 	}
8958 
8959 	mode_caching_page = (struct mode_caching *)(header + hdrlen + bd_len);
8960 	if (mode_caching_page->mode_page.code != MODEPAGE_CACHING) {
8961 		SD_ERROR(SD_LOG_COMMON, un, "sd_cache_control: Mode Sense"
8962 		    " caching page code mismatch %d\n",
8963 		    mode_caching_page->mode_page.code);
8964 		sd_ssc_set_info(ssc, SSC_FLAGS_INVALID_DATA,
8965 		    "sd_cache_control: Mode Sense caching page code "
8966 		    "mismatch %d", mode_caching_page->mode_page.code);
8967 		rval = EIO;
8968 		goto mode_sense_failed;
8969 	}
8970 
8971 	/* Check the relevant bits on successful mode sense. */
8972 	if ((mode_caching_page->rcd && rcd_flag == SD_CACHE_ENABLE) ||
8973 	    (!mode_caching_page->rcd && rcd_flag == SD_CACHE_DISABLE) ||
8974 	    (mode_caching_page->wce && wce_flag == SD_CACHE_DISABLE) ||
8975 	    (!mode_caching_page->wce && wce_flag == SD_CACHE_ENABLE)) {
8976 
8977 		size_t sbuflen;
8978 		uchar_t save_pg;
8979 
8980 		/*
8981 		 * Construct select buffer length based on the
8982 		 * length of the sense data returned.
8983 		 */
8984 		sbuflen = hdrlen + MODE_BLK_DESC_LENGTH +
8985 		    sizeof (struct mode_page) +
8986 		    (int)mode_caching_page->mode_page.length;
8987 
8988 		/*
8989 		 * Set the caching bits as requested.
8990 		 */
8991 		if (rcd_flag == SD_CACHE_ENABLE)
8992 			mode_caching_page->rcd = 0;
8993 		else if (rcd_flag == SD_CACHE_DISABLE)
8994 			mode_caching_page->rcd = 1;
8995 
8996 		if (wce_flag == SD_CACHE_ENABLE)
8997 			mode_caching_page->wce = 1;
8998 		else if (wce_flag == SD_CACHE_DISABLE)
8999 			mode_caching_page->wce = 0;
9000 
9001 		/*
9002 		 * Save the page if the mode sense says the
9003 		 * drive supports it.
9004 		 */
9005 		save_pg = mode_caching_page->mode_page.ps ?
9006 		    SD_SAVE_PAGE : SD_DONTSAVE_PAGE;
9007 
9008 		/* Clear reserved bits before mode select. */
9009 		mode_caching_page->mode_page.ps = 0;
9010 
9011 		/*
9012 		 * Clear out mode header for mode select.
9013 		 * The rest of the retrieved page will be reused.
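		 *
		 * Illustrative layout of the select buffer at this point
		 * (sizes come from the structures above; nothing new is
		 * assumed):
		 *	header[0 .. hdrlen-1]		zeroed header, with
		 *					bdesc_length restored below
		 *	header[hdrlen .. +bd_len-1]	block descriptor
		 *	header[hdrlen+bd_len ..]	caching page carrying the
		 *					updated rcd/wce bits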
9014 		 */
9015 		bzero(header, hdrlen);
9016 
9017 		if (un->un_f_cfg_is_atapi == TRUE) {
9018 			mhp = (struct mode_header_grp2 *)header;
9019 			mhp->bdesc_length_hi = bd_len >> 8;
9020 			mhp->bdesc_length_lo = (uchar_t)bd_len & 0xff;
9021 		} else {
9022 			((struct mode_header *)header)->bdesc_length = bd_len;
9023 		}
9024 
9025 		sd_ssc_assessment(ssc, SD_FMT_IGNORE);
9026 
9027 		/* Issue mode select to change the cache settings */
9028 		if (un->un_f_cfg_is_atapi == TRUE) {
9029 			rval = sd_send_scsi_MODE_SELECT(ssc, CDB_GROUP1, header,
9030 			    sbuflen, save_pg, SD_PATH_DIRECT);
9031 		} else {
9032 			rval = sd_send_scsi_MODE_SELECT(ssc, CDB_GROUP0, header,
9033 			    sbuflen, save_pg, SD_PATH_DIRECT);
9034 		}
9035 
9036 	}
9037 
9038 
9039 mode_sense_failed:
9040 
9041 	kmem_free(header, buflen);
9042 
9043 	if (rval != 0) {
9044 		if (rval == EIO)
9045 			sd_ssc_assessment(ssc, SD_FMT_STATUS_CHECK);
9046 		else
9047 			sd_ssc_assessment(ssc, SD_FMT_IGNORE);
9048 	}
9049 	return (rval);
9050 }
9051 
9052 
9053 /*
9054  * Function: sd_get_write_cache_enabled()
9055  *
9056  * Description: This routine is the driver entry point for determining if
9057  *		write caching is enabled. It examines the WCE (write cache
9058  *		enable) bits of mode page 8 (MODEPAGE_CACHING).
9059  *
9060  * Arguments: ssc - ssc contains pointer to driver soft state (unit)
9061  *		      structure for this target.
9062  *		is_enabled - pointer to int where write cache enabled state
9063  *		      is returned (non-zero -> write cache enabled)
9064  *
9065  *
9066  * Return Code: EIO
9067  *		code returned by sd_send_scsi_MODE_SENSE
9068  *
9069  * Context: Kernel Thread
9070  *
9071  * NOTE: If an ioctl is added to disable write cache, this sequence should
9072  *	be followed so that no locking is required for accesses to
9073  *	un->un_f_write_cache_enabled:
9074  *	do mode select to clear wce
9075  *	do synchronize cache to flush cache
9076  *	set un->un_f_write_cache_enabled = FALSE
9077  *
9078  *	Conversely, an ioctl to enable the write cache should be done
9079  *	in this order:
9080  *	set un->un_f_write_cache_enabled = TRUE
9081  *	do mode select to set wce
9082  */
9083 
9084 static int
9085 sd_get_write_cache_enabled(sd_ssc_t *ssc, int *is_enabled)
9086 {
9087 	struct mode_caching *mode_caching_page;
9088 	uchar_t *header;
9089 	size_t buflen;
9090 	int hdrlen;
9091 	int bd_len;
9092 	int rval = 0;
9093 	struct sd_lun *un;
9094 	int status;
9095 
9096 	ASSERT(ssc != NULL);
9097 	un = ssc->ssc_un;
9098 	ASSERT(un != NULL);
9099 	ASSERT(is_enabled != NULL);
9100 
9101 	/* in case of error, flag as enabled */
9102 	*is_enabled = TRUE;
9103 
9104 	/*
9105 	 * Do a test unit ready, otherwise a mode sense may not work if this
9106 	 * is the first command sent to the device after boot.
9107 	 */
9108 	status = sd_send_scsi_TEST_UNIT_READY(ssc, 0);
9109 
9110 	if (status != 0)
9111 		sd_ssc_assessment(ssc, SD_FMT_IGNORE);
9112 
9113 	if (un->un_f_cfg_is_atapi == TRUE) {
9114 		hdrlen = MODE_HEADER_LENGTH_GRP2;
9115 	} else {
9116 		hdrlen = MODE_HEADER_LENGTH;
9117 	}
9118 
9119 	/*
9120 	 * Allocate memory for the retrieved mode page and its headers. Set
9121 	 * a pointer to the page itself.
9122 	 */
9123 	buflen = hdrlen + MODE_BLK_DESC_LENGTH + sizeof (struct mode_caching);
9124 	header = kmem_zalloc(buflen, KM_SLEEP);
9125 
9126 	/* Get the information from the device. */
9127 	if (un->un_f_cfg_is_atapi == TRUE) {
9128 		rval = sd_send_scsi_MODE_SENSE(ssc, CDB_GROUP1, header, buflen,
9129 		    MODEPAGE_CACHING, SD_PATH_DIRECT);
9130 	} else {
9131 		rval = sd_send_scsi_MODE_SENSE(ssc, CDB_GROUP0, header, buflen,
9132 		    MODEPAGE_CACHING, SD_PATH_DIRECT);
9133 	}
9134 
9135 	if (rval != 0) {
9136 		SD_ERROR(SD_LOG_IOCTL_RMMEDIA, un,
9137 		    "sd_get_write_cache_enabled: Mode Sense Failed\n");
9138 		goto mode_sense_failed;
9139 	}
9140 
9141 	/*
9142 	 * Determine size of Block Descriptors in order to locate
9143 	 * the mode page data. ATAPI devices return 0, SCSI devices
9144 	 * should return MODE_BLK_DESC_LENGTH.
9145 	 */
9146 	if (un->un_f_cfg_is_atapi == TRUE) {
9147 		struct mode_header_grp2 *mhp;
9148 		mhp = (struct mode_header_grp2 *)header;
9149 		bd_len = (mhp->bdesc_length_hi << 8) | mhp->bdesc_length_lo;
9150 	} else {
9151 		bd_len = ((struct mode_header *)header)->bdesc_length;
9152 	}
9153 
9154 	if (bd_len > MODE_BLK_DESC_LENGTH) {
9155 		scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
9156 		    "sd_get_write_cache_enabled: Mode Sense returned invalid "
9157 		    "block descriptor length\n");
9158 		/* FMA should make upset complain here */
9159 		sd_ssc_set_info(ssc, SSC_FLAGS_INVALID_DATA,
9160 		    "sd_get_write_cache_enabled: Mode Sense returned invalid "
9161 		    "block descriptor length %d", bd_len);
9162 		rval = EIO;
9163 		goto mode_sense_failed;
9164 	}
9165 
9166 	mode_caching_page = (struct mode_caching *)(header + hdrlen + bd_len);
9167 	if (mode_caching_page->mode_page.code != MODEPAGE_CACHING) {
9168 		SD_ERROR(SD_LOG_COMMON, un,
9169 		    "sd_get_write_cache_enabled: Mode Sense caching page "
9170 		    "code mismatch %d\n", mode_caching_page->mode_page.code);
9171 		/* FMA could make upset complain here */
9172 		sd_ssc_set_info(ssc, SSC_FLAGS_INVALID_DATA,
9173 		    "sd_get_write_cache_enabled: Mode Sense caching page "
9174 		    "code mismatch %d", mode_caching_page->mode_page.code);
9175 		rval = EIO;
9176 		goto mode_sense_failed;
9177 	}
9178 	*is_enabled = mode_caching_page->wce;
9179 
9180 mode_sense_failed:
9181 	if (rval == 0) {
9182 		sd_ssc_assessment(ssc, SD_FMT_STANDARD);
9183 	} else if (rval == EIO) {
9184 		/*
9185 		 * Some disks do not support mode sense(6); we should
9186 		 * ignore this kind of error (sense key is 0x5 -
9187 		 * illegal request).
9188 		 */
9189 		uint8_t *sensep;
9190 		int senlen;
9191 
9192 		sensep = (uint8_t *)ssc->ssc_uscsi_cmd->uscsi_rqbuf;
9193 		senlen = (int)(ssc->ssc_uscsi_cmd->uscsi_rqlen -
9194 		    ssc->ssc_uscsi_cmd->uscsi_rqresid);
9195 
9196 		if (senlen > 0 &&
9197 		    scsi_sense_key(sensep) == KEY_ILLEGAL_REQUEST) {
9198 			sd_ssc_assessment(ssc, SD_FMT_IGNORE_COMPROMISE);
9199 		} else {
9200 			sd_ssc_assessment(ssc, SD_FMT_STATUS_CHECK);
9201 		}
9202 	} else {
9203 		sd_ssc_assessment(ssc, SD_FMT_IGNORE);
9204 	}
9205 	kmem_free(header, buflen);
9206 	return (rval);
9207 }
9208 
9209 /*
9210  * Function: sd_get_nv_sup()
9211  *
9212  * Description: This routine is the driver entry point for
9213  *	determining whether non-volatile cache is supported. This
9214  *	determination process works as follows:
9215  *
9216  *	1. sd first queries sd.conf on whether
9217  *	suppress_cache_flush bit is set for this device.
9218  *
9219  *	2. if not there, then queries the internal disk table.
9220  *
9221  *	3. if either sd.conf or internal disk table specifies
9222  *	cache flush be suppressed, we don't bother checking
9223  *	NV_SUP bit.
9224  *
9225  *	If SUPPRESS_CACHE_FLUSH bit is not set to 1, sd queries
9226  *	the optional INQUIRY VPD page 0x86. If the device
9227  *	supports VPD page 0x86, sd examines the NV_SUP
9228  *	(non-volatile cache support) bit in the INQUIRY VPD page
9229  *	0x86:
9230  *	o If NV_SUP bit is set, sd assumes the device has a
9231  *	  non-volatile cache and sets un_f_sync_nv_supported
9232  *	  to TRUE.
9233  *	o Otherwise the cache is not non-volatile, and
9234  *	  un_f_sync_nv_supported is set to FALSE.
9235  *
9236  * Arguments: ssc - ssc contains pointer to driver soft state (unit)
9237  *	structure for this target.
9238  * Return Code: none
9239  *
9240  * Context: Kernel Thread
9241  */
9242 
9243 static void
9244 sd_get_nv_sup(sd_ssc_t *ssc)
9245 {
9246 	int rval = 0;
9247 	uchar_t *inq86 = NULL;
9248 	size_t inq86_len = MAX_INQUIRY_SIZE;
9249 	size_t inq86_resid = 0;
9250 	struct dk_callback *dkc;
9251 	struct sd_lun *un;
9252 
9253 	ASSERT(ssc != NULL);
9254 	un = ssc->ssc_un;
9255 	ASSERT(un != NULL);
9256 
9257 	mutex_enter(SD_MUTEX(un));
9258 
9259 	/*
9260 	 * Be conservative on the device's support of
9261 	 * SYNC_NV bit: un_f_sync_nv_supported is
9262 	 * initialized to be false.
9263 	 */
9264 	un->un_f_sync_nv_supported = FALSE;
9265 
9266 	/*
9267 	 * If either sd.conf or internal disk table
9268 	 * specifies cache flush be suppressed, then
9269 	 * we don't bother checking NV_SUP bit.
9270 	 */
9271 	if (un->un_f_suppress_cache_flush == TRUE) {
9272 		mutex_exit(SD_MUTEX(un));
9273 		return;
9274 	}
9275 
9276 	if (sd_check_vpd_page_support(ssc) == 0 &&
9277 	    un->un_vpd_page_mask & SD_VPD_EXTENDED_DATA_PG) {
9278 		mutex_exit(SD_MUTEX(un));
9279 		/* collect page 86 data if available */
9280 		inq86 = kmem_zalloc(inq86_len, KM_SLEEP);
9281 
9282 		rval = sd_send_scsi_INQUIRY(ssc, inq86, inq86_len,
9283 		    0x01, 0x86, &inq86_resid);
9284 
9285 		if (rval == 0 && (inq86_len - inq86_resid > 6)) {
9286 			SD_TRACE(SD_LOG_COMMON, un,
9287 			    "sd_get_nv_sup: \
9288 successfully get VPD page: %x \
9289 PAGE LENGTH: %x BYTE 6: %x\n",
9290 			    inq86[1], inq86[3], inq86[6]);
9291 
9292 			mutex_enter(SD_MUTEX(un));
9293 			/*
9294 			 * Check the value of NV_SUP bit: only if the device
9295 			 * reports NV_SUP bit to be 1 will the
9296 			 * un_f_sync_nv_supported bit be set to true.
9297 			 */
9298 			if (inq86[6] & SD_VPD_NV_SUP) {
9299 				un->un_f_sync_nv_supported = TRUE;
9300 			}
9301 			mutex_exit(SD_MUTEX(un));
9302 		} else if (rval != 0) {
9303 			sd_ssc_assessment(ssc, SD_FMT_IGNORE);
9304 		}
9305 
9306 		kmem_free(inq86, inq86_len);
9307 	} else {
9308 		mutex_exit(SD_MUTEX(un));
9309 	}
9310 
9311 	/*
9312 	 * Send a SYNC CACHE command to check whether the
9313 	 * SYNC_NV bit is supported; un_f_sync_nv_supported
9314 	 * must hold its correct value by this point.
9315 	 */
9316 	mutex_enter(SD_MUTEX(un));
9317 	if (un->un_f_sync_nv_supported) {
9318 		mutex_exit(SD_MUTEX(un));
9319 		dkc = kmem_zalloc(sizeof (struct dk_callback), KM_SLEEP);
9320 		dkc->dkc_flag = FLUSH_VOLATILE;
9321 		(void) sd_send_scsi_SYNCHRONIZE_CACHE(un, dkc);
9322 
9323 		/*
9324 		 * Send a TEST UNIT READY command to the device. This should
9325 		 * clear any outstanding UNIT ATTENTION that may be present.
9326 		 */
9327 		rval = sd_send_scsi_TEST_UNIT_READY(ssc, SD_DONT_RETRY_TUR);
9328 		if (rval != 0)
9329 			sd_ssc_assessment(ssc, SD_FMT_IGNORE);
9330 
9331 		kmem_free(dkc, sizeof (struct dk_callback));
9332 	} else {
9333 		mutex_exit(SD_MUTEX(un));
9334 	}
9335 
9336 	SD_TRACE(SD_LOG_COMMON, un, "sd_get_nv_sup: \
9337 un_f_suppress_cache_flush is set to %d\n",
9338 	    un->un_f_suppress_cache_flush);
9339 }
9340 
9341 /*
9342  * Function: sd_make_device
9343  *
9344  * Description: Utility routine to return the Solaris device number from
9345  *		the data in the device's dev_info structure.
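 *
 *		For example (illustrative values only): for instance 3 of
 *		this driver, the returned dev_t carries minor number
 *		(3 << SDUNIT_SHIFT), i.e. it names partition 0 of
 *		instance 3.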
9346  *
9347  * Return Code: The Solaris device number
9348  *
9349  * Context: Any
9350  */
9351 
9352 static dev_t
9353 sd_make_device(dev_info_t *devi)
9354 {
9355 	return (makedevice(ddi_name_to_major(ddi_get_name(devi)),
9356 	    ddi_get_instance(devi) << SDUNIT_SHIFT));
9357 }
9358 
9359 
9360 /*
9361  * Function: sd_pm_entry
9362  *
9363  * Description: Called at the start of a new command to manage power
9364  *		and busy status of a device. This includes determining whether
9365  *		the current power state of the device is sufficient for
9366  *		performing the command or whether it must be changed.
9367  *		The PM framework is notified appropriately.
9368  *		Only with a return status of DDI_SUCCESS will the
9369  *		component be marked busy to the framework.
9370  *
9371  *		All callers of sd_pm_entry must check the return status
9372  *		and only call sd_pm_exit if it was DDI_SUCCESS. A status
9373  *		of DDI_FAILURE indicates the device failed to power up.
9374  *		In this case un_pm_count has been adjusted so the result
9375  *		on exit is still powered down, ie. count is less than 0.
9376  *		Calling sd_pm_exit with this count value hits an ASSERT.
9377  *
9378  * Return Code: DDI_SUCCESS or DDI_FAILURE
9379  *
9380  * Context: Kernel thread context.
9381  */
9382 
9383 static int
9384 sd_pm_entry(struct sd_lun *un)
9385 {
9386 	int return_status = DDI_SUCCESS;
9387 
9388 	ASSERT(!mutex_owned(SD_MUTEX(un)));
9389 	ASSERT(!mutex_owned(&un->un_pm_mutex));
9390 
9391 	SD_TRACE(SD_LOG_IO_PM, un, "sd_pm_entry: entry\n");
9392 
9393 	if (un->un_f_pm_is_enabled == FALSE) {
9394 		SD_TRACE(SD_LOG_IO_PM, un,
9395 		    "sd_pm_entry: exiting, PM not enabled\n");
9396 		return (return_status);
9397 	}
9398 
9399 	/*
9400 	 * Just increment a counter if PM is enabled. On the transition from
9401 	 * 0 ==> 1, mark the device as busy. The iodone side will decrement
9402 	 * the count with each IO and mark the device as idle when the count
9403 	 * hits 0.
9404 	 *
9405 	 * If the count is less than 0 the device is powered down. If a powered
9406 	 * down device is successfully powered up then the count must be
9407 	 * incremented to reflect the power up. Note that it'll get incremented
9408 	 * a second time to become busy.
9409 	 *
9410 	 * Because the following has the potential to change the device state
9411 	 * and must release the un_pm_mutex to do so, only one thread can be
9412 	 * allowed through at a time.
9413 	 */
9414 
9415 	mutex_enter(&un->un_pm_mutex);
9416 	while (un->un_pm_busy == TRUE) {
9417 		cv_wait(&un->un_pm_busy_cv, &un->un_pm_mutex);
9418 	}
9419 	un->un_pm_busy = TRUE;
9420 
9421 	if (un->un_pm_count < 1) {
9422 
9423 		SD_TRACE(SD_LOG_IO_PM, un, "sd_pm_entry: busy component\n");
9424 
9425 		/*
9426 		 * Indicate we are now busy so the framework won't attempt to
9427 		 * power down the device. This call will only fail if either
9428 		 * we passed a bad component number or the device has no
9429 		 * components. Neither of these should ever happen.
9430 		 */
9431 		mutex_exit(&un->un_pm_mutex);
9432 		return_status = pm_busy_component(SD_DEVINFO(un), 0);
9433 		ASSERT(return_status == DDI_SUCCESS);
9434 
9435 		mutex_enter(&un->un_pm_mutex);
9436 
9437 		if (un->un_pm_count < 0) {
9438 			mutex_exit(&un->un_pm_mutex);
9439 
9440 			SD_TRACE(SD_LOG_IO_PM, un,
9441 			    "sd_pm_entry: power up component\n");
9442 
9443 			/*
9444 			 * pm_raise_power will cause sdpower to be called
9445 			 * which brings the device power level to the
9446 			 * desired state, ON in this case. If successful,
9447 			 * un_pm_count and un_power_level will be updated
9448 			 * appropriately.
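			 *
			 * As an illustrative walk-through of the counting
			 * scheme described above (no new behavior): a
			 * powered-down idle device sits at
			 * un_pm_count == -1; a successful power up brings
			 * the count to 0 (see the ASSERT below), and the
			 * final increment on the way out of sd_pm_entry
			 * leaves it at 1, i.e. busy with one command
			 * outstanding.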
9449 			 */
9450 			return_status = pm_raise_power(SD_DEVINFO(un), 0,
9451 			    SD_SPINDLE_ON);
9452 
9453 			mutex_enter(&un->un_pm_mutex);
9454 
9455 			if (return_status != DDI_SUCCESS) {
9456 				/*
9457 				 * Power up failed.
9458 				 * Idle the device and adjust the count
9459 				 * so the result on exit is that we're
9460 				 * still powered down, ie. count is less than 0.
9461 				 */
9462 				SD_TRACE(SD_LOG_IO_PM, un,
9463 				    "sd_pm_entry: power up failed,"
9464 				    " idle the component\n");
9465 
9466 				(void) pm_idle_component(SD_DEVINFO(un), 0);
9467 				un->un_pm_count--;
9468 			} else {
9469 				/*
9470 				 * Device is powered up, verify the
9471 				 * count is non-negative.
9472 				 * This is debug only.
9473 				 */
9474 				ASSERT(un->un_pm_count == 0);
9475 			}
9476 		}
9477 
9478 		if (return_status == DDI_SUCCESS) {
9479 			/*
9480 			 * For performance, now that the device has been tagged
9481 			 * as busy, and it's known to be powered up, update the
9482 			 * chain types to use jump tables that do not include
9483 			 * pm. This significantly lowers the overhead and
9484 			 * therefore improves performance.
9485 			 */
9486 
9487 			mutex_exit(&un->un_pm_mutex);
9488 			mutex_enter(SD_MUTEX(un));
9489 			SD_TRACE(SD_LOG_IO_PM, un,
9490 			    "sd_pm_entry: changing uscsi_chain_type from %d\n",
9491 			    un->un_uscsi_chain_type);
9492 
9493 			if (un->un_f_non_devbsize_supported) {
9494 				un->un_buf_chain_type =
9495 				    SD_CHAIN_INFO_RMMEDIA_NO_PM;
9496 			} else {
9497 				un->un_buf_chain_type =
9498 				    SD_CHAIN_INFO_DISK_NO_PM;
9499 			}
9500 			un->un_uscsi_chain_type = SD_CHAIN_INFO_USCSI_CMD_NO_PM;
9501 
9502 			SD_TRACE(SD_LOG_IO_PM, un,
9503 			    " changed uscsi_chain_type to %d\n",
9504 			    un->un_uscsi_chain_type);
9505 			mutex_exit(SD_MUTEX(un));
9506 			mutex_enter(&un->un_pm_mutex);
9507 
9508 			if (un->un_pm_idle_timeid == NULL) {
9509 				/* 300 ms. */
9510 				un->un_pm_idle_timeid =
9511 				    timeout(sd_pm_idletimeout_handler, un,
9512 				    (drv_usectohz((clock_t)300000)));
9513 				/*
9514 				 * Include an extra call to busy which keeps the
9515 				 * device busy with-respect-to the PM layer
9516 				 * until the timer fires, at which time it'll
9517 				 * get the extra idle call.
9518 				 */
9519 				(void) pm_busy_component(SD_DEVINFO(un), 0);
9520 			}
9521 		}
9522 	}
9523 	un->un_pm_busy = FALSE;
9524 	/* Next... */
9525 	cv_signal(&un->un_pm_busy_cv);
9526 
9527 	un->un_pm_count++;
9528 
9529 	SD_TRACE(SD_LOG_IO_PM, un,
9530 	    "sd_pm_entry: exiting, un_pm_count = %d\n", un->un_pm_count);
9531 
9532 	mutex_exit(&un->un_pm_mutex);
9533 
9534 	return (return_status);
9535 }
9536 
9537 
9538 /*
9539  * Function: sd_pm_exit
9540  *
9541  * Description: Called at the completion of a command to manage busy
9542  *		status for the device. If the device becomes idle the
9543  *		PM framework is notified.
9544  *
9545  * Context: Kernel thread context
9546  */
9547 
9548 static void
9549 sd_pm_exit(struct sd_lun *un)
9550 {
9551 	ASSERT(!mutex_owned(SD_MUTEX(un)));
9552 	ASSERT(!mutex_owned(&un->un_pm_mutex));
9553 
9554 	SD_TRACE(SD_LOG_IO_PM, un, "sd_pm_exit: entry\n");
9555 
9556 	/*
9557 	 * After attach the following flag is only read, so don't
9558 	 * take the penalty of acquiring a mutex for it.
9559 	 */
9560 	if (un->un_f_pm_is_enabled == TRUE) {
9561 
9562 		mutex_enter(&un->un_pm_mutex);
9563 		un->un_pm_count--;
9564 
9565 		SD_TRACE(SD_LOG_IO_PM, un,
9566 		    "sd_pm_exit: un_pm_count = %d\n", un->un_pm_count);
9567 
9568 		ASSERT(un->un_pm_count >= 0);
9569 		if (un->un_pm_count == 0) {
9570 			mutex_exit(&un->un_pm_mutex);
9571 
9572 			SD_TRACE(SD_LOG_IO_PM, un,
9573 			    "sd_pm_exit: idle component\n");
9574 
9575 			(void) pm_idle_component(SD_DEVINFO(un), 0);
9576 
9577 		} else {
9578 			mutex_exit(&un->un_pm_mutex);
9579 		}
9580 	}
9581 
9582 	SD_TRACE(SD_LOG_IO_PM, un, "sd_pm_exit: exiting\n");
9583 }
9584 
9585 
9586 /*
9587  * Function: sdopen
9588  *
9589  * Description: Driver's open(9e) entry point function.
9590  *
9591  * Arguments: dev_p - pointer to device number
9592  *		flag - how to open file (FEXCL, FNDELAY, FREAD, FWRITE)
9593  *		otyp - open type (OTYP_BLK, OTYP_CHR, OTYP_LYR)
9594  *		cred_p - user credential pointer
9595  *
9596  * Return Code: EINVAL
9597  *		ENXIO
9598  *		EIO
9599  *		EROFS
9600  *		EBUSY
9601  *
9602  * Context: Kernel thread context
9603  */
9604 /* ARGSUSED */
9605 static int
9606 sdopen(dev_t *dev_p, int flag, int otyp, cred_t *cred_p)
9607 {
9608 	struct sd_lun *un;
9609 	int nodelay;
9610 	int part;
9611 	uint64_t partmask;
9612 	int instance;
9613 	dev_t dev;
9614 	int rval = EIO;
9615 	diskaddr_t nblks = 0;
9616 	diskaddr_t label_cap;
9617 
9618 	/* Validate the open type */
9619 	if (otyp >= OTYPCNT) {
9620 		return (EINVAL);
9621 	}
9622 
9623 	dev = *dev_p;
9624 	instance = SDUNIT(dev);
9625 	mutex_enter(&sd_detach_mutex);
9626 
9627 	/*
9628 	 * Fail the open if there is no softstate for the instance, or
9629 	 * if another thread somewhere is trying to detach the instance.
9630 	 */
9631 	if (((un = ddi_get_soft_state(sd_state, instance)) == NULL) ||
9632 	    (un->un_detach_count != 0)) {
9633 		mutex_exit(&sd_detach_mutex);
9634 		/*
9635 		 * The probe cache only needs to be cleared when open (9e) fails
9636 		 * with ENXIO (4238046).
9637 		 */
9638 		/*
9639 		 * Unconditionally clearing the probe cache is ok with
9640 		 * separate sd/ssd binaries; on the x86 platform it can
9641 		 * be an issue with both parallel and fibre in one
9642 		 * binary.
9643 		 */
9644 		sd_scsi_clear_probe_cache();
9645 		return (ENXIO);
9646 	}
9647 
9648 	/*
9649 	 * The un_layer_count is to prevent another thread in specfs from
9650 	 * trying to detach the instance, which can happen when we are
9651 	 * called from a higher-layer driver instead of thru specfs.
9652 	 * This will not be needed when DDI provides a layered driver
9653 	 * interface that allows specfs to know that an instance is in
9654 	 * use by a layered driver & should not be detached.
9655 	 *
9656 	 * Note: the semantics for layered driver opens are exactly one
9657 	 * close for every open.
9658 	 */
9659 	if (otyp == OTYP_LYR) {
9660 		un->un_layer_count++;
9661 	}
9662 
9663 	/*
9664 	 * Keep a count of the current # of opens in progress. This is because
9665 	 * some layered drivers try to call us as a regular open. This can
9666 	 * cause problems that we cannot prevent; however, by keeping this
9667 	 * count we can at least keep our open and detach routines from
9668 	 * racing against each other under such conditions.
9669 	 */
9670 	un->un_opens_in_progress++;
9671 	mutex_exit(&sd_detach_mutex);
9672 
9673 	nodelay = (flag & (FNDELAY | FNONBLOCK));
9674 	part = SDPART(dev);
9675 	partmask = 1 << part;
9676 
9677 	/*
9678 	 * We use a semaphore here in order to serialize
9679 	 * open and close requests on the device.
9680 	 */
9681 	sema_p(&un->un_semoclose);
9682 
9683 	mutex_enter(SD_MUTEX(un));
9684 
9685 	/*
9686 	 * All device accesses go thru sdstrategy() where we check
9687 	 * on suspend status but there could be a scsi_poll command,
9688 	 * which bypasses sdstrategy(), so we need to check pm
9689 	 * status.
9690 	 */
9691 
9692 	if (!nodelay) {
9693 		while ((un->un_state == SD_STATE_SUSPENDED) ||
9694 		    (un->un_state == SD_STATE_PM_CHANGING)) {
9695 			cv_wait(&un->un_suspend_cv, SD_MUTEX(un));
9696 		}
9697 
9698 		mutex_exit(SD_MUTEX(un));
9699 		if (sd_pm_entry(un) != DDI_SUCCESS) {
9700 			rval = EIO;
9701 			SD_ERROR(SD_LOG_OPEN_CLOSE, un,
9702 			    "sdopen: sd_pm_entry failed\n");
9703 			goto open_failed_with_pm;
9704 		}
9705 		mutex_enter(SD_MUTEX(un));
9706 	}
9707 
9708 	/* check for previous exclusive open */
9709 	SD_TRACE(SD_LOG_OPEN_CLOSE, un, "sdopen: un=%p\n", (void *)un);
9710 	SD_TRACE(SD_LOG_OPEN_CLOSE, un,
9711 	    "sdopen: exclopen=%x, flag=%x, regopen=%x\n",
9712 	    un->un_exclopen, flag, un->un_ocmap.regopen[otyp]);
9713 
9714 	if (un->un_exclopen & (partmask)) {
9715 		goto excl_open_fail;
9716 	}
9717 
9718 	if (flag & FEXCL) {
9719 		int i;
9720 		if (un->un_ocmap.lyropen[part]) {
9721 			goto excl_open_fail;
9722 		}
9723 		for (i = 0; i < (OTYPCNT - 1); i++) {
9724 			if (un->un_ocmap.regopen[i] & (partmask)) {
9725 				goto excl_open_fail;
9726 			}
9727 		}
9728 	}
9729 
9730 	/*
9731 	 * Check the write permission if this is a removable media device,
9732 	 * NDELAY has not been set, and writable permission is requested.
9733 	 *
9734 	 * Note: If NDELAY was set and this is write-protected media the WRITE
9735 	 * attempt will fail with EIO as part of the I/O processing. This is a
9736 	 * more permissive implementation that allows the open to succeed and
9737 	 * WRITE attempts to fail when appropriate.
9738 	 */
9739 	if (un->un_f_chk_wp_open) {
9740 		if ((flag & FWRITE) && (!nodelay)) {
9741 			mutex_exit(SD_MUTEX(un));
9742 			/*
9743 			 * Defer the check for write permission on writable
9744 			 * DVD drives until sdstrategy; do not fail the open
9745 			 * even if FWRITE is set, since such a device can be
9746 			 * writable depending upon the media, and the media
9747 			 * can change after the call to open().
9748 			 */
9749 			if (un->un_f_dvdram_writable_device == FALSE) {
9750 				if (ISCD(un) || sr_check_wp(dev)) {
9751 					rval = EROFS;
9752 					mutex_enter(SD_MUTEX(un));
9753 					SD_ERROR(SD_LOG_OPEN_CLOSE, un, "sdopen: "
9754 					    "write to cd or write protected media\n");
9755 					goto open_fail;
9756 				}
9757 			}
9758 			mutex_enter(SD_MUTEX(un));
9759 		}
9760 	}
9761 
9762 	/*
9763 	 * If opening in NDELAY/NONBLOCK mode, just return.
9764 	 * Check if disk is ready and has a valid geometry later.
9765 	 */
9766 	if (!nodelay) {
9767 		sd_ssc_t *ssc;
9768 
9769 		mutex_exit(SD_MUTEX(un));
9770 		ssc = sd_ssc_init(un);
9771 		rval = sd_ready_and_valid(ssc, part);
9772 		sd_ssc_fini(ssc);
9773 		mutex_enter(SD_MUTEX(un));
9774 		/*
9775 		 * Fail if device is not ready or if the number of disk
9776 		 * blocks is zero or negative for non CD devices.
9777 		 */
9778 
9779 		nblks = 0;
9780 
9781 		if (rval == SD_READY_VALID && (!ISCD(un))) {
9782 			/* if cmlb_partinfo fails, nblks remains 0 */
9783 			mutex_exit(SD_MUTEX(un));
9784 			(void) cmlb_partinfo(un->un_cmlbhandle, part, &nblks,
9785 			    NULL, NULL, NULL, (void *)SD_PATH_DIRECT);
9786 			mutex_enter(SD_MUTEX(un));
9787 		}
9788 
9789 		if ((rval != SD_READY_VALID) ||
9790 		    (!ISCD(un) && nblks <= 0)) {
9791 			rval = un->un_f_has_removable_media ? ENXIO : EIO;
9792 			SD_ERROR(SD_LOG_OPEN_CLOSE, un, "sdopen: "
9793 			    "device not ready or invalid disk block value\n");
9794 			goto open_fail;
9795 		}
9796 #if defined(__i386) || defined(__amd64)
9797 	} else {
9798 		uchar_t *cp;
9799 		/*
9800 		 * x86 requires special nodelay handling, so that p0 is
9801 		 * always defined and accessible.
9802 		 * Invalidate geometry only if device is not already open.
9803 		 */
9804 		cp = &un->un_ocmap.chkd[0];
9805 		while (cp < &un->un_ocmap.chkd[OCSIZE]) {
9806 			if (*cp != (uchar_t)0) {
9807 				break;
9808 			}
9809 			cp++;
9810 		}
9811 		if (cp == &un->un_ocmap.chkd[OCSIZE]) {
9812 			mutex_exit(SD_MUTEX(un));
9813 			cmlb_invalidate(un->un_cmlbhandle,
9814 			    (void *)SD_PATH_DIRECT);
9815 			mutex_enter(SD_MUTEX(un));
9816 		}
9817 
9818 #endif
9819 	}
9820 
9821 	if (otyp == OTYP_LYR) {
9822 		un->un_ocmap.lyropen[part]++;
9823 	} else {
9824 		un->un_ocmap.regopen[otyp] |= partmask;
9825 	}
9826 
9827 	/* Set up open and exclusive open flags */
9828 	if (flag & FEXCL) {
9829 		un->un_exclopen |= (partmask);
9830 	}
9831 
9832 	/*
9833 	 * If the lun is EFI labeled and lun capacity is greater than the
9834 	 * capacity contained in the label, log a sys-event to notify the
9835 	 * interested module.
9836 	 * To avoid an infinite loop of logging sys-event, we only log the
9837 	 * event when the lun is not opened in NDELAY mode. The event handler
9838 	 * should open the lun in NDELAY mode.
9839 	 */
9840 	if (!(flag & FNDELAY)) {
9841 		mutex_exit(SD_MUTEX(un));
9842 		if (cmlb_efi_label_capacity(un->un_cmlbhandle, &label_cap,
9843 		    (void*)SD_PATH_DIRECT) == 0) {
9844 			mutex_enter(SD_MUTEX(un));
9845 			if (un->un_f_blockcount_is_valid &&
9846 			    un->un_blockcount > label_cap) {
9847 				mutex_exit(SD_MUTEX(un));
9848 				sd_log_lun_expansion_event(un,
9849 				    (nodelay ? KM_NOSLEEP : KM_SLEEP));
9850 				mutex_enter(SD_MUTEX(un));
9851 			}
9852 		} else {
9853 			mutex_enter(SD_MUTEX(un));
9854 		}
9855 	}
9856 
9857 	SD_TRACE(SD_LOG_OPEN_CLOSE, un, "sdopen: "
9858 	    "open of part %d type %d\n", part, otyp);
9859 
9860 	mutex_exit(SD_MUTEX(un));
9861 	if (!nodelay) {
9862 		sd_pm_exit(un);
9863 	}
9864 
9865 	sema_v(&un->un_semoclose);
9866 
9867 	mutex_enter(&sd_detach_mutex);
9868 	un->un_opens_in_progress--;
9869 	mutex_exit(&sd_detach_mutex);
9870 
9871 	SD_TRACE(SD_LOG_OPEN_CLOSE, un, "sdopen: exit success\n");
9872 	return (DDI_SUCCESS);
9873 
9874 excl_open_fail:
9875 	SD_ERROR(SD_LOG_OPEN_CLOSE, un, "sdopen: fail exclusive open\n");
9876 	rval = EBUSY;
9877 
9878 open_fail:
9879 	mutex_exit(SD_MUTEX(un));
9880 
9881 	/*
9882 	 * On a failed open we must exit the pm management.
9883 	 */
9884 	if (!nodelay) {
9885 		sd_pm_exit(un);
9886 	}
9887 open_failed_with_pm:
9888 	sema_v(&un->un_semoclose);
9889 
9890 	mutex_enter(&sd_detach_mutex);
9891 	un->un_opens_in_progress--;
9892 	if (otyp == OTYP_LYR) {
9893 		un->un_layer_count--;
9894 	}
9895 	mutex_exit(&sd_detach_mutex);
9896 
9897 	return (rval);
9898 }
9899 
9900 
9901 /*
9902  * Function: sdclose
9903  *
9904  * Description: Driver's close(9e) entry point function.
9905  *
9906  * Arguments: dev - device number
9907  *		flag - file status flag, informational only
9908  *		otyp - close type (OTYP_BLK, OTYP_CHR, OTYP_LYR)
9909  *		cred_p - user credential pointer
9910  *
9911  * Return Code: ENXIO
9912  *
9913  * Context: Kernel thread context
9914  */
9915 /* ARGSUSED */
9916 static int
9917 sdclose(dev_t dev, int flag, int otyp, cred_t *cred_p)
9918 {
9919 	struct sd_lun *un;
9920 	uchar_t *cp;
9921 	int part;
9922 	int nodelay;
9923 	int rval = 0;
9924 
9925 	/* Validate the open type */
9926 	if (otyp >= OTYPCNT) {
9927 		return (ENXIO);
9928 	}
9929 
9930 	if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) {
9931 		return (ENXIO);
9932 	}
9933 
9934 	part = SDPART(dev);
9935 	nodelay = flag & (FNDELAY | FNONBLOCK);
9936 
9937 	SD_TRACE(SD_LOG_OPEN_CLOSE, un,
9938 	    "sdclose: close of part %d type %d\n", part, otyp);
9939 
9940 	/*
9941 	 * We use a semaphore here in order to serialize
9942 	 * open and close requests on the device.
9943 	 */
9944 	sema_p(&un->un_semoclose);
9945 
9946 	mutex_enter(SD_MUTEX(un));
9947 
9948 	/* Don't proceed if power is being changed. */
9949 	while (un->un_state == SD_STATE_PM_CHANGING) {
9950 		cv_wait(&un->un_suspend_cv, SD_MUTEX(un));
9951 	}
9952 
9953 	if (un->un_exclopen & (1 << part)) {
9954 		un->un_exclopen &= ~(1 << part);
9955 	}
9956 
9957 	/* Update the open partition map */
9958 	if (otyp == OTYP_LYR) {
9959 		un->un_ocmap.lyropen[part] -= 1;
9960 	} else {
9961 		un->un_ocmap.regopen[otyp] &= ~(1 << part);
9962 	}
9963 
9964 	cp = &un->un_ocmap.chkd[0];
9965 	while (cp < &un->un_ocmap.chkd[OCSIZE]) {
9966 		if (*cp != NULL) {
9967 			break;
9968 		}
9969 		cp++;
9970 	}
9971 
9972 	if (cp == &un->un_ocmap.chkd[OCSIZE]) {
9973 		SD_TRACE(SD_LOG_OPEN_CLOSE, un, "sdclose: last close\n");
9974 
9975 		/*
9976 		 * We avoid persistence upon the last close, and set
9977 		 * the throttle back to the maximum.
9978 		 */
9979 		un->un_throttle = un->un_saved_throttle;
9980 
9981 		if (un->un_state == SD_STATE_OFFLINE) {
9982 			if (un->un_f_is_fibre == FALSE) {
9983 				scsi_log(SD_DEVINFO(un), sd_label,
9984 				    CE_WARN, "offline\n");
9985 			}
9986 			mutex_exit(SD_MUTEX(un));
9987 			cmlb_invalidate(un->un_cmlbhandle,
9988 			    (void *)SD_PATH_DIRECT);
9989 			mutex_enter(SD_MUTEX(un));
9990 
9991 		} else {
9992 			/*
9993 			 * Flush any outstanding writes in NVRAM cache.
9994 			 * Note: SYNCHRONIZE CACHE is an optional SCSI-2
9995 			 * cmd; it may not work for non-Pluto devices.
9996 			 * SYNCHRONIZE CACHE is not required for removables,
9997 			 * except DVD-RAM drives.
9998 			 *
9999 			 * Also note: because SYNCHRONIZE CACHE is currently
10000 			 * the only command issued here that requires the
10001 			 * drive be powered up, only do the power up before
10002 			 * sending the Sync Cache command. If additional
10003 			 * commands are added which require a powered up
10004 			 * drive, the following sequence may have to change.
10005 			 *
10006 			 * And finally, note that parallel SCSI on SPARC
10007 			 * only issues a Sync Cache to DVD-RAM, a newly
10008 			 * supported device.
10009 			 */
10010 #if defined(__i386) || defined(__amd64)
10011 			if ((un->un_f_sync_cache_supported &&
10012 			    un->un_f_sync_cache_required) ||
10013 			    un->un_f_dvdram_writable_device == TRUE) {
10014 #else
10015 			if (un->un_f_dvdram_writable_device == TRUE) {
10016 #endif
10017 				mutex_exit(SD_MUTEX(un));
10018 				if (sd_pm_entry(un) == DDI_SUCCESS) {
10019 					rval =
10020 					    sd_send_scsi_SYNCHRONIZE_CACHE(un,
10021 					    NULL);
10022 					/* ignore error if not supported */
10023 					if (rval == ENOTSUP) {
10024 						rval = 0;
10025 					} else if (rval != 0) {
10026 						rval = EIO;
10027 					}
10028 					sd_pm_exit(un);
10029 				} else {
10030 					rval = EIO;
10031 				}
10032 				mutex_enter(SD_MUTEX(un));
10033 			}
10034 
10035 			/*
10036 			 * For devices which support DOOR_LOCK, send an ALLOW
10037 			 * MEDIA REMOVAL command, but don't get upset if it
10038 			 * fails. We need to raise the power of the drive before
10039 			 * we can call sd_send_scsi_DOORLOCK().
10040 			 */
10041 			if (un->un_f_doorlock_supported) {
10042 				mutex_exit(SD_MUTEX(un));
10043 				if (sd_pm_entry(un) == DDI_SUCCESS) {
10044 					sd_ssc_t *ssc;
10045 
10046 					ssc = sd_ssc_init(un);
10047 					rval = sd_send_scsi_DOORLOCK(ssc,
10048 					    SD_REMOVAL_ALLOW, SD_PATH_DIRECT);
10049 					if (rval != 0)
10050 						sd_ssc_assessment(ssc,
10051 						    SD_FMT_IGNORE);
10052 					sd_ssc_fini(ssc);
10053 
10054 					sd_pm_exit(un);
10055 					if (ISCD(un) && (rval != 0) &&
10056 					    (nodelay != 0)) {
10057 						rval = ENXIO;
10058 					}
10059 				} else {
10060 					rval = EIO;
10061 				}
10062 				mutex_enter(SD_MUTEX(un));
10063 			}
10064 
10065 			/*
10066 			 * If a device has removable media, invalidate all
10067 			 * parameters related to media, such as geometry,
10068 			 * blocksize, and blockcount.
10069 			 */
10070 			if (un->un_f_has_removable_media) {
10071 				sr_ejected(un);
10072 			}
10073 
10074 			/*
10075 			 * Destroy the cache (if it exists) which was
10076 			 * allocated for the write maps, since this is
10077 			 * the last close for this media.
10078 			 */
10079 			if (un->un_wm_cache) {
10080 				/*
10081 				 * Check if there are pending commands;
10082 				 * if there are, give a warning and
10083 				 * do not destroy the cache.
10084 				 */
10085 				if (un->un_ncmds_in_driver > 0) {
10086 					scsi_log(SD_DEVINFO(un),
10087 					    sd_label, CE_WARN,
10088 					    "Unable to clean up memory "
10089 					    "because of pending I/O\n");
10090 				} else {
10091 					kmem_cache_destroy(
10092 					    un->un_wm_cache);
10093 					un->un_wm_cache = NULL;
10094 				}
10095 			}
10096 		}
10097 	}
10098 
10099 	mutex_exit(SD_MUTEX(un));
10100 	sema_v(&un->un_semoclose);
10101 
10102 	if (otyp == OTYP_LYR) {
10103 		mutex_enter(&sd_detach_mutex);
10104 		/*
10105 		 * The detach routine may run when the layer count
10106 		 * drops to zero.
10107 		 */
10108 		un->un_layer_count--;
10109 		mutex_exit(&sd_detach_mutex);
10110 	}
10111 
10112 	return (rval);
10113 }
10114 
10115 
10116 /*
10117  * Function: sd_ready_and_valid
10118  *
10119  * Description: Test if device is ready and has a valid geometry.
10120  *
10121  * Arguments: ssc - sd_ssc_t, which contains un (the driver soft
10122  *		      state / unit structure)
10123  *		part - partition number
10124  *
10125  * Return Code: SD_READY_VALID		ready and valid label
10126  *		SD_NOT_READY_VALID	not ready, no label
10127  *		SD_RESERVED_BY_OTHERS	reservation conflict
10128  *
10129  * Context: Never called at interrupt context.
10130  */
10131 
10132 static int
10133 sd_ready_and_valid(sd_ssc_t *ssc, int part)
10134 {
10135 	struct sd_errstats *stp;
10136 	uint64_t capacity;
10137 	uint_t lbasize;
10138 	int rval = SD_READY_VALID;
10139 	char name_str[48];
10140 	int is_valid;
10141 	struct sd_lun *un;
10142 	int status;
10143 
10144 	ASSERT(ssc != NULL);
10145 	un = ssc->ssc_un;
10146 	ASSERT(un != NULL);
10147 	ASSERT(!mutex_owned(SD_MUTEX(un)));
10148 
10149 	mutex_enter(SD_MUTEX(un));
10150 	/*
10151 	 * If a device has removable media, we must check if media is
10152 	 * ready when checking if this device is ready and valid.
10153 	 */
10154 	if (un->un_f_has_removable_media) {
10155 		mutex_exit(SD_MUTEX(un));
10156 		status = sd_send_scsi_TEST_UNIT_READY(ssc, 0);
10157 
10158 		if (status != 0) {
10159 			rval = SD_NOT_READY_VALID;
10160 			mutex_enter(SD_MUTEX(un));
10161 
10162 			/* Ignore all failed status for removable media */
10163 			sd_ssc_assessment(ssc, SD_FMT_IGNORE);
10164 
10165 			goto done;
10166 		}
10167 
10168 		is_valid = SD_IS_VALID_LABEL(un);
10169 		mutex_enter(SD_MUTEX(un));
10170 		if (!is_valid ||
10171 		    (un->un_f_blockcount_is_valid == FALSE) ||
10172 		    (un->un_f_tgt_blocksize_is_valid == FALSE)) {
10173 
10174 			/* capacity has to be read every open. */
10175 			mutex_exit(SD_MUTEX(un));
10176 			status = sd_send_scsi_READ_CAPACITY(ssc, &capacity,
10177 			    &lbasize, SD_PATH_DIRECT);
10178 
10179 			if (status != 0) {
10180 				sd_ssc_assessment(ssc, SD_FMT_IGNORE);
10181 
10182 				cmlb_invalidate(un->un_cmlbhandle,
10183 				    (void *)SD_PATH_DIRECT);
10184 				mutex_enter(SD_MUTEX(un));
10185 				rval = SD_NOT_READY_VALID;
10186 
10187 				goto done;
10188 			} else {
10189 				mutex_enter(SD_MUTEX(un));
10190 				sd_update_block_info(un, lbasize, capacity);
10191 			}
10192 		}
10193 
10194 		/*
10195 		 * Check if the media in the device is writable or not.
10196 		 */
10197 		if (!is_valid && ISCD(un)) {
10198 			sd_check_for_writable_cd(ssc, SD_PATH_DIRECT);
10199 		}
10200 
10201 	} else {
10202 		/*
10203 		 * Do a test unit ready to clear any unit attention from non-cd
10204 		 * devices.
10205 		 */
10206 		mutex_exit(SD_MUTEX(un));
10207 
10208 		status = sd_send_scsi_TEST_UNIT_READY(ssc, 0);
10209 		if (status != 0) {
10210 			sd_ssc_assessment(ssc, SD_FMT_IGNORE);
10211 		}
10212 
10213 		mutex_enter(SD_MUTEX(un));
10214 	}
10215 
10216 
10217 	/*
10218 	 * If this is a non-512 block device, allocate space for
10219 	 * the wmap cache. This is being done here since every time
10220 	 * a media is changed this routine will be called and the
10221 	 * block size is a function of media rather than device.
10222 	 */
10223 	if (un->un_f_non_devbsize_supported && NOT_DEVBSIZE(un)) {
10224 		if (!(un->un_wm_cache)) {
10225 			(void) snprintf(name_str, sizeof (name_str),
10226 			    "%s%d_cache",
10227 			    ddi_driver_name(SD_DEVINFO(un)),
10228 			    ddi_get_instance(SD_DEVINFO(un)));
10229 			un->un_wm_cache = kmem_cache_create(
10230 			    name_str, sizeof (struct sd_w_map),
10231 			    8, sd_wm_cache_constructor,
10232 			    sd_wm_cache_destructor, NULL,
10233 			    (void *)un, NULL, 0);
10234 			if (!(un->un_wm_cache)) {
10235 				rval = ENOMEM;
10236 				goto done;
10237 			}
10238 		}
10239 	}
10240 
10241 	if (un->un_state == SD_STATE_NORMAL) {
10242 		/*
10243 		 * If the target is not yet ready here (defined by a TUR
10244 		 * failure), invalidate the geometry and print an 'offline'
10245 		 * message. This is a legacy message, as the state of the
10246 		 * target is not actually changed to SD_STATE_OFFLINE.
10247 		 *
10248 		 * If the TUR fails for EACCES (Reservation Conflict),
10249 		 * SD_RESERVED_BY_OTHERS will be returned to indicate
10250 		 * reservation conflict. If the TUR fails for other
10251 		 * reasons, SD_NOT_READY_VALID will be returned.
10252 		 */
10253 		int err;
10254 
10255 		mutex_exit(SD_MUTEX(un));
10256 		err = sd_send_scsi_TEST_UNIT_READY(ssc, 0);
10257 		mutex_enter(SD_MUTEX(un));
10258 
10259 		if (err != 0) {
10260 			mutex_exit(SD_MUTEX(un));
10261 			cmlb_invalidate(un->un_cmlbhandle,
10262 			    (void *)SD_PATH_DIRECT);
10263 			mutex_enter(SD_MUTEX(un));
10264 			if (err == EACCES) {
10265 				scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
10266 				    "reservation conflict\n");
10267 				rval = SD_RESERVED_BY_OTHERS;
10268 				sd_ssc_assessment(ssc, SD_FMT_IGNORE);
10269 			} else {
10270 				scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
10271 				    "drive offline\n");
10272 				rval = SD_NOT_READY_VALID;
10273 				sd_ssc_assessment(ssc, SD_FMT_STATUS_CHECK);
10274 			}
10275 			goto done;
10276 		}
10277 	}
10278 
10279 	if (un->un_f_format_in_progress == FALSE) {
10280 		mutex_exit(SD_MUTEX(un));
10281 
10282 		if (cmlb_partinfo(un->un_cmlbhandle, part, NULL, NULL, NULL,
10283 		    NULL, (void *) SD_PATH_DIRECT) != 0) {
10284 			rval = SD_NOT_READY_VALID;
10285 			mutex_enter(SD_MUTEX(un));
10286 
10287 			goto done;
10288 		}
10289 		if (un->un_f_pkstats_enabled) {
10290 			sd_set_pstats(un);
10291 			SD_TRACE(SD_LOG_IO_PARTITION, un,
10292 			    "sd_ready_and_valid: un:0x%p pstats created and "
10293 			    "set\n", un);
10294 		}
10295 		mutex_enter(SD_MUTEX(un));
10296 	}
10297 
10298 	/*
10299 	 * If this device supports the DOOR_LOCK command, try to send
10300 	 * this command to PREVENT MEDIA REMOVAL, but don't get upset
10301 	 * if it fails. For a CD, however, it is an error.
10302 	 */
10303 	if (un->un_f_doorlock_supported) {
10304 		mutex_exit(SD_MUTEX(un));
10305 		status = sd_send_scsi_DOORLOCK(ssc, SD_REMOVAL_PREVENT,
10306 		    SD_PATH_DIRECT);
10307 
10308 		if ((status != 0) && ISCD(un)) {
10309 			rval = SD_NOT_READY_VALID;
10310 			mutex_enter(SD_MUTEX(un));
10311 
10312 			sd_ssc_assessment(ssc, SD_FMT_IGNORE);
10313 
10314 			goto done;
10315 		} else if (status != 0)
10316 			sd_ssc_assessment(ssc, SD_FMT_IGNORE);
10317 		mutex_enter(SD_MUTEX(un));
10318 	}
10319 
10320 	/* The state has changed, inform the media watch routines */
10321 	un->un_mediastate = DKIO_INSERTED;
10322 	cv_broadcast(&un->un_state_cv);
10323 	rval = SD_READY_VALID;
10324 
10325 done:
10326 
10327 	/*
10328 	 * Initialize the capacity kstat value, if no media previously
10329 	 * (capacity kstat is 0) and a media has been inserted
10330 	 * (un_blockcount > 0).
10331 	 */
10332 	if (un->un_errstats != NULL) {
10333 		stp = (struct sd_errstats *)un->un_errstats->ks_data;
10334 		if ((stp->sd_capacity.value.ui64 == 0) &&
10335 		    (un->un_f_blockcount_is_valid == TRUE)) {
10336 			stp->sd_capacity.value.ui64 =
10337 			    (uint64_t)((uint64_t)un->un_blockcount *
10338 			    un->un_sys_blocksize);
10339 		}
10340 	}
10341 
10342 	mutex_exit(SD_MUTEX(un));
10343 	return (rval);
10344 }
10345 
10346 
10347 /*
10348  * Function: sdmin
10349  *
10350  * Description: Routine to limit the size of a data transfer. Used in
10351  *		conjunction with physio(9F).
10352  *
10353  * Arguments: bp - pointer to the indicated buf(9S) struct.
10354  *
10355  * Context: Kernel thread context.
10356  */
10357 
10358 static void
10359 sdmin(struct buf *bp)
10360 {
10361 	struct sd_lun *un;
10362 	int instance;
10363 
10364 	instance = SDUNIT(bp->b_edev);
10365 
10366 	un = ddi_get_soft_state(sd_state, instance);
10367 	ASSERT(un != NULL);
10368 
10369 	if (bp->b_bcount > un->un_max_xfer_size) {
10370 		bp->b_bcount = un->un_max_xfer_size;
10371 	}
10372 }
10373 
10374 
10375 /*
10376  * Function: sdread
10377  *
10378  * Description: Driver's read(9e) entry point function.
10379  *
10380  * Arguments: dev - device number
10381  *		uio - structure pointer describing where data is to be stored
10382  *		      in user's space
10383  *		cred_p - user credential pointer
10384  *
10385  * Return Code: ENXIO
10386  *		EIO
10387  *		EINVAL
10388  *		value returned by physio
10389  *
10390  * Context: Kernel thread context.
10391  */
10392 /* ARGSUSED */
10393 static int
10394 sdread(dev_t dev, struct uio *uio, cred_t *cred_p)
10395 {
10396 	struct sd_lun *un = NULL;
10397 	int secmask;
10398 	int err = 0;
10399 	sd_ssc_t *ssc;
10400 
10401 	if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) {
10402 		return (ENXIO);
10403 	}
10404 
10405 	ASSERT(!mutex_owned(SD_MUTEX(un)));
10406 
10407 
10408 	if (!SD_IS_VALID_LABEL(un) && !ISCD(un)) {
10409 		mutex_enter(SD_MUTEX(un));
10410 		/*
10411 		 * Because the call to sd_ready_and_valid will issue I/O we
10412 		 * must wait here if either the device is suspended or
10413 		 * if its power level is changing.
10414 		 */
10415 		while ((un->un_state == SD_STATE_SUSPENDED) ||
10416 		    (un->un_state == SD_STATE_PM_CHANGING)) {
10417 			cv_wait(&un->un_suspend_cv, SD_MUTEX(un));
10418 		}
10419 		un->un_ncmds_in_driver++;
10420 		mutex_exit(SD_MUTEX(un));
10421 
10422 		/* Initialize sd_ssc_t for internal uscsi commands */
10423 		ssc = sd_ssc_init(un);
10424 		if ((sd_ready_and_valid(ssc, SDPART(dev))) != SD_READY_VALID) {
10425 			err = EIO;
10426 		} else {
10427 			err = 0;
10428 		}
10429 		sd_ssc_fini(ssc);
10430 
10431 		mutex_enter(SD_MUTEX(un));
10432 		un->un_ncmds_in_driver--;
10433 		ASSERT(un->un_ncmds_in_driver >= 0);
10434 		mutex_exit(SD_MUTEX(un));
10435 		if (err != 0)
10436 			return (err);
10437 	}
10438 
10439 	/*
10440 	 * Read requests are restricted to multiples of the system block size.
10441 	 */
10442 	secmask = un->un_sys_blocksize - 1;
10443 
10444 	if (uio->uio_loffset & ((offset_t)(secmask))) {
10445 		SD_ERROR(SD_LOG_READ_WRITE, un,
10446 		    "sdread: file offset not modulo %d\n",
10447 		    un->un_sys_blocksize);
10448 		err = EINVAL;
10449 	} else if (uio->uio_iov->iov_len & (secmask)) {
10450 		SD_ERROR(SD_LOG_READ_WRITE, un,
10451 		    "sdread: transfer length not modulo %d\n",
10452 		    un->un_sys_blocksize);
10453 		err = EINVAL;
10454 	} else {
10455 		err = physio(sdstrategy, NULL, dev, B_READ, sdmin, uio);
10456 	}
10457 
10458 	return (err);
10459 }
10460 
10461 
10462 /*
10463  * Function: sdwrite
10464  *
10465  * Description: Driver's write(9e) entry point function.
10466  *
10467  * Arguments: dev - device number
10468  *		uio - structure pointer describing where data is stored in
10469  *		      user's space
10470  *		cred_p - user credential pointer
10471  *
10472  * Return Code: ENXIO
10473  *		EIO
10474  *		EINVAL
10475  *		value returned by physio
10476  *
10477  * Context: Kernel thread context.
10478  */
10479 /* ARGSUSED */
10480 static int
10481 sdwrite(dev_t dev, struct uio *uio, cred_t *cred_p)
10482 {
10483 	struct sd_lun *un = NULL;
10484 	int secmask;
10485 	int err = 0;
10486 	sd_ssc_t *ssc;
10487 
10488 	if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) {
10489 		return (ENXIO);
10490 	}
10491 
10492 	ASSERT(!mutex_owned(SD_MUTEX(un)));
10493 
10494 	if (!SD_IS_VALID_LABEL(un) && !ISCD(un)) {
10495 		mutex_enter(SD_MUTEX(un));
10496 		/*
10497 		 * Because the call to sd_ready_and_valid will issue I/O we
10498 		 * must wait here if either the device is suspended or
10499 		 * if its power level is changing.
10500 		 */
10501 		while ((un->un_state == SD_STATE_SUSPENDED) ||
10502 		    (un->un_state == SD_STATE_PM_CHANGING)) {
10503 			cv_wait(&un->un_suspend_cv, SD_MUTEX(un));
10504 		}
10505 		un->un_ncmds_in_driver++;
10506 		mutex_exit(SD_MUTEX(un));
10507 
10508 		/* Initialize sd_ssc_t for internal uscsi commands */
10509 		ssc = sd_ssc_init(un);
10510 		if ((sd_ready_and_valid(ssc, SDPART(dev))) != SD_READY_VALID) {
10511 			err = EIO;
10512 		} else {
10513 			err = 0;
10514 		}
10515 		sd_ssc_fini(ssc);
10516 
10517 		mutex_enter(SD_MUTEX(un));
10518 		un->un_ncmds_in_driver--;
10519 		ASSERT(un->un_ncmds_in_driver >= 0);
10520 		mutex_exit(SD_MUTEX(un));
10521 		if (err != 0)
10522 			return (err);
10523 	}
10524 
10525 	/*
10526 	 * Write requests are restricted to multiples of the system block size.
10527 	 */
10528 	secmask = un->un_sys_blocksize - 1;
10529 
10530 	if (uio->uio_loffset & ((offset_t)(secmask))) {
10531 		SD_ERROR(SD_LOG_READ_WRITE, un,
10532 		    "sdwrite: file offset not modulo %d\n",
10533 		    un->un_sys_blocksize);
10534 		err = EINVAL;
10535 	} else if (uio->uio_iov->iov_len & (secmask)) {
10536 		SD_ERROR(SD_LOG_READ_WRITE, un,
10537 		    "sdwrite: transfer length not modulo %d\n",
10538 		    un->un_sys_blocksize);
10539 		err = EINVAL;
10540 	} else {
10541 		err = physio(sdstrategy, NULL, dev, B_WRITE, sdmin, uio);
10542 	}
10543 
10544 	return (err);
10545 }
10546 
10547 
10548 /*
10549  * Function: sdaread
10550  *
10551  * Description: Driver's aread(9e) entry point function.
10552  *
10553  * Arguments: dev - device number
10554  *		aio - structure pointer describing where data is to be stored
10555  *		cred_p - user credential pointer
10556  *
10557  * Return Code: ENXIO
10558  *		EIO
10559  *		EINVAL
10560  *		value returned by aphysio
10561  *
10562  * Context: Kernel thread context.
10563  */
10564 /* ARGSUSED */
10565 static int
10566 sdaread(dev_t dev, struct aio_req *aio, cred_t *cred_p)
10567 {
10568 	struct sd_lun *un = NULL;
10569 	struct uio *uio = aio->aio_uio;
10570 	int secmask;
10571 	int err = 0;
10572 	sd_ssc_t *ssc;
10573 
10574 	if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) {
10575 		return (ENXIO);
10576 	}
10577 
10578 	ASSERT(!mutex_owned(SD_MUTEX(un)));
10579 
10580 	if (!SD_IS_VALID_LABEL(un) && !ISCD(un)) {
10581 		mutex_enter(SD_MUTEX(un));
10582 		/*
10583 		 * Because the call to sd_ready_and_valid will issue I/O we
10584 		 * must wait here if either the device is suspended or
10585 		 * if its power level is changing.
10586 		 */
10587 		while ((un->un_state == SD_STATE_SUSPENDED) ||
10588 		    (un->un_state == SD_STATE_PM_CHANGING)) {
10589 			cv_wait(&un->un_suspend_cv, SD_MUTEX(un));
10590 		}
10591 		un->un_ncmds_in_driver++;
10592 		mutex_exit(SD_MUTEX(un));
10593 
10594 		/* Initialize sd_ssc_t for internal uscsi commands */
10595 		ssc = sd_ssc_init(un);
10596 		if ((sd_ready_and_valid(ssc, SDPART(dev))) != SD_READY_VALID) {
10597 			err = EIO;
10598 		} else {
10599 			err = 0;
10600 		}
10601 		sd_ssc_fini(ssc);
10602 
10603 		mutex_enter(SD_MUTEX(un));
10604 		un->un_ncmds_in_driver--;
10605 		ASSERT(un->un_ncmds_in_driver >= 0);
10606 		mutex_exit(SD_MUTEX(un));
10607 		if (err != 0)
10608 			return (err);
10609 	}
10610 
10611 	/*
10612 	 * Read requests are restricted to multiples of the system block size.
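	 *
	 * For example (assuming the common 512-byte system block size):
	 * secmask below is 0x1FF, so an offset of 1024 passes both checks
	 * while an offset of 1000 fails with EINVAL.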
10613 	 */
10614 	secmask = un->un_sys_blocksize - 1;
10615 
10616 	if (uio->uio_loffset & ((offset_t)(secmask))) {
10617 		SD_ERROR(SD_LOG_READ_WRITE, un,
10618 		    "sdaread: file offset not modulo %d\n",
10619 		    un->un_sys_blocksize);
10620 		err = EINVAL;
10621 	} else if (uio->uio_iov->iov_len & (secmask)) {
10622 		SD_ERROR(SD_LOG_READ_WRITE, un,
10623 		    "sdaread: transfer length not modulo %d\n",
10624 		    un->un_sys_blocksize);
10625 		err = EINVAL;
10626 	} else {
10627 		err = aphysio(sdstrategy, anocancel, dev, B_READ, sdmin, aio);
10628 	}
10629 
10630 	return (err);
10631 }
10632 
10633 
10634 /*
10635  * Function: sdawrite
10636  *
10637  * Description: Driver's awrite(9e) entry point function.
10638  *
10639  * Arguments: dev - device number
10640  *		aio - structure pointer describing where data is stored
10641  *		cred_p - user credential pointer
10642  *
10643  * Return Code: ENXIO
10644  *		EIO
10645  *		EINVAL
10646  *		value returned by aphysio
10647  *
10648  * Context: Kernel thread context.
10649  */
10650 /* ARGSUSED */
10651 static int
10652 sdawrite(dev_t dev, struct aio_req *aio, cred_t *cred_p)
10653 {
10654 	struct sd_lun *un = NULL;
10655 	struct uio *uio = aio->aio_uio;
10656 	int secmask;
10657 	int err = 0;
10658 	sd_ssc_t *ssc;
10659 
10660 	if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) {
10661 		return (ENXIO);
10662 	}
10663 
10664 	ASSERT(!mutex_owned(SD_MUTEX(un)));
10665 
10666 	if (!SD_IS_VALID_LABEL(un) && !ISCD(un)) {
10667 		mutex_enter(SD_MUTEX(un));
10668 		/*
10669 		 * Because the call to sd_ready_and_valid will issue I/O we
10670 		 * must wait here if either the device is suspended or
10671 		 * if its power level is changing.
10672 		 */
10673 		while ((un->un_state == SD_STATE_SUSPENDED) ||
10674 		    (un->un_state == SD_STATE_PM_CHANGING)) {
10675 			cv_wait(&un->un_suspend_cv, SD_MUTEX(un));
10676 		}
10677 		un->un_ncmds_in_driver++;
10678 		mutex_exit(SD_MUTEX(un));
10679 
10680 		/* Initialize sd_ssc_t for internal uscsi commands */
10681 		ssc = sd_ssc_init(un);
10682 		if ((sd_ready_and_valid(ssc, SDPART(dev))) != SD_READY_VALID) {
10683 			err = EIO;
10684 		} else {
10685 			err = 0;
10686 		}
10687 		sd_ssc_fini(ssc);
10688 
10689 		mutex_enter(SD_MUTEX(un));
10690 		un->un_ncmds_in_driver--;
10691 		ASSERT(un->un_ncmds_in_driver >= 0);
10692 		mutex_exit(SD_MUTEX(un));
10693 		if (err != 0)
10694 			return (err);
10695 	}
10696 
10697 	/*
10698 	 * Write requests are restricted to multiples of the system block size.
10699 	 */
10700 	secmask = un->un_sys_blocksize - 1;
10701 
10702 	if (uio->uio_loffset & ((offset_t)(secmask))) {
10703 		SD_ERROR(SD_LOG_READ_WRITE, un,
10704 		    "sdawrite: file offset not modulo %d\n",
10705 		    un->un_sys_blocksize);
10706 		err = EINVAL;
10707 	} else if (uio->uio_iov->iov_len & (secmask)) {
10708 		SD_ERROR(SD_LOG_READ_WRITE, un,
10709 		    "sdawrite: transfer length not modulo %d\n",
10710 		    un->un_sys_blocksize);
10711 		err = EINVAL;
10712 	} else {
10713 		err = aphysio(sdstrategy, anocancel, dev, B_WRITE, sdmin, aio);
10714 	}
10715 
10716 	return (err);
10717 }
10718 
10719 
10720 
10721 
10722 
10723 /*
10724  * Driver IO processing follows the following sequence:
10725  *
10726  *     sdioctl(9E)       sdstrategy(9E)                 biodone(9F)
10727  *          |                 |                              ^
10728  *          v                 v                              |
10729  *  sd_send_scsi_cmd()  ddi_xbuf_qstrategy()  +-------------------+
10730  *          |                 |               |                   |
10731  *          v                 |               |                   |
10732  * sd_uscsi_strategy()  sd_xbuf_strategy()  sd_buf_iodone()  sd_uscsi_iodone()
10733  *          |                 |               ^                   ^
10734  *          v                 v               |                   |
10735  * SD_BEGIN_IOSTART()   SD_BEGIN_IOSTART()    |                   |
10736  *          |                 |               |                   |
10737  *      +---+                 |               +------------+      +-------+
10738  *      |                     |                            |              |
10739  *      |   SD_NEXT_IOSTART() |           SD_NEXT_IODONE() |              |
10740  *      |                     v                            |              |
10741  *      |   sd_mapblockaddr_iostart()  sd_mapblockaddr_iodone()           |
10742  *      |                     |                            ^              |
10743  *      |   SD_NEXT_IOSTART() |           SD_NEXT_IODONE() |              |
10744  *      |                     v                            |              |
10745  *      |   sd_mapblocksize_iostart()  sd_mapblocksize_iodone()           |
10746  *      |                     |                            ^              |
10747  *      |   SD_NEXT_IOSTART() |           SD_NEXT_IODONE() |              |
10748  *      |                     v                            |              |
10749  *      |   sd_checksum_iostart()      sd_checksum_iodone()               |
10750  *      |                     |                            ^              |
10751  *      +-> SD_NEXT_IOSTART() |           SD_NEXT_IODONE() +------------->+
10752  *      |                     v                            |              |
10753  *      |   sd_pm_iostart()            sd_pm_iodone()                     |
10754  *      |                     |                            ^              |
10755  *      |                     |                            |              |
10756  *      +-> SD_NEXT_IOSTART() |        SD_BEGIN_IODONE()-- + -------------+
10757  *                            |                            ^
10758  *                            v                            |
10759  *                 sd_core_iostart()                       |
10760  *                            |                            |
10761  *                            |              +------>(*destroypkt)()
10762  *                            +-> sd_start_cmds() <-+      |          |
10763  *                            |                     |      |          v
10764  *                            |                     |      |  scsi_destroy_pkt(9F)
10765  *                            |                     |      |
10766  *                            +->(*initpkt)()       +- sdintr()
10767  *                            |  |                     |  |
10768  *                            |  +-> scsi_init_pkt(9F) |  +-> sd_handle_xxx()
10769  *                            |  +-> scsi_setup_cdb(9F) |
10770  *                            |                          |
10771  *                            +--> scsi_transport(9F)    |
10772  *                            |                          |
10773  *                            +----> SCSA ---->+
10774  *
10775  *
10776  * This code is based upon the following presumptions:
10777  *
10778  *   - iostart and iodone functions operate on buf(9S) structures. These
10779  *     functions perform the necessary operations on the buf(9S) and pass
10780  *     them along to the next function in the chain by using the macros
10781  *     SD_NEXT_IOSTART() (for iostart side functions) and SD_NEXT_IODONE()
10782  *     (for iodone side functions).
10783  *
10784  *   - The iostart side functions may sleep. The iodone side functions
10785  *     are called under interrupt context and may NOT sleep. Therefore
10786  *     iodone side functions also may not call iostart side functions.
10787  *     (NOTE: iostart side functions should NOT sleep for memory, as
10788  *     this could result in deadlock.)
10789  *
10790  *   - An iostart side function may call its corresponding iodone side
10791  *     function directly (if necessary).
10792  *
10793  *   - In the event of an error, an iostart side function can return a buf(9S)
10794  *     to its caller by calling SD_BEGIN_IODONE() (after setting B_ERROR and
10795  *     b_error in the usual way of course).
10796  *
10797  *   - The taskq mechanism may be used by the iodone side functions to dispatch
10798  *     requests to the iostart side functions. The iostart side functions in
10799  *     this case would be called under the context of a taskq thread, so it's
10800  *     OK for them to block/sleep/spin in this case.
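 *
 *   As a hedged sketch of these conventions (sd_example_* is hypothetical
 *   and not part of the driver; the argument layout is assumed to mirror
 *   the chain functions named in the diagram above), an iostart/iodone
 *   pair would look like:
 *
 *	static void
 *	sd_example_iostart(int index, struct sd_lun *un, struct buf *bp)
 *	{
 *		... transform bp as needed; SD_MUTEX(un) is not held ...
 *		SD_NEXT_IOSTART(index, un, bp);
 *	}
 *
 *	static void
 *	sd_example_iodone(int index, struct sd_lun *un, struct buf *bp)
 *	{
 *		... undo the iostart-side transformation; may not sleep ...
 *		SD_NEXT_IODONE(index, un, bp);
 *	}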
10800 * 10801 * - iostart side functions may allocate "shadow" buf(9S) structs and 10802 * pass them along to the next function in the chain. The corresponding 10803 * iodone side functions must coalesce the "shadow" bufs and return 10804 * the "original" buf to the next higher layer. 10805 * 10806 * - The b_private field of the buf(9S) struct holds a pointer to 10807 * an sd_xbuf struct, which contains information needed to 10808 * construct the scsi_pkt for the command. 10809 * 10810 * - The SD_MUTEX(un) is NOT held across calls to the next layer. Each 10811 * layer must acquire & release the SD_MUTEX(un) as needed. 10812 */ 10813 10814 10815 /* 10816 * Create taskq for all targets in the system. This is created at 10817 * _init(9E) and destroyed at _fini(9E). 10818 * 10819 * Note: here we set the minalloc to a reasonably high number to ensure that 10820 * we will have an adequate supply of task entries available at interrupt time. 10821 * This is used in conjunction with the TASKQ_PREPOPULATE flag in 10822 * sd_taskq_create(). Since we do not want to sleep for allocations at 10823 * interrupt time, set maxalloc equal to minalloc. That way we will just fail 10824 * the command if we ever try to dispatch more than SD_TASKQ_MAXALLOC taskq 10825 * requests at any one instant in time. 10826 */ 10827 #define SD_TASKQ_NUMTHREADS 8 10828 #define SD_TASKQ_MINALLOC 256 10829 #define SD_TASKQ_MAXALLOC 256 10830 10831 static taskq_t *sd_tq = NULL; 10832 _NOTE(SCHEME_PROTECTS_DATA("stable data", sd_tq)) 10833 10834 static int sd_taskq_minalloc = SD_TASKQ_MINALLOC; 10835 static int sd_taskq_maxalloc = SD_TASKQ_MAXALLOC; 10836 10837 /* 10838 * The following task queue is created for the write phase of 10839 * read-modify-write on devices with a non-512-byte block size. 10840 * Limit the number of threads to 1 for now. This number was chosen because 10841 * the queue currently applies only to DVD-RAM and MO drives, for which 10842 * performance is not the main criterion at this stage. 10843 * Note: whether a single taskq could be used instead should be explored. 10844 */ 10845 #define SD_WMR_TASKQ_NUMTHREADS 1 10846 static taskq_t *sd_wmr_tq = NULL; 10847 _NOTE(SCHEME_PROTECTS_DATA("stable data", sd_wmr_tq)) 10848 10849 /* 10850 * Function: sd_taskq_create 10851 * 10852 * Description: Create taskq thread(s) and preallocate task entries 10853 * 10854 * Return Code: None; the created queues are stored in the sd_tq and 10855 * sd_wmr_tq globals. 10856 * Context: Can sleep. Requires blockable context. 10857 * 10858 * Notes: - The taskq() facility currently is NOT part of the DDI. 10859 * (definitely NOT recommended for 3rd-party drivers!) :-) 10860 * - taskq_create() will block for memory; it will also panic 10861 * if it cannot create the requested number of threads. 10862 * - Currently taskq_create() creates threads that cannot be 10863 * swapped.
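 *		- A minimal sketch (illustrative only, not driver code;
 *		  my_task_func and my_arg are hypothetical names) of the
 *		  create/dispatch pattern these queues follow:
 *
 *			taskq_t *tq = taskq_create("my_tq", 1,
 *			    minclsyspri, 4, 4, TASKQ_PREPOPULATE);
 *			(void) taskq_dispatch(tq, my_task_func, my_arg,
 *			    KM_NOSLEEP);
 *			taskq_destroy(tq);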
10864 * - We use TASKQ_PREPOPULATE to ensure we have an adequate 10865 * supply of taskq entries at interrupt time (ie, so that we 10866 * do not have to sleep for memory) 10867 */ 10868 10869 static void 10870 sd_taskq_create(void) 10871 { 10872 char taskq_name[TASKQ_NAMELEN]; 10873 10874 ASSERT(sd_tq == NULL); 10875 ASSERT(sd_wmr_tq == NULL); 10876 10877 (void) snprintf(taskq_name, sizeof (taskq_name), 10878 "%s_drv_taskq", sd_label); 10879 sd_tq = (taskq_create(taskq_name, SD_TASKQ_NUMTHREADS, 10880 (v.v_maxsyspri - 2), sd_taskq_minalloc, sd_taskq_maxalloc, 10881 TASKQ_PREPOPULATE)); 10882 10883 (void) snprintf(taskq_name, sizeof (taskq_name), 10884 "%s_rmw_taskq", sd_label); 10885 sd_wmr_tq = (taskq_create(taskq_name, SD_WMR_TASKQ_NUMTHREADS, 10886 (v.v_maxsyspri - 2), sd_taskq_minalloc, sd_taskq_maxalloc, 10887 TASKQ_PREPOPULATE)); 10888 } 10889 10890 10891 /* 10892 * Function: sd_taskq_delete 10893 * 10894 * Description: Complementary cleanup routine for sd_taskq_create(). 10895 * 10896 * Context: Kernel thread context. 10897 */ 10898 10899 static void 10900 sd_taskq_delete(void) 10901 { 10902 ASSERT(sd_tq != NULL); 10903 ASSERT(sd_wmr_tq != NULL); 10904 taskq_destroy(sd_tq); 10905 taskq_destroy(sd_wmr_tq); 10906 sd_tq = NULL; 10907 sd_wmr_tq = NULL; 10908 } 10909 10910 10911 /* 10912 * Function: sdstrategy 10913 * 10914 * Description: Driver's strategy (9E) entry point function. 10915 * 10916 * Arguments: bp - pointer to buf(9S) 10917 * 10918 * Return Code: Always returns zero 10919 * 10920 * Context: Kernel thread context. 10921 */ 10922 10923 static int 10924 sdstrategy(struct buf *bp) 10925 { 10926 struct sd_lun *un; 10927 10928 un = ddi_get_soft_state(sd_state, SD_GET_INSTANCE_FROM_BUF(bp)); 10929 if (un == NULL) { 10930 bioerror(bp, EIO); 10931 bp->b_resid = bp->b_bcount; 10932 biodone(bp); 10933 return (0); 10934 } 10935 /* As was done in the past, fail new cmds if the state is dumping. */ 10936 if (un->un_state == SD_STATE_DUMPING) { 10937 bioerror(bp, ENXIO); 10938 bp->b_resid = bp->b_bcount; 10939 biodone(bp); 10940 return (0); 10941 } 10942 10943 ASSERT(!mutex_owned(SD_MUTEX(un))); 10944 10945 /* 10946 * Commands may sneak in while we released the mutex in 10947 * DDI_SUSPEND, so we should block new commands. However, old 10948 * commands that are still in the driver at this point should 10949 * still be allowed to drain. 10950 */ 10951 mutex_enter(SD_MUTEX(un)); 10952 /* 10953 * Must wait here if either the device is suspended or 10954 * if its power level is changing. 10955 */ 10956 while ((un->un_state == SD_STATE_SUSPENDED) || 10957 (un->un_state == SD_STATE_PM_CHANGING)) { 10958 cv_wait(&un->un_suspend_cv, SD_MUTEX(un)); 10959 } 10960 10961 un->un_ncmds_in_driver++; 10962 10963 /* 10964 * atapi: Since we are running the CD for now in PIO mode we need to 10965 * call bp_mapin here to avoid bp_mapin being called in interrupt 10966 * context from the HBA's init_pkt routine. 10967 */ 10968 if (un->un_f_cfg_is_atapi == TRUE) { 10969 mutex_exit(SD_MUTEX(un)); 10970 bp_mapin(bp); 10971 mutex_enter(SD_MUTEX(un)); 10972 } 10973 SD_INFO(SD_LOG_IO, un, "sdstrategy: un_ncmds_in_driver = %ld\n", 10974 un->un_ncmds_in_driver); 10975 10976 if (bp->b_flags & B_WRITE) 10977 un->un_f_sync_cache_required = TRUE; 10978 10979 mutex_exit(SD_MUTEX(un)); 10980 10981 /* 10982 * This will (eventually) allocate the sd_xbuf area and 10983 * call sd_xbuf_strategy(). We just want to return the 10984 * result of ddi_xbuf_qstrategy so that we have an 10985 * optimized tail call which saves us a stack frame.
10986 */ 10987 return (ddi_xbuf_qstrategy(bp, un->un_xbuf_attr)); 10988 } 10989 10990 10991 /* 10992 * Function: sd_xbuf_strategy 10993 * 10994 * Description: Function for initiating IO operations via the 10995 * ddi_xbuf_qstrategy() mechanism. 10996 * 10997 * Context: Kernel thread context. 10998 */ 10999 11000 static void 11001 sd_xbuf_strategy(struct buf *bp, ddi_xbuf_t xp, void *arg) 11002 { 11003 struct sd_lun *un = arg; 11004 11005 ASSERT(bp != NULL); 11006 ASSERT(xp != NULL); 11007 ASSERT(un != NULL); 11008 ASSERT(!mutex_owned(SD_MUTEX(un))); 11009 11010 /* 11011 * Initialize the fields in the xbuf and save a pointer to the 11012 * xbuf in bp->b_private. 11013 */ 11014 sd_xbuf_init(un, bp, xp, SD_CHAIN_BUFIO, NULL); 11015 11016 /* Send the buf down the iostart chain */ 11017 SD_BEGIN_IOSTART(((struct sd_xbuf *)xp)->xb_chain_iostart, un, bp); 11018 } 11019 11020 11021 /* 11022 * Function: sd_xbuf_init 11023 * 11024 * Description: Prepare the given sd_xbuf struct for use. 11025 * 11026 * Arguments: un - ptr to softstate 11027 * bp - ptr to associated buf(9S) 11028 * xp - ptr to associated sd_xbuf 11029 * chain_type - IO chain type to use: 11030 * SD_CHAIN_NULL 11031 * SD_CHAIN_BUFIO 11032 * SD_CHAIN_USCSI 11033 * SD_CHAIN_DIRECT 11034 * SD_CHAIN_DIRECT_PRIORITY 11035 * pktinfop - ptr to private data struct for scsi_pkt(9S) 11036 * initialization; may be NULL if none. 11037 * 11038 * Context: Kernel thread context 11039 */ 11040 11041 static void 11042 sd_xbuf_init(struct sd_lun *un, struct buf *bp, struct sd_xbuf *xp, 11043 uchar_t chain_type, void *pktinfop) 11044 { 11045 int index; 11046 11047 ASSERT(un != NULL); 11048 ASSERT(bp != NULL); 11049 ASSERT(xp != NULL); 11050 11051 SD_INFO(SD_LOG_IO, un, "sd_xbuf_init: buf:0x%p chain type:0x%x\n", 11052 bp, chain_type); 11053 11054 xp->xb_un = un; 11055 xp->xb_pktp = NULL; 11056 xp->xb_pktinfo = pktinfop; 11057 xp->xb_private = bp->b_private; 11058 xp->xb_blkno = (daddr_t)bp->b_blkno; 11059 11060 /* 11061 * Set up the iostart and iodone chain indexes in the xbuf, based 11062 * upon the specified chain type to use. 11063 */ 11064 switch (chain_type) { 11065 case SD_CHAIN_NULL: 11066 /* 11067 * Fall through to just use the values for the buf type, even 11068 * though for the NULL chain these values will never be used. 11069 */ 11070 /* FALLTHRU */ 11071 case SD_CHAIN_BUFIO: 11072 index = un->un_buf_chain_type; 11073 break; 11074 case SD_CHAIN_USCSI: 11075 index = un->un_uscsi_chain_type; 11076 break; 11077 case SD_CHAIN_DIRECT: 11078 index = un->un_direct_chain_type; 11079 break; 11080 case SD_CHAIN_DIRECT_PRIORITY: 11081 index = un->un_priority_chain_type; 11082 break; 11083 default: 11084 /* We're really broken if we ever get here... */ 11085 panic("sd_xbuf_init: illegal chain type!"); 11086 /*NOTREACHED*/ 11087 } 11088 11089 xp->xb_chain_iostart = sd_chain_index_map[index].sci_iostart_index; 11090 xp->xb_chain_iodone = sd_chain_index_map[index].sci_iodone_index; 11091 11092 /* 11093 * It might be a bit easier to simply bzero the entire xbuf above, 11094 * but it turns out that since we init a fair number of members anyway, 11095 * we save a fair number of cycles by assigning zero explicitly.
11096 */ 11097 xp->xb_pkt_flags = 0; 11098 xp->xb_dma_resid = 0; 11099 xp->xb_retry_count = 0; 11100 xp->xb_victim_retry_count = 0; 11101 xp->xb_ua_retry_count = 0; 11102 xp->xb_nr_retry_count = 0; 11103 xp->xb_sense_bp = NULL; 11104 xp->xb_sense_status = 0; 11105 xp->xb_sense_state = 0; 11106 xp->xb_sense_resid = 0; 11107 xp->xb_ena = 0; 11108 11109 bp->b_private = xp; 11110 bp->b_flags &= ~(B_DONE | B_ERROR); 11111 bp->b_resid = 0; 11112 bp->av_forw = NULL; 11113 bp->av_back = NULL; 11114 bioerror(bp, 0); 11115 11116 SD_INFO(SD_LOG_IO, un, "sd_xbuf_init: done.\n"); 11117 } 11118 11119 11120 /* 11121 * Function: sd_uscsi_strategy 11122 * 11123 * Description: Wrapper for calling into the USCSI chain via physio(9F) 11124 * 11125 * Arguments: bp - buf struct ptr 11126 * 11127 * Return Code: Always returns 0 11128 * 11129 * Context: Kernel thread context 11130 */ 11131 11132 static int 11133 sd_uscsi_strategy(struct buf *bp) 11134 { 11135 struct sd_lun *un; 11136 struct sd_uscsi_info *uip; 11137 struct sd_xbuf *xp; 11138 uchar_t chain_type; 11139 uchar_t cmd; 11140 11141 ASSERT(bp != NULL); 11142 11143 un = ddi_get_soft_state(sd_state, SD_GET_INSTANCE_FROM_BUF(bp)); 11144 if (un == NULL) { 11145 bioerror(bp, EIO); 11146 bp->b_resid = bp->b_bcount; 11147 biodone(bp); 11148 return (0); 11149 } 11150 11151 ASSERT(!mutex_owned(SD_MUTEX(un))); 11152 11153 SD_TRACE(SD_LOG_IO, un, "sd_uscsi_strategy: entry: buf:0x%p\n", bp); 11154 11155 /* 11156 * A pointer to a struct sd_uscsi_info is expected in bp->b_private 11157 */ 11158 ASSERT(bp->b_private != NULL); 11159 uip = (struct sd_uscsi_info *)bp->b_private; 11160 cmd = ((struct uscsi_cmd *)(uip->ui_cmdp))->uscsi_cdb[0]; 11161 11162 mutex_enter(SD_MUTEX(un)); 11163 /* 11164 * atapi: Since we are running the CD for now in PIO mode we need to 11165 * call bp_mapin here to avoid bp_mapin being called in interrupt 11166 * context from the HBA's init_pkt routine. 11167 */ 11168 if (un->un_f_cfg_is_atapi == TRUE) { 11169 mutex_exit(SD_MUTEX(un)); 11170 bp_mapin(bp); 11171 mutex_enter(SD_MUTEX(un)); 11172 } 11173 un->un_ncmds_in_driver++; 11174 SD_INFO(SD_LOG_IO, un, "sd_uscsi_strategy: un_ncmds_in_driver = %ld\n", 11175 un->un_ncmds_in_driver); 11176 11177 if ((bp->b_flags & B_WRITE) && (bp->b_bcount != 0) && 11178 (cmd != SCMD_MODE_SELECT) && (cmd != SCMD_MODE_SELECT_G1)) 11179 un->un_f_sync_cache_required = TRUE; 11180 11181 mutex_exit(SD_MUTEX(un)); 11182 11183 switch (uip->ui_flags) { 11184 case SD_PATH_DIRECT: 11185 chain_type = SD_CHAIN_DIRECT; 11186 break; 11187 case SD_PATH_DIRECT_PRIORITY: 11188 chain_type = SD_CHAIN_DIRECT_PRIORITY; 11189 break; 11190 default: 11191 chain_type = SD_CHAIN_USCSI; 11192 break; 11193 } 11194 11195 /* 11196 * We may allocate an extra buf for external USCSI commands. If the 11197 * application asks for more than 20 bytes of sense data via USCSI, 11198 * the SCSA layer will allocate a 252-byte sense buf for that command.
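 *
 * For example (illustrative only): a request for more sense data
 * than the default means the xbuf below is allocated with its
 * trailing sense area grown from SENSE_LENGTH bytes to
 * MAX_SENSE_LENGTH bytes, i.e. an allocation of
 *
 *	sizeof (struct sd_xbuf) - SENSE_LENGTH + MAX_SENSE_LENGTH
 *
 * bytes; otherwise the default-sized sd_xbuf suffices.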
11199 */ 11200 if (((struct uscsi_cmd *)(uip->ui_cmdp))->uscsi_rqlen > 11201 SENSE_LENGTH) { 11202 xp = kmem_zalloc(sizeof (struct sd_xbuf) - SENSE_LENGTH + 11203 MAX_SENSE_LENGTH, KM_SLEEP); 11204 } else { 11205 xp = kmem_zalloc(sizeof (struct sd_xbuf), KM_SLEEP); 11206 } 11207 11208 sd_xbuf_init(un, bp, xp, chain_type, uip->ui_cmdp); 11209 11210 /* Use the index obtained within xbuf_init */ 11211 SD_BEGIN_IOSTART(xp->xb_chain_iostart, un, bp); 11212 11213 SD_TRACE(SD_LOG_IO, un, "sd_uscsi_strategy: exit: buf:0x%p\n", bp); 11214 11215 return (0); 11216 } 11217 11218 /* 11219 * Function: sd_send_scsi_cmd 11220 * 11221 * Description: Runs a USCSI command for user (when called through sdioctl), 11222 * or for the driver 11223 * 11224 * Arguments: dev - the dev_t for the device 11225 * incmd - ptr to a valid uscsi_cmd struct 11226 * flag - bit flag, indicating open settings, 32/64 bit type 11227 * dataspace - UIO_USERSPACE or UIO_SYSSPACE 11228 * path_flag - SD_PATH_DIRECT to use the USCSI "direct" chain and 11229 * the normal command waitq, or SD_PATH_DIRECT_PRIORITY 11230 * to use the USCSI "direct" chain and bypass the normal 11231 * command waitq. 11232 * 11233 * Return Code: 0 - successful completion of the given command 11234 * EIO - scsi_uscsi_handle_command() failed 11235 * ENXIO - soft state not found for specified dev 11236 * EINVAL 11237 * EFAULT - copyin/copyout error 11238 * return code of scsi_uscsi_handle_command(): 11239 * EIO 11240 * ENXIO 11241 * EACCES 11242 * 11243 * Context: Waits for command to complete. Can sleep. 11244 */ 11245 11246 static int 11247 sd_send_scsi_cmd(dev_t dev, struct uscsi_cmd *incmd, int flag, 11248 enum uio_seg dataspace, int path_flag) 11249 { 11250 struct sd_lun *un; 11251 sd_ssc_t *ssc; 11252 int rval; 11253 11254 un = ddi_get_soft_state(sd_state, SDUNIT(dev)); 11255 if (un == NULL) { 11256 return (ENXIO); 11257 } 11258 11259 /* 11260 * Using sd_ssc_send to handle uscsi cmd 11261 */ 11262 ssc = sd_ssc_init(un); 11263 rval = sd_ssc_send(ssc, incmd, flag, dataspace, path_flag); 11264 sd_ssc_fini(ssc); 11265 11266 return (rval); 11267 } 11268 11269 /* 11270 * Function: sd_ssc_init 11271 * 11272 * Description: USCSI callers use this function to initialize the necessary 11273 * fields, such as the uscsi_cmd and sd_uscsi_info structs. 11274 * 11275 * The return value of sd_send_scsi_cmd is treated as a 11276 * fault in various conditions. Even when it is non-zero, some 11277 * callers may ignore the return value. That is to say, we can 11278 * not make an accurate assessment in sdintr, since a 11279 * command failing in sdintr does not mean the caller of 11280 * sd_send_scsi_cmd will treat it as a real failure. 11281 * 11282 * To avoid printing too many error logs for a failed uscsi 11283 * packet that the caller may not treat as a failure, 11284 * sd stays silent while handling all uscsi commands. 11285 * 11286 * However, during detach->attach and attach-open, for some 11287 * types of problems the driver should provide information 11288 * about the problem encountered. Devices use USCSI_SILENT, 11289 * which suppresses all driver information; the result is that 11290 * no information about the problem is available. Being 11291 * completely silent during this time is inappropriate. The 11292 * driver needs a more selective filter than USCSI_SILENT, so 11293 * that information related to faults is provided.
11294 * 11295 * To make an accurate assessment, the caller of 11296 * sd_send_scsi_USCSI_CMD should take ownership and 11297 * gather the information necessary to print error messages. 11298 * 11299 * If we want to print the necessary info for a uscsi command, 11300 * we need to keep the uscsi_cmd and sd_uscsi_info until we can 11301 * make the assessment. We use sd_ssc_init to allocate the 11302 * structs needed for sending a uscsi command, and we are also 11303 * responsible for freeing that memory by calling 11304 * sd_ssc_fini. 11305 * 11306 * The calling sequence will look like: 11307 * sd_ssc_init-> 11308 * 11309 * ... 11310 * 11311 * sd_send_scsi_USCSI_CMD-> 11312 * sd_ssc_send-> - - - sdintr 11313 * ... 11314 * 11315 * if we think the return value should be treated as a 11316 * failure, we make the assessment here and print what is 11317 * necessary by retrieving the uscsi_cmd and sd_uscsi_info 11318 * 11319 * ... 11320 * 11321 * sd_ssc_fini 11322 * 11323 * 11324 * Arguments: un - pointer to driver soft state (unit) structure for this 11325 * target. 11326 * 11327 * Return code: sd_ssc_t - pointer to the allocated sd_ssc_t struct; it 11328 * contains the uscsi_cmd and sd_uscsi_info. 11329 * NULL - if memory for the sd_ssc_t struct cannot be allocated 11330 * 11331 * Context: Kernel Thread. 11332 */ 11333 static sd_ssc_t * 11334 sd_ssc_init(struct sd_lun *un) 11335 { 11336 sd_ssc_t *ssc; 11337 struct uscsi_cmd *ucmdp; 11338 struct sd_uscsi_info *uip; 11339 11340 ASSERT(un != NULL); 11341 ASSERT(!mutex_owned(SD_MUTEX(un))); 11342 11343 /* 11344 * Allocate sd_ssc_t structure 11345 */ 11346 ssc = kmem_zalloc(sizeof (sd_ssc_t), KM_SLEEP); 11347 11348 /* 11349 * Allocate uscsi_cmd by calling scsi_uscsi_alloc common routine 11350 */ 11351 ucmdp = scsi_uscsi_alloc(); 11352 11353 /* 11354 * Allocate sd_uscsi_info structure 11355 */ 11356 uip = kmem_zalloc(sizeof (struct sd_uscsi_info), KM_SLEEP); 11357 11358 ssc->ssc_uscsi_cmd = ucmdp; 11359 ssc->ssc_uscsi_info = uip; 11360 ssc->ssc_un = un; 11361 11362 return (ssc); 11363 } 11364 11365 /* 11366 * Function: sd_ssc_fini 11367 * 11368 * Description: Free the sd_ssc_t struct and the structures hanging off it. 11369 * 11370 * Arguments: ssc - struct pointer of sd_ssc_t. 11371 */ 11372 static void 11373 sd_ssc_fini(sd_ssc_t *ssc) 11374 { 11375 scsi_uscsi_free(ssc->ssc_uscsi_cmd); 11376 11377 if (ssc->ssc_uscsi_info != NULL) { 11378 kmem_free(ssc->ssc_uscsi_info, sizeof (struct sd_uscsi_info)); 11379 ssc->ssc_uscsi_info = NULL; 11380 } 11381 11382 kmem_free(ssc, sizeof (sd_ssc_t)); 11383 ssc = NULL; 11384 } 11385 11386 /* 11387 * Function: sd_ssc_send 11388 * 11389 * Description: Runs a USCSI command for user when called through sdioctl, 11390 * or for the driver. 11391 * 11392 * Arguments: ssc - the sd_ssc_t struct that carries the uscsi_cmd and 11393 * sd_uscsi_info. 11394 * incmd - ptr to a valid uscsi_cmd struct 11395 * flag - bit flag, indicating open settings, 32/64 bit type 11396 * dataspace - UIO_USERSPACE or UIO_SYSSPACE 11397 * path_flag - SD_PATH_DIRECT to use the USCSI "direct" chain and 11398 * the normal command waitq, or SD_PATH_DIRECT_PRIORITY 11399 * to use the USCSI "direct" chain and bypass the normal 11400 * command waitq.
11401 * 11402 * Return Code: 0 - successful completion of the given command 11403 * EIO - scsi_uscsi_handle_command() failed 11404 * ENXIO - soft state not found for specified dev 11405 * EINVAL 11406 * EFAULT - copyin/copyout error 11407 * return code of scsi_uscsi_handle_command(): 11408 * EIO 11409 * ENXIO 11410 * EACCES 11411 * 11412 * Context: Kernel Thread; 11413 * Waits for command to complete. Can sleep. 11414 */ 11415 static int 11416 sd_ssc_send(sd_ssc_t *ssc, struct uscsi_cmd *incmd, int flag, 11417 enum uio_seg dataspace, int path_flag) 11418 { 11419 struct sd_uscsi_info *uip; 11420 struct uscsi_cmd *uscmd = ssc->ssc_uscsi_cmd; 11421 struct sd_lun *un; 11422 dev_t dev; 11423 11424 int format = 0; 11425 int rval; 11426 11427 11428 ASSERT(ssc != NULL); 11429 un = ssc->ssc_un; 11430 ASSERT(un != NULL); 11431 ASSERT(!mutex_owned(SD_MUTEX(un))); 11432 ASSERT(!(ssc->ssc_flags & SSC_FLAGS_NEED_ASSESSMENT)); 11433 /* 11434 * We need to make sure sd_ssc_send is always followed by 11435 * sd_ssc_assessment, to avoid missing any point of telemetry. 11436 */ 11437 ssc->ssc_flags |= SSC_FLAGS_NEED_ASSESSMENT; 11438 11439 if (uscmd == NULL) { 11440 return (ENXIO); 11441 } 11442 11443 11444 #ifdef SDDEBUG 11445 switch (dataspace) { 11446 case UIO_USERSPACE: 11447 SD_TRACE(SD_LOG_IO, un, 11448 "sd_ssc_send: entry: un:0x%p UIO_USERSPACE\n", un); 11449 break; 11450 case UIO_SYSSPACE: 11451 SD_TRACE(SD_LOG_IO, un, 11452 "sd_ssc_send: entry: un:0x%p UIO_SYSSPACE\n", un); 11453 break; 11454 default: 11455 SD_TRACE(SD_LOG_IO, un, 11456 "sd_ssc_send: entry: un:0x%p UNEXPECTED SPACE\n", un); 11457 break; 11458 } 11459 #endif 11460 11461 rval = scsi_uscsi_copyin((intptr_t)incmd, flag, 11462 SD_ADDRESS(un), &uscmd); 11463 if (rval != 0) { 11464 SD_TRACE(SD_LOG_IO, un, "sd_ssc_send: " 11465 "scsi_uscsi_copyin failed\n"); 11466 return (rval); 11467 } 11468 11469 if ((uscmd->uscsi_cdb != NULL) && 11470 (uscmd->uscsi_cdb[0] == SCMD_FORMAT)) { 11471 mutex_enter(SD_MUTEX(un)); 11472 un->un_f_format_in_progress = TRUE; 11473 mutex_exit(SD_MUTEX(un)); 11474 format = 1; 11475 } 11476 11477 /* 11478 * Allocate an sd_uscsi_info struct and fill it with the info 11479 * needed by sd_initpkt_for_uscsi(). Then put the pointer into 11480 * b_private in the buf for sd_initpkt_for_uscsi(). Note that 11481 * since we allocate the buf here in this function, we do not 11482 * need to preserve the prior contents of b_private. 11483 * The sd_uscsi_info struct is also used by sd_uscsi_strategy() 11484 */ 11485 uip = ssc->ssc_uscsi_info; 11486 uip->ui_flags = path_flag; 11487 uip->ui_cmdp = uscmd; 11488 11489 /* 11490 * Commands sent with priority are intended for error recovery 11491 * situations, and do not have retries performed.
11492 */ 11493 if (path_flag == SD_PATH_DIRECT_PRIORITY) { 11494 uscmd->uscsi_flags |= USCSI_DIAGNOSE; 11495 } 11496 uscmd->uscsi_flags &= ~USCSI_NOINTR; 11497 11498 dev = SD_GET_DEV(un); 11499 rval = scsi_uscsi_handle_cmd(dev, dataspace, uscmd, 11500 sd_uscsi_strategy, NULL, uip); 11501 11502 /* 11503 * Mark ssc_flags right after handle_cmd to make sure 11504 * the uscsi command has been sent 11505 */ 11506 ssc->ssc_flags |= SSC_FLAGS_CMD_ISSUED; 11507 11508 #ifdef SDDEBUG 11509 SD_INFO(SD_LOG_IO, un, "sd_ssc_send: " 11510 "uscsi_status: 0x%02x uscsi_resid:0x%x\n", 11511 uscmd->uscsi_status, uscmd->uscsi_resid); 11512 if (uscmd->uscsi_bufaddr != NULL) { 11513 SD_INFO(SD_LOG_IO, un, "sd_ssc_send: " 11514 "uscmd->uscsi_bufaddr: 0x%p uscmd->uscsi_buflen:%d\n", 11515 uscmd->uscsi_bufaddr, uscmd->uscsi_buflen); 11516 if (dataspace == UIO_SYSSPACE) { 11517 SD_DUMP_MEMORY(un, SD_LOG_IO, 11518 "data", (uchar_t *)uscmd->uscsi_bufaddr, 11519 uscmd->uscsi_buflen, SD_LOG_HEX); 11520 } 11521 } 11522 #endif 11523 11524 if (format == 1) { 11525 mutex_enter(SD_MUTEX(un)); 11526 un->un_f_format_in_progress = FALSE; 11527 mutex_exit(SD_MUTEX(un)); 11528 } 11529 11530 (void) scsi_uscsi_copyout((intptr_t)incmd, uscmd); 11531 11532 return (rval); 11533 } 11534 11535 /* 11536 * Function: sd_ssc_print 11537 * 11538 * Description: Print information available to the console. 11539 * 11540 * Arguments: ssc - the sd_ssc_t struct that carries the uscsi_cmd and 11541 * sd_uscsi_info. 11542 * sd_severity - log level. 11543 * Context: Kernel thread or interrupt context. 11544 */ 11545 static void 11546 sd_ssc_print(sd_ssc_t *ssc, int sd_severity) 11547 { 11548 struct uscsi_cmd *ucmdp; 11549 struct scsi_device *devp; 11550 dev_info_t *devinfo; 11551 uchar_t *sensep; 11552 int senlen; 11553 union scsi_cdb *cdbp; 11554 uchar_t com; 11555 extern struct scsi_key_strings scsi_cmds[]; 11556 11557 ASSERT(ssc != NULL); 11558 11559 ucmdp = ssc->ssc_uscsi_cmd; 11560 devp = SD_SCSI_DEVP(ssc->ssc_un); 11561 devinfo = SD_DEVINFO(ssc->ssc_un); 11562 ASSERT(ucmdp != NULL); 11563 ASSERT(devp != NULL); 11564 ASSERT(devinfo != NULL); 11565 sensep = (uint8_t *)ucmdp->uscsi_rqbuf; 11566 senlen = ucmdp->uscsi_rqlen - ucmdp->uscsi_rqresid; 11567 cdbp = (union scsi_cdb *)ucmdp->uscsi_cdb; 11568 11569 /* In certain cases (like DOORLOCK), the cdb could be NULL. */ 11570 if (cdbp == NULL) 11571 return; 11572 /* We don't print a log if no sense data is available. */ 11573 if (senlen == 0) 11574 sensep = NULL; 11575 com = cdbp->scc_cmd; 11576 scsi_generic_errmsg(devp, sd_label, sd_severity, 0, 0, com, 11577 scsi_cmds, sensep, ssc->ssc_un->un_additional_codes, NULL); 11578 } 11579 11580 /* 11581 * Function: sd_ssc_assessment 11582 * 11583 * Description: We use this function to make an assessment at the point 11584 * where the SD driver may encounter a potential error. 11585 * 11586 * Arguments: ssc - the sd_ssc_t struct that carries the uscsi_cmd and 11587 * sd_uscsi_info. 11588 * tp_assess - a hint of strategy for ereport posting. 11589 * Possible values of tp_assess include: 11590 * SD_FMT_IGNORE - we don't post any ereport because we're 11591 * sure that it is ok to ignore the underlying problems. 11592 * SD_FMT_IGNORE_COMPROMISE - we don't post any ereport for now 11593 * but it might not be correct to ignore the underlying hardware 11594 * error. 11595 * SD_FMT_STATUS_CHECK - we will post an ereport with the 11596 * payload driver-assessment of value "fail" or 11597 * "fatal" (depending on what information we have here).
This 11598 * assessment value is usually set when the SD driver thinks a 11599 * potential error has occurred (typically, when the return value 11600 * of the SCSI command is EIO). 11601 * SD_FMT_STANDARD - we will post an ereport with the payload 11602 * driver-assessment of value "info". This assessment value is 11603 * set when the SCSI command returned successfully and with 11604 * sense data sent back. 11605 * 11606 * Context: Kernel thread. 11607 */ 11608 static void 11609 sd_ssc_assessment(sd_ssc_t *ssc, enum sd_type_assessment tp_assess) 11610 { 11611 int senlen = 0; 11612 struct uscsi_cmd *ucmdp = NULL; 11613 struct sd_lun *un; 11614 11615 ASSERT(ssc != NULL); 11616 ASSERT(ssc->ssc_flags & SSC_FLAGS_NEED_ASSESSMENT); 11617 11618 ssc->ssc_flags &= ~SSC_FLAGS_NEED_ASSESSMENT; 11619 un = ssc->ssc_un; 11620 ASSERT(un != NULL); 11621 11622 /* 11623 * We don't handle CD-ROM or removable media here. 11624 */ 11625 if (ISCD(un) || un->un_f_has_removable_media) { 11626 ssc->ssc_flags &= ~SSC_FLAGS_CMD_ISSUED; 11627 ssc->ssc_flags &= ~SSC_FLAGS_INVALID_DATA; 11628 return; 11629 } 11630 11631 /* 11632 * Only handle an issued command which is waiting for assessment. 11633 */ 11634 if (!(ssc->ssc_flags & SSC_FLAGS_CMD_ISSUED)) { 11635 sd_ssc_print(ssc, SCSI_ERR_INFO); 11636 return; 11637 } else 11638 ssc->ssc_flags &= ~SSC_FLAGS_CMD_ISSUED; 11639 11640 ucmdp = ssc->ssc_uscsi_cmd; 11641 ASSERT(ucmdp != NULL); 11642 11643 /* 11644 * We will not deal with non-retryable commands here. 11645 */ 11646 if (ucmdp->uscsi_flags & USCSI_DIAGNOSE) { 11647 ssc->ssc_flags &= ~SSC_FLAGS_INVALID_DATA; 11648 return; 11649 } 11650 11651 switch (tp_assess) { 11652 case SD_FMT_IGNORE: 11653 case SD_FMT_IGNORE_COMPROMISE: 11654 ssc->ssc_flags &= ~SSC_FLAGS_INVALID_DATA; 11655 break; 11656 case SD_FMT_STATUS_CHECK: 11657 /* 11658 * For a failed command (including a succeeded command 11659 * with invalid data sent back). 11660 */ 11661 sd_ssc_post(ssc, SD_FM_DRV_FATAL); 11662 break; 11663 case SD_FMT_STANDARD: 11664 /* 11665 * Always for succeeded commands, possibly with sense 11666 * data sent back. 11667 * Limitation: 11668 * We can only handle a succeeded command with sense 11669 * data sent back when auto-request-sense is enabled. 11670 */ 11671 senlen = ssc->ssc_uscsi_cmd->uscsi_rqlen - 11672 ssc->ssc_uscsi_cmd->uscsi_rqresid; 11673 if ((ssc->ssc_uscsi_info->ui_pkt_state & STATE_ARQ_DONE) && 11674 (un->un_f_arq_enabled == TRUE) && 11675 senlen > 0 && 11676 ssc->ssc_uscsi_cmd->uscsi_rqbuf != NULL) { 11677 sd_ssc_post(ssc, SD_FM_DRV_NOTICE); 11678 } 11679 break; 11680 default: 11681 /* 11682 * This should be a software error. 11683 */ 11684 scsi_log(SD_DEVINFO(un), sd_label, CE_CONT, 11685 "sd_ssc_assessment got wrong " 11686 "sd_type_assessment %d\n", tp_assess); 11687 break; 11688 } 11689 } 11690 11691 /* 11692 * Function: sd_ssc_post 11693 * 11694 * Description: 1. Read the driver property to get the fm-scsi-log flag. 11695 * 2. Print a log message if fm-scsi-log is non-zero. 11696 * 3. Call sd_ssc_ereport_post to post an ereport if possible. 11697 * 11698 * Context: May be called from kernel thread or interrupt context.
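 *
 * An illustrative driver.conf fragment (assumed syntax, not a
 * shipped configuration) that would enable the log output described
 * above:
 *
 *	fm-scsi-log=1;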
11699 */ 11700 static void 11701 sd_ssc_post(sd_ssc_t *ssc, enum sd_driver_assessment sd_assess) 11702 { 11703 struct sd_lun *un; 11704 int fm_scsi_log = 0; 11705 int sd_severity; 11706 11707 ASSERT(ssc != NULL); 11708 un = ssc->ssc_un; 11709 ASSERT(un != NULL); 11710 11711 fm_scsi_log = ddi_prop_get_int(DDI_DEV_T_ANY, SD_DEVINFO(un), 11712 DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, "fm-scsi-log", 0); 11713 11714 if (fm_scsi_log != 0) { 11715 switch (sd_assess) { 11716 case SD_FM_DRV_FATAL: 11717 sd_severity = SCSI_ERR_FATAL; 11718 break; 11719 case SD_FM_DRV_RECOVERY: 11720 sd_severity = SCSI_ERR_RECOVERED; 11721 break; 11722 case SD_FM_DRV_RETRY: 11723 sd_severity = SCSI_ERR_RETRYABLE; 11724 break; 11725 case SD_FM_DRV_NOTICE: 11726 sd_severity = SCSI_ERR_INFO; 11727 break; 11728 default: 11729 sd_severity = SCSI_ERR_UNKNOWN; 11730 } 11731 /* print log */ 11732 sd_ssc_print(ssc, sd_severity); 11733 } 11734 11735 /* always post ereport */ 11736 sd_ssc_ereport_post(ssc, sd_assess); 11737 } 11738 11739 /* 11740 * Function: sd_ssc_set_info 11741 * 11742 * Description: Mark ssc_flags and set ssc_info, which becomes the 11743 * payload of the uderr ereport. This function causes 11744 * sd_ssc_ereport_post to post the uderr ereport only. 11745 * 11746 * Context: Kernel thread or interrupt context 11747 */ 11748 static void 11749 sd_ssc_set_info(sd_ssc_t *ssc, int ssc_flags, const char *fmt, ...) 11750 { 11751 va_list ap; 11752 11753 ASSERT(ssc != NULL); 11754 11755 ssc->ssc_flags |= ssc_flags; 11756 va_start(ap, fmt); 11757 (void) vsnprintf(ssc->ssc_info, sizeof (ssc->ssc_info), fmt, ap); 11758 va_end(ap); 11759 } 11760 11761 /* 11762 * Function: sd_buf_iodone 11763 * 11764 * Description: Frees the sd_xbuf & returns the buf to its originator. 11765 * 11766 * Context: May be called from interrupt context. 11767 */ 11768 /* ARGSUSED */ 11769 static void 11770 sd_buf_iodone(int index, struct sd_lun *un, struct buf *bp) 11771 { 11772 struct sd_xbuf *xp; 11773 11774 ASSERT(un != NULL); 11775 ASSERT(bp != NULL); 11776 ASSERT(!mutex_owned(SD_MUTEX(un))); 11777 11778 SD_TRACE(SD_LOG_IO_CORE, un, "sd_buf_iodone: entry.\n"); 11779 11780 xp = SD_GET_XBUF(bp); 11781 ASSERT(xp != NULL); 11782 11783 mutex_enter(SD_MUTEX(un)); 11784 11785 /* 11786 * Record the time at which the cmd completed. 11787 * This is used to determine whether the device has been 11788 * idle long enough to be considered idle by the PM framework. 11789 * This lowers the overhead, and therefore improves 11790 * performance, per I/O operation. 11791 */ 11792 un->un_pm_idle_time = ddi_get_time(); 11793 11794 un->un_ncmds_in_driver--; 11795 ASSERT(un->un_ncmds_in_driver >= 0); 11796 SD_INFO(SD_LOG_IO, un, "sd_buf_iodone: un_ncmds_in_driver = %ld\n", 11797 un->un_ncmds_in_driver); 11798 11799 mutex_exit(SD_MUTEX(un)); 11800 11801 ddi_xbuf_done(bp, un->un_xbuf_attr); /* xbuf is gone after this */ 11802 biodone(bp); /* bp is gone after this */ 11803 11804 SD_TRACE(SD_LOG_IO_CORE, un, "sd_buf_iodone: exit.\n"); 11805 } 11806 11807 11808 /* 11809 * Function: sd_uscsi_iodone 11810 * 11811 * Description: Frees the sd_xbuf & returns the buf to its originator. 11812 * 11813 * Context: May be called from interrupt context.
11814 */ 11815 /* ARGSUSED */ 11816 static void 11817 sd_uscsi_iodone(int index, struct sd_lun *un, struct buf *bp) 11818 { 11819 struct sd_xbuf *xp; 11820 11821 ASSERT(un != NULL); 11822 ASSERT(bp != NULL); 11823 11824 xp = SD_GET_XBUF(bp); 11825 ASSERT(xp != NULL); 11826 ASSERT(!mutex_owned(SD_MUTEX(un))); 11827 11828 SD_INFO(SD_LOG_IO, un, "sd_uscsi_iodone: entry.\n"); 11829 11830 bp->b_private = xp->xb_private; 11831 11832 mutex_enter(SD_MUTEX(un)); 11833 11834 /* 11835 * Record the time at which the cmd completed. 11836 * This is used to determine whether the device has been 11837 * idle long enough to be considered idle by the PM framework. 11838 * This lowers the overhead, and therefore improves 11839 * performance, per I/O operation. 11840 */ 11841 un->un_pm_idle_time = ddi_get_time(); 11842 11843 un->un_ncmds_in_driver--; 11844 ASSERT(un->un_ncmds_in_driver >= 0); 11845 SD_INFO(SD_LOG_IO, un, "sd_uscsi_iodone: un_ncmds_in_driver = %ld\n", 11846 un->un_ncmds_in_driver); 11847 11848 mutex_exit(SD_MUTEX(un)); 11849 11850 if (((struct uscsi_cmd *)(xp->xb_pktinfo))->uscsi_rqlen > 11851 SENSE_LENGTH) { 11852 kmem_free(xp, sizeof (struct sd_xbuf) - SENSE_LENGTH + 11853 MAX_SENSE_LENGTH); 11854 } else { 11855 kmem_free(xp, sizeof (struct sd_xbuf)); 11856 } 11857 11858 biodone(bp); 11859 11860 SD_INFO(SD_LOG_IO, un, "sd_uscsi_iodone: exit.\n"); 11861 } 11862 11863 11864 /* 11865 * Function: sd_mapblockaddr_iostart 11866 * 11867 * Description: Verify request lies within the partition limits for 11868 * the indicated minor device. Issue "overrun" buf if 11869 * request would exceed partition range. Converts 11870 * partition-relative block address to absolute. 11871 * 11872 * Context: Can sleep 11873 * 11874 * Issues: This follows what the old code did, in terms of accessing 11875 * some of the partition info in the unit struct without holding 11876 * the mutex. This is a general issue if the partition info 11877 * can be altered while IO is in progress... as soon as we send 11878 * a buf, its partitioning can be invalid before it gets to the 11879 * device. Probably the right fix is to move partitioning out 11880 * of the driver entirely. 11881 */ 11882 11883 static void 11884 sd_mapblockaddr_iostart(int index, struct sd_lun *un, struct buf *bp) 11885 { 11886 diskaddr_t nblocks; /* #blocks in the given partition */ 11887 daddr_t blocknum; /* Block number specified by the buf */ 11888 size_t requested_nblocks; 11889 size_t available_nblocks; 11890 int partition; 11891 diskaddr_t partition_offset; 11892 struct sd_xbuf *xp; 11893 11894 ASSERT(un != NULL); 11895 ASSERT(bp != NULL); 11896 ASSERT(!mutex_owned(SD_MUTEX(un))); 11897 11898 SD_TRACE(SD_LOG_IO_PARTITION, un, 11899 "sd_mapblockaddr_iostart: entry: buf:0x%p\n", bp); 11900 11901 xp = SD_GET_XBUF(bp); 11902 ASSERT(xp != NULL); 11903 11904 /* 11905 * If the geometry is not indicated as valid, attempt to access 11906 * the unit & verify the geometry/label. This can be the case for 11907 * removable-media devices, or if the device was opened in 11908 * NDELAY/NONBLOCK mode.
11909 */ 11910 partition = SDPART(bp->b_edev); 11911 11912 if (!SD_IS_VALID_LABEL(un)) { 11913 sd_ssc_t *ssc; 11914 /* 11915 * Initialize sd_ssc_t for internal uscsi commands. 11916 * To avoid a potential performance issue, we allocate 11917 * this memory only when the label is invalid. 11918 */ 11919 ssc = sd_ssc_init(un); 11920 11921 if (sd_ready_and_valid(ssc, partition) != SD_READY_VALID) { 11922 /* 11923 * For removable devices it is possible to start an 11924 * I/O without a media by opening the device in nodelay 11925 * mode. Also for writable CDs there can be many 11926 * scenarios where there is no geometry yet but volume 11927 * manager is trying to issue a read() just because 11928 * it can see TOC on the CD. So do not print a message 11929 * for removables. 11930 */ 11931 if (!un->un_f_has_removable_media) { 11932 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 11933 "i/o to invalid geometry\n"); 11934 } 11935 bioerror(bp, EIO); 11936 bp->b_resid = bp->b_bcount; 11937 SD_BEGIN_IODONE(index, un, bp); 11938 11939 sd_ssc_fini(ssc); 11940 return; 11941 } 11942 sd_ssc_fini(ssc); 11943 } 11944 11945 nblocks = 0; 11946 (void) cmlb_partinfo(un->un_cmlbhandle, partition, 11947 &nblocks, &partition_offset, NULL, NULL, (void *)SD_PATH_DIRECT); 11948 11949 /* 11950 * blocknum is the starting block number of the request. At this 11951 * point it is still relative to the start of the minor device. 11952 */ 11953 blocknum = xp->xb_blkno; 11954 11955 /* 11956 * Legacy: If the starting block number is one past the last block 11957 * in the partition, do not set B_ERROR in the buf. 11958 */ 11959 if (blocknum == nblocks) { 11960 goto error_exit; 11961 } 11962 11963 /* 11964 * Confirm that the first block of the request lies within the 11965 * partition limits. Also the requested number of bytes must be 11966 * a multiple of the system block size. 11967 */ 11968 if ((blocknum < 0) || (blocknum >= nblocks) || 11969 ((bp->b_bcount & (un->un_sys_blocksize - 1)) != 0)) { 11970 bp->b_flags |= B_ERROR; 11971 goto error_exit; 11972 } 11973 11974 /* 11975 * If the requested # blocks exceeds the available # blocks, that 11976 * is an overrun of the partition. 11977 */ 11978 requested_nblocks = SD_BYTES2SYSBLOCKS(un, bp->b_bcount); 11979 available_nblocks = (size_t)(nblocks - blocknum); 11980 ASSERT(nblocks >= blocknum); 11981 11982 if (requested_nblocks > available_nblocks) { 11983 /* 11984 * Allocate an "overrun" buf to allow the request to proceed 11985 * for the amount of space available in the partition. The 11986 * amount not transferred will be added into the b_resid 11987 * when the operation is complete. The overrun buf 11988 * replaces the original buf here, and the original buf 11989 * is saved inside the overrun buf, for later use. 11990 */ 11991 size_t resid = SD_SYSBLOCKS2BYTES(un, 11992 (offset_t)(requested_nblocks - available_nblocks)); 11993 size_t count = bp->b_bcount - resid; 11994 /* 11995 * Note: count is an unsigned quantity, thus it will NEVER 11996 * be less than 0, so ASSERT that the original values are 11997 * correct. 11998 */ 11999 ASSERT(bp->b_bcount >= resid); 12000 12001 bp = sd_bioclone_alloc(bp, count, blocknum, 12002 (int (*)(struct buf *)) sd_mapblockaddr_iodone); 12003 xp = SD_GET_XBUF(bp); /* Update for 'new' bp! */ 12004 ASSERT(xp != NULL); 12005 } 12006 12007 /* At this point there should be no residual for this buf. */ 12008 ASSERT(bp->b_resid == 0); 12009 12010 /* Convert the block number to an absolute address.
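 *
 * For example (illustrative numbers): a request for block 100 of
 * a partition whose partition_offset is 2048 leaves here with
 * xb_blkno = 100 + 2048 = 2148, an absolute device address.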
*/ 12011 xp->xb_blkno += partition_offset; 12012 12013 SD_NEXT_IOSTART(index, un, bp); 12014 12015 SD_TRACE(SD_LOG_IO_PARTITION, un, 12016 "sd_mapblockaddr_iostart: exit 0: buf:0x%p\n", bp); 12017 12018 return; 12019 12020 error_exit: 12021 bp->b_resid = bp->b_bcount; 12022 SD_BEGIN_IODONE(index, un, bp); 12023 SD_TRACE(SD_LOG_IO_PARTITION, un, 12024 "sd_mapblockaddr_iostart: exit 1: buf:0x%p\n", bp); 12025 } 12026 12027 12028 /* 12029 * Function: sd_mapblockaddr_iodone 12030 * 12031 * Description: Completion-side processing for partition management. 12032 * 12033 * Context: May be called under interrupt context 12034 */ 12035 12036 static void 12037 sd_mapblockaddr_iodone(int index, struct sd_lun *un, struct buf *bp) 12038 { 12039 /* int partition; */ /* Not used, see below. */ 12040 ASSERT(un != NULL); 12041 ASSERT(bp != NULL); 12042 ASSERT(!mutex_owned(SD_MUTEX(un))); 12043 12044 SD_TRACE(SD_LOG_IO_PARTITION, un, 12045 "sd_mapblockaddr_iodone: entry: buf:0x%p\n", bp); 12046 12047 if (bp->b_iodone == (int (*)(struct buf *)) sd_mapblockaddr_iodone) { 12048 /* 12049 * We have an "overrun" buf to deal with... 12050 */ 12051 struct sd_xbuf *xp; 12052 struct buf *obp; /* ptr to the original buf */ 12053 12054 xp = SD_GET_XBUF(bp); 12055 ASSERT(xp != NULL); 12056 12057 /* Retrieve the pointer to the original buf */ 12058 obp = (struct buf *)xp->xb_private; 12059 ASSERT(obp != NULL); 12060 12061 obp->b_resid = obp->b_bcount - (bp->b_bcount - bp->b_resid); 12062 bioerror(obp, bp->b_error); 12063 12064 sd_bioclone_free(bp); 12065 12066 /* 12067 * Get back the original buf. 12068 * Note that since the restoration of xb_blkno below 12069 * was removed, the sd_xbuf is not needed. 12070 */ 12071 bp = obp; 12072 /* 12073 * xp = SD_GET_XBUF(bp); 12074 * ASSERT(xp != NULL); 12075 */ 12076 } 12077 12078 /* 12079 * Convert xp->xb_blkno back to a minor-device relative value. 12080 * Note: this has been commented out, as it is not needed in the 12081 * current implementation of the driver (ie, since this function 12082 * is at the top of the layering chains, so the info will be 12083 * discarded) and it is in the "hot" IO path. 12084 * 12085 * partition = getminor(bp->b_edev) & SDPART_MASK; 12086 * xp->xb_blkno -= un->un_offset[partition]; 12087 */ 12088 12089 SD_NEXT_IODONE(index, un, bp); 12090 12091 SD_TRACE(SD_LOG_IO_PARTITION, un, 12092 "sd_mapblockaddr_iodone: exit: buf:0x%p\n", bp); 12093 } 12094 12095 12096 /* 12097 * Function: sd_mapblocksize_iostart 12098 * 12099 * Description: Convert between system block size (un->un_sys_blocksize) 12100 * and target block size (un->un_tgt_blocksize). 12101 * 12102 * Context: Can sleep to allocate resources. 12103 * 12104 * Assumptions: A higher layer has already performed any partition validation, 12105 * and converted the xp->xb_blkno to an absolute value relative 12106 * to the start of the device. 12107 * 12108 * It is also assumed that the higher layer has implemented 12109 * an "overrun" mechanism for the case where the request would 12110 * read/write beyond the end of a partition. In this case we 12111 * assume (and ASSERT) that bp->b_resid == 0. 12112 * 12113 * Note: The implementation for this routine assumes the target 12114 * block size remains constant between allocation and transport.
12115 */ 12116 12117 static void 12118 sd_mapblocksize_iostart(int index, struct sd_lun *un, struct buf *bp) 12119 { 12120 struct sd_mapblocksize_info *bsp; 12121 struct sd_xbuf *xp; 12122 offset_t first_byte; 12123 daddr_t start_block, end_block; 12124 daddr_t request_bytes; 12125 ushort_t is_aligned = FALSE; 12126 12127 ASSERT(un != NULL); 12128 ASSERT(bp != NULL); 12129 ASSERT(!mutex_owned(SD_MUTEX(un))); 12130 ASSERT(bp->b_resid == 0); 12131 12132 SD_TRACE(SD_LOG_IO_RMMEDIA, un, 12133 "sd_mapblocksize_iostart: entry: buf:0x%p\n", bp); 12134 12135 /* 12136 * For a non-writable CD, a write request is an error 12137 */ 12138 if (ISCD(un) && ((bp->b_flags & B_READ) == 0) && 12139 (un->un_f_mmc_writable_media == FALSE)) { 12140 bioerror(bp, EIO); 12141 bp->b_resid = bp->b_bcount; 12142 SD_BEGIN_IODONE(index, un, bp); 12143 return; 12144 } 12145 12146 /* 12147 * We do not need a shadow buf if the device is using 12148 * un->un_sys_blocksize as its block size or if bcount == 0. 12149 * In this case there is no layer-private data block allocated. 12150 */ 12151 if ((un->un_tgt_blocksize == un->un_sys_blocksize) || 12152 (bp->b_bcount == 0)) { 12153 goto done; 12154 } 12155 12156 #if defined(__i386) || defined(__amd64) 12157 /* We do not support non-block-aligned transfers for ROD devices */ 12158 ASSERT(!ISROD(un)); 12159 #endif 12160 12161 xp = SD_GET_XBUF(bp); 12162 ASSERT(xp != NULL); 12163 12164 SD_INFO(SD_LOG_IO_RMMEDIA, un, "sd_mapblocksize_iostart: " 12165 "tgt_blocksize:0x%x sys_blocksize: 0x%x\n", 12166 un->un_tgt_blocksize, un->un_sys_blocksize); 12167 SD_INFO(SD_LOG_IO_RMMEDIA, un, "sd_mapblocksize_iostart: " 12168 "request start block:0x%x\n", xp->xb_blkno); 12169 SD_INFO(SD_LOG_IO_RMMEDIA, un, "sd_mapblocksize_iostart: " 12170 "request len:0x%x\n", bp->b_bcount); 12171 12172 /* 12173 * Allocate the layer-private data area for the mapblocksize layer. 12174 * Layers are allowed to use the xp_private member of the sd_xbuf 12175 * struct to store the pointer to their layer-private data block, but 12176 * each layer also has the responsibility of restoring the prior 12177 * contents of xb_private before returning the buf/xbuf to the 12178 * higher layer that sent it. 12179 * 12180 * Here we save the prior contents of xp->xb_private into the 12181 * bsp->mbs_oprivate field of our layer-private data area. This value 12182 * is restored by sd_mapblocksize_iodone() just prior to freeing up 12183 * the layer-private area and returning the buf/xbuf to the layer 12184 * that sent it. 12185 * 12186 * Note that here we use kmem_zalloc for the allocation as there are 12187 * parts of the mapblocksize code that expect certain fields to be 12188 * zero unless explicitly set to a required value. 12189 */ 12190 bsp = kmem_zalloc(sizeof (struct sd_mapblocksize_info), KM_SLEEP); 12191 bsp->mbs_oprivate = xp->xb_private; 12192 xp->xb_private = bsp; 12193 12194 /* 12195 * This treats the data on the disk (target) as an array of bytes. 12196 * first_byte is the byte offset, from the beginning of the device, 12197 * to the location of the request. This is converted from a 12198 * un->un_sys_blocksize block address to a byte offset, and then back 12199 * to a block address based upon a un->un_tgt_blocksize block size. 12200 * 12201 * xp->xb_blkno should be absolute upon entry into this function, 12202 * but it is based upon partitions that use the "system" 12203 * block size. It must be adjusted to reflect the block size of 12204 * the target.
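 *
 * For example (illustrative numbers): with un_sys_blocksize of 512
 * and un_tgt_blocksize of 2048, system block 10 gives
 * first_byte = 10 * 512 = 5120, so start_block = 5120 / 2048 = 2,
 * and the requested data begins 1024 bytes into that target block.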
12205 * 12206 * Note that end_block is actually the block that follows the last 12207 * block of the request, but that's what is needed for the computation. 12208 */ 12209 first_byte = SD_SYSBLOCKS2BYTES(un, (offset_t)xp->xb_blkno); 12210 start_block = xp->xb_blkno = first_byte / un->un_tgt_blocksize; 12211 end_block = (first_byte + bp->b_bcount + un->un_tgt_blocksize - 1) / 12212 un->un_tgt_blocksize; 12213 12214 /* request_bytes is rounded up to a multiple of the target block size */ 12215 request_bytes = (end_block - start_block) * un->un_tgt_blocksize; 12216 12217 /* 12218 * See if the starting address of the request and the request 12219 * length are aligned on a un->un_tgt_blocksize boundary. If aligned 12220 * then we do not need to allocate a shadow buf to handle the request. 12221 */ 12222 if (((first_byte % un->un_tgt_blocksize) == 0) && 12223 ((bp->b_bcount % un->un_tgt_blocksize) == 0)) { 12224 is_aligned = TRUE; 12225 } 12226 12227 if ((bp->b_flags & B_READ) == 0) { 12228 /* 12229 * Lock the range for a write operation. An aligned request is 12230 * considered a simple write; otherwise the request must be a 12231 * read-modify-write. 12232 */ 12233 bsp->mbs_wmp = sd_range_lock(un, start_block, end_block - 1, 12234 (is_aligned == TRUE) ? SD_WTYPE_SIMPLE : SD_WTYPE_RMW); 12235 } 12236 12237 /* 12238 * Alloc a shadow buf if the request is not aligned. Also, this is 12239 * where the READ command is generated for a read-modify-write. (The 12240 * write phase is deferred until after the read completes.) 12241 */ 12242 if (is_aligned == FALSE) { 12243 12244 struct sd_mapblocksize_info *shadow_bsp; 12245 struct sd_xbuf *shadow_xp; 12246 struct buf *shadow_bp; 12247 12248 /* 12249 * Allocate the shadow buf and its associated xbuf. Note that 12250 * after this call the xb_blkno value in both the original 12251 * buf's sd_xbuf _and_ the shadow buf's sd_xbuf will be the 12252 * same: absolute (relative to the start of the device) and 12253 * adjusted for the target block size. The b_blkno in the 12254 * shadow buf will also be set to this value. We should never 12255 * change b_blkno in the original bp however. 12256 * 12257 * Note also that the shadow buf will always need to be a 12258 * READ command, regardless of whether the incoming command 12259 * is a READ or a WRITE. 12260 */ 12261 shadow_bp = sd_shadow_buf_alloc(bp, request_bytes, B_READ, 12262 xp->xb_blkno, 12263 (int (*)(struct buf *)) sd_mapblocksize_iodone); 12264 12265 shadow_xp = SD_GET_XBUF(shadow_bp); 12266 12267 /* 12268 * Allocate the layer-private data for the shadow buf. 12269 * (No need to preserve xb_private in the shadow xbuf.) 12270 */ 12271 shadow_xp->xb_private = shadow_bsp = 12272 kmem_zalloc(sizeof (struct sd_mapblocksize_info), KM_SLEEP); 12273 12274 /* 12275 * bsp->mbs_copy_offset is used later by sd_mapblocksize_iodone 12276 * to figure out where the start of the user data is (based upon 12277 * the system block size) in the data returned by the READ 12278 * command (which will be based upon the target blocksize). Note 12279 * that this is only really used if the request is unaligned.
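 *
 * Continuing the illustrative numbers above: with first_byte of
 * 5120 and xb_blkno of 2, mbs_copy_offset below computes to
 * 5120 - (2 * 2048) = 1024, the byte offset of the user data
 * within the shadow buf.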
12280 */ 12281 bsp->mbs_copy_offset = (ssize_t)(first_byte - 12282 ((offset_t)xp->xb_blkno * un->un_tgt_blocksize)); 12283 ASSERT((bsp->mbs_copy_offset >= 0) && 12284 (bsp->mbs_copy_offset < un->un_tgt_blocksize)); 12285 12286 shadow_bsp->mbs_copy_offset = bsp->mbs_copy_offset; 12287 12288 shadow_bsp->mbs_layer_index = bsp->mbs_layer_index = index; 12289 12290 /* Transfer the wmap (if any) to the shadow buf */ 12291 shadow_bsp->mbs_wmp = bsp->mbs_wmp; 12292 bsp->mbs_wmp = NULL; 12293 12294 /* 12295 * The shadow buf goes on from here in place of the 12296 * original buf. 12297 */ 12298 shadow_bsp->mbs_orig_bp = bp; 12299 bp = shadow_bp; 12300 } 12301 12302 SD_INFO(SD_LOG_IO_RMMEDIA, un, 12303 "sd_mapblocksize_iostart: tgt start block:0x%x\n", xp->xb_blkno); 12304 SD_INFO(SD_LOG_IO_RMMEDIA, un, 12305 "sd_mapblocksize_iostart: tgt request len:0x%x\n", 12306 request_bytes); 12307 SD_INFO(SD_LOG_IO_RMMEDIA, un, 12308 "sd_mapblocksize_iostart: shadow buf:0x%x\n", bp); 12309 12310 done: 12311 SD_NEXT_IOSTART(index, un, bp); 12312 12313 SD_TRACE(SD_LOG_IO_RMMEDIA, un, 12314 "sd_mapblocksize_iostart: exit: buf:0x%p\n", bp); 12315 } 12316 12317 12318 /* 12319 * Function: sd_mapblocksize_iodone 12320 * 12321 * Description: Completion side processing for block-size mapping. 12322 * 12323 * Context: May be called under interrupt context 12324 */ 12325 12326 static void 12327 sd_mapblocksize_iodone(int index, struct sd_lun *un, struct buf *bp) 12328 { 12329 struct sd_mapblocksize_info *bsp; 12330 struct sd_xbuf *xp; 12331 struct sd_xbuf *orig_xp; /* sd_xbuf for the original buf */ 12332 struct buf *orig_bp; /* ptr to the original buf */ 12333 offset_t shadow_end; 12334 offset_t request_end; 12335 offset_t shadow_start; 12336 ssize_t copy_offset; 12337 size_t copy_length; 12338 size_t shortfall; 12339 uint_t is_write; /* TRUE if this bp is a WRITE */ 12340 uint_t has_wmap; /* TRUE if this bp has a wmap */ 12341 12342 ASSERT(un != NULL); 12343 ASSERT(bp != NULL); 12344 12345 SD_TRACE(SD_LOG_IO_RMMEDIA, un, 12346 "sd_mapblocksize_iodone: entry: buf:0x%p\n", bp); 12347 12348 /* 12349 * There is no shadow buf or layer-private data if the target is 12350 * using un->un_sys_blocksize as its block size or if bcount == 0. 12351 */ 12352 if ((un->un_tgt_blocksize == un->un_sys_blocksize) || 12353 (bp->b_bcount == 0)) { 12354 goto exit; 12355 } 12356 12357 xp = SD_GET_XBUF(bp); 12358 ASSERT(xp != NULL); 12359 12360 /* Retrieve the pointer to the layer-private data area from the xbuf. */ 12361 bsp = xp->xb_private; 12362 12363 is_write = ((bp->b_flags & B_READ) == 0) ? TRUE : FALSE; 12364 has_wmap = (bsp->mbs_wmp != NULL) ? TRUE : FALSE; 12365 12366 if (is_write) { 12367 /* 12368 * For a WRITE request we must free up the block range that 12369 * we have locked up. This holds regardless of whether this is 12370 * an aligned write request or a read-modify-write request. 12371 */ 12372 sd_range_unlock(un, bsp->mbs_wmp); 12373 bsp->mbs_wmp = NULL; 12374 } 12375 12376 if ((bp->b_iodone != (int(*)(struct buf *))sd_mapblocksize_iodone)) { 12377 /* 12378 * An aligned read or write command will have no shadow buf; 12379 * there is not much else to do with it. 12380 */ 12381 goto done; 12382 } 12383 12384 orig_bp = bsp->mbs_orig_bp; 12385 ASSERT(orig_bp != NULL); 12386 orig_xp = SD_GET_XBUF(orig_bp); 12387 ASSERT(orig_xp != NULL); 12388 ASSERT(!mutex_owned(SD_MUTEX(un))); 12389 12390 if (!is_write && has_wmap) { 12391 /* 12392 * A READ with a wmap means this is the READ phase of a 12393 * read-modify-write.
If an error occurred on the READ then 12394 * we do not proceed with the WRITE phase or copy any data. 12395 * Just release the write maps and return with an error. 12396 */ 12397 if ((bp->b_resid != 0) || (bp->b_error != 0)) { 12398 orig_bp->b_resid = orig_bp->b_bcount; 12399 bioerror(orig_bp, bp->b_error); 12400 sd_range_unlock(un, bsp->mbs_wmp); 12401 goto freebuf_done; 12402 } 12403 } 12404 12405 /* 12406 * Here is where we set up to copy the data from the shadow buf 12407 * into the space associated with the original buf. 12408 * 12409 * To deal with the conversion between block sizes, these 12410 * computations treat the data as an array of bytes, with the 12411 * first byte (byte 0) corresponding to the first byte in the 12412 * first block on the disk. 12413 */ 12414 12415 /* 12416 * shadow_start and shadow_end indicate the location and extent of 12417 * the data returned with the shadow IO request. 12418 */ 12419 shadow_start = SD_TGTBLOCKS2BYTES(un, (offset_t)xp->xb_blkno); 12420 shadow_end = shadow_start + bp->b_bcount - bp->b_resid; 12421 12422 /* 12423 * copy_offset gives the offset (in bytes) from the start of the first 12424 * block of the READ request to the beginning of the data. We retrieve 12425 * this value from the layer-private data area, where it was saved 12426 * by sd_mapblocksize_iostart(). copy_length gives the amount of 12427 * data to be copied (in bytes). 12428 */ 12429 copy_offset = bsp->mbs_copy_offset; 12430 ASSERT((copy_offset >= 0) && (copy_offset < un->un_tgt_blocksize)); 12431 copy_length = orig_bp->b_bcount; 12432 request_end = shadow_start + copy_offset + orig_bp->b_bcount; 12433 12434 /* 12435 * Set up the resid and error fields of orig_bp as appropriate. 12436 */ 12437 if (shadow_end >= request_end) { 12438 /* We got all the requested data; set resid to zero */ 12439 orig_bp->b_resid = 0; 12440 } else { 12441 /* 12442 * We failed to get enough data to fully satisfy the original 12443 * request. Just copy back whatever data we got and set 12444 * up the residual and error code as required. 12445 * 12446 * 'shortfall' is the amount by which the data received with the 12447 * shadow buf has "fallen short" of the requested amount. 12448 */ 12449 shortfall = (size_t)(request_end - shadow_end); 12450 12451 if (shortfall > orig_bp->b_bcount) { 12452 /* 12453 * We did not get enough data to even partially 12454 * fulfill the original request. The residual is 12455 * equal to the amount requested. 12456 */ 12457 orig_bp->b_resid = orig_bp->b_bcount; 12458 } else { 12459 /* 12460 * We did not get all the data that we requested 12461 * from the device, but we will try to return what 12462 * portion we did get. 12463 */ 12464 orig_bp->b_resid = shortfall; 12465 } 12466 ASSERT(copy_length >= orig_bp->b_resid); 12467 copy_length -= orig_bp->b_resid; 12468 } 12469 12470 /* Propagate the error code from the shadow buf to the original buf */ 12471 bioerror(orig_bp, bp->b_error); 12472 12473 if (is_write) { 12474 goto freebuf_done; /* No data copying for a WRITE */ 12475 } 12476 12477 if (has_wmap) { 12478 /* 12479 * This is a READ command from the READ phase of a 12480 * read-modify-write request. We have to copy the data given 12481 * by the user OVER the data returned by the READ command, 12482 * then convert the command from a READ to a WRITE and send 12483 * it back to the target.
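 *
 * For example (illustrative numbers): for an unaligned 512-byte
 * write within 2048-byte target blocks, the READ phase returned
 * the full 2048-byte block into the shadow buf; the bcopy() below
 * lays the user's 512 bytes over it at copy_offset, and the whole
 * block is then written back out.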
12484 */ 12485 bcopy(orig_bp->b_un.b_addr, bp->b_un.b_addr + copy_offset, 12486 copy_length); 12487 12488 bp->b_flags &= ~((int)B_READ); /* Convert to a WRITE */ 12489 12490 /* 12491 * Dispatch the WRITE command to the taskq thread, which 12492 * will in turn send the command to the target. When the 12493 * WRITE command completes, we (sd_mapblocksize_iodone()) 12494 * will get called again as part of the iodone chain 12495 * processing for it. Note that we will still be dealing 12496 * with the shadow buf at that point. 12497 */ 12498 if (taskq_dispatch(sd_wmr_tq, sd_read_modify_write_task, bp, 12499 KM_NOSLEEP) != 0) { 12500 /* 12501 * Dispatch was successful so we are done. Return 12502 * without going any higher up the iodone chain. Do 12503 * not free up any layer-private data until after the 12504 * WRITE completes. 12505 */ 12506 return; 12507 } 12508 12509 /* 12510 * Dispatch of the WRITE command failed; set up the error 12511 * condition and send this IO back up the iodone chain. 12512 */ 12513 bioerror(orig_bp, EIO); 12514 orig_bp->b_resid = orig_bp->b_bcount; 12515 12516 } else { 12517 /* 12518 * This is a regular READ request (ie, not a RMW). Copy the 12519 * data from the shadow buf into the original buf. The 12520 * copy_offset compensates for any "misalignment" between the 12521 * shadow buf (with its un->un_tgt_blocksize blocks) and the 12522 * original buf (with its un->un_sys_blocksize blocks). 12523 */ 12524 bcopy(bp->b_un.b_addr + copy_offset, orig_bp->b_un.b_addr, 12525 copy_length); 12526 } 12527 12528 freebuf_done: 12529 12530 /* 12531 * At this point we still have both the shadow buf AND the original 12532 * buf to deal with, as well as the layer-private data area in each. 12533 * Local variables are as follows: 12534 * 12535 * bp -- points to shadow buf 12536 * xp -- points to xbuf of shadow buf 12537 * bsp -- points to layer-private data area of shadow buf 12538 * orig_bp -- points to original buf 12539 * 12540 * First free the shadow buf and its associated xbuf, then free the 12541 * layer-private data area from the shadow buf. There is no need to 12542 * restore xb_private in the shadow xbuf. 12543 */ 12544 sd_shadow_buf_free(bp); 12545 kmem_free(bsp, sizeof (struct sd_mapblocksize_info)); 12546 12547 /* 12548 * Now update the local variables to point to the original buf, xbuf, 12549 * and layer-private area. 12550 */ 12551 bp = orig_bp; 12552 xp = SD_GET_XBUF(bp); 12553 ASSERT(xp != NULL); 12554 ASSERT(xp == orig_xp); 12555 bsp = xp->xb_private; 12556 ASSERT(bsp != NULL); 12557 12558 done: 12559 /* 12560 * Restore xb_private to whatever it was set to by the next higher 12561 * layer in the chain, then free the layer-private data area. 12562 */ 12563 xp->xb_private = bsp->mbs_oprivate; 12564 kmem_free(bsp, sizeof (struct sd_mapblocksize_info)); 12565 12566 exit: 12567 SD_TRACE(SD_LOG_IO_RMMEDIA, SD_GET_UN(bp), 12568 "sd_mapblocksize_iodone: calling SD_NEXT_IODONE: buf:0x%p\n", bp); 12569 12570 SD_NEXT_IODONE(index, un, bp); 12571 } 12572 12573 12574 /* 12575 * Function: sd_checksum_iostart 12576 * 12577 * Description: A stub function for a layer that's currently not used. 12578 * For now just a placeholder. 
12579 * 12580 * Context: Kernel thread context 12581 */ 12582 12583 static void 12584 sd_checksum_iostart(int index, struct sd_lun *un, struct buf *bp) 12585 { 12586 ASSERT(un != NULL); 12587 ASSERT(bp != NULL); 12588 ASSERT(!mutex_owned(SD_MUTEX(un))); 12589 SD_NEXT_IOSTART(index, un, bp); 12590 } 12591 12592 12593 /* 12594 * Function: sd_checksum_iodone 12595 * 12596 * Description: A stub function for a layer that's currently not used. 12597 * For now just a placeholder. 12598 * 12599 * Context: May be called under interrupt context 12600 */ 12601 12602 static void 12603 sd_checksum_iodone(int index, struct sd_lun *un, struct buf *bp) 12604 { 12605 ASSERT(un != NULL); 12606 ASSERT(bp != NULL); 12607 ASSERT(!mutex_owned(SD_MUTEX(un))); 12608 SD_NEXT_IODONE(index, un, bp); 12609 } 12610 12611 12612 /* 12613 * Function: sd_checksum_uscsi_iostart 12614 * 12615 * Description: A stub function for a layer that's currently not used. 12616 * For now just a placeholder. 12617 * 12618 * Context: Kernel thread context 12619 */ 12620 12621 static void 12622 sd_checksum_uscsi_iostart(int index, struct sd_lun *un, struct buf *bp) 12623 { 12624 ASSERT(un != NULL); 12625 ASSERT(bp != NULL); 12626 ASSERT(!mutex_owned(SD_MUTEX(un))); 12627 SD_NEXT_IOSTART(index, un, bp); 12628 } 12629 12630 12631 /* 12632 * Function: sd_checksum_uscsi_iodone 12633 * 12634 * Description: A stub function for a layer that's currently not used. 12635 * For now just a placeholder. 12636 * 12637 * Context: May be called under interrupt context 12638 */ 12639 12640 static void 12641 sd_checksum_uscsi_iodone(int index, struct sd_lun *un, struct buf *bp) 12642 { 12643 ASSERT(un != NULL); 12644 ASSERT(bp != NULL); 12645 ASSERT(!mutex_owned(SD_MUTEX(un))); 12646 SD_NEXT_IODONE(index, un, bp); 12647 } 12648 12649 12650 /* 12651 * Function: sd_pm_iostart 12652 * 12653 * Description: iostart-side routine for Power mangement. 12654 * 12655 * Context: Kernel thread context 12656 */ 12657 12658 static void 12659 sd_pm_iostart(int index, struct sd_lun *un, struct buf *bp) 12660 { 12661 ASSERT(un != NULL); 12662 ASSERT(bp != NULL); 12663 ASSERT(!mutex_owned(SD_MUTEX(un))); 12664 ASSERT(!mutex_owned(&un->un_pm_mutex)); 12665 12666 SD_TRACE(SD_LOG_IO_PM, un, "sd_pm_iostart: entry\n"); 12667 12668 if (sd_pm_entry(un) != DDI_SUCCESS) { 12669 /* 12670 * Set up to return the failed buf back up the 'iodone' 12671 * side of the calling chain. 12672 */ 12673 bioerror(bp, EIO); 12674 bp->b_resid = bp->b_bcount; 12675 12676 SD_BEGIN_IODONE(index, un, bp); 12677 12678 SD_TRACE(SD_LOG_IO_PM, un, "sd_pm_iostart: exit\n"); 12679 return; 12680 } 12681 12682 SD_NEXT_IOSTART(index, un, bp); 12683 12684 SD_TRACE(SD_LOG_IO_PM, un, "sd_pm_iostart: exit\n"); 12685 } 12686 12687 12688 /* 12689 * Function: sd_pm_iodone 12690 * 12691 * Description: iodone-side routine for power mangement. 12692 * 12693 * Context: may be called from interrupt context 12694 */ 12695 12696 static void 12697 sd_pm_iodone(int index, struct sd_lun *un, struct buf *bp) 12698 { 12699 ASSERT(un != NULL); 12700 ASSERT(bp != NULL); 12701 ASSERT(!mutex_owned(&un->un_pm_mutex)); 12702 12703 SD_TRACE(SD_LOG_IO_PM, un, "sd_pm_iodone: entry\n"); 12704 12705 /* 12706 * After attach the following flag is only read, so don't 12707 * take the penalty of acquiring a mutex for it. 
12708 */ 12709 if (un->un_f_pm_is_enabled == TRUE) { 12710 sd_pm_exit(un); 12711 } 12712 12713 SD_NEXT_IODONE(index, un, bp); 12714 12715 SD_TRACE(SD_LOG_IO_PM, un, "sd_pm_iodone: exit\n"); 12716 } 12717 12718 12719 /* 12720 * Function: sd_core_iostart 12721 * 12722 * Description: Primary driver function for enqueuing buf(9S) structs from 12723 * the system and initiating IO to the target device 12724 * 12725 * Context: Kernel thread context. Can sleep. 12726 * 12727 * Assumptions: - The given xp->xb_blkno is absolute 12728 * (ie, relative to the start of the device). 12729 * - The IO is to be done using the native blocksize of 12730 * the device, as specified in un->un_tgt_blocksize. 12731 */ 12732 /* ARGSUSED */ 12733 static void 12734 sd_core_iostart(int index, struct sd_lun *un, struct buf *bp) 12735 { 12736 struct sd_xbuf *xp; 12737 12738 ASSERT(un != NULL); 12739 ASSERT(bp != NULL); 12740 ASSERT(!mutex_owned(SD_MUTEX(un))); 12741 ASSERT(bp->b_resid == 0); 12742 12743 SD_TRACE(SD_LOG_IO_CORE, un, "sd_core_iostart: entry: bp:0x%p\n", bp); 12744 12745 xp = SD_GET_XBUF(bp); 12746 ASSERT(xp != NULL); 12747 12748 mutex_enter(SD_MUTEX(un)); 12749 12750 /* 12751 * If we are currently in the failfast state, fail any new IO 12752 * that has B_FAILFAST set, then return. 12753 */ 12754 if ((bp->b_flags & B_FAILFAST) && 12755 (un->un_failfast_state == SD_FAILFAST_ACTIVE)) { 12756 mutex_exit(SD_MUTEX(un)); 12757 bioerror(bp, EIO); 12758 bp->b_resid = bp->b_bcount; 12759 SD_BEGIN_IODONE(index, un, bp); 12760 return; 12761 } 12762 12763 if (SD_IS_DIRECT_PRIORITY(xp)) { 12764 /* 12765 * Priority command -- transport it immediately. 12766 * 12767 * Note: We may want to assert that USCSI_DIAGNOSE is set, 12768 * because all direct priority commands should be associated 12769 * with error recovery actions which we don't want to retry. 12770 */ 12771 sd_start_cmds(un, bp); 12772 } else { 12773 /* 12774 * Normal command -- add it to the wait queue, then start 12775 * transporting commands from the wait queue. 12776 */ 12777 sd_add_buf_to_waitq(un, bp); 12778 SD_UPDATE_KSTATS(un, kstat_waitq_enter, bp); 12779 sd_start_cmds(un, NULL); 12780 } 12781 12782 mutex_exit(SD_MUTEX(un)); 12783 12784 SD_TRACE(SD_LOG_IO_CORE, un, "sd_core_iostart: exit: bp:0x%p\n", bp); 12785 } 12786 12787 12788 /* 12789 * Function: sd_init_cdb_limits 12790 * 12791 * Description: This is to handle scsi_pkt initialization differences 12792 * between the driver platforms. 12793 * 12794 * Legacy behaviors: 12795 * 12796 * If the block number or the sector count exceeds the 12797 * capabilities of a Group 0 command, shift over to a 12798 * Group 1 command. We don't blindly use Group 1 12799 * commands because a) some drives (CDC Wren IVs) get a 12800 * bit confused, and b) there is probably a fair amount 12801 * of speed difference for a target to receive and decode 12802 * a 10 byte command instead of a 6 byte command. 12803 * 12804 * The xfer time difference of 6 vs 10 byte CDBs is 12805 * still significant so this code is still worthwhile. 12806 * 10 byte CDBs are very inefficient with the fas HBA driver 12807 * and older disks. Each CDB byte took 1 usec with some 12808 * popular disks. 
12809 * 12810 * Context: Must be called at attach time 12811 */ 12812 12813 static void 12814 sd_init_cdb_limits(struct sd_lun *un) 12815 { 12816 int hba_cdb_limit; 12817 12818 /* 12819 * Use CDB_GROUP1 commands for most devices except for 12820 * parallel SCSI fixed drives in which case we get better 12821 * performance using CDB_GROUP0 commands (where applicable). 12822 */ 12823 un->un_mincdb = SD_CDB_GROUP1; 12824 #if !defined(__fibre) 12825 if (!un->un_f_is_fibre && !un->un_f_cfg_is_atapi && !ISROD(un) && 12826 !un->un_f_has_removable_media) { 12827 un->un_mincdb = SD_CDB_GROUP0; 12828 } 12829 #endif 12830 12831 /* 12832 * Try to read the max-cdb-length supported by HBA. 12833 */ 12834 un->un_max_hba_cdb = scsi_ifgetcap(SD_ADDRESS(un), "max-cdb-length", 1); 12835 if (0 >= un->un_max_hba_cdb) { 12836 un->un_max_hba_cdb = CDB_GROUP4; 12837 hba_cdb_limit = SD_CDB_GROUP4; 12838 } else if (0 < un->un_max_hba_cdb && 12839 un->un_max_hba_cdb < CDB_GROUP1) { 12840 hba_cdb_limit = SD_CDB_GROUP0; 12841 } else if (CDB_GROUP1 <= un->un_max_hba_cdb && 12842 un->un_max_hba_cdb < CDB_GROUP5) { 12843 hba_cdb_limit = SD_CDB_GROUP1; 12844 } else if (CDB_GROUP5 <= un->un_max_hba_cdb && 12845 un->un_max_hba_cdb < CDB_GROUP4) { 12846 hba_cdb_limit = SD_CDB_GROUP5; 12847 } else { 12848 hba_cdb_limit = SD_CDB_GROUP4; 12849 } 12850 12851 /* 12852 * Use CDB_GROUP5 commands for removable devices. Use CDB_GROUP4 12853 * commands for fixed disks unless we are building for a 32 bit 12854 * kernel. 12855 */ 12856 #ifdef _LP64 12857 un->un_maxcdb = (un->un_f_has_removable_media) ? SD_CDB_GROUP5 : 12858 min(hba_cdb_limit, SD_CDB_GROUP4); 12859 #else 12860 un->un_maxcdb = (un->un_f_has_removable_media) ? SD_CDB_GROUP5 : 12861 min(hba_cdb_limit, SD_CDB_GROUP1); 12862 #endif 12863 12864 un->un_status_len = (int)((un->un_f_arq_enabled == TRUE) 12865 ? sizeof (struct scsi_arq_status) : 1); 12866 un->un_cmd_timeout = (ushort_t)sd_io_time; 12867 un->un_uscsi_timeout = ((ISCD(un)) ? 2 : 1) * un->un_cmd_timeout; 12868 } 12869 12870 12871 /* 12872 * Function: sd_initpkt_for_buf 12873 * 12874 * Description: Allocate and initialize for transport a scsi_pkt struct, 12875 * based upon the info specified in the given buf struct. 12876 * 12877 * Assumes the xb_blkno in the request is absolute (ie, 12878 * relative to the start of the device (NOT partition!). 12879 * Also assumes that the request is using the native block 12880 * size of the device (as returned by the READ CAPACITY 12881 * command). 12882 * 12883 * Return Code: SD_PKT_ALLOC_SUCCESS 12884 * SD_PKT_ALLOC_FAILURE 12885 * SD_PKT_ALLOC_FAILURE_NO_DMA 12886 * SD_PKT_ALLOC_FAILURE_CDB_TOO_SMALL 12887 * 12888 * Context: Kernel thread and may be called from software interrupt context 12889 * as part of a sdrunout callback. 
This function may not block or 12890 * call routines that block 12891 */ 12892 12893 static int 12894 sd_initpkt_for_buf(struct buf *bp, struct scsi_pkt **pktpp) 12895 { 12896 struct sd_xbuf *xp; 12897 struct scsi_pkt *pktp = NULL; 12898 struct sd_lun *un; 12899 size_t blockcount; 12900 daddr_t startblock; 12901 int rval; 12902 int cmd_flags; 12903 12904 ASSERT(bp != NULL); 12905 ASSERT(pktpp != NULL); 12906 xp = SD_GET_XBUF(bp); 12907 ASSERT(xp != NULL); 12908 un = SD_GET_UN(bp); 12909 ASSERT(un != NULL); 12910 ASSERT(mutex_owned(SD_MUTEX(un))); 12911 ASSERT(bp->b_resid == 0); 12912 12913 SD_TRACE(SD_LOG_IO_CORE, un, 12914 "sd_initpkt_for_buf: entry: buf:0x%p\n", bp); 12915 12916 mutex_exit(SD_MUTEX(un)); 12917 12918 #if defined(__i386) || defined(__amd64) /* DMAFREE for x86 only */ 12919 if (xp->xb_pkt_flags & SD_XB_DMA_FREED) { 12920 /* 12921 * Already have a scsi_pkt -- just need DMA resources. 12922 * We must recompute the CDB in case the mapping returns 12923 * a nonzero pkt_resid. 12924 * Note: if this is a portion of a PKT_DMA_PARTIAL transfer 12925 * that is being retried, the unmap/remap of the DMA resources 12926 * will result in the entire transfer starting over again 12927 * from the very first block. 12928 */ 12929 ASSERT(xp->xb_pktp != NULL); 12930 pktp = xp->xb_pktp; 12931 } else { 12932 pktp = NULL; 12933 } 12934 #endif /* __i386 || __amd64 */ 12935 12936 startblock = xp->xb_blkno; /* Absolute block num. */ 12937 blockcount = SD_BYTES2TGTBLOCKS(un, bp->b_bcount); 12938 12939 cmd_flags = un->un_pkt_flags | (xp->xb_pkt_flags & SD_XB_INITPKT_MASK); 12940 12941 /* 12942 * sd_setup_rw_pkt will determine the appropriate CDB group to use, 12943 * call scsi_init_pkt, and build the CDB. 12944 */ 12945 rval = sd_setup_rw_pkt(un, &pktp, bp, 12946 cmd_flags, sdrunout, (caddr_t)un, 12947 startblock, blockcount); 12948 12949 if (rval == 0) { 12950 /* 12951 * Success. 12952 * 12953 * If partial DMA is being used and required for this transfer, 12954 * set it up here. 12955 */ 12956 if ((un->un_pkt_flags & PKT_DMA_PARTIAL) != 0 && 12957 (pktp->pkt_resid != 0)) { 12958 12959 /* 12960 * Save the CDB length and pkt_resid for the 12961 * next xfer 12962 */ 12963 xp->xb_dma_resid = pktp->pkt_resid; 12964 12965 /* rezero resid */ 12966 pktp->pkt_resid = 0; 12967 12968 } else { 12969 xp->xb_dma_resid = 0; 12970 } 12971 12972 pktp->pkt_flags = un->un_tagflags; 12973 pktp->pkt_time = un->un_cmd_timeout; 12974 pktp->pkt_comp = sdintr; 12975 12976 pktp->pkt_private = bp; 12977 *pktpp = pktp; 12978 12979 SD_TRACE(SD_LOG_IO_CORE, un, 12980 "sd_initpkt_for_buf: exit: buf:0x%p\n", bp); 12981 12982 #if defined(__i386) || defined(__amd64) /* DMAFREE for x86 only */ 12983 xp->xb_pkt_flags &= ~SD_XB_DMA_FREED; 12984 #endif 12985 12986 mutex_enter(SD_MUTEX(un)); 12987 return (SD_PKT_ALLOC_SUCCESS); 12988 12989 } 12990 12991 /* 12992 * SD_PKT_ALLOC_FAILURE is the only expected failure code 12993 * from sd_setup_rw_pkt. 12994 */ 12995 ASSERT(rval == SD_PKT_ALLOC_FAILURE); 12996 12997 if (rval == SD_PKT_ALLOC_FAILURE) { 12998 *pktpp = NULL; 12999 /* 13000 * Set the driver state to RWAIT to indicate the driver 13001 * is waiting on resource allocations. The driver will not 13002 * suspend, pm_suspend, or detach while the state is RWAIT. 13003 */ 13004 mutex_enter(SD_MUTEX(un)); 13005 New_state(un, SD_STATE_RWAIT); 13006 13007 SD_ERROR(SD_LOG_IO_CORE, un, 13008 "sd_initpkt_for_buf: No pktp.
exit bp:0x%p\n", bp); 13009 13010 if ((bp->b_flags & B_ERROR) != 0) { 13011 return (SD_PKT_ALLOC_FAILURE_NO_DMA); 13012 } 13013 return (SD_PKT_ALLOC_FAILURE); 13014 } else { 13015 /* 13016 * PKT_ALLOC_FAILURE_CDB_TOO_SMALL 13017 * 13018 * This should never happen. Maybe someone messed with the 13019 * kernel's minphys? 13020 */ 13021 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 13022 "Request rejected: too large for CDB: " 13023 "lba:0x%08lx len:0x%08lx\n", startblock, blockcount); 13024 SD_ERROR(SD_LOG_IO_CORE, un, 13025 "sd_initpkt_for_buf: No cp. exit bp:0x%p\n", bp); 13026 mutex_enter(SD_MUTEX(un)); 13027 return (SD_PKT_ALLOC_FAILURE_CDB_TOO_SMALL); 13028 13029 } 13030 } 13031 13032 13033 /* 13034 * Function: sd_destroypkt_for_buf 13035 * 13036 * Description: Free the scsi_pkt(9S) for the given bp (buf IO processing). 13037 * 13038 * Context: Kernel thread or interrupt context 13039 */ 13040 13041 static void 13042 sd_destroypkt_for_buf(struct buf *bp) 13043 { 13044 ASSERT(bp != NULL); 13045 ASSERT(SD_GET_UN(bp) != NULL); 13046 13047 SD_TRACE(SD_LOG_IO_CORE, SD_GET_UN(bp), 13048 "sd_destroypkt_for_buf: entry: buf:0x%p\n", bp); 13049 13050 ASSERT(SD_GET_PKTP(bp) != NULL); 13051 scsi_destroy_pkt(SD_GET_PKTP(bp)); 13052 13053 SD_TRACE(SD_LOG_IO_CORE, SD_GET_UN(bp), 13054 "sd_destroypkt_for_buf: exit: buf:0x%p\n", bp); 13055 } 13056 13057 /* 13058 * Function: sd_setup_rw_pkt 13059 * 13060 * Description: Determines appropriate CDB group for the requested LBA 13061 * and transfer length, calls scsi_init_pkt, and builds 13062 * the CDB. Do not use for partial DMA transfers except 13063 * for the initial transfer since the CDB size must 13064 * remain constant. 13065 * 13066 * Context: Kernel thread and may be called from software interrupt 13067 * context as part of a sdrunout callback. This function may not 13068 * block or call routines that block 13069 */ 13070 13071 13072 int 13073 sd_setup_rw_pkt(struct sd_lun *un, 13074 struct scsi_pkt **pktpp, struct buf *bp, int flags, 13075 int (*callback)(caddr_t), caddr_t callback_arg, 13076 diskaddr_t lba, uint32_t blockcount) 13077 { 13078 struct scsi_pkt *return_pktp; 13079 union scsi_cdb *cdbp; 13080 struct sd_cdbinfo *cp = NULL; 13081 int i; 13082 13083 /* 13084 * See which size CDB to use, based upon the request. 13085 */ 13086 for (i = un->un_mincdb; i <= un->un_maxcdb; i++) { 13087 13088 /* 13089 * Check lba and block count against sd_cdbtab limits. 13090 * In the partial DMA case, we have to use the same size 13091 * CDB for all the transfers. Check lba + blockcount 13092 * against the max LBA so we know that segment of the 13093 * transfer can use the CDB we select. 13094 */ 13095 if ((lba + blockcount - 1 <= sd_cdbtab[i].sc_maxlba) && 13096 (blockcount <= sd_cdbtab[i].sc_maxlen)) { 13097 13098 /* 13099 * The command will fit into the CDB type 13100 * specified by sd_cdbtab[i]. 13101 */ 13102 cp = sd_cdbtab + i; 13103 13104 /* 13105 * Call scsi_init_pkt so we can fill in the 13106 * CDB. 13107 */ 13108 return_pktp = scsi_init_pkt(SD_ADDRESS(un), *pktpp, 13109 bp, cp->sc_grpcode, un->un_status_len, 0, 13110 flags, callback, callback_arg); 13111 13112 if (return_pktp != NULL) { 13113 13114 /* 13115 * Return new value of pkt 13116 */ 13117 *pktpp = return_pktp; 13118 13119 /* 13120 * To be safe, zero the CDB ensuring there is 13121 * no leftover data from a previous command.
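 * (cp->sc_grpcode doubles as the CDB length in bytes -- 6 for
 * CDB_GROUP0, 10 for CDB_GROUP1, 12 for CDB_GROUP5 and 16 for
 * CDB_GROUP4 -- so the bzero covers exactly the CDB area that
 * scsi_init_pkt() allocated above.)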
13122 */ 13123 bzero(return_pktp->pkt_cdbp, cp->sc_grpcode); 13124 13125 /* 13126 * Handle partial DMA mapping 13127 */ 13128 if (return_pktp->pkt_resid != 0) { 13129 13130 /* 13131 * Not going to xfer as many blocks as 13132 * originally expected 13133 */ 13134 blockcount -= 13135 SD_BYTES2TGTBLOCKS(un, 13136 return_pktp->pkt_resid); 13137 } 13138 13139 cdbp = (union scsi_cdb *)return_pktp->pkt_cdbp; 13140 13141 /* 13142 * Set command byte based on the CDB 13143 * type we matched. 13144 */ 13145 cdbp->scc_cmd = cp->sc_grpmask | 13146 ((bp->b_flags & B_READ) ? 13147 SCMD_READ : SCMD_WRITE); 13148 13149 SD_FILL_SCSI1_LUN(un, return_pktp); 13150 13151 /* 13152 * Fill in LBA and length 13153 */ 13154 ASSERT((cp->sc_grpcode == CDB_GROUP1) || 13155 (cp->sc_grpcode == CDB_GROUP4) || 13156 (cp->sc_grpcode == CDB_GROUP0) || 13157 (cp->sc_grpcode == CDB_GROUP5)); 13158 13159 if (cp->sc_grpcode == CDB_GROUP1) { 13160 FORMG1ADDR(cdbp, lba); 13161 FORMG1COUNT(cdbp, blockcount); 13162 return (0); 13163 } else if (cp->sc_grpcode == CDB_GROUP4) { 13164 FORMG4LONGADDR(cdbp, lba); 13165 FORMG4COUNT(cdbp, blockcount); 13166 return (0); 13167 } else if (cp->sc_grpcode == CDB_GROUP0) { 13168 FORMG0ADDR(cdbp, lba); 13169 FORMG0COUNT(cdbp, blockcount); 13170 return (0); 13171 } else if (cp->sc_grpcode == CDB_GROUP5) { 13172 FORMG5ADDR(cdbp, lba); 13173 FORMG5COUNT(cdbp, blockcount); 13174 return (0); 13175 } 13176 13177 /* 13178 * It should be impossible to not match one 13179 * of the CDB types above, so we should never 13180 * reach this point. Set the CDB command byte 13181 * to test-unit-ready to avoid writing 13182 * to somewhere we don't intend. 13183 */ 13184 cdbp->scc_cmd = SCMD_TEST_UNIT_READY; 13185 return (SD_PKT_ALLOC_FAILURE_CDB_TOO_SMALL); 13186 } else { 13187 /* 13188 * Couldn't get scsi_pkt 13189 */ 13190 return (SD_PKT_ALLOC_FAILURE); 13191 } 13192 } 13193 } 13194 13195 /* 13196 * None of the available CDB types were suitable. This really 13197 * should never happen: on a 64 bit system we support 13198 * READ16/WRITE16 which will hold an entire 64 bit disk address 13199 * and on a 32 bit system we will refuse to bind to a device 13200 * larger than 2TB so addresses will never be larger than 32 bits. 13201 */ 13202 return (SD_PKT_ALLOC_FAILURE_CDB_TOO_SMALL); 13203 } 13204 13205 /* 13206 * Function: sd_setup_next_rw_pkt 13207 * 13208 * Description: Setup packet for partial DMA transfers, except for the 13209 * initial transfer. sd_setup_rw_pkt should be used for 13210 * the initial transfer. 13211 * 13212 * Context: Kernel thread and may be called from interrupt context. 13213 */ 13214 13215 int 13216 sd_setup_next_rw_pkt(struct sd_lun *un, 13217 struct scsi_pkt *pktp, struct buf *bp, 13218 diskaddr_t lba, uint32_t blockcount) 13219 { 13220 uchar_t com; 13221 union scsi_cdb *cdbp; 13222 uchar_t cdb_group_id; 13223 13224 ASSERT(pktp != NULL); 13225 ASSERT(pktp->pkt_cdbp != NULL); 13226 13227 cdbp = (union scsi_cdb *)pktp->pkt_cdbp; 13228 com = cdbp->scc_cmd; 13229 cdb_group_id = CDB_GROUPID(com); 13230 13231 ASSERT((cdb_group_id == CDB_GROUPID_0) || 13232 (cdb_group_id == CDB_GROUPID_1) || 13233 (cdb_group_id == CDB_GROUPID_4) || 13234 (cdb_group_id == CDB_GROUPID_5)); 13235 13236 /* 13237 * Move pkt to the next portion of the xfer. 13238 * func is NULL_FUNC so we do not have to release 13239 * the disk mutex here. 13240 */ 13241 if (scsi_init_pkt(SD_ADDRESS(un), pktp, bp, 0, 0, 0, 0, 13242 NULL_FUNC, NULL) == pktp) { 13243 /* Success. 
Handle partial DMA */ 13244 if (pktp->pkt_resid != 0) { 13245 blockcount -= 13246 SD_BYTES2TGTBLOCKS(un, pktp->pkt_resid); 13247 } 13248 13249 cdbp->scc_cmd = com; 13250 SD_FILL_SCSI1_LUN(un, pktp); 13251 if (cdb_group_id == CDB_GROUPID_1) { 13252 FORMG1ADDR(cdbp, lba); 13253 FORMG1COUNT(cdbp, blockcount); 13254 return (0); 13255 } else if (cdb_group_id == CDB_GROUPID_4) { 13256 FORMG4LONGADDR(cdbp, lba); 13257 FORMG4COUNT(cdbp, blockcount); 13258 return (0); 13259 } else if (cdb_group_id == CDB_GROUPID_0) { 13260 FORMG0ADDR(cdbp, lba); 13261 FORMG0COUNT(cdbp, blockcount); 13262 return (0); 13263 } else if (cdb_group_id == CDB_GROUPID_5) { 13264 FORMG5ADDR(cdbp, lba); 13265 FORMG5COUNT(cdbp, blockcount); 13266 return (0); 13267 } 13268 13269 /* Unreachable */ 13270 return (SD_PKT_ALLOC_FAILURE_CDB_TOO_SMALL); 13271 } 13272 13273 /* 13274 * Error setting up next portion of cmd transfer. 13275 * Something is definitely very wrong and this 13276 * should not happen. 13277 */ 13278 return (SD_PKT_ALLOC_FAILURE); 13279 } 13280 13281 /* 13282 * Function: sd_initpkt_for_uscsi 13283 * 13284 * Description: Allocate and initialize for transport a scsi_pkt struct, 13285 * based upon the info specified in the given uscsi_cmd struct. 13286 * 13287 * Return Code: SD_PKT_ALLOC_SUCCESS 13288 * SD_PKT_ALLOC_FAILURE 13289 * SD_PKT_ALLOC_FAILURE_NO_DMA 13290 * SD_PKT_ALLOC_FAILURE_CDB_TOO_SMALL 13291 * 13292 * Context: Kernel thread and may be called from software interrupt context 13293 * as part of a sdrunout callback. This function may not block or 13294 * call routines that block 13295 */ 13296 13297 static int 13298 sd_initpkt_for_uscsi(struct buf *bp, struct scsi_pkt **pktpp) 13299 { 13300 struct uscsi_cmd *uscmd; 13301 struct sd_xbuf *xp; 13302 struct scsi_pkt *pktp; 13303 struct sd_lun *un; 13304 uint32_t flags = 0; 13305 13306 ASSERT(bp != NULL); 13307 ASSERT(pktpp != NULL); 13308 xp = SD_GET_XBUF(bp); 13309 ASSERT(xp != NULL); 13310 un = SD_GET_UN(bp); 13311 ASSERT(un != NULL); 13312 ASSERT(mutex_owned(SD_MUTEX(un))); 13313 13314 /* The pointer to the uscsi_cmd struct is expected in xb_pktinfo */ 13315 uscmd = (struct uscsi_cmd *)xp->xb_pktinfo; 13316 ASSERT(uscmd != NULL); 13317 13318 SD_TRACE(SD_LOG_IO_CORE, un, 13319 "sd_initpkt_for_uscsi: entry: buf:0x%p\n", bp); 13320 13321 /* 13322 * Allocate the scsi_pkt for the command. 13323 * Note: If PKT_DMA_PARTIAL flag is set, scsi_vhci binds a path 13324 * during scsi_init_pkt time and will continue to use the 13325 * same path as long as the same scsi_pkt is used without 13326 * intervening scsi_dma_free(). Since a uscsi command does 13327 * not call scsi_dmafree() before retrying a failed command, it 13328 * is necessary to make sure the PKT_DMA_PARTIAL flag is NOT 13329 * set, so that scsi_vhci can use another available path for 13330 * the retry. Besides, a uscsi command does not allow DMA breakup, 13331 * so there is no need to set the PKT_DMA_PARTIAL flag. 13332 */ 13333 if (uscmd->uscsi_rqlen > SENSE_LENGTH) { 13334 pktp = scsi_init_pkt(SD_ADDRESS(un), NULL, 13335 ((bp->b_bcount != 0) ? bp : NULL), uscmd->uscsi_cdblen, 13336 ((int)(uscmd->uscsi_rqlen) + sizeof (struct scsi_arq_status) 13337 - sizeof (struct scsi_extended_sense)), 0, 13338 (un->un_pkt_flags & ~PKT_DMA_PARTIAL) | PKT_XARQ, 13339 sdrunout, (caddr_t)un); 13340 } else { 13341 pktp = scsi_init_pkt(SD_ADDRESS(un), NULL, 13342 ((bp->b_bcount != 0) ?
bp : NULL), uscmd->uscsi_cdblen, 13343 sizeof (struct scsi_arq_status), 0, 13344 (un->un_pkt_flags & ~PKT_DMA_PARTIAL), 13345 sdrunout, (caddr_t)un); 13346 } 13347 13348 if (pktp == NULL) { 13349 *pktpp = NULL; 13350 /* 13351 * Set the driver state to RWAIT to indicate the driver 13352 * is waiting on resource allocations. The driver will not 13353 * suspend, pm_suspend, or detach while the state is RWAIT. 13354 */ 13355 New_state(un, SD_STATE_RWAIT); 13356 13357 SD_ERROR(SD_LOG_IO_CORE, un, 13358 "sd_initpkt_for_uscsi: No pktp. exit bp:0x%p\n", bp); 13359 13360 if ((bp->b_flags & B_ERROR) != 0) { 13361 return (SD_PKT_ALLOC_FAILURE_NO_DMA); 13362 } 13363 return (SD_PKT_ALLOC_FAILURE); 13364 } 13365 13366 /* 13367 * We do not do DMA breakup for USCSI commands, so return failure 13368 * here if all the needed DMA resources were not allocated. 13369 */ 13370 if ((un->un_pkt_flags & PKT_DMA_PARTIAL) && 13371 (bp->b_bcount != 0) && (pktp->pkt_resid != 0)) { 13372 scsi_destroy_pkt(pktp); 13373 SD_ERROR(SD_LOG_IO_CORE, un, "sd_initpkt_for_uscsi: " 13374 "No partial DMA for USCSI. exit: buf:0x%p\n", bp); 13375 return (SD_PKT_ALLOC_FAILURE_PKT_TOO_SMALL); 13376 } 13377 13378 /* Init the cdb from the given uscsi struct */ 13379 (void) scsi_setup_cdb((union scsi_cdb *)pktp->pkt_cdbp, 13380 uscmd->uscsi_cdb[0], 0, 0, 0); 13381 13382 SD_FILL_SCSI1_LUN(un, pktp); 13383 13384 /* 13385 * Set up the optional USCSI flags. See the uscsi (7I) man page 13386 * for a listing of the supported flags. 13387 */ 13388 13389 if (uscmd->uscsi_flags & USCSI_SILENT) { 13390 flags |= FLAG_SILENT; 13391 } 13392 13393 if (uscmd->uscsi_flags & USCSI_DIAGNOSE) { 13394 flags |= FLAG_DIAGNOSE; 13395 } 13396 13397 if (uscmd->uscsi_flags & USCSI_ISOLATE) { 13398 flags |= FLAG_ISOLATE; 13399 } 13400 13401 if (un->un_f_is_fibre == FALSE) { 13402 if (uscmd->uscsi_flags & USCSI_RENEGOT) { 13403 flags |= FLAG_RENEGOTIATE_WIDE_SYNC; 13404 } 13405 } 13406 13407 /* 13408 * Set the pkt flags here so we save time later. 13409 * Note: These flags are NOT in the uscsi man page!!! 13410 */ 13411 if (uscmd->uscsi_flags & USCSI_HEAD) { 13412 flags |= FLAG_HEAD; 13413 } 13414 13415 if (uscmd->uscsi_flags & USCSI_NOINTR) { 13416 flags |= FLAG_NOINTR; 13417 } 13418 13419 /* 13420 * For tagged queueing, things get a bit complicated. 13421 * Check first for head of queue and last for ordered queue. 13422 * If neither head nor ordered is set, use the default driver tag flags. 13423 */ 13424 if ((uscmd->uscsi_flags & USCSI_NOTAG) == 0) { 13425 if (uscmd->uscsi_flags & USCSI_HTAG) { 13426 flags |= FLAG_HTAG; 13427 } else if (uscmd->uscsi_flags & USCSI_OTAG) { 13428 flags |= FLAG_OTAG; 13429 } else { 13430 flags |= un->un_tagflags & FLAG_TAGMASK; 13431 } 13432 } 13433 13434 if (uscmd->uscsi_flags & USCSI_NODISCON) { 13435 flags = (flags & ~FLAG_TAGMASK) | FLAG_NODISCON; 13436 } 13437 13438 pktp->pkt_flags = flags; 13439 13440 /* Transfer uscsi information to scsi_pkt */ 13441 (void) scsi_uscsi_pktinit(uscmd, pktp); 13442 13443 /* Copy the caller's CDB into the pkt...
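(scsi_setup_cdb() above only seeded the opcode byte; this bcopy supplies the caller's complete CDB.)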
*/ 13444 bcopy(uscmd->uscsi_cdb, pktp->pkt_cdbp, uscmd->uscsi_cdblen); 13445 13446 if (uscmd->uscsi_timeout == 0) { 13447 pktp->pkt_time = un->un_uscsi_timeout; 13448 } else { 13449 pktp->pkt_time = uscmd->uscsi_timeout; 13450 } 13451 13452 /* need it later to identify USCSI request in sdintr */ 13453 xp->xb_pkt_flags |= SD_XB_USCSICMD; 13454 13455 xp->xb_sense_resid = uscmd->uscsi_rqresid; 13456 13457 pktp->pkt_private = bp; 13458 pktp->pkt_comp = sdintr; 13459 *pktpp = pktp; 13460 13461 SD_TRACE(SD_LOG_IO_CORE, un, 13462 "sd_initpkt_for_uscsi: exit: buf:0x%p\n", bp); 13463 13464 return (SD_PKT_ALLOC_SUCCESS); 13465 } 13466 13467 13468 /* 13469 * Function: sd_destroypkt_for_uscsi 13470 * 13471 * Description: Free the scsi_pkt(9S) struct for the given bp, for uscsi 13472 * IOs. Also saves relevant info into the associated uscsi_cmd 13473 * struct. 13474 * 13475 * Context: May be called under interrupt context 13476 */ 13477 13478 static void 13479 sd_destroypkt_for_uscsi(struct buf *bp) 13480 { 13481 struct uscsi_cmd *uscmd; 13482 struct sd_xbuf *xp; 13483 struct scsi_pkt *pktp; 13484 struct sd_lun *un; 13485 struct sd_uscsi_info *suip; 13486 13487 ASSERT(bp != NULL); 13488 xp = SD_GET_XBUF(bp); 13489 ASSERT(xp != NULL); 13490 un = SD_GET_UN(bp); 13491 ASSERT(un != NULL); 13492 ASSERT(!mutex_owned(SD_MUTEX(un))); 13493 pktp = SD_GET_PKTP(bp); 13494 ASSERT(pktp != NULL); 13495 13496 SD_TRACE(SD_LOG_IO_CORE, un, 13497 "sd_destroypkt_for_uscsi: entry: buf:0x%p\n", bp); 13498 13499 /* The pointer to the uscsi_cmd struct is expected in xb_pktinfo */ 13500 uscmd = (struct uscsi_cmd *)xp->xb_pktinfo; 13501 ASSERT(uscmd != NULL); 13502 13503 /* Save the status and the residual into the uscsi_cmd struct */ 13504 uscmd->uscsi_status = ((*(pktp)->pkt_scbp) & STATUS_MASK); 13505 uscmd->uscsi_resid = bp->b_resid; 13506 13507 /* Transfer scsi_pkt information to uscsi */ 13508 (void) scsi_uscsi_pktfini(pktp, uscmd); 13509 13510 /* 13511 * If enabled, copy any saved sense data into the area specified 13512 * by the uscsi command. 13513 */ 13514 if (((uscmd->uscsi_flags & USCSI_RQENABLE) != 0) && 13515 (uscmd->uscsi_rqlen != 0) && (uscmd->uscsi_rqbuf != NULL)) { 13516 /* 13517 * Note: uscmd->uscsi_rqbuf should always point to a buffer 13518 * at least SENSE_LENGTH bytes in size (see sd_send_scsi_cmd()) 13519 */ 13520 uscmd->uscsi_rqstatus = xp->xb_sense_status; 13521 uscmd->uscsi_rqresid = xp->xb_sense_resid; 13522 if (uscmd->uscsi_rqlen > SENSE_LENGTH) { 13523 bcopy(xp->xb_sense_data, uscmd->uscsi_rqbuf, 13524 MAX_SENSE_LENGTH); 13525 } else { 13526 bcopy(xp->xb_sense_data, uscmd->uscsi_rqbuf, 13527 SENSE_LENGTH); 13528 } 13529 } 13530 /* 13531 * The following assignments are for SCSI FMA. 13532 */ 13533 ASSERT(xp->xb_private != NULL); 13534 suip = (struct sd_uscsi_info *)xp->xb_private; 13535 suip->ui_pkt_reason = pktp->pkt_reason; 13536 suip->ui_pkt_state = pktp->pkt_state; 13537 suip->ui_pkt_statistics = pktp->pkt_statistics; 13538 suip->ui_lba = (uint64_t)SD_GET_BLKNO(bp); 13539 13540 /* We are done with the scsi_pkt; free it now */ 13541 ASSERT(SD_GET_PKTP(bp) != NULL); 13542 scsi_destroy_pkt(SD_GET_PKTP(bp)); 13543 13544 SD_TRACE(SD_LOG_IO_CORE, un, 13545 "sd_destroypkt_for_uscsi: exit: buf:0x%p\n", bp); 13546 } 13547 13548 13549 /* 13550 * Function: sd_bioclone_alloc 13551 * 13552 * Description: Allocate a buf(9S) and init it as per the given buf 13553 * and the various arguments. The associated sd_xbuf 13554 * struct is (nearly) duplicated.
The struct buf *bp 13555 * argument is saved in new_xp->xb_private. 13556 * 13557 * Arguments: bp - ptr to the buf(9S) to be "shadowed" 13558 * datalen - size of data area for the shadow bp 13559 * blkno - starting LBA 13560 * func - function pointer for b_iodone in the shadow buf. (May 13561 * be NULL if none.) 13562 * 13563 * Return Code: Pointer to the allocated buf(9S) struct 13564 * 13565 * Context: Can sleep. 13566 */ 13567 13568 static struct buf * 13569 sd_bioclone_alloc(struct buf *bp, size_t datalen, 13570 daddr_t blkno, int (*func)(struct buf *)) 13571 { 13572 struct sd_lun *un; 13573 struct sd_xbuf *xp; 13574 struct sd_xbuf *new_xp; 13575 struct buf *new_bp; 13576 13577 ASSERT(bp != NULL); 13578 xp = SD_GET_XBUF(bp); 13579 ASSERT(xp != NULL); 13580 un = SD_GET_UN(bp); 13581 ASSERT(un != NULL); 13582 ASSERT(!mutex_owned(SD_MUTEX(un))); 13583 13584 new_bp = bioclone(bp, 0, datalen, SD_GET_DEV(un), blkno, func, 13585 NULL, KM_SLEEP); 13586 13587 new_bp->b_lblkno = blkno; 13588 13589 /* 13590 * Allocate an xbuf for the shadow bp and copy the contents of the 13591 * original xbuf into it. 13592 */ 13593 new_xp = kmem_alloc(sizeof (struct sd_xbuf), KM_SLEEP); 13594 bcopy(xp, new_xp, sizeof (struct sd_xbuf)); 13595 13596 /* 13597 * The given bp is automatically saved in the xb_private member 13598 * of the new xbuf. Callers are allowed to depend on this. 13599 */ 13600 new_xp->xb_private = bp; 13601 13602 new_bp->b_private = new_xp; 13603 13604 return (new_bp); 13605 } 13606 13607 /* 13608 * Function: sd_shadow_buf_alloc 13609 * 13610 * Description: Allocate a buf(9S) and init it as per the given buf 13611 * and the various arguments. The associated sd_xbuf 13612 * struct is (nearly) duplicated. The struct buf *bp 13613 * argument is saved in new_xp->xb_private. 13614 * 13615 * Arguments: bp - ptr to the buf(9S) to be "shadowed" 13616 * datalen - size of data area for the shadow bp 13617 * bflags - B_READ or B_WRITE (pseudo flag) 13618 * blkno - starting LBA 13619 * func - function pointer for b_iodone in the shadow buf. (May 13620 * be NULL if none.) 13621 * 13622 * Return Code: Pointer to the allocated buf(9S) struct 13623 * 13624 * Context: Can sleep.
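 *
 * Note that unlike sd_bioclone_alloc() above, which clones the
 * original buf with bioclone(9F) and so shares its memory, this
 * routine allocates a separate data area for the shadow buf (see
 * the getrbuf/kmem_zalloc and scsi_alloc_consistent_buf variants
 * below); data must therefore be copied between the shadow and
 * original bufs, as done in sd_mapblocksize_iodone().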
13625 */ 13626 13627 static struct buf * 13628 sd_shadow_buf_alloc(struct buf *bp, size_t datalen, uint_t bflags, 13629 daddr_t blkno, int (*func)(struct buf *)) 13630 { 13631 struct sd_lun *un; 13632 struct sd_xbuf *xp; 13633 struct sd_xbuf *new_xp; 13634 struct buf *new_bp; 13635 13636 ASSERT(bp != NULL); 13637 xp = SD_GET_XBUF(bp); 13638 ASSERT(xp != NULL); 13639 un = SD_GET_UN(bp); 13640 ASSERT(un != NULL); 13641 ASSERT(!mutex_owned(SD_MUTEX(un))); 13642 13643 if (bp->b_flags & (B_PAGEIO | B_PHYS)) { 13644 bp_mapin(bp); 13645 } 13646 13647 bflags &= (B_READ | B_WRITE); 13648 #if defined(__i386) || defined(__amd64) 13649 new_bp = getrbuf(KM_SLEEP); 13650 new_bp->b_un.b_addr = kmem_zalloc(datalen, KM_SLEEP); 13651 new_bp->b_bcount = datalen; 13652 new_bp->b_flags = bflags | 13653 (bp->b_flags & ~(B_PAGEIO | B_PHYS | B_REMAPPED | B_SHADOW)); 13654 #else 13655 new_bp = scsi_alloc_consistent_buf(SD_ADDRESS(un), NULL, 13656 datalen, bflags, SLEEP_FUNC, NULL); 13657 #endif 13658 new_bp->av_forw = NULL; 13659 new_bp->av_back = NULL; 13660 new_bp->b_dev = bp->b_dev; 13661 new_bp->b_blkno = blkno; 13662 new_bp->b_iodone = func; 13663 new_bp->b_edev = bp->b_edev; 13664 new_bp->b_resid = 0; 13665 13666 /* We need to preserve the B_FAILFAST flag */ 13667 if (bp->b_flags & B_FAILFAST) { 13668 new_bp->b_flags |= B_FAILFAST; 13669 } 13670 13671 /* 13672 * Allocate an xbuf for the shadow bp and copy the contents of the 13673 * original xbuf into it. 13674 */ 13675 new_xp = kmem_alloc(sizeof (struct sd_xbuf), KM_SLEEP); 13676 bcopy(xp, new_xp, sizeof (struct sd_xbuf)); 13677 13678 /* Need later to copy data between the shadow buf & original buf! */ 13679 new_xp->xb_pkt_flags |= PKT_CONSISTENT; 13680 13681 /* 13682 * The given bp is automatically saved in the xb_private member 13683 * of the new xbuf. Callers are allowed to depend on this. 13684 */ 13685 new_xp->xb_private = bp; 13686 13687 new_bp->b_private = new_xp; 13688 13689 return (new_bp); 13690 } 13691 13692 /* 13693 * Function: sd_bioclone_free 13694 * 13695 * Description: Deallocate a buf(9S) that was used for 'shadow' IO operations 13696 * in the larger than partition operation. 13697 * 13698 * Context: May be called under interrupt context 13699 */ 13700 13701 static void 13702 sd_bioclone_free(struct buf *bp) 13703 { 13704 struct sd_xbuf *xp; 13705 13706 ASSERT(bp != NULL); 13707 xp = SD_GET_XBUF(bp); 13708 ASSERT(xp != NULL); 13709 13710 /* 13711 * Call bp_mapout() before freeing the buf, in case a lower 13712 * layer or HBA had done a bp_mapin(). we must do this here 13713 * as we are the "originator" of the shadow buf. 13714 */ 13715 bp_mapout(bp); 13716 13717 /* 13718 * Null out b_iodone before freeing the bp, to ensure that the driver 13719 * never gets confused by a stale value in this field. (Just a little 13720 * extra defensiveness here.) 13721 */ 13722 bp->b_iodone = NULL; 13723 13724 freerbuf(bp); 13725 13726 kmem_free(xp, sizeof (struct sd_xbuf)); 13727 } 13728 13729 /* 13730 * Function: sd_shadow_buf_free 13731 * 13732 * Description: Deallocate a buf(9S) that was used for 'shadow' IO operations. 13733 * 13734 * Context: May be called under interrupt context 13735 */ 13736 13737 static void 13738 sd_shadow_buf_free(struct buf *bp) 13739 { 13740 struct sd_xbuf *xp; 13741 13742 ASSERT(bp != NULL); 13743 xp = SD_GET_XBUF(bp); 13744 ASSERT(xp != NULL); 13745 13746 #if defined(__sparc) 13747 /* 13748 * Call bp_mapout() before freeing the buf, in case a lower 13749 * layer or HBA had done a bp_mapin(). 
We must do this here 13750 * as we are the "originator" of the shadow buf. 13751 */ 13752 bp_mapout(bp); 13753 #endif 13754 13755 /* 13756 * Null out b_iodone before freeing the bp, to ensure that the driver 13757 * never gets confused by a stale value in this field. (Just a little 13758 * extra defensiveness here.) 13759 */ 13760 bp->b_iodone = NULL; 13761 13762 #if defined(__i386) || defined(__amd64) 13763 kmem_free(bp->b_un.b_addr, bp->b_bcount); 13764 freerbuf(bp); 13765 #else 13766 scsi_free_consistent_buf(bp); 13767 #endif 13768 13769 kmem_free(xp, sizeof (struct sd_xbuf)); 13770 } 13771 13772 13773 /* 13774 * Function: sd_print_transport_rejected_message 13775 * 13776 * Description: This implements the ludicrously complex rules for printing 13777 * a "transport rejected" message. This is to address the 13778 * specific problem of having a flood of this error message 13779 * produced when a failover occurs. 13780 * 13781 * Context: Any. 13782 */ 13783 13784 static void 13785 sd_print_transport_rejected_message(struct sd_lun *un, struct sd_xbuf *xp, 13786 int code) 13787 { 13788 ASSERT(un != NULL); 13789 ASSERT(mutex_owned(SD_MUTEX(un))); 13790 ASSERT(xp != NULL); 13791 13792 /* 13793 * Print the "transport rejected" message under the following 13794 * conditions: 13795 * 13796 * - Whenever the SD_LOGMASK_DIAG bit of sd_level_mask is set 13797 * - The error code from scsi_transport() is NOT a TRAN_FATAL_ERROR. 13798 * - If the error code IS a TRAN_FATAL_ERROR, then the message is 13799 * printed the FIRST time a TRAN_FATAL_ERROR is returned from 13800 * scsi_transport(9F) (which indicates that the target might have 13801 * gone off-line). This uses the un->un_tran_fatal_count 13802 * counter, which is incremented whenever a TRAN_FATAL_ERROR is 13803 * received, and reset to zero whenever a TRAN_ACCEPT is returned 13804 * from scsi_transport(). 13805 * 13806 * The FLAG_SILENT in the scsi_pkt must be CLEARED in ALL of 13807 * the preceding cases in order for the message to be printed. 13808 */ 13809 if ((xp->xb_pktp->pkt_flags & FLAG_SILENT) == 0) { 13810 if ((sd_level_mask & SD_LOGMASK_DIAG) || 13811 (code != TRAN_FATAL_ERROR) || 13812 (un->un_tran_fatal_count == 1)) { 13813 switch (code) { 13814 case TRAN_BADPKT: 13815 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 13816 "transport rejected bad packet\n"); 13817 break; 13818 case TRAN_FATAL_ERROR: 13819 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 13820 "transport rejected fatal error\n"); 13821 break; 13822 default: 13823 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 13824 "transport rejected (%d)\n", code); 13825 break; 13826 } 13827 } 13828 } 13829 } 13830 13831 13832 /* 13833 * Function: sd_add_buf_to_waitq 13834 * 13835 * Description: Add the given buf(9S) struct to the wait queue for the 13836 * instance. If sorting is enabled, then the buf is added 13837 * to the queue via an elevator sort algorithm (a la 13838 * disksort(9F)). The SD_GET_BLKNO(bp) is used as the sort key. 13839 * If sorting is not enabled, then the buf is just added 13840 * to the end of the wait queue. 13841 * 13842 * Return Code: void 13843 * 13844 * Context: Does not sleep/block, therefore technically can be called 13845 * from any context. However if sorting is enabled then the 13846 * execution time is indeterminate, and may take a long time if 13847 * the wait queue grows large.
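 *
 * Worked example (illustrative only): with the current sweep
 * position at block 30, a waitq of blocks 50, 90, 10, 20 consists
 * of the first (ascending) list {50, 90} followed by the wrapped
 * second list {10, 20}.  A new request for block 60 is inserted
 * between 50 and 90; a request for block 15 goes between 10 and 20
 * in the second list.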
13848 */ 13849 13850 static void 13851 sd_add_buf_to_waitq(struct sd_lun *un, struct buf *bp) 13852 { 13853 struct buf *ap; 13854 13855 ASSERT(bp != NULL); 13856 ASSERT(un != NULL); 13857 ASSERT(mutex_owned(SD_MUTEX(un))); 13858 13859 /* If the queue is empty, add the buf as the only entry & return. */ 13860 if (un->un_waitq_headp == NULL) { 13861 ASSERT(un->un_waitq_tailp == NULL); 13862 un->un_waitq_headp = un->un_waitq_tailp = bp; 13863 bp->av_forw = NULL; 13864 return; 13865 } 13866 13867 ASSERT(un->un_waitq_tailp != NULL); 13868 13869 /* 13870 * If sorting is disabled, just add the buf to the tail end of 13871 * the wait queue and return. 13872 */ 13873 if (un->un_f_disksort_disabled) { 13874 un->un_waitq_tailp->av_forw = bp; 13875 un->un_waitq_tailp = bp; 13876 bp->av_forw = NULL; 13877 return; 13878 } 13879 13880 /* 13881 * Sort through the list of requests currently on the wait queue 13882 * and add the new buf request at the appropriate position. 13883 * 13884 * The un->un_waitq_headp is an activity chain pointer on which 13885 * we keep two queues, sorted in ascending SD_GET_BLKNO() order. The 13886 * first queue holds those requests which are positioned after 13887 * the current SD_GET_BLKNO() (in the first request); the second holds 13888 * requests which came in after their SD_GET_BLKNO() number was passed. 13889 * Thus we implement a one-way scan, retracting after reaching 13890 * the end of the drive to the first request on the second 13891 * queue, at which time it becomes the first queue. 13892 * A one-way scan is natural because of the way UNIX read-ahead 13893 * blocks are allocated. 13894 * 13895 * If we lie after the first request, then we must locate the 13896 * second request list and add ourselves to it. 13897 */ 13898 ap = un->un_waitq_headp; 13899 if (SD_GET_BLKNO(bp) < SD_GET_BLKNO(ap)) { 13900 while (ap->av_forw != NULL) { 13901 /* 13902 * Look for an "inversion" in the (normally 13903 * ascending) block numbers. This indicates 13904 * the start of the second request list. 13905 */ 13906 if (SD_GET_BLKNO(ap->av_forw) < SD_GET_BLKNO(ap)) { 13907 /* 13908 * Search the second request list for the 13909 * first request at a larger block number. 13910 * We go before that; however if there is 13911 * no such request, we go at the end. 13912 */ 13913 do { 13914 if (SD_GET_BLKNO(bp) < 13915 SD_GET_BLKNO(ap->av_forw)) { 13916 goto insert; 13917 } 13918 ap = ap->av_forw; 13919 } while (ap->av_forw != NULL); 13920 goto insert; /* after last */ 13921 } 13922 ap = ap->av_forw; 13923 } 13924 13925 /* 13926 * No inversions... we will go after the last, and 13927 * be the first request in the second request list. 13928 */ 13929 goto insert; 13930 } 13931 13932 /* 13933 * Request is at/after the current request... 13934 * sort in the first request list. 13935 */ 13936 while (ap->av_forw != NULL) { 13937 /* 13938 * We want to go after the current request (1) if 13939 * there is an inversion after it (i.e. it is the end 13940 * of the first request list), or (2) if the next 13941 * request is a larger block no. than our request. 13942 */ 13943 if ((SD_GET_BLKNO(ap->av_forw) < SD_GET_BLKNO(ap)) || 13944 (SD_GET_BLKNO(bp) < SD_GET_BLKNO(ap->av_forw))) { 13945 goto insert; 13946 } 13947 ap = ap->av_forw; 13948 } 13949 13950 /* 13951 * Neither a second list nor a larger request, therefore 13952 * we go at the end of the first list (which is the same 13953 * as the end of the whole shebang.
13954 */ 13955 insert: 13956 bp->av_forw = ap->av_forw; 13957 ap->av_forw = bp; 13958 13959 /* 13960 * If we inserted onto the tail end of the waitq, make sure the 13961 * tail pointer is updated. 13962 */ 13963 if (ap == un->un_waitq_tailp) { 13964 un->un_waitq_tailp = bp; 13965 } 13966 } 13967 13968 13969 /* 13970 * Function: sd_start_cmds 13971 * 13972 * Description: Remove and transport cmds from the driver queues. 13973 * 13974 * Arguments: un - pointer to the unit (soft state) struct for the target. 13975 * 13976 * immed_bp - ptr to a buf to be transported immediately. Only 13977 * the immed_bp is transported; bufs on the waitq are not 13978 * processed and the un_retry_bp is not checked. If immed_bp is 13979 * NULL, then normal queue processing is performed. 13980 * 13981 * Context: May be called from kernel thread context, interrupt context, 13982 * or runout callback context. This function may not block or 13983 * call routines that block. 13984 */ 13985 13986 static void 13987 sd_start_cmds(struct sd_lun *un, struct buf *immed_bp) 13988 { 13989 struct sd_xbuf *xp; 13990 struct buf *bp; 13991 void (*statp)(kstat_io_t *); 13992 #if defined(__i386) || defined(__amd64) /* DMAFREE for x86 only */ 13993 void (*saved_statp)(kstat_io_t *); 13994 #endif 13995 int rval; 13996 struct sd_fm_internal *sfip = NULL; 13997 13998 ASSERT(un != NULL); 13999 ASSERT(mutex_owned(SD_MUTEX(un))); 14000 ASSERT(un->un_ncmds_in_transport >= 0); 14001 ASSERT(un->un_throttle >= 0); 14002 14003 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sd_start_cmds: entry\n"); 14004 14005 do { 14006 #if defined(__i386) || defined(__amd64) /* DMAFREE for x86 only */ 14007 saved_statp = NULL; 14008 #endif 14009 14010 /* 14011 * If we are syncing or dumping, fail the command to 14012 * avoid recursively calling back into scsi_transport(). 14013 * The dump I/O itself uses a separate code path so this 14014 * only prevents non-dump I/O from being sent while dumping. 14015 * File system sync takes place before dumping begins. 14016 * During panic, filesystem I/O is allowed provided 14017 * un_in_callback is <= 1. This is to prevent recursion 14018 * such as sd_start_cmds -> scsi_transport -> sdintr -> 14019 * sd_start_cmds and so on. See panic.c for more information 14020 * about the states the system can be in during panic. 14021 */ 14022 if ((un->un_state == SD_STATE_DUMPING) || 14023 (ddi_in_panic() && (un->un_in_callback > 1))) { 14024 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14025 "sd_start_cmds: panicking\n"); 14026 goto exit; 14027 } 14028 14029 if ((bp = immed_bp) != NULL) { 14030 /* 14031 * We have a bp that must be transported immediately. 14032 * It's OK to transport the immed_bp here without doing 14033 * the throttle limit check because the immed_bp is 14034 * always used in a retry/recovery case. This means 14035 * that we know we are not at the throttle limit by 14036 * virtue of the fact that to get here we must have 14037 * already gotten a command back via sdintr(). This also 14038 * relies on (1) the command on un_retry_bp preventing 14039 * further commands from the waitq from being issued; 14040 * and (2) the code in sd_retry_command checking the 14041 * throttle limit before issuing a delayed or immediate 14042 * retry. This holds even if the throttle limit is 14043 * currently ratcheted down from its maximum value. 
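 *
 * In outline, each pass of the loop below (1) picks immed_bp if
 * one was given, otherwise the head of the waitq (subject to the
 * throttle and pending-callback checks); (2) allocates or re-uses
 * a scsi_pkt for it; and (3) hands it to scsi_transport(),
 * rescheduling the command on TRAN_BUSY and failing it on fatal
 * transport errors.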
14044 */ 14045 statp = kstat_runq_enter; 14046 if (bp == un->un_retry_bp) { 14047 ASSERT((un->un_retry_statp == NULL) || 14048 (un->un_retry_statp == kstat_waitq_enter) || 14049 (un->un_retry_statp == 14050 kstat_runq_back_to_waitq)); 14051 /* 14052 * If the waitq kstat was incremented when 14053 * sd_set_retry_bp() queued this bp for a retry, 14054 * then we must set up statp so that the waitq 14055 * count will get decremented correctly below. 14056 * Also we must clear un->un_retry_statp to 14057 * ensure that we do not act on a stale value 14058 * in this field. 14059 */ 14060 if ((un->un_retry_statp == kstat_waitq_enter) || 14061 (un->un_retry_statp == 14062 kstat_runq_back_to_waitq)) { 14063 statp = kstat_waitq_to_runq; 14064 } 14065 #if defined(__i386) || defined(__amd64) /* DMAFREE for x86 only */ 14066 saved_statp = un->un_retry_statp; 14067 #endif 14068 un->un_retry_statp = NULL; 14069 14070 SD_TRACE(SD_LOG_IO | SD_LOG_ERROR, un, 14071 "sd_start_cmds: un:0x%p: GOT retry_bp:0x%p " 14072 "un_throttle:%d un_ncmds_in_transport:%d\n", 14073 un, un->un_retry_bp, un->un_throttle, 14074 un->un_ncmds_in_transport); 14075 } else { 14076 SD_TRACE(SD_LOG_IO_CORE, un, "sd_start_cmds: " 14077 "processing priority bp:0x%p\n", bp); 14078 } 14079 14080 } else if ((bp = un->un_waitq_headp) != NULL) { 14081 /* 14082 * A command on the waitq is ready to go, but do not 14083 * send it if: 14084 * 14085 * (1) the throttle limit has been reached, or 14086 * (2) a retry is pending, or 14087 * (3) a START_STOP_UNIT callback is pending, or 14088 * (4) a callback for a SD_PATH_DIRECT_PRIORITY 14089 * command is pending. 14090 * 14091 * For all of these conditions, IO processing will 14092 * restart after the condition is cleared. 14093 */ 14094 if (un->un_ncmds_in_transport >= un->un_throttle) { 14095 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14096 "sd_start_cmds: exiting, " 14097 "throttle limit reached!\n"); 14098 goto exit; 14099 } 14100 if (un->un_retry_bp != NULL) { 14101 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14102 "sd_start_cmds: exiting, retry pending!\n"); 14103 goto exit; 14104 } 14105 if (un->un_startstop_timeid != NULL) { 14106 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14107 "sd_start_cmds: exiting, " 14108 "START_STOP pending!\n"); 14109 goto exit; 14110 } 14111 if (un->un_direct_priority_timeid != NULL) { 14112 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14113 "sd_start_cmds: exiting, " 14114 "SD_PATH_DIRECT_PRIORITY cmd. pending!\n"); 14115 goto exit; 14116 } 14117 14118 /* Dequeue the command */ 14119 un->un_waitq_headp = bp->av_forw; 14120 if (un->un_waitq_headp == NULL) { 14121 un->un_waitq_tailp = NULL; 14122 } 14123 bp->av_forw = NULL; 14124 statp = kstat_waitq_to_runq; 14125 SD_TRACE(SD_LOG_IO_CORE, un, 14126 "sd_start_cmds: processing waitq bp:0x%p\n", bp); 14127 14128 } else { 14129 /* No work to do so bail out now */ 14130 SD_TRACE(SD_LOG_IO_CORE, un, 14131 "sd_start_cmds: no more work, exiting!\n"); 14132 goto exit; 14133 } 14134 14135 /* 14136 * Reset the state to normal. This is the mechanism by which 14137 * the state transitions from either SD_STATE_RWAIT or 14138 * SD_STATE_OFFLINE to SD_STATE_NORMAL. 14139 * If state is SD_STATE_PM_CHANGING then this command is 14140 * part of the device power control and the state must 14141 * not be put back to normal. Doing so would 14142 * allow new commands to proceed when they shouldn't; 14143 * the device may be going off.
14144 */ 14145 if ((un->un_state != SD_STATE_SUSPENDED) && 14146 (un->un_state != SD_STATE_PM_CHANGING)) { 14147 New_state(un, SD_STATE_NORMAL); 14148 } 14149 14150 xp = SD_GET_XBUF(bp); 14151 ASSERT(xp != NULL); 14152 14153 #if defined(__i386) || defined(__amd64) /* DMAFREE for x86 only */ 14154 /* 14155 * Allocate the scsi_pkt if we need one, or attach DMA 14156 * resources if we have a scsi_pkt that needs them. The 14157 * latter should only occur for commands that are being 14158 * retried. 14159 */ 14160 if ((xp->xb_pktp == NULL) || 14161 ((xp->xb_pkt_flags & SD_XB_DMA_FREED) != 0)) { 14162 #else 14163 if (xp->xb_pktp == NULL) { 14164 #endif 14165 /* 14166 * There is no scsi_pkt allocated for this buf. Call 14167 * the initpkt function to allocate & init one. 14168 * 14169 * The scsi_init_pkt runout callback functionality is 14170 * implemented as follows: 14171 * 14172 * 1) The initpkt function always calls 14173 * scsi_init_pkt(9F) with sdrunout specified as the 14174 * callback routine. 14175 * 2) A successful packet allocation is initialized and 14176 * the I/O is transported. 14177 * 3) The I/O associated with an allocation resource 14178 * failure is left on its queue to be retried via 14179 * runout or the next I/O. 14180 * 4) The I/O associated with a DMA error is removed 14181 * from the queue and failed with EIO. Processing of 14182 * the transport queues is also halted to be 14183 * restarted via runout or the next I/O. 14184 * 5) The I/O associated with a CDB size or packet 14185 * size error is removed from the queue and failed 14186 * with EIO. Processing of the transport queues is 14187 * continued. 14188 * 14189 * Note: there is no interface for canceling a runout 14190 * callback. To prevent the driver from detaching or 14191 * suspending while a runout is pending the driver 14192 * state is set to SD_STATE_RWAIT 14193 * 14194 * Note: using the scsi_init_pkt callback facility can 14195 * result in an I/O request persisting at the head of 14196 * the list which cannot be satisfied even after 14197 * multiple retries. In the future the driver may 14198 * implement some kind of maximum runout count before 14199 * failing an I/O. 14200 * 14201 * Note: the use of funcp below may seem superfluous, 14202 * but it helps warlock figure out the correct 14203 * initpkt function calls (see [s]sd.wlcmd). 14204 */ 14205 struct scsi_pkt *pktp; 14206 int (*funcp)(struct buf *bp, struct scsi_pkt **pktp); 14207 14208 ASSERT(bp != un->un_rqs_bp); 14209 14210 funcp = sd_initpkt_map[xp->xb_chain_iostart]; 14211 switch ((*funcp)(bp, &pktp)) { 14212 case SD_PKT_ALLOC_SUCCESS: 14213 xp->xb_pktp = pktp; 14214 SD_TRACE(SD_LOG_IO_CORE, un, 14215 "sd_start_cmd: SD_PKT_ALLOC_SUCCESS 0x%p\n", 14216 pktp); 14217 goto got_pkt; 14218 14219 case SD_PKT_ALLOC_FAILURE: 14220 /* 14221 * Temporary (hopefully) resource depletion. 14222 * Since retries and RQS commands always have a 14223 * scsi_pkt allocated, these cases should never 14224 * get here. So the only cases this needs to 14225 * handle is a bp from the waitq (which we put 14226 * back onto the waitq for sdrunout), or a bp 14227 * sent as an immed_bp (which we just fail). 14228 */ 14229 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14230 "sd_start_cmds: SD_PKT_ALLOC_FAILURE\n"); 14231 14232 #if defined(__i386) || defined(__amd64) /* DMAFREE for x86 only */ 14233 14234 if (bp == immed_bp) { 14235 /* 14236 * If SD_XB_DMA_FREED is clear, then 14237 * this is a failure to allocate a 14238 * scsi_pkt, and we must fail the 14239 * command. 
14240 */ 14241 if ((xp->xb_pkt_flags & 14242 SD_XB_DMA_FREED) == 0) { 14243 break; 14244 } 14245 14246 /* 14247 * If this immediate command is NOT our 14248 * un_retry_bp, then we must fail it. 14249 */ 14250 if (bp != un->un_retry_bp) { 14251 break; 14252 } 14253 14254 /* 14255 * We get here if this cmd is our 14256 * un_retry_bp that was DMAFREED, but 14257 * scsi_init_pkt() failed to reallocate 14258 * DMA resources when we attempted to 14259 * retry it. This can happen when an 14260 * mpxio failover is in progress, but 14261 * we don't want to just fail the 14262 * command in this case. 14263 * 14264 * Use timeout(9F) to restart it after 14265 * a 100ms delay. We don't want to 14266 * let sdrunout() restart it, because 14267 * sdrunout() is just supposed to start 14268 * commands that are sitting on the 14269 * wait queue. The un_retry_bp stays 14270 * set until the command completes, but 14271 * sdrunout can be called many times 14272 * before that happens. Since sdrunout 14273 * cannot tell if the un_retry_bp is 14274 * already in the transport, it could 14275 * end up calling scsi_transport() for 14276 * the un_retry_bp multiple times. 14277 * 14278 * Also: don't schedule the callback 14279 * if some other callback is already 14280 * pending. 14281 */ 14282 if (un->un_retry_statp == NULL) { 14283 /* 14284 * restore the kstat pointer to 14285 * keep kstat counts coherent 14286 * when we do retry the command. 14287 */ 14288 un->un_retry_statp = 14289 saved_statp; 14290 } 14291 14292 if ((un->un_startstop_timeid == NULL) && 14293 (un->un_retry_timeid == NULL) && 14294 (un->un_direct_priority_timeid == 14295 NULL)) { 14296 14297 un->un_retry_timeid = 14298 timeout( 14299 sd_start_retry_command, 14300 un, SD_RESTART_TIMEOUT); 14301 } 14302 goto exit; 14303 } 14304 14305 #else 14306 if (bp == immed_bp) { 14307 break; /* Just fail the command */ 14308 } 14309 #endif 14310 14311 /* Add the buf back to the head of the waitq */ 14312 bp->av_forw = un->un_waitq_headp; 14313 un->un_waitq_headp = bp; 14314 if (un->un_waitq_tailp == NULL) { 14315 un->un_waitq_tailp = bp; 14316 } 14317 goto exit; 14318 14319 case SD_PKT_ALLOC_FAILURE_NO_DMA: 14320 /* 14321 * HBA DMA resource failure. Fail the command 14322 * and continue processing of the queues. 14323 */ 14324 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14325 "sd_start_cmds: " 14326 "SD_PKT_ALLOC_FAILURE_NO_DMA\n"); 14327 break; 14328 14329 case SD_PKT_ALLOC_FAILURE_PKT_TOO_SMALL: 14330 /* 14331 * Note:x86: Partial DMA mapping not supported 14332 * for USCSI commands, and all the needed DMA 14333 * resources were not allocated. 14334 */ 14335 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14336 "sd_start_cmds: " 14337 "SD_PKT_ALLOC_FAILURE_PKT_TOO_SMALL\n"); 14338 break; 14339 14340 case SD_PKT_ALLOC_FAILURE_CDB_TOO_SMALL: 14341 /* 14342 * Note:x86: Request cannot fit into CDB based 14343 * on lba and len. 14344 */ 14345 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14346 "sd_start_cmds: " 14347 "SD_PKT_ALLOC_FAILURE_CDB_TOO_SMALL\n"); 14348 break; 14349 14350 default: 14351 /* Should NEVER get here! */ 14352 panic("scsi_initpkt error"); 14353 /*NOTREACHED*/ 14354 } 14355 14356 /* 14357 * Fatal error in allocating a scsi_pkt for this buf. 14358 * Update kstats & return the buf with an error code. 14359 * We must use sd_return_failed_command_no_restart() to 14360 * avoid a recursive call back into sd_start_cmds(). 14361 * However this also means that we must keep processing 14362 * the waitq here in order to avoid stalling. 
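 * (sd_return_failed_command_no_restart() completes the buf without
 * calling back into sd_start_cmds(), which is what makes it safe
 * to invoke from inside this loop; the "continue" below then picks
 * up the next queued command.)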
14363 */ 14364 if (statp == kstat_waitq_to_runq) { 14365 SD_UPDATE_KSTATS(un, kstat_waitq_exit, bp); 14366 } 14367 sd_return_failed_command_no_restart(un, bp, EIO); 14368 if (bp == immed_bp) { 14369 /* immed_bp is gone by now, so clear this */ 14370 immed_bp = NULL; 14371 } 14372 continue; 14373 } 14374 got_pkt: 14375 if (bp == immed_bp) { 14376 /* goto the head of the class.... */ 14377 xp->xb_pktp->pkt_flags |= FLAG_HEAD; 14378 } 14379 14380 un->un_ncmds_in_transport++; 14381 SD_UPDATE_KSTATS(un, statp, bp); 14382 14383 /* 14384 * Call scsi_transport() to send the command to the target. 14385 * According to SCSA architecture, we must drop the mutex here 14386 * before calling scsi_transport() in order to avoid deadlock. 14387 * Note that the scsi_pkt's completion routine can be executed 14388 * (from interrupt context) even before the call to 14389 * scsi_transport() returns. 14390 */ 14391 SD_TRACE(SD_LOG_IO_CORE, un, 14392 "sd_start_cmds: calling scsi_transport()\n"); 14393 DTRACE_PROBE1(scsi__transport__dispatch, struct buf *, bp); 14394 14395 mutex_exit(SD_MUTEX(un)); 14396 rval = scsi_transport(xp->xb_pktp); 14397 mutex_enter(SD_MUTEX(un)); 14398 14399 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14400 "sd_start_cmds: scsi_transport() returned %d\n", rval); 14401 14402 switch (rval) { 14403 case TRAN_ACCEPT: 14404 /* Clear this with every pkt accepted by the HBA */ 14405 un->un_tran_fatal_count = 0; 14406 break; /* Success; try the next cmd (if any) */ 14407 14408 case TRAN_BUSY: 14409 un->un_ncmds_in_transport--; 14410 ASSERT(un->un_ncmds_in_transport >= 0); 14411 14412 /* 14413 * Don't retry request sense, the sense data 14414 * is lost when another request is sent. 14415 * Free up the rqs buf and retry 14416 * the original failed cmd. Update kstat. 14417 */ 14418 if (bp == un->un_rqs_bp) { 14419 SD_UPDATE_KSTATS(un, kstat_runq_exit, bp); 14420 bp = sd_mark_rqs_idle(un, xp); 14421 sd_retry_command(un, bp, SD_RETRIES_STANDARD, 14422 NULL, NULL, EIO, un->un_busy_timeout / 500, 14423 kstat_waitq_enter); 14424 goto exit; 14425 } 14426 14427 #if defined(__i386) || defined(__amd64) /* DMAFREE for x86 only */ 14428 /* 14429 * Free the DMA resources for the scsi_pkt. This will 14430 * allow mpxio to select another path the next time 14431 * we call scsi_transport() with this scsi_pkt. 14432 * See sdintr() for the rationalization behind this. 14433 */ 14434 if ((un->un_f_is_fibre == TRUE) && 14435 ((xp->xb_pkt_flags & SD_XB_USCSICMD) == 0) && 14436 ((xp->xb_pktp->pkt_flags & FLAG_SENSING) == 0)) { 14437 scsi_dmafree(xp->xb_pktp); 14438 xp->xb_pkt_flags |= SD_XB_DMA_FREED; 14439 } 14440 #endif 14441 14442 if (SD_IS_DIRECT_PRIORITY(SD_GET_XBUF(bp))) { 14443 /* 14444 * Commands that are SD_PATH_DIRECT_PRIORITY 14445 * are for error recovery situations. These do 14446 * not use the normal command waitq, so if they 14447 * get a TRAN_BUSY we cannot put them back onto 14448 * the waitq for later retry. One possible 14449 * problem is that there could already be some 14450 * other command on un_retry_bp that is waiting 14451 * for this one to complete, so we would be 14452 * deadlocked if we put this command back onto 14453 * the waitq for later retry (since un_retry_bp 14454 * must complete before the driver gets back to 14455 * commands on the waitq). 14456 * 14457 * To avoid deadlock we must schedule a callback 14458 * that will restart this command after a set 14459 * interval. 
This should keep retrying for as 14460 * long as the underlying transport keeps 14461 * returning TRAN_BUSY (just like for other 14462 * commands). Use the same timeout interval as 14463 * for the ordinary TRAN_BUSY retry. 14464 */ 14465 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14466 "sd_start_cmds: scsi_transport() returned " 14467 "TRAN_BUSY for DIRECT_PRIORITY cmd!\n"); 14468 14469 SD_UPDATE_KSTATS(un, kstat_runq_exit, bp); 14470 un->un_direct_priority_timeid = 14471 timeout(sd_start_direct_priority_command, 14472 bp, un->un_busy_timeout / 500); 14473 14474 goto exit; 14475 } 14476 14477 /* 14478 * For TRAN_BUSY, we want to reduce the throttle value, 14479 * unless we are retrying a command. 14480 */ 14481 if (bp != un->un_retry_bp) { 14482 sd_reduce_throttle(un, SD_THROTTLE_TRAN_BUSY); 14483 } 14484 14485 /* 14486 * Set up the bp to be tried again 10 ms later. 14487 * Note:x86: Is there a timeout value in the sd_lun 14488 * for this condition? 14489 */ 14490 sd_set_retry_bp(un, bp, un->un_busy_timeout / 500, 14491 kstat_runq_back_to_waitq); 14492 goto exit; 14493 14494 case TRAN_FATAL_ERROR: 14495 un->un_tran_fatal_count++; 14496 /* FALLTHRU */ 14497 14498 case TRAN_BADPKT: 14499 default: 14500 un->un_ncmds_in_transport--; 14501 ASSERT(un->un_ncmds_in_transport >= 0); 14502 14503 /* 14504 * If this is our REQUEST SENSE command with a 14505 * transport error, we must get back the pointers 14506 * to the original buf, and mark the REQUEST 14507 * SENSE command as "available". 14508 */ 14509 if (bp == un->un_rqs_bp) { 14510 bp = sd_mark_rqs_idle(un, xp); 14511 xp = SD_GET_XBUF(bp); 14512 } else { 14513 /* 14514 * Legacy behavior: do not update transport 14515 * error count for request sense commands. 14516 */ 14517 SD_UPDATE_ERRSTATS(un, sd_transerrs); 14518 } 14519 14520 SD_UPDATE_KSTATS(un, kstat_runq_exit, bp); 14521 sd_print_transport_rejected_message(un, xp, rval); 14522 14523 /* 14524 * This command will be terminated by SD driver due 14525 * to a fatal transport error. We should post 14526 * ereport.io.scsi.cmd.disk.tran with driver-assessment 14527 * of "fail" for any command to indicate this 14528 * situation. 14529 */ 14530 if (xp->xb_ena > 0) { 14531 ASSERT(un->un_fm_private != NULL); 14532 sfip = un->un_fm_private; 14533 sfip->fm_ssc.ssc_flags |= SSC_FLAGS_TRAN_ABORT; 14534 sd_ssc_extract_info(&sfip->fm_ssc, un, 14535 xp->xb_pktp, bp, xp); 14536 sd_ssc_post(&sfip->fm_ssc, SD_FM_DRV_FATAL); 14537 } 14538 14539 /* 14540 * We must use sd_return_failed_command_no_restart() to 14541 * avoid a recursive call back into sd_start_cmds(). 14542 * However this also means that we must keep processing 14543 * the waitq here in order to avoid stalling. 14544 */ 14545 sd_return_failed_command_no_restart(un, bp, EIO); 14546 14547 /* 14548 * Notify any threads waiting in sd_ddi_suspend() that 14549 * a command completion has occurred. 14550 */ 14551 if (un->un_state == SD_STATE_SUSPENDED) { 14552 cv_broadcast(&un->un_disk_busy_cv); 14553 } 14554 14555 if (bp == immed_bp) { 14556 /* immed_bp is gone by now, so clear this */ 14557 immed_bp = NULL; 14558 } 14559 break; 14560 } 14561 14562 } while (immed_bp == NULL); 14563 14564 exit: 14565 ASSERT(mutex_owned(SD_MUTEX(un))); 14566 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sd_start_cmds: exit\n"); 14567 } 14568 14569 14570 /* 14571 * Function: sd_return_command 14572 * 14573 * Description: Returns a command to its originator (with or without an 14574 * error). Also starts commands waiting to be transported 14575 * to the target. 
14576 * 14577 * Context: May be called from interrupt, kernel, or timeout context 14578 */ 14579 14580 static void 14581 sd_return_command(struct sd_lun *un, struct buf *bp) 14582 { 14583 struct sd_xbuf *xp; 14584 struct scsi_pkt *pktp; 14585 struct sd_fm_internal *sfip; 14586 14587 ASSERT(bp != NULL); 14588 ASSERT(un != NULL); 14589 ASSERT(mutex_owned(SD_MUTEX(un))); 14590 ASSERT(bp != un->un_rqs_bp); 14591 xp = SD_GET_XBUF(bp); 14592 ASSERT(xp != NULL); 14593 14594 pktp = SD_GET_PKTP(bp); 14595 sfip = (struct sd_fm_internal *)un->un_fm_private; 14596 ASSERT(sfip != NULL); 14597 14598 SD_TRACE(SD_LOG_IO_CORE, un, "sd_return_command: entry\n"); 14599 14600 /* 14601 * Note: check for the "sdrestart failed" case. 14602 */ 14603 if ((un->un_partial_dma_supported == 1) && 14604 ((xp->xb_pkt_flags & SD_XB_USCSICMD) != SD_XB_USCSICMD) && 14605 (geterror(bp) == 0) && (xp->xb_dma_resid != 0) && 14606 (xp->xb_pktp->pkt_resid == 0)) { 14607 14608 if (sd_setup_next_xfer(un, bp, pktp, xp) != 0) { 14609 /* 14610 * Successfully set up next portion of cmd 14611 * transfer, try sending it 14612 */ 14613 sd_retry_command(un, bp, SD_RETRIES_NOCHECK, 14614 NULL, NULL, 0, (clock_t)0, NULL); 14615 sd_start_cmds(un, NULL); 14616 return; /* Note:x86: need a return here? */ 14617 } 14618 } 14619 14620 /* 14621 * If this is the failfast bp, clear it from un_failfast_bp. This 14622 * can happen if upon being re-tried the failfast bp either 14623 * succeeded or encountered another error (possibly even a different 14624 * error than the one that precipitated the failfast state, but in 14625 * that case it would have had to exhaust retries as well). Regardless, 14626 * this should not occur whenever the instance is in the active 14627 * failfast state. 14628 */ 14629 if (bp == un->un_failfast_bp) { 14630 ASSERT(un->un_failfast_state == SD_FAILFAST_INACTIVE); 14631 un->un_failfast_bp = NULL; 14632 } 14633 14634 /* 14635 * Clear the failfast state upon successful completion of ANY cmd. 14636 */ 14637 if (bp->b_error == 0) { 14638 un->un_failfast_state = SD_FAILFAST_INACTIVE; 14639 /* 14640 * If this is a successful command, but used to be retried, 14641 * we will take it as a recovered command and post an 14642 * ereport with driver-assessment of "recovered". 14643 */ 14644 if (xp->xb_ena > 0) { 14645 sd_ssc_extract_info(&sfip->fm_ssc, un, pktp, bp, xp); 14646 sd_ssc_post(&sfip->fm_ssc, SD_FM_DRV_RECOVERY); 14647 } 14648 } else { 14649 /* 14650 * If this is a failed non-USCSI command we will post an 14651 * ereport with driver-assessment set accordingly("fail" or 14652 * "fatal"). 14653 */ 14654 if (!(xp->xb_pkt_flags & SD_XB_USCSICMD)) { 14655 sd_ssc_extract_info(&sfip->fm_ssc, un, pktp, bp, xp); 14656 sd_ssc_post(&sfip->fm_ssc, SD_FM_DRV_FATAL); 14657 } 14658 } 14659 14660 /* 14661 * This is used if the command was retried one or more times. Show that 14662 * we are done with it, and allow processing of the waitq to resume. 14663 */ 14664 if (bp == un->un_retry_bp) { 14665 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14666 "sd_return_command: un:0x%p: " 14667 "RETURNING retry_bp:0x%p\n", un, un->un_retry_bp); 14668 un->un_retry_bp = NULL; 14669 un->un_retry_statp = NULL; 14670 } 14671 14672 SD_UPDATE_RDWR_STATS(un, bp); 14673 SD_UPDATE_PARTITION_STATS(un, bp); 14674 14675 switch (un->un_state) { 14676 case SD_STATE_SUSPENDED: 14677 /* 14678 * Notify any threads waiting in sd_ddi_suspend() that 14679 * a command completion has occurred. 
		 */
		cv_broadcast(&un->un_disk_busy_cv);
		break;
	default:
		sd_start_cmds(un, NULL);
		break;
	}

	/* Return this command up the iodone chain to its originator. */
	mutex_exit(SD_MUTEX(un));

	(*(sd_destroypkt_map[xp->xb_chain_iodone]))(bp);
	xp->xb_pktp = NULL;

	SD_BEGIN_IODONE(xp->xb_chain_iodone, un, bp);

	ASSERT(!mutex_owned(SD_MUTEX(un)));
	mutex_enter(SD_MUTEX(un));

	SD_TRACE(SD_LOG_IO_CORE, un, "sd_return_command: exit\n");
}


/*
 * Function: sd_return_failed_command
 *
 * Description: Command completion when an error occurred.
 *
 * Context: May be called from interrupt context
 */

static void
sd_return_failed_command(struct sd_lun *un, struct buf *bp, int errcode)
{
	ASSERT(bp != NULL);
	ASSERT(un != NULL);
	ASSERT(mutex_owned(SD_MUTEX(un)));

	SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
	    "sd_return_failed_command: entry\n");

	/*
	 * b_resid could already be nonzero due to a partial data
	 * transfer, so do not change it here.
	 */
	SD_BIOERROR(bp, errcode);

	sd_return_command(un, bp);
	SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
	    "sd_return_failed_command: exit\n");
}


/*
 * Function: sd_return_failed_command_no_restart
 *
 * Description: Same as sd_return_failed_command, but ensures that no
 *		call back into sd_start_cmds will be issued.
 *
 * Context: May be called from interrupt context
 */

static void
sd_return_failed_command_no_restart(struct sd_lun *un, struct buf *bp,
    int errcode)
{
	struct sd_xbuf *xp;

	ASSERT(bp != NULL);
	ASSERT(un != NULL);
	ASSERT(mutex_owned(SD_MUTEX(un)));
	xp = SD_GET_XBUF(bp);
	ASSERT(xp != NULL);
	ASSERT(errcode != 0);

	SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
	    "sd_return_failed_command_no_restart: entry\n");

	/*
	 * b_resid could already be nonzero due to a partial data
	 * transfer, so do not change it here.
	 */
	SD_BIOERROR(bp, errcode);

	/*
	 * If this is the failfast bp, clear it. This can happen if the
	 * failfast bp encountered a fatal error when we attempted to
	 * re-try it (such as a scsi_transport(9F) failure). However
	 * we should NOT be in an active failfast state if the failfast
	 * bp is not NULL.
	 */
	if (bp == un->un_failfast_bp) {
		ASSERT(un->un_failfast_state == SD_FAILFAST_INACTIVE);
		un->un_failfast_bp = NULL;
	}

	if (bp == un->un_retry_bp) {
		/*
		 * This command was retried one or more times. Show that we are
		 * done with it, and allow processing of the waitq to resume.
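		 *
		 * (While un_retry_bp is non-NULL it acts as a gate:
		 * sd_start_cmds() will transport only that buf and will
		 * hold all other waitq traffic until it completes. A
		 * paraphrased sketch of the gating test, not a literal
		 * quote of that code:
		 *
		 *	if ((un->un_retry_bp != NULL) &&
		 *	    (bp != un->un_retry_bp)) {
		 *		break;		(hold ordinary waitq traffic)
		 *	}
		 *
		 * Clearing un_retry_bp here is therefore what lets normal
		 * waitq processing resume.)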
		 */
		SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
		    "sd_return_failed_command_no_restart: "
		    " un:0x%p: RETURNING retry_bp:0x%p\n", un, un->un_retry_bp);
		un->un_retry_bp = NULL;
		un->un_retry_statp = NULL;
	}

	SD_UPDATE_RDWR_STATS(un, bp);
	SD_UPDATE_PARTITION_STATS(un, bp);

	mutex_exit(SD_MUTEX(un));

	if (xp->xb_pktp != NULL) {
		(*(sd_destroypkt_map[xp->xb_chain_iodone]))(bp);
		xp->xb_pktp = NULL;
	}

	SD_BEGIN_IODONE(xp->xb_chain_iodone, un, bp);

	mutex_enter(SD_MUTEX(un));

	SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
	    "sd_return_failed_command_no_restart: exit\n");
}


/*
 * Function: sd_retry_command
 *
 * Description: Queue up a command for retry, or (optionally) fail it
 *		if retry counts are exhausted.
 *
 * Arguments: un - Pointer to the sd_lun struct for the target.
 *
 *		bp - Pointer to the buf for the command to be retried.
 *
 *		retry_check_flag - Flag to see which (if any) of the retry
 *		   counts should be decremented/checked. If the indicated
 *		   retry count is exhausted, then the command will not be
 *		   retried; it will be failed instead. This should use a
 *		   value equal to one of the following:
 *
 *			SD_RETRIES_NOCHECK
 *			SD_RETRIES_STANDARD
 *			SD_RETRIES_VICTIM
 *
 *		   Optionally may be bitwise-OR'ed with SD_RETRIES_ISOLATE
 *		   if the check should be made to see if FLAG_ISOLATE is set
 *		   in the pkt. If FLAG_ISOLATE is set, then the command is
 *		   not retried; it is simply failed.
 *
 *		user_funcp - Ptr to function to call before dispatching the
 *		   command. May be NULL if no action needs to be performed.
 *		   (Primarily intended for printing messages.)
 *
 *		user_arg - Optional argument to be passed along to
 *		   the user_funcp call.
 *
 *		failure_code - errno return code to set in the bp if the
 *		   command is going to be failed.
 *
 *		retry_delay - Retry delay interval in (clock_t) units. May
 *		   be zero which indicates that the retry should be attempted
 *		   immediately (ie, without an intervening delay).
 *
 *		statp - Ptr to kstat function to be updated if the command
 *		   is queued for a delayed retry. May be NULL if no kstat
 *		   update is desired.
 *
 * Context: May be called from interrupt context.
 */

static void
sd_retry_command(struct sd_lun *un, struct buf *bp, int retry_check_flag,
    void (*user_funcp)(struct sd_lun *un, struct buf *bp, void *argp, int
    code), void *user_arg, int failure_code, clock_t retry_delay,
    void (*statp)(kstat_io_t *))
{
	struct sd_xbuf *xp;
	struct scsi_pkt *pktp;
	struct sd_fm_internal *sfip;

	ASSERT(un != NULL);
	ASSERT(mutex_owned(SD_MUTEX(un)));
	ASSERT(bp != NULL);
	xp = SD_GET_XBUF(bp);
	ASSERT(xp != NULL);
	pktp = SD_GET_PKTP(bp);
	ASSERT(pktp != NULL);

	sfip = (struct sd_fm_internal *)un->un_fm_private;
	ASSERT(sfip != NULL);

	SD_TRACE(SD_LOG_IO | SD_LOG_ERROR, un,
	    "sd_retry_command: entry: bp:0x%p xp:0x%p\n", bp, xp);

	/*
	 * If we are syncing or dumping, fail the command to avoid
	 * recursively calling back into scsi_transport().
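	 *
	 * The recursion being avoided, in sketch form:
	 *
	 *	sd_retry_command() -> sd_start_cmds() -> scsi_transport()
	 *	        ^                                       |
	 *	        |     (synchronous/polled completion)   v
	 *	        +-------------- sdintr() <--------------+
	 *
	 * At panic time the transport may complete commands synchronously,
	 * so a retry issued from this path could re-enter scsi_transport()
	 * before the previous call has unwound.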
	 */
	if (ddi_in_panic()) {
		goto fail_command_no_log;
	}

	/*
	 * We should never be retrying a command with FLAG_DIAGNOSE set, so
	 * log an error and fail the command.
	 */
	if ((pktp->pkt_flags & FLAG_DIAGNOSE) != 0) {
		scsi_log(SD_DEVINFO(un), sd_label, CE_NOTE,
		    "ERROR, retrying FLAG_DIAGNOSE command.\n");
		sd_dump_memory(un, SD_LOG_IO, "CDB",
		    (uchar_t *)pktp->pkt_cdbp, CDB_SIZE, SD_LOG_HEX);
		sd_dump_memory(un, SD_LOG_IO, "Sense Data",
		    (uchar_t *)xp->xb_sense_data, SENSE_LENGTH, SD_LOG_HEX);
		goto fail_command;
	}

	/*
	 * If we are suspended or dumping, put the command onto the head
	 * of the wait queue (since we do not want to start any more
	 * commands) and clear un_retry_bp. The command will be handled
	 * from the wait queue the next time we are resumed.
	 */
	switch (un->un_state) {
	case SD_STATE_SUSPENDED:
	case SD_STATE_DUMPING:
		bp->av_forw = un->un_waitq_headp;
		un->un_waitq_headp = bp;
		if (un->un_waitq_tailp == NULL) {
			un->un_waitq_tailp = bp;
		}
		if (bp == un->un_retry_bp) {
			un->un_retry_bp = NULL;
			un->un_retry_statp = NULL;
		}
		SD_UPDATE_KSTATS(un, kstat_waitq_enter, bp);
		SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sd_retry_command: "
		    "exiting; cmd bp:0x%p requeued for SUSPEND/DUMP\n", bp);
		return;
	default:
		break;
	}

	/*
	 * If the caller wants us to check FLAG_ISOLATE, then see if that
	 * is set; if it is then we do not want to retry the command.
	 * Normally, FLAG_ISOLATE is only used with USCSI cmds.
	 */
	if ((retry_check_flag & SD_RETRIES_ISOLATE) != 0) {
		if ((pktp->pkt_flags & FLAG_ISOLATE) != 0) {
			goto fail_command;
		}
	}


	/*
	 * If SD_RETRIES_FAILFAST is set, it indicates that either a
	 * command timeout or a selection timeout has occurred. This means
	 * that we were unable to establish any kind of communication with
	 * the target, and subsequent retries and/or commands are likely
	 * to encounter similar results and take a long time to complete.
	 *
	 * If this is a failfast error condition, we need to update the
	 * failfast state, even if this bp does not have B_FAILFAST set.
	 */
	if (retry_check_flag & SD_RETRIES_FAILFAST) {
		if (un->un_failfast_state == SD_FAILFAST_ACTIVE) {
			ASSERT(un->un_failfast_bp == NULL);
			/*
			 * If we are already in the active failfast state, and
			 * another failfast error condition has been detected,
			 * then fail this command if it has B_FAILFAST set.
			 * If B_FAILFAST is clear, then maintain the legacy
			 * behavior of retrying heroically, even though this
			 * will take a lot more time to fail the command.
			 */
			if (bp->b_flags & B_FAILFAST) {
				goto fail_command;
			}
		} else {
			/*
			 * We're not in the active failfast state, but we
			 * have a failfast error condition, so we must begin
			 * transition to the next state. We do this regardless
			 * of whether or not this bp has B_FAILFAST set.
			 */
			if (un->un_failfast_bp == NULL) {
				/*
				 * This is the first bp to meet a failfast
				 * condition so save it on un_failfast_bp &
				 * do normal retry processing. Do not enter
				 * active failfast state yet. This marks
				 * entry into the "failfast pending" state.
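				 *
				 * In sketch form, the resulting state
				 * machine ("pending" is implicit in a
				 * non-NULL un_failfast_bp):
				 *
				 *   INACTIVE --1st error on bp--> pending
				 *   pending --2nd error, same bp--> ACTIVE
				 *     (flush queues, clear un_failfast_bp)
				 *   any --non-failfast result--> INACTIVE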
				 */
				un->un_failfast_bp = bp;

			} else if (un->un_failfast_bp == bp) {
				/*
				 * This is the second time *this* bp has
				 * encountered a failfast error condition,
				 * so enter active failfast state & flush
				 * queues as appropriate.
				 */
				un->un_failfast_state = SD_FAILFAST_ACTIVE;
				un->un_failfast_bp = NULL;
				sd_failfast_flushq(un);

				/*
				 * Fail this bp now if B_FAILFAST set;
				 * otherwise continue with retries. (It would
				 * be pretty ironic if this bp succeeded on a
				 * subsequent retry after we just flushed all
				 * the queues).
				 */
				if (bp->b_flags & B_FAILFAST) {
					goto fail_command;
				}

#if !defined(lint) && !defined(__lint)
			} else {
				/*
				 * If neither of the preceding conditionals
				 * was true, it means that there is some
				 * *other* bp that has met an initial failfast
				 * condition and is currently either being
				 * retried or is waiting to be retried. In
				 * that case we should perform normal retry
				 * processing on *this* bp, since there is a
				 * chance that the current failfast condition
				 * is transient and recoverable. If that does
				 * not turn out to be the case, then retries
				 * will be cleared when the wait queue is
				 * flushed anyway.
				 */
#endif
			}
		}
	} else {
		/*
		 * SD_RETRIES_FAILFAST is clear, which indicates that we
		 * likely were able to at least establish some level of
		 * communication with the target and subsequent commands
		 * and/or retries are likely to get through to the target.
		 * In this case we want to be aggressive about clearing
		 * the failfast state. Note that this does not affect
		 * the "failfast pending" condition.
		 */
		un->un_failfast_state = SD_FAILFAST_INACTIVE;
	}


	/*
	 * Check the specified retry count to see if we can still do
	 * any retries with this pkt before we should fail it.
	 */
	switch (retry_check_flag & SD_RETRIES_MASK) {
	case SD_RETRIES_VICTIM:
		/*
		 * Check the victim retry count. If exhausted, then fall
		 * thru & check against the standard retry count.
		 */
		if (xp->xb_victim_retry_count < un->un_victim_retry_count) {
			/* Increment count & proceed with the retry */
			xp->xb_victim_retry_count++;
			break;
		}
		/* Victim retries exhausted, fall back to std. retries... */
		/* FALLTHRU */

	case SD_RETRIES_STANDARD:
		if (xp->xb_retry_count >= un->un_retry_count) {
			/* Retries exhausted, fail the command */
			SD_TRACE(SD_LOG_IO_CORE, un,
			    "sd_retry_command: retries exhausted!\n");
			/*
			 * update b_resid for failed SCMD_READ & SCMD_WRITE
			 * commands with nonzero pkt_resid.
15059 */ 15060 if ((pktp->pkt_reason == CMD_CMPLT) && 15061 (SD_GET_PKT_STATUS(pktp) == STATUS_GOOD) && 15062 (pktp->pkt_resid != 0)) { 15063 uchar_t op = SD_GET_PKT_OPCODE(pktp) & 0x1F; 15064 if ((op == SCMD_READ) || (op == SCMD_WRITE)) { 15065 SD_UPDATE_B_RESID(bp, pktp); 15066 } 15067 } 15068 goto fail_command; 15069 } 15070 xp->xb_retry_count++; 15071 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15072 "sd_retry_command: retry count:%d\n", xp->xb_retry_count); 15073 break; 15074 15075 case SD_RETRIES_UA: 15076 if (xp->xb_ua_retry_count >= sd_ua_retry_count) { 15077 /* Retries exhausted, fail the command */ 15078 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 15079 "Unit Attention retries exhausted. " 15080 "Check the target.\n"); 15081 goto fail_command; 15082 } 15083 xp->xb_ua_retry_count++; 15084 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15085 "sd_retry_command: retry count:%d\n", 15086 xp->xb_ua_retry_count); 15087 break; 15088 15089 case SD_RETRIES_BUSY: 15090 if (xp->xb_retry_count >= un->un_busy_retry_count) { 15091 /* Retries exhausted, fail the command */ 15092 SD_TRACE(SD_LOG_IO_CORE, un, 15093 "sd_retry_command: retries exhausted!\n"); 15094 goto fail_command; 15095 } 15096 xp->xb_retry_count++; 15097 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15098 "sd_retry_command: retry count:%d\n", xp->xb_retry_count); 15099 break; 15100 15101 case SD_RETRIES_NOCHECK: 15102 default: 15103 /* No retry count to check. Just proceed with the retry */ 15104 break; 15105 } 15106 15107 xp->xb_pktp->pkt_flags |= FLAG_HEAD; 15108 15109 /* 15110 * If this is a non-USCSI command being retried 15111 * during execution last time, we should post an ereport with 15112 * driver-assessment of the value "retry". 15113 * For partial DMA, request sense and STATUS_QFULL, there are no 15114 * hardware errors, we bypass ereport posting. 15115 */ 15116 if (failure_code != 0) { 15117 if (!(xp->xb_pkt_flags & SD_XB_USCSICMD)) { 15118 sd_ssc_extract_info(&sfip->fm_ssc, un, pktp, bp, xp); 15119 sd_ssc_post(&sfip->fm_ssc, SD_FM_DRV_RETRY); 15120 } 15121 } 15122 15123 /* 15124 * If we were given a zero timeout, we must attempt to retry the 15125 * command immediately (ie, without a delay). 15126 */ 15127 if (retry_delay == 0) { 15128 /* 15129 * Check some limiting conditions to see if we can actually 15130 * do the immediate retry. If we cannot, then we must 15131 * fall back to queueing up a delayed retry. 15132 */ 15133 if (un->un_ncmds_in_transport >= un->un_throttle) { 15134 /* 15135 * We are at the throttle limit for the target, 15136 * fall back to delayed retry. 15137 */ 15138 retry_delay = un->un_busy_timeout; 15139 statp = kstat_waitq_enter; 15140 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15141 "sd_retry_command: immed. retry hit " 15142 "throttle!\n"); 15143 } else { 15144 /* 15145 * We're clear to proceed with the immediate retry. 15146 * First call the user-provided function (if any) 15147 */ 15148 if (user_funcp != NULL) { 15149 (*user_funcp)(un, bp, user_arg, 15150 SD_IMMEDIATE_RETRY_ISSUED); 15151 #ifdef __lock_lint 15152 sd_print_incomplete_msg(un, bp, user_arg, 15153 SD_IMMEDIATE_RETRY_ISSUED); 15154 sd_print_cmd_incomplete_msg(un, bp, user_arg, 15155 SD_IMMEDIATE_RETRY_ISSUED); 15156 sd_print_sense_failed_msg(un, bp, user_arg, 15157 SD_IMMEDIATE_RETRY_ISSUED); 15158 #endif 15159 } 15160 15161 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15162 "sd_retry_command: issuing immediate retry\n"); 15163 15164 /* 15165 * Call sd_start_cmds() to transport the command to 15166 * the target. 
15167 */ 15168 sd_start_cmds(un, bp); 15169 15170 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15171 "sd_retry_command exit\n"); 15172 return; 15173 } 15174 } 15175 15176 /* 15177 * Set up to retry the command after a delay. 15178 * First call the user-provided function (if any) 15179 */ 15180 if (user_funcp != NULL) { 15181 (*user_funcp)(un, bp, user_arg, SD_DELAYED_RETRY_ISSUED); 15182 } 15183 15184 sd_set_retry_bp(un, bp, retry_delay, statp); 15185 15186 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sd_retry_command: exit\n"); 15187 return; 15188 15189 fail_command: 15190 15191 if (user_funcp != NULL) { 15192 (*user_funcp)(un, bp, user_arg, SD_NO_RETRY_ISSUED); 15193 } 15194 15195 fail_command_no_log: 15196 15197 SD_INFO(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15198 "sd_retry_command: returning failed command\n"); 15199 15200 sd_return_failed_command(un, bp, failure_code); 15201 15202 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sd_retry_command: exit\n"); 15203 } 15204 15205 15206 /* 15207 * Function: sd_set_retry_bp 15208 * 15209 * Description: Set up the given bp for retry. 15210 * 15211 * Arguments: un - ptr to associated softstate 15212 * bp - ptr to buf(9S) for the command 15213 * retry_delay - time interval before issuing retry (may be 0) 15214 * statp - optional pointer to kstat function 15215 * 15216 * Context: May be called under interrupt context 15217 */ 15218 15219 static void 15220 sd_set_retry_bp(struct sd_lun *un, struct buf *bp, clock_t retry_delay, 15221 void (*statp)(kstat_io_t *)) 15222 { 15223 ASSERT(un != NULL); 15224 ASSERT(mutex_owned(SD_MUTEX(un))); 15225 ASSERT(bp != NULL); 15226 15227 SD_TRACE(SD_LOG_IO | SD_LOG_ERROR, un, 15228 "sd_set_retry_bp: entry: un:0x%p bp:0x%p\n", un, bp); 15229 15230 /* 15231 * Indicate that the command is being retried. This will not allow any 15232 * other commands on the wait queue to be transported to the target 15233 * until this command has been completed (success or failure). The 15234 * "retry command" is not transported to the target until the given 15235 * time delay expires, unless the user specified a 0 retry_delay. 15236 * 15237 * Note: the timeout(9F) callback routine is what actually calls 15238 * sd_start_cmds() to transport the command, with the exception of a 15239 * zero retry_delay. The only current implementor of a zero retry delay 15240 * is the case where a START_STOP_UNIT is sent to spin-up a device. 15241 */ 15242 if (un->un_retry_bp == NULL) { 15243 ASSERT(un->un_retry_statp == NULL); 15244 un->un_retry_bp = bp; 15245 15246 /* 15247 * If the user has not specified a delay the command should 15248 * be queued and no timeout should be scheduled. 15249 */ 15250 if (retry_delay == 0) { 15251 /* 15252 * Save the kstat pointer that will be used in the 15253 * call to SD_UPDATE_KSTATS() below, so that 15254 * sd_start_cmds() can correctly decrement the waitq 15255 * count when it is time to transport this command. 15256 */ 15257 un->un_retry_statp = statp; 15258 goto done; 15259 } 15260 } 15261 15262 if (un->un_retry_bp == bp) { 15263 /* 15264 * Save the kstat pointer that will be used in the call to 15265 * SD_UPDATE_KSTATS() below, so that sd_start_cmds() can 15266 * correctly decrement the waitq count when it is time to 15267 * transport this command. 15268 */ 15269 un->un_retry_statp = statp; 15270 15271 /* 15272 * Schedule a timeout if: 15273 * 1) The user has specified a delay. 15274 * 2) There is not a START_STOP_UNIT callback pending. 
15275 * 15276 * If no delay has been specified, then it is up to the caller 15277 * to ensure that IO processing continues without stalling. 15278 * Effectively, this means that the caller will issue the 15279 * required call to sd_start_cmds(). The START_STOP_UNIT 15280 * callback does this after the START STOP UNIT command has 15281 * completed. In either of these cases we should not schedule 15282 * a timeout callback here. Also don't schedule the timeout if 15283 * an SD_PATH_DIRECT_PRIORITY command is waiting to restart. 15284 */ 15285 if ((retry_delay != 0) && (un->un_startstop_timeid == NULL) && 15286 (un->un_direct_priority_timeid == NULL)) { 15287 un->un_retry_timeid = 15288 timeout(sd_start_retry_command, un, retry_delay); 15289 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15290 "sd_set_retry_bp: setting timeout: un: 0x%p" 15291 " bp:0x%p un_retry_timeid:0x%p\n", 15292 un, bp, un->un_retry_timeid); 15293 } 15294 } else { 15295 /* 15296 * We only get in here if there is already another command 15297 * waiting to be retried. In this case, we just put the 15298 * given command onto the wait queue, so it can be transported 15299 * after the current retry command has completed. 15300 * 15301 * Also we have to make sure that if the command at the head 15302 * of the wait queue is the un_failfast_bp, that we do not 15303 * put ahead of it any other commands that are to be retried. 15304 */ 15305 if ((un->un_failfast_bp != NULL) && 15306 (un->un_failfast_bp == un->un_waitq_headp)) { 15307 /* 15308 * Enqueue this command AFTER the first command on 15309 * the wait queue (which is also un_failfast_bp). 15310 */ 15311 bp->av_forw = un->un_waitq_headp->av_forw; 15312 un->un_waitq_headp->av_forw = bp; 15313 if (un->un_waitq_headp == un->un_waitq_tailp) { 15314 un->un_waitq_tailp = bp; 15315 } 15316 } else { 15317 /* Enqueue this command at the head of the waitq. */ 15318 bp->av_forw = un->un_waitq_headp; 15319 un->un_waitq_headp = bp; 15320 if (un->un_waitq_tailp == NULL) { 15321 un->un_waitq_tailp = bp; 15322 } 15323 } 15324 15325 if (statp == NULL) { 15326 statp = kstat_waitq_enter; 15327 } 15328 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15329 "sd_set_retry_bp: un:0x%p already delayed retry\n", un); 15330 } 15331 15332 done: 15333 if (statp != NULL) { 15334 SD_UPDATE_KSTATS(un, statp, bp); 15335 } 15336 15337 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15338 "sd_set_retry_bp: exit un:0x%p\n", un); 15339 } 15340 15341 15342 /* 15343 * Function: sd_start_retry_command 15344 * 15345 * Description: Start the command that has been waiting on the target's 15346 * retry queue. Called from timeout(9F) context after the 15347 * retry delay interval has expired. 15348 * 15349 * Arguments: arg - pointer to associated softstate for the device. 15350 * 15351 * Context: timeout(9F) thread context. May not sleep. 
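 *
 *		A minimal sketch of the schedule/cancel pattern used with
 *		this callback (plain timeout(9F)/untimeout(9F) semantics;
 *		"delay" is a placeholder for the retry interval):
 *
 *			un->un_retry_timeid =
 *			    timeout(sd_start_retry_command, un, delay);
 *			...
 *			if (un->un_retry_timeid != NULL) {
 *				timeout_id_t tid = un->un_retry_timeid;
 *				un->un_retry_timeid = NULL;
 *				mutex_exit(SD_MUTEX(un));
 *				(void) untimeout(tid);
 *				mutex_enter(SD_MUTEX(un));
 *			}
 *
 *		untimeout(9F) may wait for a running callback, so the mutex
 *		is dropped around it to avoid deadlocking with this routine.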
 */

static void
sd_start_retry_command(void *arg)
{
	struct sd_lun *un = arg;

	ASSERT(un != NULL);
	ASSERT(!mutex_owned(SD_MUTEX(un)));

	SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
	    "sd_start_retry_command: entry\n");

	mutex_enter(SD_MUTEX(un));

	un->un_retry_timeid = NULL;

	if (un->un_retry_bp != NULL) {
		SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
		    "sd_start_retry_command: un:0x%p STARTING bp:0x%p\n",
		    un, un->un_retry_bp);
		sd_start_cmds(un, un->un_retry_bp);
	}

	mutex_exit(SD_MUTEX(un));

	SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
	    "sd_start_retry_command: exit\n");
}


/*
 * Function: sd_start_direct_priority_command
 *
 * Description: Used to re-start an SD_PATH_DIRECT_PRIORITY command that had
 *		received TRAN_BUSY when we called scsi_transport() to send it
 *		to the underlying HBA. This function is called from timeout(9F)
 *		context after the delay interval has expired.
 *
 * Arguments: arg - pointer to associated buf(9S) to be restarted.
 *
 * Context: timeout(9F) thread context. May not sleep.
 */

static void
sd_start_direct_priority_command(void *arg)
{
	struct buf *priority_bp = arg;
	struct sd_lun *un;

	ASSERT(priority_bp != NULL);
	un = SD_GET_UN(priority_bp);
	ASSERT(un != NULL);
	ASSERT(!mutex_owned(SD_MUTEX(un)));

	SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
	    "sd_start_direct_priority_command: entry\n");

	mutex_enter(SD_MUTEX(un));
	un->un_direct_priority_timeid = NULL;
	sd_start_cmds(un, priority_bp);
	mutex_exit(SD_MUTEX(un));

	SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
	    "sd_start_direct_priority_command: exit\n");
}


/*
 * Function: sd_send_request_sense_command
 *
 * Description: Sends a REQUEST SENSE command to the target
 *
 * Context: May be called from interrupt context.
 */

static void
sd_send_request_sense_command(struct sd_lun *un, struct buf *bp,
    struct scsi_pkt *pktp)
{
	ASSERT(bp != NULL);
	ASSERT(un != NULL);
	ASSERT(mutex_owned(SD_MUTEX(un)));

	SD_TRACE(SD_LOG_IO | SD_LOG_ERROR, un, "sd_send_request_sense_command: "
	    "entry: buf:0x%p\n", bp);

	/*
	 * If we are syncing or dumping, then fail the command to avoid a
	 * recursive callback into scsi_transport(). Also fail the command
	 * if we are suspended (legacy behavior).
	 */
	if (ddi_in_panic() || (un->un_state == SD_STATE_SUSPENDED) ||
	    (un->un_state == SD_STATE_DUMPING)) {
		sd_return_failed_command(un, bp, EIO);
		SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
		    "sd_send_request_sense_command: syncing/dumping, exit\n");
		return;
	}

	/*
	 * Retry the failed command and don't issue the request sense if:
	 *	1) the sense buf is busy
	 *	2) we have 1 or more outstanding commands on the target
	 *	(the sense data will be cleared or invalidated anyway)
	 *
	 * Note: There could be an issue with not checking a retry limit here;
	 * the problem is determining which retry limit to check.
	 */
	if ((un->un_sense_isbusy != 0) || (un->un_ncmds_in_transport > 0)) {
		/* Don't retry if the command is flagged as non-retryable */
		if ((pktp->pkt_flags & FLAG_DIAGNOSE) == 0) {
			sd_retry_command(un, bp, SD_RETRIES_NOCHECK,
			    NULL, NULL, 0, un->un_busy_timeout,
			    kstat_waitq_enter);
			SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
			    "sd_send_request_sense_command: "
			    "at full throttle, retrying exit\n");
		} else {
			sd_return_failed_command(un, bp, EIO);
			SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
			    "sd_send_request_sense_command: "
			    "at full throttle, non-retryable exit\n");
		}
		return;
	}

	sd_mark_rqs_busy(un, bp);
	sd_start_cmds(un, un->un_rqs_bp);

	SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
	    "sd_send_request_sense_command: exit\n");
}


/*
 * Function: sd_mark_rqs_busy
 *
 * Description: Indicate that the request sense bp for this instance is
 *		in use.
 *
 * Context: May be called under interrupt context
 */

static void
sd_mark_rqs_busy(struct sd_lun *un, struct buf *bp)
{
	struct sd_xbuf *sense_xp;

	ASSERT(un != NULL);
	ASSERT(bp != NULL);
	ASSERT(mutex_owned(SD_MUTEX(un)));
	ASSERT(un->un_sense_isbusy == 0);

	SD_TRACE(SD_LOG_IO_CORE, un, "sd_mark_rqs_busy: entry: "
	    "buf:0x%p xp:0x%p un:0x%p\n", bp, SD_GET_XBUF(bp), un);

	sense_xp = SD_GET_XBUF(un->un_rqs_bp);
	ASSERT(sense_xp != NULL);

	SD_INFO(SD_LOG_IO, un,
	    "sd_mark_rqs_busy: entry: sense_xp:0x%p\n", sense_xp);

	ASSERT(sense_xp->xb_pktp != NULL);
	ASSERT((sense_xp->xb_pktp->pkt_flags & (FLAG_SENSING | FLAG_HEAD))
	    == (FLAG_SENSING | FLAG_HEAD));

	un->un_sense_isbusy = 1;
	un->un_rqs_bp->b_resid = 0;
	sense_xp->xb_pktp->pkt_resid = 0;
	sense_xp->xb_pktp->pkt_reason = 0;

	/* So we can get back the bp at interrupt time! */
	sense_xp->xb_sense_bp = bp;

	bzero(un->un_rqs_bp->b_un.b_addr, SENSE_LENGTH);

	/*
	 * Mark this buf as awaiting sense data. (This is already set in
	 * the pkt_flags for the RQS packet.)
	 */
	((SD_GET_XBUF(bp))->xb_pktp)->pkt_flags |= FLAG_SENSING;

	/* Request sense down same path */
	if (scsi_pkt_allocated_correctly((SD_GET_XBUF(bp))->xb_pktp) &&
	    ((SD_GET_XBUF(bp))->xb_pktp)->pkt_path_instance)
		sense_xp->xb_pktp->pkt_path_instance =
		    ((SD_GET_XBUF(bp))->xb_pktp)->pkt_path_instance;

	sense_xp->xb_retry_count = 0;
	sense_xp->xb_victim_retry_count = 0;
	sense_xp->xb_ua_retry_count = 0;
	sense_xp->xb_nr_retry_count = 0;
	sense_xp->xb_dma_resid = 0;

	/* Clean up the fields for auto-request sense */
	sense_xp->xb_sense_status = 0;
	sense_xp->xb_sense_state = 0;
	sense_xp->xb_sense_resid = 0;
	bzero(sense_xp->xb_sense_data, sizeof (sense_xp->xb_sense_data));

	SD_TRACE(SD_LOG_IO_CORE, un, "sd_mark_rqs_busy: exit\n");
}


/*
 * Function: sd_mark_rqs_idle
 *
 * Description: SD_MUTEX must be held continuously through this routine
 *		to prevent reuse of the rqs struct before the caller can
 *		complete its processing.
 *
 * Return Code: Pointer to the RQS buf
 *
 * Context: May be called under interrupt context
 */

static struct buf *
sd_mark_rqs_idle(struct sd_lun *un, struct sd_xbuf *sense_xp)
{
	struct buf *bp;
	ASSERT(un != NULL);
	ASSERT(sense_xp != NULL);
	ASSERT(mutex_owned(SD_MUTEX(un)));
	ASSERT(un->un_sense_isbusy != 0);

	un->un_sense_isbusy = 0;
	bp = sense_xp->xb_sense_bp;
	sense_xp->xb_sense_bp = NULL;

	/* This pkt is no longer interested in getting sense data */
	((SD_GET_XBUF(bp))->xb_pktp)->pkt_flags &= ~FLAG_SENSING;

	return (bp);
}



/*
 * Function: sd_alloc_rqs
 *
 * Description: Set up the unit to receive auto request sense data
 *
 * Return Code: DDI_SUCCESS or DDI_FAILURE
 *
 * Context: Called under attach(9E) context
 */

static int
sd_alloc_rqs(struct scsi_device *devp, struct sd_lun *un)
{
	struct sd_xbuf *xp;

	ASSERT(un != NULL);
	ASSERT(!mutex_owned(SD_MUTEX(un)));
	ASSERT(un->un_rqs_bp == NULL);
	ASSERT(un->un_rqs_pktp == NULL);

	/*
	 * First allocate the required buf and scsi_pkt structs, then set up
	 * the CDB in the scsi_pkt for a REQUEST SENSE command.
	 */
	un->un_rqs_bp = scsi_alloc_consistent_buf(&devp->sd_address, NULL,
	    MAX_SENSE_LENGTH, B_READ, SLEEP_FUNC, NULL);
	if (un->un_rqs_bp == NULL) {
		return (DDI_FAILURE);
	}

	un->un_rqs_pktp = scsi_init_pkt(&devp->sd_address, NULL, un->un_rqs_bp,
	    CDB_GROUP0, 1, 0, PKT_CONSISTENT, SLEEP_FUNC, NULL);

	if (un->un_rqs_pktp == NULL) {
		sd_free_rqs(un);
		return (DDI_FAILURE);
	}

	/* Set up the CDB in the scsi_pkt for a REQUEST SENSE command. */
	(void) scsi_setup_cdb((union scsi_cdb *)un->un_rqs_pktp->pkt_cdbp,
	    SCMD_REQUEST_SENSE, 0, MAX_SENSE_LENGTH, 0);

	SD_FILL_SCSI1_LUN(un, un->un_rqs_pktp);

	/* Set up the other needed members in the ARQ scsi_pkt. */
	un->un_rqs_pktp->pkt_comp = sdintr;
	un->un_rqs_pktp->pkt_time = sd_io_time;
	un->un_rqs_pktp->pkt_flags |=
	    (FLAG_SENSING | FLAG_HEAD);	/* (1222170) */

	/*
	 * Allocate & init the sd_xbuf struct for the RQS command. Do not
	 * provide any initpkt, destroypkt routines as we take care of
	 * scsi_pkt allocation/freeing here and in sd_free_rqs().
	 */
	xp = kmem_alloc(sizeof (struct sd_xbuf), KM_SLEEP);
	sd_xbuf_init(un, un->un_rqs_bp, xp, SD_CHAIN_NULL, NULL);
	xp->xb_pktp = un->un_rqs_pktp;
	SD_INFO(SD_LOG_ATTACH_DETACH, un,
	    "sd_alloc_rqs: un 0x%p, rqs xp 0x%p, pkt 0x%p, buf 0x%p\n",
	    un, xp, un->un_rqs_pktp, un->un_rqs_bp);

	/*
	 * Save the pointer to the request sense private bp so it can
	 * be retrieved in sdintr.
	 */
	un->un_rqs_pktp->pkt_private = un->un_rqs_bp;
	ASSERT(un->un_rqs_bp->b_private == xp);

	/*
	 * See if the HBA supports auto-request sense for the specified
	 * target/lun. If it does, then try to enable it (if not already
	 * enabled).
	 *
	 * Note: For some HBAs (ifp & sf), scsi_ifsetcap will always return
	 * failure, while for other HBAs (pln) scsi_ifsetcap will always
	 * return success. However, in both of these cases ARQ is always
	 * enabled and scsi_ifgetcap will always return true.
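	 *
	 * For reference, the capability calls in question look like the
	 * following (a sketch only, using the scsi_ifgetcap(9F) and
	 * scsi_ifsetcap(9F) signatures; the final argument of 1 requests
	 * per-target scope):
	 *
	 *	if (scsi_ifgetcap(SD_ADDRESS(un), "auto-rqsense", 1) != 1) {
	 *		(void) scsi_ifsetcap(SD_ADDRESS(un),
	 *		    "auto-rqsense", 1, 1);
	 *	}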
	 * The best approach is to issue the scsi_ifgetcap() first, then
	 * try the scsi_ifsetcap().
	 *
	 * A third case: some HBAs (adp) always return enabled on
	 * scsi_ifgetcap even when ARQ is not enabled; the best approach
	 * there is to issue a scsi_ifsetcap and then a scsi_ifgetcap.
	 * Note: this case is to circumvent the Adaptec bug. (x86 only)
	 */

	if (un->un_f_is_fibre == TRUE) {
		un->un_f_arq_enabled = TRUE;
	} else {
#if defined(__i386) || defined(__amd64)
		/*
		 * Circumvent the Adaptec bug, remove this code when
		 * the bug is fixed
		 */
		(void) scsi_ifsetcap(SD_ADDRESS(un), "auto-rqsense", 1, 1);
#endif
		switch (scsi_ifgetcap(SD_ADDRESS(un), "auto-rqsense", 1)) {
		case 0:
			SD_INFO(SD_LOG_ATTACH_DETACH, un,
			    "sd_alloc_rqs: HBA supports ARQ\n");
			/*
			 * ARQ is supported by this HBA but currently is not
			 * enabled. Attempt to enable it and if successful then
			 * mark this instance as ARQ enabled.
			 */
			if (scsi_ifsetcap(SD_ADDRESS(un), "auto-rqsense", 1, 1)
			    == 1) {
				/* Successfully enabled ARQ in the HBA */
				SD_INFO(SD_LOG_ATTACH_DETACH, un,
				    "sd_alloc_rqs: ARQ enabled\n");
				un->un_f_arq_enabled = TRUE;
			} else {
				/* Could not enable ARQ in the HBA */
				SD_INFO(SD_LOG_ATTACH_DETACH, un,
				    "sd_alloc_rqs: failed ARQ enable\n");
				un->un_f_arq_enabled = FALSE;
			}
			break;
		case 1:
			/*
			 * ARQ is supported by this HBA and is already enabled.
			 * Just mark ARQ as enabled for this instance.
			 */
			SD_INFO(SD_LOG_ATTACH_DETACH, un,
			    "sd_alloc_rqs: ARQ already enabled\n");
			un->un_f_arq_enabled = TRUE;
			break;
		default:
			/*
			 * ARQ is not supported by this HBA; disable it for this
			 * instance.
			 */
			SD_INFO(SD_LOG_ATTACH_DETACH, un,
			    "sd_alloc_rqs: HBA does not support ARQ\n");
			un->un_f_arq_enabled = FALSE;
			break;
		}
	}

	return (DDI_SUCCESS);
}


/*
 * Function: sd_free_rqs
 *
 * Description: Cleanup for the per-instance RQS command.
 *
 * Context: Kernel thread context
 */

static void
sd_free_rqs(struct sd_lun *un)
{
	ASSERT(un != NULL);

	SD_TRACE(SD_LOG_IO_CORE, un, "sd_free_rqs: entry\n");

	/*
	 * If consistent memory is bound to a scsi_pkt, the pkt
	 * has to be destroyed *before* freeing the consistent memory.
	 * Don't change the sequence of these operations.
	 * scsi_destroy_pkt() might access memory, which isn't allowed,
	 * after it was freed in scsi_free_consistent_buf().
	 */
	if (un->un_rqs_pktp != NULL) {
		scsi_destroy_pkt(un->un_rqs_pktp);
		un->un_rqs_pktp = NULL;
	}

	if (un->un_rqs_bp != NULL) {
		struct sd_xbuf *xp = SD_GET_XBUF(un->un_rqs_bp);
		if (xp != NULL) {
			kmem_free(xp, sizeof (struct sd_xbuf));
		}
		scsi_free_consistent_buf(un->un_rqs_bp);
		un->un_rqs_bp = NULL;
	}
	SD_TRACE(SD_LOG_IO_CORE, un, "sd_free_rqs: exit\n");
}



/*
 * Function: sd_reduce_throttle
 *
 * Description: Reduces the maximum # of outstanding commands on a
 *		target to the current number of outstanding commands.
 *		Queues a timeout(9F) callback to restore the limit
 *		after a specified interval has elapsed.
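 *
 *		For example (illustrative numbers only): with un_throttle
 *		at 256 and 4 commands outstanding when a TRAN_BUSY comes
 *		back, the throttle drops to 4. The timeout later restores
 *		it: all at once for the TRAN_BUSY case, or in ~10%
 *		increments per interval for the QFULL case (see
 *		sd_restore_throttle() below).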
 *		Typically used when we get a TRAN_BUSY return code
 *		back from scsi_transport().
 *
 * Arguments: un - ptr to the sd_lun softstate struct
 *	      throttle_type: SD_THROTTLE_TRAN_BUSY or SD_THROTTLE_QFULL
 *
 * Context: May be called from interrupt context
 */

static void
sd_reduce_throttle(struct sd_lun *un, int throttle_type)
{
	ASSERT(un != NULL);
	ASSERT(mutex_owned(SD_MUTEX(un)));
	ASSERT(un->un_ncmds_in_transport >= 0);

	SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sd_reduce_throttle: "
	    "entry: un:0x%p un_throttle:%d un_ncmds_in_transport:%d\n",
	    un, un->un_throttle, un->un_ncmds_in_transport);

	if (un->un_throttle > 1) {
		if (un->un_f_use_adaptive_throttle == TRUE) {
			switch (throttle_type) {
			case SD_THROTTLE_TRAN_BUSY:
				if (un->un_busy_throttle == 0) {
					un->un_busy_throttle = un->un_throttle;
				}
				break;
			case SD_THROTTLE_QFULL:
				un->un_busy_throttle = 0;
				break;
			default:
				ASSERT(FALSE);
			}

			if (un->un_ncmds_in_transport > 0) {
				un->un_throttle = un->un_ncmds_in_transport;
			}

		} else {
			if (un->un_ncmds_in_transport == 0) {
				un->un_throttle = 1;
			} else {
				un->un_throttle = un->un_ncmds_in_transport;
			}
		}
	}

	/* Reschedule the timeout if none is currently active */
	if (un->un_reset_throttle_timeid == NULL) {
		un->un_reset_throttle_timeid = timeout(sd_restore_throttle,
		    un, SD_THROTTLE_RESET_INTERVAL);
		SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
		    "sd_reduce_throttle: timeout scheduled!\n");
	}

	SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sd_reduce_throttle: "
	    "exit: un:0x%p un_throttle:%d\n", un, un->un_throttle);
}



/*
 * Function: sd_restore_throttle
 *
 * Description: Callback function for timeout(9F). Resets the current
 *		value of un->un_throttle to its default.
 *
 * Arguments: arg - pointer to associated softstate for the device.
 *
 * Context: May be called from interrupt context
 */

static void
sd_restore_throttle(void *arg)
{
	struct sd_lun *un = arg;

	ASSERT(un != NULL);
	ASSERT(!mutex_owned(SD_MUTEX(un)));

	mutex_enter(SD_MUTEX(un));

	SD_TRACE(SD_LOG_IO | SD_LOG_ERROR, un, "sd_restore_throttle: "
	    "entry: un:0x%p un_throttle:%d\n", un, un->un_throttle);

	un->un_reset_throttle_timeid = NULL;

	if (un->un_f_use_adaptive_throttle == TRUE) {
		/*
		 * If un_busy_throttle is nonzero, then it contains the
		 * value that un_throttle was when we got a TRAN_BUSY back
		 * from scsi_transport(). We want to revert back to this
		 * value.
		 *
		 * In the QFULL case, the throttle limit will incrementally
		 * increase until it reaches max throttle.
		 */
		if (un->un_busy_throttle > 0) {
			un->un_throttle = un->un_busy_throttle;
			un->un_busy_throttle = 0;
		} else {
			/*
			 * Increase the throttle by 10% to open the gate
			 * slowly, and schedule another restore if the
			 * saved throttle has not yet been reached.
			 */
			short throttle;
			if (sd_qfull_throttle_enable) {
				throttle = un->un_throttle +
				    max((un->un_throttle / 10), 1);
				un->un_throttle =
				    (throttle < un->un_saved_throttle) ?
				    throttle : un->un_saved_throttle;
				if (un->un_throttle < un->un_saved_throttle) {
					un->un_reset_throttle_timeid =
					    timeout(sd_restore_throttle,
					    un,
					    SD_QFULL_THROTTLE_RESET_INTERVAL);
				}
			}
		}

		/*
		 * If un_throttle has fallen below the low-water mark, we
		 * restore the maximum value here (and allow it to ratchet
		 * down again if necessary).
		 */
		if (un->un_throttle < un->un_min_throttle) {
			un->un_throttle = un->un_saved_throttle;
		}
	} else {
		SD_TRACE(SD_LOG_IO | SD_LOG_ERROR, un, "sd_restore_throttle: "
		    "restoring limit from 0x%x to 0x%x\n",
		    un->un_throttle, un->un_saved_throttle);
		un->un_throttle = un->un_saved_throttle;
	}

	SD_TRACE(SD_LOG_IO | SD_LOG_ERROR, un,
	    "sd_restore_throttle: calling sd_start_cmds!\n");

	sd_start_cmds(un, NULL);

	SD_TRACE(SD_LOG_IO | SD_LOG_ERROR, un,
	    "sd_restore_throttle: exit: un:0x%p un_throttle:%d\n",
	    un, un->un_throttle);

	mutex_exit(SD_MUTEX(un));

	SD_TRACE(SD_LOG_IO | SD_LOG_ERROR, un, "sd_restore_throttle: exit\n");
}

/*
 * Function: sdrunout
 *
 * Description: Callback routine for scsi_init_pkt when a resource allocation
 *		fails.
 *
 * Arguments: arg - a pointer to the sd_lun unit struct for the particular
 *		soft state instance.
 *
 * Return Code: The scsi_init_pkt routine allows for the callback function to
 *		return a 0 indicating the callback should be rescheduled or a 1
 *		indicating not to reschedule. This routine always returns 1
 *		because the driver always provides a callback function to
 *		scsi_init_pkt. This results in a callback always being scheduled
 *		(via the scsi_init_pkt callback implementation) if a resource
 *		failure occurs.
 *
 * Context: This callback function may not block or call routines that block
 *
 * Note: Using the scsi_init_pkt callback facility can result in an I/O
 *	request persisting at the head of the list which cannot be
 *	satisfied even after multiple retries. In the future the driver
 *	may implement some type of maximum runout count before failing
 *	an I/O.
 */

static int
sdrunout(caddr_t arg)
{
	struct sd_lun *un = (struct sd_lun *)arg;

	ASSERT(un != NULL);
	ASSERT(!mutex_owned(SD_MUTEX(un)));

	SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sdrunout: entry\n");

	mutex_enter(SD_MUTEX(un));
	sd_start_cmds(un, NULL);
	mutex_exit(SD_MUTEX(un));
	/*
	 * This callback routine always returns 1 (i.e. do not reschedule)
	 * because we always specify sdrunout as the callback handler for
	 * scsi_init_pkt inside the call to sd_start_cmds.
	 */
	SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sdrunout: exit\n");
	return (1);
}


/*
 * Function: sdintr
 *
 * Description: Completion callback routine for scsi_pkt(9S) structs
 *		sent to the HBA driver via scsi_transport(9F).
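 *
 *		In sketch form, the wiring that gets us here (standard
 *		scsi_pkt(9S) fields; see also sd_alloc_rqs() above):
 *
 *			pktp->pkt_comp = sdintr;
 *			pktp->pkt_time = sd_io_time;
 *			...
 *			(void) scsi_transport(pktp);
 *
 *		The HBA driver then calls sdintr() from interrupt context
 *		once the command completes, fails, or times out.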
15986 * 15987 * Context: Interrupt context 15988 */ 15989 15990 static void 15991 sdintr(struct scsi_pkt *pktp) 15992 { 15993 struct buf *bp; 15994 struct sd_xbuf *xp; 15995 struct sd_lun *un; 15996 size_t actual_len; 15997 sd_ssc_t *sscp; 15998 15999 ASSERT(pktp != NULL); 16000 bp = (struct buf *)pktp->pkt_private; 16001 ASSERT(bp != NULL); 16002 xp = SD_GET_XBUF(bp); 16003 ASSERT(xp != NULL); 16004 ASSERT(xp->xb_pktp != NULL); 16005 un = SD_GET_UN(bp); 16006 ASSERT(un != NULL); 16007 ASSERT(!mutex_owned(SD_MUTEX(un))); 16008 16009 #ifdef SD_FAULT_INJECTION 16010 16011 SD_INFO(SD_LOG_IOERR, un, "sdintr: sdintr calling Fault injection\n"); 16012 /* SD FaultInjection */ 16013 sd_faultinjection(pktp); 16014 16015 #endif /* SD_FAULT_INJECTION */ 16016 16017 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sdintr: entry: buf:0x%p," 16018 " xp:0x%p, un:0x%p\n", bp, xp, un); 16019 16020 mutex_enter(SD_MUTEX(un)); 16021 16022 ASSERT(un->un_fm_private != NULL); 16023 sscp = &((struct sd_fm_internal *)(un->un_fm_private))->fm_ssc; 16024 ASSERT(sscp != NULL); 16025 16026 /* Reduce the count of the #commands currently in transport */ 16027 un->un_ncmds_in_transport--; 16028 ASSERT(un->un_ncmds_in_transport >= 0); 16029 16030 /* Increment counter to indicate that the callback routine is active */ 16031 un->un_in_callback++; 16032 16033 SD_UPDATE_KSTATS(un, kstat_runq_exit, bp); 16034 16035 #ifdef SDDEBUG 16036 if (bp == un->un_retry_bp) { 16037 SD_TRACE(SD_LOG_IO | SD_LOG_ERROR, un, "sdintr: " 16038 "un:0x%p: GOT retry_bp:0x%p un_ncmds_in_transport:%d\n", 16039 un, un->un_retry_bp, un->un_ncmds_in_transport); 16040 } 16041 #endif 16042 16043 /* 16044 * If pkt_reason is CMD_DEV_GONE, fail the command, and update the media 16045 * state if needed. 16046 */ 16047 if (pktp->pkt_reason == CMD_DEV_GONE) { 16048 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 16049 "Command failed to complete...Device is gone\n"); 16050 if (un->un_mediastate != DKIO_DEV_GONE) { 16051 un->un_mediastate = DKIO_DEV_GONE; 16052 cv_broadcast(&un->un_state_cv); 16053 } 16054 sd_return_failed_command(un, bp, EIO); 16055 goto exit; 16056 } 16057 16058 if (pktp->pkt_state & STATE_XARQ_DONE) { 16059 SD_TRACE(SD_LOG_COMMON, un, 16060 "sdintr: extra sense data received. pkt=%p\n", pktp); 16061 } 16062 16063 /* 16064 * First see if the pkt has auto-request sense data with it.... 16065 * Look at the packet state first so we don't take a performance 16066 * hit looking at the arq enabled flag unless absolutely necessary. 16067 */ 16068 if ((pktp->pkt_state & STATE_ARQ_DONE) && 16069 (un->un_f_arq_enabled == TRUE)) { 16070 /* 16071 * The HBA did an auto request sense for this command so check 16072 * for FLAG_DIAGNOSE. If set this indicates a uscsi or internal 16073 * driver command that should not be retried. 16074 */ 16075 if ((pktp->pkt_flags & FLAG_DIAGNOSE) != 0) { 16076 /* 16077 * Save the relevant sense info into the xp for the 16078 * original cmd. 
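			 *
			 * With auto request sense, the sense payload rides
			 * along inside the original packet's scb. A sketch
			 * of the pieces used below, per the scsi_arq_status
			 * structure layout:
			 *
			 *	struct scsi_arq_status *asp =
			 *	    (struct scsi_arq_status *)pktp->pkt_scbp;
			 *	... asp->sts_rqpkt_status,
			 *	    asp->sts_rqpkt_state,
			 *	    asp->sts_rqpkt_resid,
			 *	    asp->sts_sensedata ...
			 *
			 * i.e. the status, state, residual, and sense bytes
			 * of the implicit REQUEST SENSE accompany the failed
			 * command itself.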
16079 */ 16080 struct scsi_arq_status *asp; 16081 asp = (struct scsi_arq_status *)(pktp->pkt_scbp); 16082 xp->xb_sense_status = 16083 *((uchar_t *)(&(asp->sts_rqpkt_status))); 16084 xp->xb_sense_state = asp->sts_rqpkt_state; 16085 xp->xb_sense_resid = asp->sts_rqpkt_resid; 16086 if (pktp->pkt_state & STATE_XARQ_DONE) { 16087 actual_len = MAX_SENSE_LENGTH - 16088 xp->xb_sense_resid; 16089 bcopy(&asp->sts_sensedata, xp->xb_sense_data, 16090 MAX_SENSE_LENGTH); 16091 } else { 16092 if (xp->xb_sense_resid > SENSE_LENGTH) { 16093 actual_len = MAX_SENSE_LENGTH - 16094 xp->xb_sense_resid; 16095 } else { 16096 actual_len = SENSE_LENGTH - 16097 xp->xb_sense_resid; 16098 } 16099 if (xp->xb_pkt_flags & SD_XB_USCSICMD) { 16100 if ((((struct uscsi_cmd *) 16101 (xp->xb_pktinfo))->uscsi_rqlen) > 16102 actual_len) { 16103 xp->xb_sense_resid = 16104 (((struct uscsi_cmd *) 16105 (xp->xb_pktinfo))-> 16106 uscsi_rqlen) - actual_len; 16107 } else { 16108 xp->xb_sense_resid = 0; 16109 } 16110 } 16111 bcopy(&asp->sts_sensedata, xp->xb_sense_data, 16112 SENSE_LENGTH); 16113 } 16114 16115 /* fail the command */ 16116 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 16117 "sdintr: arq done and FLAG_DIAGNOSE set\n"); 16118 sd_return_failed_command(un, bp, EIO); 16119 goto exit; 16120 } 16121 16122 #if (defined(__i386) || defined(__amd64)) /* DMAFREE for x86 only */ 16123 /* 16124 * We want to either retry or fail this command, so free 16125 * the DMA resources here. If we retry the command then 16126 * the DMA resources will be reallocated in sd_start_cmds(). 16127 * Note that when PKT_DMA_PARTIAL is used, this reallocation 16128 * causes the *entire* transfer to start over again from the 16129 * beginning of the request, even for PARTIAL chunks that 16130 * have already transferred successfully. 16131 */ 16132 if ((un->un_f_is_fibre == TRUE) && 16133 ((xp->xb_pkt_flags & SD_XB_USCSICMD) == 0) && 16134 ((pktp->pkt_flags & FLAG_SENSING) == 0)) { 16135 scsi_dmafree(pktp); 16136 xp->xb_pkt_flags |= SD_XB_DMA_FREED; 16137 } 16138 #endif 16139 16140 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 16141 "sdintr: arq done, sd_handle_auto_request_sense\n"); 16142 16143 sd_handle_auto_request_sense(un, bp, xp, pktp); 16144 goto exit; 16145 } 16146 16147 /* Next see if this is the REQUEST SENSE pkt for the instance */ 16148 if (pktp->pkt_flags & FLAG_SENSING) { 16149 /* This pktp is from the unit's REQUEST_SENSE command */ 16150 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 16151 "sdintr: sd_handle_request_sense\n"); 16152 sd_handle_request_sense(un, bp, xp, pktp); 16153 goto exit; 16154 } 16155 16156 /* 16157 * Check to see if the command successfully completed as requested; 16158 * this is the most common case (and also the hot performance path). 16159 * 16160 * Requirements for successful completion are: 16161 * pkt_reason is CMD_CMPLT and packet status is status good. 16162 * In addition: 16163 * - A residual of zero indicates successful completion no matter what 16164 * the command is. 16165 * - If the residual is not zero and the command is not a read or 16166 * write, then it's still defined as successful completion. In other 16167 * words, if the command is a read or write the residual must be 16168 * zero for successful completion. 16169 * - If the residual is not zero and the command is a read or 16170 * write, and it's a USCSICMD, then it's still defined as 16171 * successful completion. 
16172 */ 16173 if ((pktp->pkt_reason == CMD_CMPLT) && 16174 (SD_GET_PKT_STATUS(pktp) == STATUS_GOOD)) { 16175 16176 /* 16177 * Since this command is returned with a good status, we 16178 * can reset the count for Sonoma failover. 16179 */ 16180 un->un_sonoma_failure_count = 0; 16181 16182 /* 16183 * Return all USCSI commands on good status 16184 */ 16185 if (pktp->pkt_resid == 0) { 16186 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 16187 "sdintr: returning command for resid == 0\n"); 16188 } else if (((SD_GET_PKT_OPCODE(pktp) & 0x1F) != SCMD_READ) && 16189 ((SD_GET_PKT_OPCODE(pktp) & 0x1F) != SCMD_WRITE)) { 16190 SD_UPDATE_B_RESID(bp, pktp); 16191 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 16192 "sdintr: returning command for resid != 0\n"); 16193 } else if (xp->xb_pkt_flags & SD_XB_USCSICMD) { 16194 SD_UPDATE_B_RESID(bp, pktp); 16195 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 16196 "sdintr: returning uscsi command\n"); 16197 } else { 16198 goto not_successful; 16199 } 16200 sd_return_command(un, bp); 16201 16202 /* 16203 * Decrement counter to indicate that the callback routine 16204 * is done. 16205 */ 16206 un->un_in_callback--; 16207 ASSERT(un->un_in_callback >= 0); 16208 mutex_exit(SD_MUTEX(un)); 16209 16210 return; 16211 } 16212 16213 not_successful: 16214 16215 #if (defined(__i386) || defined(__amd64)) /* DMAFREE for x86 only */ 16216 /* 16217 * The following is based upon knowledge of the underlying transport 16218 * and its use of DMA resources. This code should be removed when 16219 * PKT_DMA_PARTIAL support is taken out of the disk driver in favor 16220 * of the new PKT_CMD_BREAKUP protocol. See also sd_initpkt_for_buf() 16221 * and sd_start_cmds(). 16222 * 16223 * Free any DMA resources associated with this command if there 16224 * is a chance it could be retried or enqueued for later retry. 16225 * If we keep the DMA binding then mpxio cannot reissue the 16226 * command on another path whenever a path failure occurs. 16227 * 16228 * Note that when PKT_DMA_PARTIAL is used, free/reallocation 16229 * causes the *entire* transfer to start over again from the 16230 * beginning of the request, even for PARTIAL chunks that 16231 * have already transferred successfully. 16232 * 16233 * This is only done for non-uscsi commands (and also skipped for the 16234 * driver's internal RQS command). Also just do this for Fibre Channel 16235 * devices as these are the only ones that support mpxio. 16236 */ 16237 if ((un->un_f_is_fibre == TRUE) && 16238 ((xp->xb_pkt_flags & SD_XB_USCSICMD) == 0) && 16239 ((pktp->pkt_flags & FLAG_SENSING) == 0)) { 16240 scsi_dmafree(pktp); 16241 xp->xb_pkt_flags |= SD_XB_DMA_FREED; 16242 } 16243 #endif 16244 16245 /* 16246 * The command did not successfully complete as requested so check 16247 * for FLAG_DIAGNOSE. If set this indicates a uscsi or internal 16248 * driver command that should not be retried so just return. If 16249 * FLAG_DIAGNOSE is not set the error will be processed below. 16250 */ 16251 if ((pktp->pkt_flags & FLAG_DIAGNOSE) != 0) { 16252 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 16253 "sdintr: FLAG_DIAGNOSE: sd_return_failed_command\n"); 16254 /* 16255 * Issue a request sense if a check condition caused the error 16256 * (we handle the auto request sense case above), otherwise 16257 * just fail the command. 
16258 */ 16259 if ((pktp->pkt_reason == CMD_CMPLT) && 16260 (SD_GET_PKT_STATUS(pktp) == STATUS_CHECK)) { 16261 sd_send_request_sense_command(un, bp, pktp); 16262 } else { 16263 sd_return_failed_command(un, bp, EIO); 16264 } 16265 goto exit; 16266 } 16267 16268 /* 16269 * The command did not successfully complete as requested so process 16270 * the error, retry, and/or attempt recovery. 16271 */ 16272 switch (pktp->pkt_reason) { 16273 case CMD_CMPLT: 16274 switch (SD_GET_PKT_STATUS(pktp)) { 16275 case STATUS_GOOD: 16276 /* 16277 * The command completed successfully with a non-zero 16278 * residual 16279 */ 16280 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 16281 "sdintr: STATUS_GOOD \n"); 16282 sd_pkt_status_good(un, bp, xp, pktp); 16283 break; 16284 16285 case STATUS_CHECK: 16286 case STATUS_TERMINATED: 16287 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 16288 "sdintr: STATUS_TERMINATED | STATUS_CHECK\n"); 16289 sd_pkt_status_check_condition(un, bp, xp, pktp); 16290 break; 16291 16292 case STATUS_BUSY: 16293 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 16294 "sdintr: STATUS_BUSY\n"); 16295 sd_pkt_status_busy(un, bp, xp, pktp); 16296 break; 16297 16298 case STATUS_RESERVATION_CONFLICT: 16299 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 16300 "sdintr: STATUS_RESERVATION_CONFLICT\n"); 16301 sd_pkt_status_reservation_conflict(un, bp, xp, pktp); 16302 break; 16303 16304 case STATUS_QFULL: 16305 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 16306 "sdintr: STATUS_QFULL\n"); 16307 sd_pkt_status_qfull(un, bp, xp, pktp); 16308 break; 16309 16310 case STATUS_MET: 16311 case STATUS_INTERMEDIATE: 16312 case STATUS_SCSI2: 16313 case STATUS_INTERMEDIATE_MET: 16314 case STATUS_ACA_ACTIVE: 16315 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 16316 "Unexpected SCSI status received: 0x%x\n", 16317 SD_GET_PKT_STATUS(pktp)); 16318 /* 16319 * Mark ssc_flags when an invalid status code is 16320 * detected for a non-USCSI command.
16321 */ 16322 if (!(xp->xb_pkt_flags & SD_XB_USCSICMD)) { 16323 sd_ssc_set_info(sscp, SSC_FLAGS_INVALID_STATUS, 16324 "stat-code"); 16325 } 16326 sd_return_failed_command(un, bp, EIO); 16327 break; 16328 16329 default: 16330 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 16331 "Invalid SCSI status received: 0x%x\n", 16332 SD_GET_PKT_STATUS(pktp)); 16333 if (!(xp->xb_pkt_flags & SD_XB_USCSICMD)) { 16334 sd_ssc_set_info(sscp, SSC_FLAGS_INVALID_STATUS, 16335 "stat-code"); 16336 } 16337 sd_return_failed_command(un, bp, EIO); 16338 break; 16339 16340 } 16341 break; 16342 16343 case CMD_INCOMPLETE: 16344 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 16345 "sdintr: CMD_INCOMPLETE\n"); 16346 sd_pkt_reason_cmd_incomplete(un, bp, xp, pktp); 16347 break; 16348 case CMD_TRAN_ERR: 16349 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 16350 "sdintr: CMD_TRAN_ERR\n"); 16351 sd_pkt_reason_cmd_tran_err(un, bp, xp, pktp); 16352 break; 16353 case CMD_RESET: 16354 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 16355 "sdintr: CMD_RESET \n"); 16356 sd_pkt_reason_cmd_reset(un, bp, xp, pktp); 16357 break; 16358 case CMD_ABORTED: 16359 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 16360 "sdintr: CMD_ABORTED \n"); 16361 sd_pkt_reason_cmd_aborted(un, bp, xp, pktp); 16362 break; 16363 case CMD_TIMEOUT: 16364 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 16365 "sdintr: CMD_TIMEOUT\n"); 16366 sd_pkt_reason_cmd_timeout(un, bp, xp, pktp); 16367 break; 16368 case CMD_UNX_BUS_FREE: 16369 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 16370 "sdintr: CMD_UNX_BUS_FREE \n"); 16371 sd_pkt_reason_cmd_unx_bus_free(un, bp, xp, pktp); 16372 break; 16373 case CMD_TAG_REJECT: 16374 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 16375 "sdintr: CMD_TAG_REJECT\n"); 16376 sd_pkt_reason_cmd_tag_reject(un, bp, xp, pktp); 16377 break; 16378 default: 16379 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 16380 "sdintr: default\n"); 16381 /* 16382 * Mark the ssc_flags for detecting invalid pkt_reason. 16383 */ 16384 if (!(xp->xb_pkt_flags & SD_XB_USCSICMD)) { 16385 sd_ssc_set_info(sscp, SSC_FLAGS_INVALID_PKT_REASON, 16386 "pkt-reason"); 16387 } 16388 sd_pkt_reason_default(un, bp, xp, pktp); 16389 break; 16390 } 16391 16392 exit: 16393 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sdintr: exit\n"); 16394 16395 /* Decrement counter to indicate that the callback routine is done. */ 16396 un->un_in_callback--; 16397 ASSERT(un->un_in_callback >= 0); 16398 16399 /* 16400 * At this point, the pkt has been dispatched, i.e., it is either 16401 * being re-tried or has been returned to its caller and should 16402 * not be referenced. 16403 */ 16404 16405 mutex_exit(SD_MUTEX(un)); 16406 } 16407 16408 16409 /* 16410 * Function: sd_print_incomplete_msg 16411 * 16412 * Description: Prints the error message for a CMD_INCOMPLETE error. 16413 * 16414 * Arguments: un - ptr to associated softstate for the device. 16415 * bp - ptr to the buf(9S) for the command. 16416 * arg - message string ptr 16417 * code - SD_DELAYED_RETRY_ISSUED, SD_IMMEDIATE_RETRY_ISSUED, 16418 * or SD_NO_RETRY_ISSUED.
16419 * 16420 * Context: May be called under interrupt context 16421 */ 16422 16423 static void 16424 sd_print_incomplete_msg(struct sd_lun *un, struct buf *bp, void *arg, int code) 16425 { 16426 struct scsi_pkt *pktp; 16427 char *msgp; 16428 char *cmdp = arg; 16429 16430 ASSERT(un != NULL); 16431 ASSERT(mutex_owned(SD_MUTEX(un))); 16432 ASSERT(bp != NULL); 16433 ASSERT(arg != NULL); 16434 pktp = SD_GET_PKTP(bp); 16435 ASSERT(pktp != NULL); 16436 16437 switch (code) { 16438 case SD_DELAYED_RETRY_ISSUED: 16439 case SD_IMMEDIATE_RETRY_ISSUED: 16440 msgp = "retrying"; 16441 break; 16442 case SD_NO_RETRY_ISSUED: 16443 default: 16444 msgp = "giving up"; 16445 break; 16446 } 16447 16448 if ((pktp->pkt_flags & FLAG_SILENT) == 0) { 16449 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 16450 "incomplete %s- %s\n", cmdp, msgp); 16451 } 16452 } 16453 16454 16455 16456 /* 16457 * Function: sd_pkt_status_good 16458 * 16459 * Description: Processing for a STATUS_GOOD code in pkt_status. 16460 * 16461 * Context: May be called under interrupt context 16462 */ 16463 16464 static void 16465 sd_pkt_status_good(struct sd_lun *un, struct buf *bp, 16466 struct sd_xbuf *xp, struct scsi_pkt *pktp) 16467 { 16468 char *cmdp; 16469 16470 ASSERT(un != NULL); 16471 ASSERT(mutex_owned(SD_MUTEX(un))); 16472 ASSERT(bp != NULL); 16473 ASSERT(xp != NULL); 16474 ASSERT(pktp != NULL); 16475 ASSERT(pktp->pkt_reason == CMD_CMPLT); 16476 ASSERT(SD_GET_PKT_STATUS(pktp) == STATUS_GOOD); 16477 ASSERT(pktp->pkt_resid != 0); 16478 16479 SD_TRACE(SD_LOG_IO_CORE, un, "sd_pkt_status_good: entry\n"); 16480 16481 SD_UPDATE_ERRSTATS(un, sd_harderrs); 16482 switch (SD_GET_PKT_OPCODE(pktp) & 0x1F) { 16483 case SCMD_READ: 16484 cmdp = "read"; 16485 break; 16486 case SCMD_WRITE: 16487 cmdp = "write"; 16488 break; 16489 default: 16490 SD_UPDATE_B_RESID(bp, pktp); 16491 sd_return_command(un, bp); 16492 SD_TRACE(SD_LOG_IO_CORE, un, "sd_pkt_status_good: exit\n"); 16493 return; 16494 } 16495 16496 /* 16497 * See if we can retry the read/write, preferrably immediately. 16498 * If retries are exhaused, then sd_retry_command() will update 16499 * the b_resid count. 16500 */ 16501 sd_retry_command(un, bp, SD_RETRIES_STANDARD, sd_print_incomplete_msg, 16502 cmdp, EIO, (clock_t)0, NULL); 16503 16504 SD_TRACE(SD_LOG_IO_CORE, un, "sd_pkt_status_good: exit\n"); 16505 } 16506 16507 16508 16509 16510 16511 /* 16512 * Function: sd_handle_request_sense 16513 * 16514 * Description: Processing for non-auto Request Sense command. 16515 * 16516 * Arguments: un - ptr to associated softstate 16517 * sense_bp - ptr to buf(9S) for the RQS command 16518 * sense_xp - ptr to the sd_xbuf for the RQS command 16519 * sense_pktp - ptr to the scsi_pkt(9S) for the RQS command 16520 * 16521 * Context: May be called under interrupt context 16522 */ 16523 16524 static void 16525 sd_handle_request_sense(struct sd_lun *un, struct buf *sense_bp, 16526 struct sd_xbuf *sense_xp, struct scsi_pkt *sense_pktp) 16527 { 16528 struct buf *cmd_bp; /* buf for the original command */ 16529 struct sd_xbuf *cmd_xp; /* sd_xbuf for the original command */ 16530 struct scsi_pkt *cmd_pktp; /* pkt for the original command */ 16531 size_t actual_len; /* actual sense data length */ 16532 16533 ASSERT(un != NULL); 16534 ASSERT(mutex_owned(SD_MUTEX(un))); 16535 ASSERT(sense_bp != NULL); 16536 ASSERT(sense_xp != NULL); 16537 ASSERT(sense_pktp != NULL); 16538 16539 /* 16540 * Note the sense_bp, sense_xp, and sense_pktp here are for the 16541 * RQS command and not the original command. 
16542 */ 16543 ASSERT(sense_pktp == un->un_rqs_pktp); 16544 ASSERT(sense_bp == un->un_rqs_bp); 16545 ASSERT((sense_pktp->pkt_flags & (FLAG_SENSING | FLAG_HEAD)) == 16546 (FLAG_SENSING | FLAG_HEAD)); 16547 ASSERT((((SD_GET_XBUF(sense_xp->xb_sense_bp))->xb_pktp->pkt_flags) & 16548 FLAG_SENSING) == FLAG_SENSING); 16549 16550 /* These are the bp, xp, and pktp for the original command */ 16551 cmd_bp = sense_xp->xb_sense_bp; 16552 cmd_xp = SD_GET_XBUF(cmd_bp); 16553 cmd_pktp = SD_GET_PKTP(cmd_bp); 16554 16555 if (sense_pktp->pkt_reason != CMD_CMPLT) { 16556 /* 16557 * The REQUEST SENSE command failed. Release the REQUEST 16558 * SENSE command for re-use, get back the bp for the original 16559 * command, and attempt to re-try the original command if 16560 * FLAG_DIAGNOSE is not set in the original packet. 16561 */ 16562 SD_UPDATE_ERRSTATS(un, sd_harderrs); 16563 if ((cmd_pktp->pkt_flags & FLAG_DIAGNOSE) == 0) { 16564 cmd_bp = sd_mark_rqs_idle(un, sense_xp); 16565 sd_retry_command(un, cmd_bp, SD_RETRIES_STANDARD, 16566 NULL, NULL, EIO, (clock_t)0, NULL); 16567 return; 16568 } 16569 } 16570 16571 /* 16572 * Save the relevant sense info into the xp for the original cmd. 16573 * 16574 * Note: if the request sense failed the state info will be zero 16575 * as set in sd_mark_rqs_busy() 16576 */ 16577 cmd_xp->xb_sense_status = *(sense_pktp->pkt_scbp); 16578 cmd_xp->xb_sense_state = sense_pktp->pkt_state; 16579 actual_len = MAX_SENSE_LENGTH - sense_pktp->pkt_resid; 16580 if ((cmd_xp->xb_pkt_flags & SD_XB_USCSICMD) && 16581 (((struct uscsi_cmd *)cmd_xp->xb_pktinfo)->uscsi_rqlen > 16582 SENSE_LENGTH)) { 16583 bcopy(sense_bp->b_un.b_addr, cmd_xp->xb_sense_data, 16584 MAX_SENSE_LENGTH); 16585 cmd_xp->xb_sense_resid = sense_pktp->pkt_resid; 16586 } else { 16587 bcopy(sense_bp->b_un.b_addr, cmd_xp->xb_sense_data, 16588 SENSE_LENGTH); 16589 if (actual_len < SENSE_LENGTH) { 16590 cmd_xp->xb_sense_resid = SENSE_LENGTH - actual_len; 16591 } else { 16592 cmd_xp->xb_sense_resid = 0; 16593 } 16594 } 16595 16596 /* 16597 * Free up the RQS command.... 16598 * NOTE: 16599 * Must do this BEFORE calling sd_validate_sense_data! 16600 * sd_validate_sense_data may return the original command in 16601 * which case the pkt will be freed and the flags can no 16602 * longer be touched. 16603 * SD_MUTEX is held through this process until the command 16604 * is dispatched based upon the sense data, so there are 16605 * no race conditions. 16606 */ 16607 (void) sd_mark_rqs_idle(un, sense_xp); 16608 16609 /* 16610 * For a retryable command see if we have valid sense data, if so then 16611 * turn it over to sd_decode_sense() to figure out the right course of 16612 * action. Just fail a non-retryable command. 16613 */ 16614 if ((cmd_pktp->pkt_flags & FLAG_DIAGNOSE) == 0) { 16615 if (sd_validate_sense_data(un, cmd_bp, cmd_xp, actual_len) == 16616 SD_SENSE_DATA_IS_VALID) { 16617 sd_decode_sense(un, cmd_bp, cmd_xp, cmd_pktp); 16618 } 16619 } else { 16620 SD_DUMP_MEMORY(un, SD_LOG_IO_CORE, "Failed CDB", 16621 (uchar_t *)cmd_pktp->pkt_cdbp, CDB_SIZE, SD_LOG_HEX); 16622 SD_DUMP_MEMORY(un, SD_LOG_IO_CORE, "Sense Data", 16623 (uchar_t *)cmd_xp->xb_sense_data, SENSE_LENGTH, SD_LOG_HEX); 16624 sd_return_failed_command(un, cmd_bp, EIO); 16625 } 16626 } 16627 16628 16629 16630 16631 /* 16632 * Function: sd_handle_auto_request_sense 16633 * 16634 * Description: Processing for auto-request sense information. 
16635 * 16636 * Arguments: un - ptr to associated softstate 16637 * bp - ptr to buf(9S) for the command 16638 * xp - ptr to the sd_xbuf for the command 16639 * pktp - ptr to the scsi_pkt(9S) for the command 16640 * 16641 * Context: May be called under interrupt context 16642 */ 16643 16644 static void 16645 sd_handle_auto_request_sense(struct sd_lun *un, struct buf *bp, 16646 struct sd_xbuf *xp, struct scsi_pkt *pktp) 16647 { 16648 struct scsi_arq_status *asp; 16649 size_t actual_len; 16650 16651 ASSERT(un != NULL); 16652 ASSERT(mutex_owned(SD_MUTEX(un))); 16653 ASSERT(bp != NULL); 16654 ASSERT(xp != NULL); 16655 ASSERT(pktp != NULL); 16656 ASSERT(pktp != un->un_rqs_pktp); 16657 ASSERT(bp != un->un_rqs_bp); 16658 16659 /* 16660 * For auto-request sense, we get a scsi_arq_status back from 16661 * the HBA, with the sense data in the sts_sensedata member. 16662 * The pkt_scbp of the packet points to this scsi_arq_status. 16663 */ 16664 asp = (struct scsi_arq_status *)(pktp->pkt_scbp); 16665 16666 if (asp->sts_rqpkt_reason != CMD_CMPLT) { 16667 /* 16668 * The auto REQUEST SENSE failed; see if we can re-try 16669 * the original command. 16670 */ 16671 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 16672 "auto request sense failed (reason=%s)\n", 16673 scsi_rname(asp->sts_rqpkt_reason)); 16674 16675 sd_reset_target(un, pktp); 16676 16677 sd_retry_command(un, bp, SD_RETRIES_STANDARD, 16678 NULL, NULL, EIO, (clock_t)0, NULL); 16679 return; 16680 } 16681 16682 /* Save the relevant sense info into the xp for the original cmd. */ 16683 xp->xb_sense_status = *((uchar_t *)(&(asp->sts_rqpkt_status))); 16684 xp->xb_sense_state = asp->sts_rqpkt_state; 16685 xp->xb_sense_resid = asp->sts_rqpkt_resid; 16686 if (xp->xb_sense_state & STATE_XARQ_DONE) { 16687 actual_len = MAX_SENSE_LENGTH - xp->xb_sense_resid; 16688 bcopy(&asp->sts_sensedata, xp->xb_sense_data, 16689 MAX_SENSE_LENGTH); 16690 } else { 16691 if (xp->xb_sense_resid > SENSE_LENGTH) { 16692 actual_len = MAX_SENSE_LENGTH - xp->xb_sense_resid; 16693 } else { 16694 actual_len = SENSE_LENGTH - xp->xb_sense_resid; 16695 } 16696 if (xp->xb_pkt_flags & SD_XB_USCSICMD) { 16697 if ((((struct uscsi_cmd *) 16698 (xp->xb_pktinfo))->uscsi_rqlen) > actual_len) { 16699 xp->xb_sense_resid = (((struct uscsi_cmd *) 16700 (xp->xb_pktinfo))->uscsi_rqlen) - 16701 actual_len; 16702 } else { 16703 xp->xb_sense_resid = 0; 16704 } 16705 } 16706 bcopy(&asp->sts_sensedata, xp->xb_sense_data, SENSE_LENGTH); 16707 } 16708 16709 /* 16710 * See if we have valid sense data, if so then turn it over to 16711 * sd_decode_sense() to figure out the right course of action. 16712 */ 16713 if (sd_validate_sense_data(un, bp, xp, actual_len) == 16714 SD_SENSE_DATA_IS_VALID) { 16715 sd_decode_sense(un, bp, xp, pktp); 16716 } 16717 } 16718 16719 16720 /* 16721 * Function: sd_print_sense_failed_msg 16722 * 16723 * Description: Print log message when RQS has failed. 
16724 * 16725 * Arguments: un - ptr to associated softstate 16726 * bp - ptr to buf(9S) for the command 16727 * arg - generic message string ptr 16728 * code - SD_IMMEDIATE_RETRY_ISSUED, SD_DELAYED_RETRY_ISSUED, 16729 * or SD_NO_RETRY_ISSUED 16730 * 16731 * Context: May be called from interrupt context 16732 */ 16733 16734 static void 16735 sd_print_sense_failed_msg(struct sd_lun *un, struct buf *bp, void *arg, 16736 int code) 16737 { 16738 char *msgp = arg; 16739 16740 ASSERT(un != NULL); 16741 ASSERT(mutex_owned(SD_MUTEX(un))); 16742 ASSERT(bp != NULL); 16743 16744 if ((code == SD_NO_RETRY_ISSUED) && (msgp != NULL)) { 16745 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, msgp); 16746 } 16747 } 16748 16749 16750 /* 16751 * Function: sd_validate_sense_data 16752 * 16753 * Description: Check the given sense data for validity. 16754 * If the sense data is not valid, the command will 16755 * be either failed or retried! 16756 * 16757 * Return Code: SD_SENSE_DATA_IS_INVALID 16758 * SD_SENSE_DATA_IS_VALID 16759 * 16760 * Context: May be called from interrupt context 16761 */ 16762 16763 static int 16764 sd_validate_sense_data(struct sd_lun *un, struct buf *bp, struct sd_xbuf *xp, 16765 size_t actual_len) 16766 { 16767 struct scsi_extended_sense *esp; 16768 struct scsi_pkt *pktp; 16769 char *msgp = NULL; 16770 sd_ssc_t *sscp; 16771 16772 ASSERT(un != NULL); 16773 ASSERT(mutex_owned(SD_MUTEX(un))); 16774 ASSERT(bp != NULL); 16775 ASSERT(bp != un->un_rqs_bp); 16776 ASSERT(xp != NULL); 16777 ASSERT(un->un_fm_private != NULL); 16778 16779 pktp = SD_GET_PKTP(bp); 16780 ASSERT(pktp != NULL); 16781 16782 sscp = &((struct sd_fm_internal *)(un->un_fm_private))->fm_ssc; 16783 ASSERT(sscp != NULL); 16784 16785 /* 16786 * Check the status of the RQS command (auto or manual). 16787 */ 16788 switch (xp->xb_sense_status & STATUS_MASK) { 16789 case STATUS_GOOD: 16790 break; 16791 16792 case STATUS_RESERVATION_CONFLICT: 16793 sd_pkt_status_reservation_conflict(un, bp, xp, pktp); 16794 return (SD_SENSE_DATA_IS_INVALID); 16795 16796 case STATUS_BUSY: 16797 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 16798 "Busy Status on REQUEST SENSE\n"); 16799 sd_retry_command(un, bp, SD_RETRIES_BUSY, NULL, 16800 NULL, EIO, un->un_busy_timeout / 500, kstat_waitq_enter); 16801 return (SD_SENSE_DATA_IS_INVALID); 16802 16803 case STATUS_QFULL: 16804 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 16805 "QFULL Status on REQUEST SENSE\n"); 16806 sd_retry_command(un, bp, SD_RETRIES_STANDARD, NULL, 16807 NULL, EIO, un->un_busy_timeout / 500, kstat_waitq_enter); 16808 return (SD_SENSE_DATA_IS_INVALID); 16809 16810 case STATUS_CHECK: 16811 case STATUS_TERMINATED: 16812 msgp = "Check Condition on REQUEST SENSE\n"; 16813 goto sense_failed; 16814 16815 default: 16816 msgp = "Not STATUS_GOOD on REQUEST_SENSE\n"; 16817 goto sense_failed; 16818 } 16819 16820 /* 16821 * See if we got the minimum required amount of sense data. 16822 * Note: We are assuming the returned sense data is SENSE_LENGTH bytes 16823 * or less. 
16824 */ 16825 if (((xp->xb_sense_state & STATE_XFERRED_DATA) == 0) || 16826 (actual_len == 0)) { 16827 msgp = "Request Sense couldn't get sense data\n"; 16828 goto sense_failed; 16829 } 16830 16831 if (actual_len < SUN_MIN_SENSE_LENGTH) { 16832 msgp = "Not enough sense information\n"; 16833 /* Mark the ssc_flags for detecting invalid sense data */ 16834 if (!(xp->xb_pkt_flags & SD_XB_USCSICMD)) { 16835 sd_ssc_set_info(sscp, SSC_FLAGS_INVALID_SENSE, 16836 "sense-data"); 16837 } 16838 goto sense_failed; 16839 } 16840 16841 /* 16842 * We require the extended sense data 16843 */ 16844 esp = (struct scsi_extended_sense *)xp->xb_sense_data; 16845 if (esp->es_class != CLASS_EXTENDED_SENSE) { 16846 if ((pktp->pkt_flags & FLAG_SILENT) == 0) { 16847 static char tmp[8]; 16848 static char buf[148]; 16849 char *p = (char *)(xp->xb_sense_data); 16850 int i; 16851 16852 mutex_enter(&sd_sense_mutex); 16853 (void) strcpy(buf, "undecodable sense information:"); 16854 for (i = 0; i < actual_len; i++) { 16855 (void) sprintf(tmp, " 0x%x", *(p++)&0xff); 16856 (void) strcpy(&buf[strlen(buf)], tmp); 16857 } 16858 i = strlen(buf); 16859 (void) strcpy(&buf[i], "-(assumed fatal)\n"); 16860 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, buf); 16861 mutex_exit(&sd_sense_mutex); 16862 } 16863 16864 /* Mark the ssc_flags for detecting invalid sense data */ 16865 if (!(xp->xb_pkt_flags & SD_XB_USCSICMD)) { 16866 sd_ssc_set_info(sscp, SSC_FLAGS_INVALID_SENSE, 16867 "sense-data"); 16868 } 16869 16870 /* Note: Legacy behavior, fail the command with no retry */ 16871 sd_return_failed_command(un, bp, EIO); 16872 return (SD_SENSE_DATA_IS_INVALID); 16873 } 16874 16875 /* 16876 * Check that es_code is valid (es_class concatenated with es_code 16877 * makes up the "response code" field). es_class will always be 7, so 16878 * make sure es_code is 0, 1, 2, 3 or 0xf; es_code indicates the 16879 * sense data format. 16880 */ 16881 if ((esp->es_code != CODE_FMT_FIXED_CURRENT) && 16882 (esp->es_code != CODE_FMT_FIXED_DEFERRED) && 16883 (esp->es_code != CODE_FMT_DESCR_CURRENT) && 16884 (esp->es_code != CODE_FMT_DESCR_DEFERRED) && 16885 (esp->es_code != CODE_FMT_VENDOR_SPECIFIC)) { 16886 /* Mark the ssc_flags for detecting invalid sense data */ 16887 if (!(xp->xb_pkt_flags & SD_XB_USCSICMD)) { 16888 sd_ssc_set_info(sscp, SSC_FLAGS_INVALID_SENSE, 16889 "sense-data"); 16890 } 16891 goto sense_failed; 16892 } 16893 16894 return (SD_SENSE_DATA_IS_VALID); 16895 16896 sense_failed: 16897 /* 16898 * If the request sense failed (for whatever reason), attempt 16899 * to retry the original command. 16900 */ 16901 #if defined(__i386) || defined(__amd64) 16902 /* 16903 * SD_RETRY_DELAY is conditionally compiled (#if fibre) in 16904 * sddef.h for the SPARC platform, while x86 uses one binary 16905 * for both SCSI and FC. 16906 * The SD_RETRY_DELAY value used here needs to be adjusted 16907 * whenever SD_RETRY_DELAY changes in sddef.h 16908 */ 16909 sd_retry_command(un, bp, SD_RETRIES_STANDARD, 16910 sd_print_sense_failed_msg, msgp, EIO, 16911 un->un_f_is_fibre?drv_usectohz(100000):(clock_t)0, NULL); 16912 #else 16913 sd_retry_command(un, bp, SD_RETRIES_STANDARD, 16914 sd_print_sense_failed_msg, msgp, EIO, SD_RETRY_DELAY, NULL); 16915 #endif 16916 16917 return (SD_SENSE_DATA_IS_INVALID); 16918 } 16919 16920 /* 16921 * Function: sd_decode_sense 16922 * 16923 * Description: Take recovery action(s) when SCSI Sense Data is received. 16924 * 16925 * Context: Interrupt context.
16926 */ 16927 16928 static void 16929 sd_decode_sense(struct sd_lun *un, struct buf *bp, struct sd_xbuf *xp, 16930 struct scsi_pkt *pktp) 16931 { 16932 uint8_t sense_key; 16933 16934 ASSERT(un != NULL); 16935 ASSERT(mutex_owned(SD_MUTEX(un))); 16936 ASSERT(bp != NULL); 16937 ASSERT(bp != un->un_rqs_bp); 16938 ASSERT(xp != NULL); 16939 ASSERT(pktp != NULL); 16940 16941 sense_key = scsi_sense_key(xp->xb_sense_data); 16942 16943 switch (sense_key) { 16944 case KEY_NO_SENSE: 16945 sd_sense_key_no_sense(un, bp, xp, pktp); 16946 break; 16947 case KEY_RECOVERABLE_ERROR: 16948 sd_sense_key_recoverable_error(un, xp->xb_sense_data, 16949 bp, xp, pktp); 16950 break; 16951 case KEY_NOT_READY: 16952 sd_sense_key_not_ready(un, xp->xb_sense_data, 16953 bp, xp, pktp); 16954 break; 16955 case KEY_MEDIUM_ERROR: 16956 case KEY_HARDWARE_ERROR: 16957 sd_sense_key_medium_or_hardware_error(un, 16958 xp->xb_sense_data, bp, xp, pktp); 16959 break; 16960 case KEY_ILLEGAL_REQUEST: 16961 sd_sense_key_illegal_request(un, bp, xp, pktp); 16962 break; 16963 case KEY_UNIT_ATTENTION: 16964 sd_sense_key_unit_attention(un, xp->xb_sense_data, 16965 bp, xp, pktp); 16966 break; 16967 case KEY_WRITE_PROTECT: 16968 case KEY_VOLUME_OVERFLOW: 16969 case KEY_MISCOMPARE: 16970 sd_sense_key_fail_command(un, bp, xp, pktp); 16971 break; 16972 case KEY_BLANK_CHECK: 16973 sd_sense_key_blank_check(un, bp, xp, pktp); 16974 break; 16975 case KEY_ABORTED_COMMAND: 16976 sd_sense_key_aborted_command(un, bp, xp, pktp); 16977 break; 16978 case KEY_VENDOR_UNIQUE: 16979 case KEY_COPY_ABORTED: 16980 case KEY_EQUAL: 16981 case KEY_RESERVED: 16982 default: 16983 sd_sense_key_default(un, xp->xb_sense_data, 16984 bp, xp, pktp); 16985 break; 16986 } 16987 } 16988 16989 16990 /* 16991 * Function: sd_dump_memory 16992 * 16993 * Description: Debug logging routine to print the contents of a user provided 16994 * buffer. The output of the buffer is broken up into 256 byte 16995 * segments due to a size constraint of the scsi_log 16996 * implementation. 16997 * 16998 * Arguments: un - ptr to softstate 16999 * comp - component mask 17000 * title - "title" string to precede data when printed 17001 * data - ptr to data block to be printed 17002 * len - size of data block to be printed 17003 * fmt - SD_LOG_HEX (use 0x%02x format) or SD_LOG_CHAR (use %c) 17004 * 17005 * Context: May be called from interrupt context 17006 */ 17007 17008 #define SD_DUMP_MEMORY_BUF_SIZE 256 17009 17010 static char *sd_dump_format_string[] = { 17011 " 0x%02x", 17012 " %c" 17013 }; 17014 17015 static void 17016 sd_dump_memory(struct sd_lun *un, uint_t comp, char *title, uchar_t *data, 17017 int len, int fmt) 17018 { 17019 int i, j; 17020 int avail_count; 17021 int start_offset; 17022 int end_offset; 17023 size_t entry_len; 17024 char *bufp; 17025 char *local_buf; 17026 char *format_string; 17027 17028 ASSERT((fmt == SD_LOG_HEX) || (fmt == SD_LOG_CHAR)); 17029 17030 /* 17031 * In the debug version of the driver, this function is called from a 17032 * number of places which are NOPs in the release driver. 17033 * The debug driver therefore has additional methods of filtering 17034 * debug output. 17035 */ 17036 #ifdef SDDEBUG 17037 /* 17038 * In the debug version of the driver we can reduce the amount of debug 17039 * messages by setting sd_error_level to something other than 17040 * SCSI_ERR_ALL and clearing bits in sd_level_mask and 17041 * sd_component_mask.
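 *
 * For example (an illustrative restatement of the checks just
 * below), a dump tagged with component mask comp is emitted by the
 * debug driver only when all of the following hold:
 *
 *	(sd_level_mask & (SD_LOGMASK_DUMP_MEM | SD_LOGMASK_DIAG)) != 0
 *	(sd_component_mask & comp) != 0
 *	sd_error_level == SCSI_ERR_ALL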
17042 */ 17043 if (((sd_level_mask & (SD_LOGMASK_DUMP_MEM | SD_LOGMASK_DIAG)) == 0) || 17044 (sd_error_level != SCSI_ERR_ALL)) { 17045 return; 17046 } 17047 if (((sd_component_mask & comp) == 0) || 17048 (sd_error_level != SCSI_ERR_ALL)) { 17049 return; 17050 } 17051 #else 17052 if (sd_error_level != SCSI_ERR_ALL) { 17053 return; 17054 } 17055 #endif 17056 17057 local_buf = kmem_zalloc(SD_DUMP_MEMORY_BUF_SIZE, KM_SLEEP); 17058 bufp = local_buf; 17059 /* 17060 * Available length is the length of local_buf[], minus the 17061 * length of the title string, minus one for the ":", minus 17062 * one for the newline, minus one for the NULL terminator. 17063 * This gives the #bytes available for holding the printed 17064 * values from the given data buffer. 17065 */ 17066 if (fmt == SD_LOG_HEX) { 17067 format_string = sd_dump_format_string[0]; 17068 } else /* SD_LOG_CHAR */ { 17069 format_string = sd_dump_format_string[1]; 17070 } 17071 /* 17072 * Available count is the number of elements from the given 17073 * data buffer that we can fit into the available length. 17074 * This is based upon the size of the format string used. 17075 * Make one entry and find its size. 17076 */ 17077 (void) sprintf(bufp, format_string, data[0]); 17078 entry_len = strlen(bufp); 17079 avail_count = (SD_DUMP_MEMORY_BUF_SIZE - strlen(title) - 3) / entry_len; 17080 17081 j = 0; 17082 while (j < len) { 17083 bufp = local_buf; 17084 bzero(bufp, SD_DUMP_MEMORY_BUF_SIZE); 17085 start_offset = j; 17086 17087 end_offset = start_offset + avail_count; 17088 17089 (void) sprintf(bufp, "%s:", title); 17090 bufp += strlen(bufp); 17091 for (i = start_offset; ((i < end_offset) && (j < len)); 17092 i++, j++) { 17093 (void) sprintf(bufp, format_string, data[i]); 17094 bufp += entry_len; 17095 } 17096 (void) sprintf(bufp, "\n"); 17097 17098 scsi_log(SD_DEVINFO(un), sd_label, CE_NOTE, "%s", local_buf); 17099 } 17100 kmem_free(local_buf, SD_DUMP_MEMORY_BUF_SIZE); 17101 } 17102 17103 /* 17104 * Function: sd_print_sense_msg 17105 * 17106 * Description: Log a message based upon the given sense data.
17107 * 17108 * Arguments: un - ptr to associated softstate 17109 * bp - ptr to buf(9S) for the command 17110 * arg - ptr to associate sd_sense_info struct 17111 * code - SD_IMMEDIATE_RETRY_ISSUED, SD_DELAYED_RETRY_ISSUED, 17112 * or SD_NO_RETRY_ISSUED 17113 * 17114 * Context: May be called from interrupt context 17115 */ 17116 17117 static void 17118 sd_print_sense_msg(struct sd_lun *un, struct buf *bp, void *arg, int code) 17119 { 17120 struct sd_xbuf *xp; 17121 struct scsi_pkt *pktp; 17122 uint8_t *sensep; 17123 daddr_t request_blkno; 17124 diskaddr_t err_blkno; 17125 int severity; 17126 int pfa_flag; 17127 extern struct scsi_key_strings scsi_cmds[]; 17128 17129 ASSERT(un != NULL); 17130 ASSERT(mutex_owned(SD_MUTEX(un))); 17131 ASSERT(bp != NULL); 17132 xp = SD_GET_XBUF(bp); 17133 ASSERT(xp != NULL); 17134 pktp = SD_GET_PKTP(bp); 17135 ASSERT(pktp != NULL); 17136 ASSERT(arg != NULL); 17137 17138 severity = ((struct sd_sense_info *)(arg))->ssi_severity; 17139 pfa_flag = ((struct sd_sense_info *)(arg))->ssi_pfa_flag; 17140 17141 if ((code == SD_DELAYED_RETRY_ISSUED) || 17142 (code == SD_IMMEDIATE_RETRY_ISSUED)) { 17143 severity = SCSI_ERR_RETRYABLE; 17144 } 17145 17146 /* Use absolute block number for the request block number */ 17147 request_blkno = xp->xb_blkno; 17148 17149 /* 17150 * Now try to get the error block number from the sense data 17151 */ 17152 sensep = xp->xb_sense_data; 17153 17154 if (scsi_sense_info_uint64(sensep, SENSE_LENGTH, 17155 (uint64_t *)&err_blkno)) { 17156 /* 17157 * We retrieved the error block number from the information 17158 * portion of the sense data. 17159 * 17160 * For USCSI commands we are better off using the error 17161 * block no. as the requested block no. (This is the best 17162 * we can estimate.) 17163 */ 17164 if ((SD_IS_BUFIO(xp) == FALSE) && 17165 ((pktp->pkt_flags & FLAG_SILENT) == 0)) { 17166 request_blkno = err_blkno; 17167 } 17168 } else { 17169 /* 17170 * Without the es_valid bit set (for fixed format) or an 17171 * information descriptor (for descriptor format) we cannot 17172 * be certain of the error blkno, so just use the 17173 * request_blkno. 17174 */ 17175 err_blkno = (diskaddr_t)request_blkno; 17176 } 17177 17178 /* 17179 * The following will log the buffer contents for the release driver 17180 * if the SD_LOGMASK_DIAG bit of sd_level_mask is set, or the error 17181 * level is set to verbose. 
17182 */ 17183 sd_dump_memory(un, SD_LOG_IO, "Failed CDB", 17184 (uchar_t *)pktp->pkt_cdbp, CDB_SIZE, SD_LOG_HEX); 17185 sd_dump_memory(un, SD_LOG_IO, "Sense Data", 17186 (uchar_t *)sensep, SENSE_LENGTH, SD_LOG_HEX); 17187 17188 if (pfa_flag == FALSE) { 17189 /* This is normally only set for USCSI */ 17190 if ((pktp->pkt_flags & FLAG_SILENT) != 0) { 17191 return; 17192 } 17193 17194 if ((SD_IS_BUFIO(xp) == TRUE) && 17195 (((sd_level_mask & SD_LOGMASK_DIAG) == 0) && 17196 (severity < sd_error_level))) { 17197 return; 17198 } 17199 } 17200 /* 17201 * Check for Sonoma Failover and keep a count of how many failed I/O's 17202 */ 17203 if ((SD_IS_LSI(un)) && 17204 (scsi_sense_key(sensep) == KEY_ILLEGAL_REQUEST) && 17205 (scsi_sense_asc(sensep) == 0x94) && 17206 (scsi_sense_ascq(sensep) == 0x01)) { 17207 un->un_sonoma_failure_count++; 17208 if (un->un_sonoma_failure_count > 1) { 17209 return; 17210 } 17211 } 17212 17213 scsi_vu_errmsg(SD_SCSI_DEVP(un), pktp, sd_label, severity, 17214 request_blkno, err_blkno, scsi_cmds, 17215 (struct scsi_extended_sense *)sensep, 17216 un->un_additional_codes, NULL); 17217 } 17218 17219 /* 17220 * Function: sd_sense_key_no_sense 17221 * 17222 * Description: Recovery action when sense data was not received. 17223 * 17224 * Context: May be called from interrupt context 17225 */ 17226 17227 static void 17228 sd_sense_key_no_sense(struct sd_lun *un, struct buf *bp, 17229 struct sd_xbuf *xp, struct scsi_pkt *pktp) 17230 { 17231 struct sd_sense_info si; 17232 17233 ASSERT(un != NULL); 17234 ASSERT(mutex_owned(SD_MUTEX(un))); 17235 ASSERT(bp != NULL); 17236 ASSERT(xp != NULL); 17237 ASSERT(pktp != NULL); 17238 17239 si.ssi_severity = SCSI_ERR_FATAL; 17240 si.ssi_pfa_flag = FALSE; 17241 17242 SD_UPDATE_ERRSTATS(un, sd_softerrs); 17243 17244 sd_retry_command(un, bp, SD_RETRIES_STANDARD, sd_print_sense_msg, 17245 &si, EIO, (clock_t)0, NULL); 17246 } 17247 17248 17249 /* 17250 * Function: sd_sense_key_recoverable_error 17251 * 17252 * Description: Recovery actions for a SCSI "Recovered Error" sense key. 17253 * 17254 * Context: May be called from interrupt context 17255 */ 17256 17257 static void 17258 sd_sense_key_recoverable_error(struct sd_lun *un, 17259 uint8_t *sense_datap, 17260 struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp) 17261 { 17262 struct sd_sense_info si; 17263 uint8_t asc = scsi_sense_asc(sense_datap); 17264 17265 ASSERT(un != NULL); 17266 ASSERT(mutex_owned(SD_MUTEX(un))); 17267 ASSERT(bp != NULL); 17268 ASSERT(xp != NULL); 17269 ASSERT(pktp != NULL); 17270 17271 /* 17272 * 0x5D: FAILURE PREDICTION THRESHOLD EXCEEDED 17273 */ 17274 if ((asc == 0x5D) && (sd_report_pfa != 0)) { 17275 SD_UPDATE_ERRSTATS(un, sd_rq_pfa_err); 17276 si.ssi_severity = SCSI_ERR_INFO; 17277 si.ssi_pfa_flag = TRUE; 17278 } else { 17279 SD_UPDATE_ERRSTATS(un, sd_softerrs); 17280 SD_UPDATE_ERRSTATS(un, sd_rq_recov_err); 17281 si.ssi_severity = SCSI_ERR_RECOVERED; 17282 si.ssi_pfa_flag = FALSE; 17283 } 17284 17285 if (pktp->pkt_resid == 0) { 17286 sd_print_sense_msg(un, bp, &si, SD_NO_RETRY_ISSUED); 17287 sd_return_command(un, bp); 17288 return; 17289 } 17290 17291 sd_retry_command(un, bp, SD_RETRIES_STANDARD, sd_print_sense_msg, 17292 &si, EIO, (clock_t)0, NULL); 17293 } 17294 17295 17296 17297 17298 /* 17299 * Function: sd_sense_key_not_ready 17300 * 17301 * Description: Recovery actions for a SCSI "Not Ready" sense key. 
17302 * 17303 * Context: May be called from interrupt context 17304 */ 17305 17306 static void 17307 sd_sense_key_not_ready(struct sd_lun *un, 17308 uint8_t *sense_datap, 17309 struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp) 17310 { 17311 struct sd_sense_info si; 17312 uint8_t asc = scsi_sense_asc(sense_datap); 17313 uint8_t ascq = scsi_sense_ascq(sense_datap); 17314 17315 ASSERT(un != NULL); 17316 ASSERT(mutex_owned(SD_MUTEX(un))); 17317 ASSERT(bp != NULL); 17318 ASSERT(xp != NULL); 17319 ASSERT(pktp != NULL); 17320 17321 si.ssi_severity = SCSI_ERR_FATAL; 17322 si.ssi_pfa_flag = FALSE; 17323 17324 /* 17325 * Update error stats after first NOT READY error. Disks may have 17326 * been powered down and may need to be restarted. For CDROMs, 17327 * report NOT READY errors only if media is present. 17328 */ 17329 if ((ISCD(un) && (asc == 0x3A)) || 17330 (xp->xb_nr_retry_count > 0)) { 17331 SD_UPDATE_ERRSTATS(un, sd_harderrs); 17332 SD_UPDATE_ERRSTATS(un, sd_rq_ntrdy_err); 17333 } 17334 17335 /* 17336 * Just fail if the "not ready" retry limit has been reached. 17337 */ 17338 if (xp->xb_nr_retry_count >= un->un_notready_retry_count) { 17339 /* Special check for error message printing for removables. */ 17340 if (un->un_f_has_removable_media && (asc == 0x04) && 17341 (ascq >= 0x04)) { 17342 si.ssi_severity = SCSI_ERR_ALL; 17343 } 17344 goto fail_command; 17345 } 17346 17347 /* 17348 * Check the ASC and ASCQ in the sense data as needed, to determine 17349 * what to do. 17350 */ 17351 switch (asc) { 17352 case 0x04: /* LOGICAL UNIT NOT READY */ 17353 /* 17354 * disk drives that don't spin up result in a very long delay 17355 * in format without warning messages. We will log a message 17356 * if the error level is set to verbose. 17357 */ 17358 if (sd_error_level < SCSI_ERR_RETRYABLE) { 17359 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 17360 "logical unit not ready, resetting disk\n"); 17361 } 17362 17363 /* 17364 * There are different requirements for CDROMs and disks for 17365 * the number of retries. If a CD-ROM is giving this, it is 17366 * probably reading TOC and is in the process of getting 17367 * ready, so we should keep on trying for a long time to make 17368 * sure that all types of media are taken in account (for 17369 * some media the drive takes a long time to read TOC). For 17370 * disks we do not want to retry this too many times as this 17371 * can cause a long hang in format when the drive refuses to 17372 * spin up (a very common failure). 17373 */ 17374 switch (ascq) { 17375 case 0x00: /* LUN NOT READY, CAUSE NOT REPORTABLE */ 17376 /* 17377 * Disk drives frequently refuse to spin up which 17378 * results in a very long hang in format without 17379 * warning messages. 17380 * 17381 * Note: This code preserves the legacy behavior of 17382 * comparing xb_nr_retry_count against zero for fibre 17383 * channel targets instead of comparing against the 17384 * un_reset_retry_count value. The reason for this 17385 * discrepancy has been so utterly lost beneath the 17386 * Sands of Time that even Indiana Jones could not 17387 * find it. 
17388 */ 17389 if (un->un_f_is_fibre == TRUE) { 17390 if (((sd_level_mask & SD_LOGMASK_DIAG) || 17391 (xp->xb_nr_retry_count > 0)) && 17392 (un->un_startstop_timeid == NULL)) { 17393 scsi_log(SD_DEVINFO(un), sd_label, 17394 CE_WARN, "logical unit not ready, " 17395 "resetting disk\n"); 17396 sd_reset_target(un, pktp); 17397 } 17398 } else { 17399 if (((sd_level_mask & SD_LOGMASK_DIAG) || 17400 (xp->xb_nr_retry_count > 17401 un->un_reset_retry_count)) && 17402 (un->un_startstop_timeid == NULL)) { 17403 scsi_log(SD_DEVINFO(un), sd_label, 17404 CE_WARN, "logical unit not ready, " 17405 "resetting disk\n"); 17406 sd_reset_target(un, pktp); 17407 } 17408 } 17409 break; 17410 17411 case 0x01: /* LUN IS IN PROCESS OF BECOMING READY */ 17412 /* 17413 * If the target is in the process of becoming 17414 * ready, just proceed with the retry. This can 17415 * happen with CD-ROMs that take a long time to 17416 * read TOC after a power cycle or reset. 17417 */ 17418 goto do_retry; 17419 17420 case 0x02: /* LUN NOT READY, INITIALIZING CMD REQUIRED */ 17421 break; 17422 17423 case 0x03: /* LUN NOT READY, MANUAL INTERVENTION REQUIRED */ 17424 /* 17425 * Retries cannot help here so just fail right away. 17426 */ 17427 goto fail_command; 17428 17429 case 0x88: 17430 /* 17431 * Vendor-unique code for T3/T4: it indicates a 17432 * path problem in a multipathed config, but as far as 17433 * the target driver is concerned it equates to a fatal 17434 * error, so we should just fail the command right away 17435 * (without printing anything to the console). If this 17436 * is not a T3/T4, fall thru to the default recovery 17437 * action. 17438 * T3/T4 is FC only; no need to check is_fibre 17439 */ 17440 if (SD_IS_T3(un) || SD_IS_T4(un)) { 17441 sd_return_failed_command(un, bp, EIO); 17442 return; 17443 } 17444 /* FALLTHRU */ 17445 17446 case 0x04: /* LUN NOT READY, FORMAT IN PROGRESS */ 17447 case 0x05: /* LUN NOT READY, REBUILD IN PROGRESS */ 17448 case 0x06: /* LUN NOT READY, RECALCULATION IN PROGRESS */ 17449 case 0x07: /* LUN NOT READY, OPERATION IN PROGRESS */ 17450 case 0x08: /* LUN NOT READY, LONG WRITE IN PROGRESS */ 17451 default: /* Possible future codes in SCSI spec? */ 17452 /* 17453 * For removable-media devices, do not retry if 17454 * ASCQ > 2 as these result mostly from USCSI commands 17455 * on MMC devices issued to check status of an 17456 * operation initiated in immediate mode. Also for 17457 * ASCQ >= 4 do not print console messages as these 17458 * mainly represent a user-initiated operation 17459 * instead of a system failure. 17460 */ 17461 if (un->un_f_has_removable_media) { 17462 si.ssi_severity = SCSI_ERR_ALL; 17463 goto fail_command; 17464 } 17465 break; 17466 } 17467 17468 /* 17469 * As part of our recovery attempt for the NOT READY 17470 * condition, we issue a START STOP UNIT command. However 17471 * we want to wait for a short delay before attempting this 17472 * as there may still be more commands coming back from the 17473 * target with the check condition. To do this we use 17474 * timeout(9F) to call sd_start_stop_unit_callback() after 17475 * the delay interval expires. (sd_start_stop_unit_callback() 17476 * dispatches sd_start_stop_unit_task(), which will issue 17477 * the actual START STOP UNIT command.) The delay interval 17478 * is one-half of the delay that we will use to retry the 17479 * command that generated the NOT READY condition.
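 *
 * As an illustrative sketch of the deferred recovery sequence (the
 * function names are the ones used in the code below; the task
 * itself is defined elsewhere in this file):
 *
 *	timeout(9F) fires after un_busy_timeout / 2
 *	  -> sd_start_stop_unit_callback()
 *	       -> dispatches sd_start_stop_unit_task()
 *	            -> issues the START STOP UNIT command, then
 *	               restarts the retry of the original command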
17480 * 17481 * Note that we could just dispatch sd_start_stop_unit_task() 17482 * from here and allow it to sleep for the delay interval, 17483 * but then we would be tying up the taskq thread 17484 * uncesessarily for the duration of the delay. 17485 * 17486 * Do not issue the START STOP UNIT if the current command 17487 * is already a START STOP UNIT. 17488 */ 17489 if (pktp->pkt_cdbp[0] == SCMD_START_STOP) { 17490 break; 17491 } 17492 17493 /* 17494 * Do not schedule the timeout if one is already pending. 17495 */ 17496 if (un->un_startstop_timeid != NULL) { 17497 SD_INFO(SD_LOG_ERROR, un, 17498 "sd_sense_key_not_ready: restart already issued to" 17499 " %s%d\n", ddi_driver_name(SD_DEVINFO(un)), 17500 ddi_get_instance(SD_DEVINFO(un))); 17501 break; 17502 } 17503 17504 /* 17505 * Schedule the START STOP UNIT command, then queue the command 17506 * for a retry. 17507 * 17508 * Note: A timeout is not scheduled for this retry because we 17509 * want the retry to be serial with the START_STOP_UNIT. The 17510 * retry will be started when the START_STOP_UNIT is completed 17511 * in sd_start_stop_unit_task. 17512 */ 17513 un->un_startstop_timeid = timeout(sd_start_stop_unit_callback, 17514 un, un->un_busy_timeout / 2); 17515 xp->xb_nr_retry_count++; 17516 sd_set_retry_bp(un, bp, 0, kstat_waitq_enter); 17517 return; 17518 17519 case 0x05: /* LOGICAL UNIT DOES NOT RESPOND TO SELECTION */ 17520 if (sd_error_level < SCSI_ERR_RETRYABLE) { 17521 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 17522 "unit does not respond to selection\n"); 17523 } 17524 break; 17525 17526 case 0x3A: /* MEDIUM NOT PRESENT */ 17527 if (sd_error_level >= SCSI_ERR_FATAL) { 17528 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 17529 "Caddy not inserted in drive\n"); 17530 } 17531 17532 sr_ejected(un); 17533 un->un_mediastate = DKIO_EJECTED; 17534 /* The state has changed, inform the media watch routines */ 17535 cv_broadcast(&un->un_state_cv); 17536 /* Just fail if no media is present in the drive. */ 17537 goto fail_command; 17538 17539 default: 17540 if (sd_error_level < SCSI_ERR_RETRYABLE) { 17541 scsi_log(SD_DEVINFO(un), sd_label, CE_NOTE, 17542 "Unit not Ready. Additional sense code 0x%x\n", 17543 asc); 17544 } 17545 break; 17546 } 17547 17548 do_retry: 17549 17550 /* 17551 * Retry the command, as some targets may report NOT READY for 17552 * several seconds after being reset. 17553 */ 17554 xp->xb_nr_retry_count++; 17555 si.ssi_severity = SCSI_ERR_RETRYABLE; 17556 sd_retry_command(un, bp, SD_RETRIES_NOCHECK, sd_print_sense_msg, 17557 &si, EIO, un->un_busy_timeout, NULL); 17558 17559 return; 17560 17561 fail_command: 17562 sd_print_sense_msg(un, bp, &si, SD_NO_RETRY_ISSUED); 17563 sd_return_failed_command(un, bp, EIO); 17564 } 17565 17566 17567 17568 /* 17569 * Function: sd_sense_key_medium_or_hardware_error 17570 * 17571 * Description: Recovery actions for a SCSI "Medium Error" or "Hardware Error" 17572 * sense key. 
17573 * 17574 * Context: May be called from interrupt context 17575 */ 17576 17577 static void 17578 sd_sense_key_medium_or_hardware_error(struct sd_lun *un, 17579 uint8_t *sense_datap, 17580 struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp) 17581 { 17582 struct sd_sense_info si; 17583 uint8_t sense_key = scsi_sense_key(sense_datap); 17584 uint8_t asc = scsi_sense_asc(sense_datap); 17585 17586 ASSERT(un != NULL); 17587 ASSERT(mutex_owned(SD_MUTEX(un))); 17588 ASSERT(bp != NULL); 17589 ASSERT(xp != NULL); 17590 ASSERT(pktp != NULL); 17591 17592 si.ssi_severity = SCSI_ERR_FATAL; 17593 si.ssi_pfa_flag = FALSE; 17594 17595 if (sense_key == KEY_MEDIUM_ERROR) { 17596 SD_UPDATE_ERRSTATS(un, sd_rq_media_err); 17597 } 17598 17599 SD_UPDATE_ERRSTATS(un, sd_harderrs); 17600 17601 if ((un->un_reset_retry_count != 0) && 17602 (xp->xb_retry_count == un->un_reset_retry_count)) { 17603 mutex_exit(SD_MUTEX(un)); 17604 /* Do NOT do a RESET_ALL here: too intrusive. (4112858) */ 17605 if (un->un_f_allow_bus_device_reset == TRUE) { 17606 17607 boolean_t try_resetting_target = B_TRUE; 17608 17609 /* 17610 * We need to be able to handle specific ASC when we are 17611 * handling a KEY_HARDWARE_ERROR. In particular 17612 * taking the default action of resetting the target may 17613 * not be the appropriate way to attempt recovery. 17614 * Resetting a target because of a single LUN failure 17615 * victimizes all LUNs on that target. 17616 * 17617 * This is true for the LSI arrays, if an LSI 17618 * array controller returns an ASC of 0x84 (LUN Dead) we 17619 * should trust it. 17620 */ 17621 17622 if (sense_key == KEY_HARDWARE_ERROR) { 17623 switch (asc) { 17624 case 0x84: 17625 if (SD_IS_LSI(un)) { 17626 try_resetting_target = B_FALSE; 17627 } 17628 break; 17629 default: 17630 break; 17631 } 17632 } 17633 17634 if (try_resetting_target == B_TRUE) { 17635 int reset_retval = 0; 17636 if (un->un_f_lun_reset_enabled == TRUE) { 17637 SD_TRACE(SD_LOG_IO_CORE, un, 17638 "sd_sense_key_medium_or_hardware_" 17639 "error: issuing RESET_LUN\n"); 17640 reset_retval = 17641 scsi_reset(SD_ADDRESS(un), 17642 RESET_LUN); 17643 } 17644 if (reset_retval == 0) { 17645 SD_TRACE(SD_LOG_IO_CORE, un, 17646 "sd_sense_key_medium_or_hardware_" 17647 "error: issuing RESET_TARGET\n"); 17648 (void) scsi_reset(SD_ADDRESS(un), 17649 RESET_TARGET); 17650 } 17651 } 17652 } 17653 mutex_enter(SD_MUTEX(un)); 17654 } 17655 17656 /* 17657 * This really ought to be a fatal error, but we will retry anyway 17658 * as some drives report this as a spurious error. 17659 */ 17660 sd_retry_command(un, bp, SD_RETRIES_STANDARD, sd_print_sense_msg, 17661 &si, EIO, (clock_t)0, NULL); 17662 } 17663 17664 17665 17666 /* 17667 * Function: sd_sense_key_illegal_request 17668 * 17669 * Description: Recovery actions for a SCSI "Illegal Request" sense key. 
17670 * 17671 * Context: May be called from interrupt context 17672 */ 17673 17674 static void 17675 sd_sense_key_illegal_request(struct sd_lun *un, struct buf *bp, 17676 struct sd_xbuf *xp, struct scsi_pkt *pktp) 17677 { 17678 struct sd_sense_info si; 17679 17680 ASSERT(un != NULL); 17681 ASSERT(mutex_owned(SD_MUTEX(un))); 17682 ASSERT(bp != NULL); 17683 ASSERT(xp != NULL); 17684 ASSERT(pktp != NULL); 17685 17686 SD_UPDATE_ERRSTATS(un, sd_rq_illrq_err); 17687 17688 si.ssi_severity = SCSI_ERR_INFO; 17689 si.ssi_pfa_flag = FALSE; 17690 17691 /* Pointless to retry if the target thinks it's an illegal request */ 17692 sd_print_sense_msg(un, bp, &si, SD_NO_RETRY_ISSUED); 17693 sd_return_failed_command(un, bp, EIO); 17694 } 17695 17696 17697 17698 17699 /* 17700 * Function: sd_sense_key_unit_attention 17701 * 17702 * Description: Recovery actions for a SCSI "Unit Attention" sense key. 17703 * 17704 * Context: May be called from interrupt context 17705 */ 17706 17707 static void 17708 sd_sense_key_unit_attention(struct sd_lun *un, 17709 uint8_t *sense_datap, 17710 struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp) 17711 { 17712 /* 17713 * For UNIT ATTENTION we allow retries for one minute. Devices 17714 * like Sonoma can return UNIT ATTENTION close to a minute 17715 * under certain conditions. 17716 */ 17717 int retry_check_flag = SD_RETRIES_UA; 17718 boolean_t kstat_updated = B_FALSE; 17719 struct sd_sense_info si; 17720 uint8_t asc = scsi_sense_asc(sense_datap); 17721 uint8_t ascq = scsi_sense_ascq(sense_datap); 17722 17723 ASSERT(un != NULL); 17724 ASSERT(mutex_owned(SD_MUTEX(un))); 17725 ASSERT(bp != NULL); 17726 ASSERT(xp != NULL); 17727 ASSERT(pktp != NULL); 17728 17729 si.ssi_severity = SCSI_ERR_INFO; 17730 si.ssi_pfa_flag = FALSE; 17731 17732 17733 switch (asc) { 17734 case 0x5D: /* FAILURE PREDICTION THRESHOLD EXCEEDED */ 17735 if (sd_report_pfa != 0) { 17736 SD_UPDATE_ERRSTATS(un, sd_rq_pfa_err); 17737 si.ssi_pfa_flag = TRUE; 17738 retry_check_flag = SD_RETRIES_STANDARD; 17739 goto do_retry; 17740 } 17741 17742 break; 17743 17744 case 0x29: /* POWER ON, RESET, OR BUS DEVICE RESET OCCURRED */ 17745 if ((un->un_resvd_status & SD_RESERVE) == SD_RESERVE) { 17746 un->un_resvd_status |= 17747 (SD_LOST_RESERVE | SD_WANT_RESERVE); 17748 } 17749 #ifdef _LP64 17750 if (un->un_blockcount + 1 > SD_GROUP1_MAX_ADDRESS) { 17751 if (taskq_dispatch(sd_tq, sd_reenable_dsense_task, 17752 un, KM_NOSLEEP) == 0) { 17753 /* 17754 * If we can't dispatch the task we'll just 17755 * live without descriptor sense. We can 17756 * try again on the next "unit attention" 17757 */ 17758 SD_ERROR(SD_LOG_ERROR, un, 17759 "sd_sense_key_unit_attention: " 17760 "Could not dispatch " 17761 "sd_reenable_dsense_task\n"); 17762 } 17763 } 17764 #endif /* _LP64 */ 17765 /* FALLTHRU */ 17766 17767 case 0x28: /* NOT READY TO READY CHANGE, MEDIUM MAY HAVE CHANGED */ 17768 if (!un->un_f_has_removable_media) { 17769 break; 17770 } 17771 17772 /* 17773 * When we get a unit attention from a removable-media device, 17774 * it may be in a state that will take a long time to recover 17775 * (e.g., from a reset). Since we are executing in interrupt 17776 * context here, we cannot wait around for the device to come 17777 * back. So hand this command off to sd_media_change_task() 17778 * for deferred processing under taskq thread context. (Note 17779 * that the command still may be failed if a problem is 17780 * encountered at a later time.) 
17781 */ 17782 if (taskq_dispatch(sd_tq, sd_media_change_task, pktp, 17783 KM_NOSLEEP) == 0) { 17784 /* 17785 * Cannot dispatch the request so fail the command. 17786 */ 17787 SD_UPDATE_ERRSTATS(un, sd_harderrs); 17788 SD_UPDATE_ERRSTATS(un, sd_rq_nodev_err); 17789 si.ssi_severity = SCSI_ERR_FATAL; 17790 sd_print_sense_msg(un, bp, &si, SD_NO_RETRY_ISSUED); 17791 sd_return_failed_command(un, bp, EIO); 17792 } 17793 17794 /* 17795 * If failed to dispatch sd_media_change_task(), we already 17796 * updated kstat. If succeed to dispatch sd_media_change_task(), 17797 * we should update kstat later if it encounters an error. So, 17798 * we update kstat_updated flag here. 17799 */ 17800 kstat_updated = B_TRUE; 17801 17802 /* 17803 * Either the command has been successfully dispatched to a 17804 * task Q for retrying, or the dispatch failed. In either case 17805 * do NOT retry again by calling sd_retry_command. This sets up 17806 * two retries of the same command and when one completes and 17807 * frees the resources the other will access freed memory, 17808 * a bad thing. 17809 */ 17810 return; 17811 17812 default: 17813 break; 17814 } 17815 17816 /* 17817 * ASC ASCQ 17818 * 2A 09 Capacity data has changed 17819 * 2A 01 Mode parameters changed 17820 * 3F 0E Reported luns data has changed 17821 * Arrays that support logical unit expansion should report 17822 * capacity changes(2Ah/09). Mode parameters changed and 17823 * reported luns data has changed are the approximation. 17824 */ 17825 if (((asc == 0x2a) && (ascq == 0x09)) || 17826 ((asc == 0x2a) && (ascq == 0x01)) || 17827 ((asc == 0x3f) && (ascq == 0x0e))) { 17828 if (taskq_dispatch(sd_tq, sd_target_change_task, un, 17829 KM_NOSLEEP) == 0) { 17830 SD_ERROR(SD_LOG_ERROR, un, 17831 "sd_sense_key_unit_attention: " 17832 "Could not dispatch sd_target_change_task\n"); 17833 } 17834 } 17835 17836 /* 17837 * Update kstat if we haven't done that. 17838 */ 17839 if (!kstat_updated) { 17840 SD_UPDATE_ERRSTATS(un, sd_harderrs); 17841 SD_UPDATE_ERRSTATS(un, sd_rq_nodev_err); 17842 } 17843 17844 do_retry: 17845 sd_retry_command(un, bp, retry_check_flag, sd_print_sense_msg, &si, 17846 EIO, SD_UA_RETRY_DELAY, NULL); 17847 } 17848 17849 17850 17851 /* 17852 * Function: sd_sense_key_fail_command 17853 * 17854 * Description: Use to fail a command when we don't like the sense key that 17855 * was returned. 17856 * 17857 * Context: May be called from interrupt context 17858 */ 17859 17860 static void 17861 sd_sense_key_fail_command(struct sd_lun *un, struct buf *bp, 17862 struct sd_xbuf *xp, struct scsi_pkt *pktp) 17863 { 17864 struct sd_sense_info si; 17865 17866 ASSERT(un != NULL); 17867 ASSERT(mutex_owned(SD_MUTEX(un))); 17868 ASSERT(bp != NULL); 17869 ASSERT(xp != NULL); 17870 ASSERT(pktp != NULL); 17871 17872 si.ssi_severity = SCSI_ERR_FATAL; 17873 si.ssi_pfa_flag = FALSE; 17874 17875 sd_print_sense_msg(un, bp, &si, SD_NO_RETRY_ISSUED); 17876 sd_return_failed_command(un, bp, EIO); 17877 } 17878 17879 17880 17881 /* 17882 * Function: sd_sense_key_blank_check 17883 * 17884 * Description: Recovery actions for a SCSI "Blank Check" sense key. 17885 * Has no monetary connotation. 
17886 * 17887 * Context: May be called from interrupt context 17888 */ 17889 17890 static void 17891 sd_sense_key_blank_check(struct sd_lun *un, struct buf *bp, 17892 struct sd_xbuf *xp, struct scsi_pkt *pktp) 17893 { 17894 struct sd_sense_info si; 17895 17896 ASSERT(un != NULL); 17897 ASSERT(mutex_owned(SD_MUTEX(un))); 17898 ASSERT(bp != NULL); 17899 ASSERT(xp != NULL); 17900 ASSERT(pktp != NULL); 17901 17902 /* 17903 * Blank check is not fatal for removable devices, therefore 17904 * it does not require a console message. 17905 */ 17906 si.ssi_severity = (un->un_f_has_removable_media) ? SCSI_ERR_ALL : 17907 SCSI_ERR_FATAL; 17908 si.ssi_pfa_flag = FALSE; 17909 17910 sd_print_sense_msg(un, bp, &si, SD_NO_RETRY_ISSUED); 17911 sd_return_failed_command(un, bp, EIO); 17912 } 17913 17914 17915 17916 17917 /* 17918 * Function: sd_sense_key_aborted_command 17919 * 17920 * Description: Recovery actions for a SCSI "Aborted Command" sense key. 17921 * 17922 * Context: May be called from interrupt context 17923 */ 17924 17925 static void 17926 sd_sense_key_aborted_command(struct sd_lun *un, struct buf *bp, 17927 struct sd_xbuf *xp, struct scsi_pkt *pktp) 17928 { 17929 struct sd_sense_info si; 17930 17931 ASSERT(un != NULL); 17932 ASSERT(mutex_owned(SD_MUTEX(un))); 17933 ASSERT(bp != NULL); 17934 ASSERT(xp != NULL); 17935 ASSERT(pktp != NULL); 17936 17937 si.ssi_severity = SCSI_ERR_FATAL; 17938 si.ssi_pfa_flag = FALSE; 17939 17940 SD_UPDATE_ERRSTATS(un, sd_harderrs); 17941 17942 /* 17943 * This really ought to be a fatal error, but we will retry anyway 17944 * as some drives report this as a spurious error. 17945 */ 17946 sd_retry_command(un, bp, SD_RETRIES_STANDARD, sd_print_sense_msg, 17947 &si, EIO, drv_usectohz(100000), NULL); 17948 } 17949 17950 17951 17952 /* 17953 * Function: sd_sense_key_default 17954 * 17955 * Description: Default recovery action for several SCSI sense keys (basically 17956 * attempts a retry). 17957 * 17958 * Context: May be called from interrupt context 17959 */ 17960 17961 static void 17962 sd_sense_key_default(struct sd_lun *un, 17963 uint8_t *sense_datap, 17964 struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp) 17965 { 17966 struct sd_sense_info si; 17967 uint8_t sense_key = scsi_sense_key(sense_datap); 17968 17969 ASSERT(un != NULL); 17970 ASSERT(mutex_owned(SD_MUTEX(un))); 17971 ASSERT(bp != NULL); 17972 ASSERT(xp != NULL); 17973 ASSERT(pktp != NULL); 17974 17975 SD_UPDATE_ERRSTATS(un, sd_harderrs); 17976 17977 /* 17978 * Undecoded sense key. Attempt retries and hope that will fix 17979 * the problem. Otherwise, we're dead. 17980 */ 17981 if ((pktp->pkt_flags & FLAG_SILENT) == 0) { 17982 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 17983 "Unhandled Sense Key '%s'\n", sense_keys[sense_key]); 17984 } 17985 17986 si.ssi_severity = SCSI_ERR_FATAL; 17987 si.ssi_pfa_flag = FALSE; 17988 17989 sd_retry_command(un, bp, SD_RETRIES_STANDARD, sd_print_sense_msg, 17990 &si, EIO, (clock_t)0, NULL); 17991 } 17992 17993 17994 17995 /* 17996 * Function: sd_print_retry_msg 17997 * 17998 * Description: Print a message indicating the retry action being taken. 17999 * 18000 * Arguments: un - ptr to associated softstate 18001 * bp - ptr to buf(9S) for the command 18002 * arg - not used. 
18003 * flag - SD_IMMEDIATE_RETRY_ISSUED, SD_DELAYED_RETRY_ISSUED, 18004 * or SD_NO_RETRY_ISSUED 18005 * 18006 * Context: May be called from interrupt context 18007 */ 18008 /* ARGSUSED */ 18009 static void 18010 sd_print_retry_msg(struct sd_lun *un, struct buf *bp, void *arg, int flag) 18011 { 18012 struct sd_xbuf *xp; 18013 struct scsi_pkt *pktp; 18014 char *reasonp; 18015 char *msgp; 18016 18017 ASSERT(un != NULL); 18018 ASSERT(mutex_owned(SD_MUTEX(un))); 18019 ASSERT(bp != NULL); 18020 pktp = SD_GET_PKTP(bp); 18021 ASSERT(pktp != NULL); 18022 xp = SD_GET_XBUF(bp); 18023 ASSERT(xp != NULL); 18024 18025 ASSERT(!mutex_owned(&un->un_pm_mutex)); 18026 mutex_enter(&un->un_pm_mutex); 18027 if ((un->un_state == SD_STATE_SUSPENDED) || 18028 (SD_DEVICE_IS_IN_LOW_POWER(un)) || 18029 (pktp->pkt_flags & FLAG_SILENT)) { 18030 mutex_exit(&un->un_pm_mutex); 18031 goto update_pkt_reason; 18032 } 18033 mutex_exit(&un->un_pm_mutex); 18034 18035 /* 18036 * Suppress messages if they are all the same pkt_reason; with 18037 * TQ, many (up to 256) are returned with the same pkt_reason. 18038 * If we are in panic, then suppress the retry messages. 18039 */ 18040 switch (flag) { 18041 case SD_NO_RETRY_ISSUED: 18042 msgp = "giving up"; 18043 break; 18044 case SD_IMMEDIATE_RETRY_ISSUED: 18045 case SD_DELAYED_RETRY_ISSUED: 18046 if (ddi_in_panic() || (un->un_state == SD_STATE_OFFLINE) || 18047 ((pktp->pkt_reason == un->un_last_pkt_reason) && 18048 (sd_error_level != SCSI_ERR_ALL))) { 18049 return; 18050 } 18051 msgp = "retrying command"; 18052 break; 18053 default: 18054 goto update_pkt_reason; 18055 } 18056 18057 reasonp = (((pktp->pkt_statistics & STAT_PERR) != 0) ? "parity error" : 18058 scsi_rname(pktp->pkt_reason)); 18059 18060 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 18061 "SCSI transport failed: reason '%s': %s\n", reasonp, msgp); 18062 18063 update_pkt_reason: 18064 /* 18065 * Update un->un_last_pkt_reason with the value in pktp->pkt_reason. 18066 * This is to prevent multiple console messages for the same failure 18067 * condition. Note that un->un_last_pkt_reason is NOT restored if & 18068 * when the command is retried successfully because there still may be 18069 * more commands coming back with the same value of pktp->pkt_reason. 18070 */ 18071 if ((pktp->pkt_reason != CMD_CMPLT) || (xp->xb_retry_count == 0)) { 18072 un->un_last_pkt_reason = pktp->pkt_reason; 18073 } 18074 } 18075 18076 18077 /* 18078 * Function: sd_print_cmd_incomplete_msg 18079 * 18080 * Description: Message logging fn. for a SCSA "CMD_INCOMPLETE" pkt_reason. 18081 * 18082 * Arguments: un - ptr to associated softstate 18083 * bp - ptr to buf(9S) for the command 18084 * arg - passed to sd_print_retry_msg() 18085 * code - SD_IMMEDIATE_RETRY_ISSUED, SD_DELAYED_RETRY_ISSUED, 18086 * or SD_NO_RETRY_ISSUED 18087 * 18088 * Context: May be called from interrupt context 18089 */ 18090 18091 static void 18092 sd_print_cmd_incomplete_msg(struct sd_lun *un, struct buf *bp, void *arg, 18093 int code) 18094 { 18095 dev_info_t *dip; 18096 18097 ASSERT(un != NULL); 18098 ASSERT(mutex_owned(SD_MUTEX(un))); 18099 ASSERT(bp != NULL); 18100 18101 switch (code) { 18102 case SD_NO_RETRY_ISSUED: 18103 /* Command was failed. Someone turned off this target? 
*/ 18104 if (un->un_state != SD_STATE_OFFLINE) { 18105 /* 18106 * Suppress message if we are detaching and 18107 * device has been disconnected 18108 * Note that DEVI_IS_DEVICE_REMOVED is a consolidation 18109 * private interface and not part of the DDI 18110 */ 18111 dip = un->un_sd->sd_dev; 18112 if (!(DEVI_IS_DETACHING(dip) && 18113 DEVI_IS_DEVICE_REMOVED(dip))) { 18114 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 18115 "disk not responding to selection\n"); 18116 } 18117 New_state(un, SD_STATE_OFFLINE); 18118 } 18119 break; 18120 18121 case SD_DELAYED_RETRY_ISSUED: 18122 case SD_IMMEDIATE_RETRY_ISSUED: 18123 default: 18124 /* Command was successfully queued for retry */ 18125 sd_print_retry_msg(un, bp, arg, code); 18126 break; 18127 } 18128 } 18129 18130 18131 /* 18132 * Function: sd_pkt_reason_cmd_incomplete 18133 * 18134 * Description: Recovery actions for a SCSA "CMD_INCOMPLETE" pkt_reason. 18135 * 18136 * Context: May be called from interrupt context 18137 */ 18138 18139 static void 18140 sd_pkt_reason_cmd_incomplete(struct sd_lun *un, struct buf *bp, 18141 struct sd_xbuf *xp, struct scsi_pkt *pktp) 18142 { 18143 int flag = SD_RETRIES_STANDARD | SD_RETRIES_ISOLATE; 18144 18145 ASSERT(un != NULL); 18146 ASSERT(mutex_owned(SD_MUTEX(un))); 18147 ASSERT(bp != NULL); 18148 ASSERT(xp != NULL); 18149 ASSERT(pktp != NULL); 18150 18151 /* Do not do a reset if selection did not complete */ 18152 /* Note: Should this not just check the bit? */ 18153 if (pktp->pkt_state != STATE_GOT_BUS) { 18154 SD_UPDATE_ERRSTATS(un, sd_transerrs); 18155 sd_reset_target(un, pktp); 18156 } 18157 18158 /* 18159 * If the target was not successfully selected, then set 18160 * SD_RETRIES_FAILFAST to indicate that we lost communication 18161 * with the target, and further retries and/or commands are 18162 * likely to take a long time. 18163 */ 18164 if ((pktp->pkt_state & STATE_GOT_TARGET) == 0) { 18165 flag |= SD_RETRIES_FAILFAST; 18166 } 18167 18168 SD_UPDATE_RESERVATION_STATUS(un, pktp); 18169 18170 sd_retry_command(un, bp, flag, 18171 sd_print_cmd_incomplete_msg, NULL, EIO, SD_RESTART_TIMEOUT, NULL); 18172 } 18173 18174 18175 18176 /* 18177 * Function: sd_pkt_reason_cmd_tran_err 18178 * 18179 * Description: Recovery actions for a SCSA "CMD_TRAN_ERR" pkt_reason. 18180 * 18181 * Context: May be called from interrupt context 18182 */ 18183 18184 static void 18185 sd_pkt_reason_cmd_tran_err(struct sd_lun *un, struct buf *bp, 18186 struct sd_xbuf *xp, struct scsi_pkt *pktp) 18187 { 18188 ASSERT(un != NULL); 18189 ASSERT(mutex_owned(SD_MUTEX(un))); 18190 ASSERT(bp != NULL); 18191 ASSERT(xp != NULL); 18192 ASSERT(pktp != NULL); 18193 18194 /* 18195 * Do not reset if we got a parity error, or if 18196 * selection did not complete. 18197 */ 18198 SD_UPDATE_ERRSTATS(un, sd_harderrs); 18199 /* Note: Should this not just check the bit for pkt_state? */ 18200 if (((pktp->pkt_statistics & STAT_PERR) == 0) && 18201 (pktp->pkt_state != STATE_GOT_BUS)) { 18202 SD_UPDATE_ERRSTATS(un, sd_transerrs); 18203 sd_reset_target(un, pktp); 18204 } 18205 18206 SD_UPDATE_RESERVATION_STATUS(un, pktp); 18207 18208 sd_retry_command(un, bp, (SD_RETRIES_STANDARD | SD_RETRIES_ISOLATE), 18209 sd_print_retry_msg, NULL, EIO, SD_RESTART_TIMEOUT, NULL); 18210 } 18211 18212 18213 18214 /* 18215 * Function: sd_pkt_reason_cmd_reset 18216 * 18217 * Description: Recovery actions for a SCSA "CMD_RESET" pkt_reason. 
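 *		The reset was most likely caused by activity from another
 *		initiator or target on the bus, so the target is reset and
 *		the command is retried as a "victim" (SD_RETRIES_VICTIM).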
18218 * 18219 * Context: May be called from interrupt context 18220 */ 18221 18222 static void 18223 sd_pkt_reason_cmd_reset(struct sd_lun *un, struct buf *bp, 18224 struct sd_xbuf *xp, struct scsi_pkt *pktp) 18225 { 18226 ASSERT(un != NULL); 18227 ASSERT(mutex_owned(SD_MUTEX(un))); 18228 ASSERT(bp != NULL); 18229 ASSERT(xp != NULL); 18230 ASSERT(pktp != NULL); 18231 18232 /* The target may still be running the command, so try to reset. */ 18233 SD_UPDATE_ERRSTATS(un, sd_transerrs); 18234 sd_reset_target(un, pktp); 18235 18236 SD_UPDATE_RESERVATION_STATUS(un, pktp); 18237 18238 /* 18239 * If pkt_reason is CMD_RESET chances are that this pkt got 18240 * reset because another target on this bus caused it. The target 18241 * that caused it should get CMD_TIMEOUT with pkt_statistics 18242 * of STAT_TIMEOUT/STAT_DEV_RESET. 18243 */ 18244 18245 sd_retry_command(un, bp, (SD_RETRIES_VICTIM | SD_RETRIES_ISOLATE), 18246 sd_print_retry_msg, NULL, EIO, SD_RESTART_TIMEOUT, NULL); 18247 } 18248 18249 18250 18251 18252 /* 18253 * Function: sd_pkt_reason_cmd_aborted 18254 * 18255 * Description: Recovery actions for a SCSA "CMD_ABORTED" pkt_reason. 18256 * 18257 * Context: May be called from interrupt context 18258 */ 18259 18260 static void 18261 sd_pkt_reason_cmd_aborted(struct sd_lun *un, struct buf *bp, 18262 struct sd_xbuf *xp, struct scsi_pkt *pktp) 18263 { 18264 ASSERT(un != NULL); 18265 ASSERT(mutex_owned(SD_MUTEX(un))); 18266 ASSERT(bp != NULL); 18267 ASSERT(xp != NULL); 18268 ASSERT(pktp != NULL); 18269 18270 /* The target may still be running the command, so try to reset. */ 18271 SD_UPDATE_ERRSTATS(un, sd_transerrs); 18272 sd_reset_target(un, pktp); 18273 18274 SD_UPDATE_RESERVATION_STATUS(un, pktp); 18275 18276 /* 18277 * If pkt_reason is CMD_ABORTED chances are that this pkt got 18278 * aborted because another target on this bus caused it. The target 18279 * that caused it should get CMD_TIMEOUT with pkt_statistics 18280 * of STAT_TIMEOUT/STAT_DEV_RESET. 18281 */ 18282 18283 sd_retry_command(un, bp, (SD_RETRIES_VICTIM | SD_RETRIES_ISOLATE), 18284 sd_print_retry_msg, NULL, EIO, SD_RESTART_TIMEOUT, NULL); 18285 } 18286 18287 18288 18289 /* 18290 * Function: sd_pkt_reason_cmd_timeout 18291 * 18292 * Description: Recovery actions for a SCSA "CMD_TIMEOUT" pkt_reason. 18293 * 18294 * Context: May be called from interrupt context 18295 */ 18296 18297 static void 18298 sd_pkt_reason_cmd_timeout(struct sd_lun *un, struct buf *bp, 18299 struct sd_xbuf *xp, struct scsi_pkt *pktp) 18300 { 18301 ASSERT(un != NULL); 18302 ASSERT(mutex_owned(SD_MUTEX(un))); 18303 ASSERT(bp != NULL); 18304 ASSERT(xp != NULL); 18305 ASSERT(pktp != NULL); 18306 18307 18308 SD_UPDATE_ERRSTATS(un, sd_transerrs); 18309 sd_reset_target(un, pktp); 18310 18311 SD_UPDATE_RESERVATION_STATUS(un, pktp); 18312 18313 /* 18314 * A command timeout indicates that we could not establish 18315 * communication with the target, so set SD_RETRIES_FAILFAST 18316 * as further retries/commands are likely to take a long time. 18317 */ 18318 sd_retry_command(un, bp, 18319 (SD_RETRIES_STANDARD | SD_RETRIES_ISOLATE | SD_RETRIES_FAILFAST), 18320 sd_print_retry_msg, NULL, EIO, SD_RESTART_TIMEOUT, NULL); 18321 } 18322 18323 18324 18325 /* 18326 * Function: sd_pkt_reason_cmd_unx_bus_free 18327 * 18328 * Description: Recovery actions for a SCSA "CMD_UNX_BUS_FREE" pkt_reason. 
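 *		A retry message is printed only when the unexpected bus free
 *		was not accompanied by a parity error (STAT_PERR).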
18329 * 18330 * Context: May be called from interrupt context 18331 */ 18332 18333 static void 18334 sd_pkt_reason_cmd_unx_bus_free(struct sd_lun *un, struct buf *bp, 18335 struct sd_xbuf *xp, struct scsi_pkt *pktp) 18336 { 18337 void (*funcp)(struct sd_lun *un, struct buf *bp, void *arg, int code); 18338 18339 ASSERT(un != NULL); 18340 ASSERT(mutex_owned(SD_MUTEX(un))); 18341 ASSERT(bp != NULL); 18342 ASSERT(xp != NULL); 18343 ASSERT(pktp != NULL); 18344 18345 SD_UPDATE_ERRSTATS(un, sd_harderrs); 18346 SD_UPDATE_RESERVATION_STATUS(un, pktp); 18347 18348 funcp = ((pktp->pkt_statistics & STAT_PERR) == 0) ? 18349 sd_print_retry_msg : NULL; 18350 18351 sd_retry_command(un, bp, (SD_RETRIES_STANDARD | SD_RETRIES_ISOLATE), 18352 funcp, NULL, EIO, SD_RESTART_TIMEOUT, NULL); 18353 } 18354 18355 18356 /* 18357 * Function: sd_pkt_reason_cmd_tag_reject 18358 * 18359 * Description: Recovery actions for a SCSA "CMD_TAG_REJECT" pkt_reason. 18360 * 18361 * Context: May be called from interrupt context 18362 */ 18363 18364 static void 18365 sd_pkt_reason_cmd_tag_reject(struct sd_lun *un, struct buf *bp, 18366 struct sd_xbuf *xp, struct scsi_pkt *pktp) 18367 { 18368 ASSERT(un != NULL); 18369 ASSERT(mutex_owned(SD_MUTEX(un))); 18370 ASSERT(bp != NULL); 18371 ASSERT(xp != NULL); 18372 ASSERT(pktp != NULL); 18373 18374 SD_UPDATE_ERRSTATS(un, sd_harderrs); 18375 pktp->pkt_flags = 0; 18376 un->un_tagflags = 0; 18377 if (un->un_f_opt_queueing == TRUE) { 18378 un->un_throttle = min(un->un_throttle, 3); 18379 } else { 18380 un->un_throttle = 1; 18381 } 18382 mutex_exit(SD_MUTEX(un)); 18383 (void) scsi_ifsetcap(SD_ADDRESS(un), "tagged-qing", 0, 1); 18384 mutex_enter(SD_MUTEX(un)); 18385 18386 SD_UPDATE_RESERVATION_STATUS(un, pktp); 18387 18388 /* Legacy behavior not to check retry counts here. */ 18389 sd_retry_command(un, bp, (SD_RETRIES_NOCHECK | SD_RETRIES_ISOLATE), 18390 sd_print_retry_msg, NULL, EIO, SD_RESTART_TIMEOUT, NULL); 18391 } 18392 18393 18394 /* 18395 * Function: sd_pkt_reason_default 18396 * 18397 * Description: Default recovery actions for SCSA pkt_reason values that 18398 * do not have more explicit recovery actions. 18399 * 18400 * Context: May be called from interrupt context 18401 */ 18402 18403 static void 18404 sd_pkt_reason_default(struct sd_lun *un, struct buf *bp, 18405 struct sd_xbuf *xp, struct scsi_pkt *pktp) 18406 { 18407 ASSERT(un != NULL); 18408 ASSERT(mutex_owned(SD_MUTEX(un))); 18409 ASSERT(bp != NULL); 18410 ASSERT(xp != NULL); 18411 ASSERT(pktp != NULL); 18412 18413 SD_UPDATE_ERRSTATS(un, sd_transerrs); 18414 sd_reset_target(un, pktp); 18415 18416 SD_UPDATE_RESERVATION_STATUS(un, pktp); 18417 18418 sd_retry_command(un, bp, (SD_RETRIES_STANDARD | SD_RETRIES_ISOLATE), 18419 sd_print_retry_msg, NULL, EIO, SD_RESTART_TIMEOUT, NULL); 18420 } 18421 18422 18423 18424 /* 18425 * Function: sd_pkt_status_check_condition 18426 * 18427 * Description: Recovery actions for a "STATUS_CHECK" SCSI command status. 
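 *		If ARQ is disabled a REQUEST SENSE must be issued to retrieve
 *		the sense data; with ARQ the sense data arrived with the
 *		packet, so the failed command can simply be retried.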
18428 *
18429 * Context: May be called from interrupt context
18430 */
18431
18432 static void
18433 sd_pkt_status_check_condition(struct sd_lun *un, struct buf *bp,
18434 struct sd_xbuf *xp, struct scsi_pkt *pktp)
18435 {
18436 ASSERT(un != NULL);
18437 ASSERT(mutex_owned(SD_MUTEX(un)));
18438 ASSERT(bp != NULL);
18439 ASSERT(xp != NULL);
18440 ASSERT(pktp != NULL);
18441
18442 SD_TRACE(SD_LOG_IO, un, "sd_pkt_status_check_condition: "
18443 "entry: buf:0x%p xp:0x%p\n", bp, xp);
18444
18445 /*
18446 * If ARQ is NOT enabled, then issue a REQUEST SENSE command (the
18447 * command will be retried after the request sense). Otherwise, retry
18448 * the command. Note: we are issuing the request sense even though the
18449 * retry limit may have been reached for the failed command.
18450 */
18451 if (un->un_f_arq_enabled == FALSE) {
18452 SD_INFO(SD_LOG_IO_CORE, un, "sd_pkt_status_check_condition: "
18453 "no ARQ, sending request sense command\n");
18454 sd_send_request_sense_command(un, bp, pktp);
18455 } else {
18456 SD_INFO(SD_LOG_IO_CORE, un, "sd_pkt_status_check_condition: "
18457 "ARQ, retrying command\n");
18458 #if defined(__i386) || defined(__amd64)
18459 /*
18460 * The SD_RETRY_DELAY value needs to be adjusted here
18461 * whenever SD_RETRY_DELAY changes in sddef.h
18462 */
18463 sd_retry_command(un, bp, SD_RETRIES_STANDARD, NULL, NULL, EIO,
18464 un->un_f_is_fibre ? drv_usectohz(100000) : (clock_t)0,
18465 NULL);
18466 #else
18467 sd_retry_command(un, bp, SD_RETRIES_STANDARD, NULL, NULL,
18468 EIO, SD_RETRY_DELAY, NULL);
18469 #endif
18470 }
18471
18472 SD_TRACE(SD_LOG_IO_CORE, un, "sd_pkt_status_check_condition: exit\n");
18473 }
18474
18475
18476 /*
18477 * Function: sd_pkt_status_busy
18478 *
18479 * Description: Recovery actions for a "STATUS_BUSY" SCSI command status.
18480 *
18481 * Context: May be called from interrupt context
18482 */
18483
18484 static void
18485 sd_pkt_status_busy(struct sd_lun *un, struct buf *bp, struct sd_xbuf *xp,
18486 struct scsi_pkt *pktp)
18487 {
18488 ASSERT(un != NULL);
18489 ASSERT(mutex_owned(SD_MUTEX(un)));
18490 ASSERT(bp != NULL);
18491 ASSERT(xp != NULL);
18492 ASSERT(pktp != NULL);
18493
18494 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
18495 "sd_pkt_status_busy: entry\n");
18496
18497 /* If retries are exhausted, just fail the command. */
18498 if (xp->xb_retry_count >= un->un_busy_retry_count) {
18499 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
18500 "device busy too long\n");
18501 sd_return_failed_command(un, bp, EIO);
18502 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
18503 "sd_pkt_status_busy: exit\n");
18504 return;
18505 }
18506 xp->xb_retry_count++;
18507
18508 /*
18509 * Try to reset the target. However, we do not want to perform
18510 * more than one reset if the device continues to fail. The reset
18511 * will be performed when the retry count reaches the reset
18512 * threshold. This threshold should be set such that at least
18513 * one retry is issued before the reset is performed.
18514 */
18515 if (xp->xb_retry_count ==
18516 ((un->un_reset_retry_count < 2) ? 2 : un->un_reset_retry_count)) {
18517 int rval = 0;
18518 mutex_exit(SD_MUTEX(un));
18519 if (un->un_f_allow_bus_device_reset == TRUE) {
18520 /*
18521 * First try to reset the LUN; if we cannot then
18522 * try to reset the target.
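 * If both fail, RESET_ALL is attempted below before giving up.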
18523 */ 18524 if (un->un_f_lun_reset_enabled == TRUE) { 18525 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 18526 "sd_pkt_status_busy: RESET_LUN\n"); 18527 rval = scsi_reset(SD_ADDRESS(un), RESET_LUN); 18528 } 18529 if (rval == 0) { 18530 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 18531 "sd_pkt_status_busy: RESET_TARGET\n"); 18532 rval = scsi_reset(SD_ADDRESS(un), RESET_TARGET); 18533 } 18534 } 18535 if (rval == 0) { 18536 /* 18537 * If the RESET_LUN and/or RESET_TARGET failed, 18538 * try RESET_ALL 18539 */ 18540 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 18541 "sd_pkt_status_busy: RESET_ALL\n"); 18542 rval = scsi_reset(SD_ADDRESS(un), RESET_ALL); 18543 } 18544 mutex_enter(SD_MUTEX(un)); 18545 if (rval == 0) { 18546 /* 18547 * The RESET_LUN, RESET_TARGET, and/or RESET_ALL failed. 18548 * At this point we give up & fail the command. 18549 */ 18550 sd_return_failed_command(un, bp, EIO); 18551 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 18552 "sd_pkt_status_busy: exit (failed cmd)\n"); 18553 return; 18554 } 18555 } 18556 18557 /* 18558 * Retry the command. Be sure to specify SD_RETRIES_NOCHECK as 18559 * we have already checked the retry counts above. 18560 */ 18561 sd_retry_command(un, bp, SD_RETRIES_NOCHECK, NULL, NULL, 18562 EIO, un->un_busy_timeout, NULL); 18563 18564 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 18565 "sd_pkt_status_busy: exit\n"); 18566 } 18567 18568 18569 /* 18570 * Function: sd_pkt_status_reservation_conflict 18571 * 18572 * Description: Recovery actions for a "STATUS_RESERVATION_CONFLICT" SCSI 18573 * command status. 18574 * 18575 * Context: May be called from interrupt context 18576 */ 18577 18578 static void 18579 sd_pkt_status_reservation_conflict(struct sd_lun *un, struct buf *bp, 18580 struct sd_xbuf *xp, struct scsi_pkt *pktp) 18581 { 18582 ASSERT(un != NULL); 18583 ASSERT(mutex_owned(SD_MUTEX(un))); 18584 ASSERT(bp != NULL); 18585 ASSERT(xp != NULL); 18586 ASSERT(pktp != NULL); 18587 18588 /* 18589 * If the command was PERSISTENT_RESERVATION_[IN|OUT] then reservation 18590 * conflict could be due to various reasons like incorrect keys, not 18591 * registered or not reserved etc. So, we return EACCES to the caller. 18592 */ 18593 if (un->un_reservation_type == SD_SCSI3_RESERVATION) { 18594 int cmd = SD_GET_PKT_OPCODE(pktp); 18595 if ((cmd == SCMD_PERSISTENT_RESERVE_IN) || 18596 (cmd == SCMD_PERSISTENT_RESERVE_OUT)) { 18597 sd_return_failed_command(un, bp, EACCES); 18598 return; 18599 } 18600 } 18601 18602 un->un_resvd_status |= SD_RESERVATION_CONFLICT; 18603 18604 if ((un->un_resvd_status & SD_FAILFAST) != 0) { 18605 if (sd_failfast_enable != 0) { 18606 /* By definition, we must panic here.... */ 18607 sd_panic_for_res_conflict(un); 18608 /*NOTREACHED*/ 18609 } 18610 SD_ERROR(SD_LOG_IO, un, 18611 "sd_handle_resv_conflict: Disk Reserved\n"); 18612 sd_return_failed_command(un, bp, EACCES); 18613 return; 18614 } 18615 18616 /* 18617 * 1147670: retry only if sd_retry_on_reservation_conflict 18618 * property is set (default is 1). Retries will not succeed 18619 * on a disk reserved by another initiator. HA systems 18620 * may reset this via sd.conf to avoid these retries. 18621 * 18622 * Note: The legacy return code for this failure is EIO, however EACCES 18623 * seems more appropriate for a reservation conflict. 
18624 */ 18625 if (sd_retry_on_reservation_conflict == 0) { 18626 SD_ERROR(SD_LOG_IO, un, 18627 "sd_handle_resv_conflict: Device Reserved\n"); 18628 sd_return_failed_command(un, bp, EIO); 18629 return; 18630 } 18631 18632 /* 18633 * Retry the command if we can. 18634 * 18635 * Note: The legacy return code for this failure is EIO, however EACCES 18636 * seems more appropriate for a reservation conflict. 18637 */ 18638 sd_retry_command(un, bp, SD_RETRIES_STANDARD, NULL, NULL, EIO, 18639 (clock_t)2, NULL); 18640 } 18641 18642 18643 18644 /* 18645 * Function: sd_pkt_status_qfull 18646 * 18647 * Description: Handle a QUEUE FULL condition from the target. This can 18648 * occur if the HBA does not handle the queue full condition. 18649 * (Basically this means third-party HBAs as Sun HBAs will 18650 * handle the queue full condition.) Note that if there are 18651 * some commands already in the transport, then the queue full 18652 * has occurred because the queue for this nexus is actually 18653 * full. If there are no commands in the transport, then the 18654 * queue full is resulting from some other initiator or lun 18655 * consuming all the resources at the target. 18656 * 18657 * Context: May be called from interrupt context 18658 */ 18659 18660 static void 18661 sd_pkt_status_qfull(struct sd_lun *un, struct buf *bp, 18662 struct sd_xbuf *xp, struct scsi_pkt *pktp) 18663 { 18664 ASSERT(un != NULL); 18665 ASSERT(mutex_owned(SD_MUTEX(un))); 18666 ASSERT(bp != NULL); 18667 ASSERT(xp != NULL); 18668 ASSERT(pktp != NULL); 18669 18670 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 18671 "sd_pkt_status_qfull: entry\n"); 18672 18673 /* 18674 * Just lower the QFULL throttle and retry the command. Note that 18675 * we do not limit the number of retries here. 18676 */ 18677 sd_reduce_throttle(un, SD_THROTTLE_QFULL); 18678 sd_retry_command(un, bp, SD_RETRIES_NOCHECK, NULL, NULL, 0, 18679 SD_RESTART_TIMEOUT, NULL); 18680 18681 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 18682 "sd_pkt_status_qfull: exit\n"); 18683 } 18684 18685 18686 /* 18687 * Function: sd_reset_target 18688 * 18689 * Description: Issue a scsi_reset(9F), with either RESET_LUN, 18690 * RESET_TARGET, or RESET_ALL. 18691 * 18692 * Context: May be called under interrupt context. 18693 */ 18694 18695 static void 18696 sd_reset_target(struct sd_lun *un, struct scsi_pkt *pktp) 18697 { 18698 int rval = 0; 18699 18700 ASSERT(un != NULL); 18701 ASSERT(mutex_owned(SD_MUTEX(un))); 18702 ASSERT(pktp != NULL); 18703 18704 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sd_reset_target: entry\n"); 18705 18706 /* 18707 * No need to reset if the transport layer has already done so. 
18708 */ 18709 if ((pktp->pkt_statistics & 18710 (STAT_BUS_RESET | STAT_DEV_RESET | STAT_ABORTED)) != 0) { 18711 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 18712 "sd_reset_target: no reset\n"); 18713 return; 18714 } 18715 18716 mutex_exit(SD_MUTEX(un)); 18717 18718 if (un->un_f_allow_bus_device_reset == TRUE) { 18719 if (un->un_f_lun_reset_enabled == TRUE) { 18720 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 18721 "sd_reset_target: RESET_LUN\n"); 18722 rval = scsi_reset(SD_ADDRESS(un), RESET_LUN); 18723 } 18724 if (rval == 0) { 18725 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 18726 "sd_reset_target: RESET_TARGET\n"); 18727 rval = scsi_reset(SD_ADDRESS(un), RESET_TARGET); 18728 } 18729 } 18730 18731 if (rval == 0) { 18732 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 18733 "sd_reset_target: RESET_ALL\n"); 18734 (void) scsi_reset(SD_ADDRESS(un), RESET_ALL); 18735 } 18736 18737 mutex_enter(SD_MUTEX(un)); 18738 18739 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sd_reset_target: exit\n"); 18740 } 18741 18742 /* 18743 * Function: sd_target_change_task 18744 * 18745 * Description: Handle dynamic target change 18746 * 18747 * Context: Executes in a taskq() thread context 18748 */ 18749 static void 18750 sd_target_change_task(void *arg) 18751 { 18752 struct sd_lun *un = arg; 18753 uint64_t capacity; 18754 diskaddr_t label_cap; 18755 uint_t lbasize; 18756 sd_ssc_t *ssc; 18757 18758 ASSERT(un != NULL); 18759 ASSERT(!mutex_owned(SD_MUTEX(un))); 18760 18761 if ((un->un_f_blockcount_is_valid == FALSE) || 18762 (un->un_f_tgt_blocksize_is_valid == FALSE)) { 18763 return; 18764 } 18765 18766 ssc = sd_ssc_init(un); 18767 18768 if (sd_send_scsi_READ_CAPACITY(ssc, &capacity, 18769 &lbasize, SD_PATH_DIRECT) != 0) { 18770 SD_ERROR(SD_LOG_ERROR, un, 18771 "sd_target_change_task: fail to read capacity\n"); 18772 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 18773 goto task_exit; 18774 } 18775 18776 mutex_enter(SD_MUTEX(un)); 18777 if (capacity <= un->un_blockcount) { 18778 mutex_exit(SD_MUTEX(un)); 18779 goto task_exit; 18780 } 18781 18782 sd_update_block_info(un, lbasize, capacity); 18783 mutex_exit(SD_MUTEX(un)); 18784 18785 /* 18786 * If lun is EFI labeled and lun capacity is greater than the 18787 * capacity contained in the label, log a sys event. 
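 * (The sysevent is EC_DEV_STATUS / ESC_DEV_DLE; see
 * sd_log_lun_expansion_event() below.)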
18788 */
18789 if (cmlb_efi_label_capacity(un->un_cmlbhandle, &label_cap,
18790 (void*)SD_PATH_DIRECT) == 0) {
18791 mutex_enter(SD_MUTEX(un));
18792 if (un->un_f_blockcount_is_valid &&
18793 un->un_blockcount > label_cap) {
18794 mutex_exit(SD_MUTEX(un));
18795 sd_log_lun_expansion_event(un, KM_SLEEP);
18796 } else {
18797 mutex_exit(SD_MUTEX(un));
18798 }
18799 }
18800
18801 task_exit:
18802 sd_ssc_fini(ssc);
18803 }
18804
18805 /*
18806 * Function: sd_log_lun_expansion_event
18807 *
18808 * Description: Log a lun expansion sysevent
18809 *
18810 * Context: Never called from interrupt context
18811 */
18812 static void
18813 sd_log_lun_expansion_event(struct sd_lun *un, int km_flag)
18814 {
18815 int err;
18816 char *path;
18817 nvlist_t *dle_attr_list;
18818
18819 /* Allocate and build sysevent attribute list */
18820 err = nvlist_alloc(&dle_attr_list, NV_UNIQUE_NAME_TYPE, km_flag);
18821 if (err != 0) {
18822 SD_ERROR(SD_LOG_ERROR, un,
18823 "sd_log_lun_expansion_event: fail to allocate space\n");
18824 return;
18825 }
18826
18827 path = kmem_alloc(MAXPATHLEN, km_flag);
18828 if (path == NULL) {
18829 nvlist_free(dle_attr_list);
18830 SD_ERROR(SD_LOG_ERROR, un,
18831 "sd_log_lun_expansion_event: fail to allocate space\n");
18832 return;
18833 }
18834 /*
18835 * Add path attribute to identify the lun.
18836 * We are using minor node 'a' as the sysevent attribute.
18837 */
18838 (void) snprintf(path, MAXPATHLEN, "/devices");
18839 (void) ddi_pathname(SD_DEVINFO(un), path + strlen(path));
18840 (void) snprintf(path + strlen(path), MAXPATHLEN - strlen(path),
18841 ":a");
18842
18843 err = nvlist_add_string(dle_attr_list, DEV_PHYS_PATH, path);
18844 if (err != 0) {
18845 nvlist_free(dle_attr_list);
18846 kmem_free(path, MAXPATHLEN);
18847 SD_ERROR(SD_LOG_ERROR, un,
18848 "sd_log_lun_expansion_event: fail to add attribute\n");
18849 return;
18850 }
18851
18852 /* Log dynamic lun expansion sysevent */
18853 err = ddi_log_sysevent(SD_DEVINFO(un), SUNW_VENDOR, EC_DEV_STATUS,
18854 ESC_DEV_DLE, dle_attr_list, NULL, km_flag);
18855 if (err != DDI_SUCCESS) {
18856 SD_ERROR(SD_LOG_ERROR, un,
18857 "sd_log_lun_expansion_event: fail to log sysevent\n");
18858 }
18859
18860 nvlist_free(dle_attr_list);
18861 kmem_free(path, MAXPATHLEN);
18862 }
18863
18864 /*
18865 * Function: sd_media_change_task
18866 *
18867 * Description: Recovery action for CDROM to become available.
18868 *
18869 * Context: Executes in a taskq() thread context
18870 */
18871
18872 static void
18873 sd_media_change_task(void *arg)
18874 {
18875 struct scsi_pkt *pktp = arg;
18876 struct sd_lun *un;
18877 struct buf *bp;
18878 struct sd_xbuf *xp;
18879 int err = 0;
18880 int retry_count = 0;
18881 int retry_limit = SD_UNIT_ATTENTION_RETRY/10;
18882 struct sd_sense_info si;
18883
18884 ASSERT(pktp != NULL);
18885 bp = (struct buf *)pktp->pkt_private;
18886 ASSERT(bp != NULL);
18887 xp = SD_GET_XBUF(bp);
18888 ASSERT(xp != NULL);
18889 un = SD_GET_UN(bp);
18890 ASSERT(un != NULL);
18891 ASSERT(!mutex_owned(SD_MUTEX(un)));
18892 ASSERT(un->un_f_monitor_media_state);
18893
18894 si.ssi_severity = SCSI_ERR_INFO;
18895 si.ssi_pfa_flag = FALSE;
18896
18897 /*
18898 * When a reset is issued on a CDROM, it takes a long time to
18899 * recover. The first few attempts to read capacity and other things
18900 * related to handling unit attention fail (with an ASC of 0x4 and
18901 * an ASCQ of 0x1). In that case we want to do enough retries and we
18902 * want to limit the retries in other cases of genuine failures like
18903 * no media in drive.
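 * (sd_handle_mchange() returns EAGAIN while the unit is becoming ready;
 * only then is the retry limit raised to the full SD_UNIT_ATTENTION_RETRY.)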
18904 */ 18905 while (retry_count++ < retry_limit) { 18906 if ((err = sd_handle_mchange(un)) == 0) { 18907 break; 18908 } 18909 if (err == EAGAIN) { 18910 retry_limit = SD_UNIT_ATTENTION_RETRY; 18911 } 18912 /* Sleep for 0.5 sec. & try again */ 18913 delay(drv_usectohz(500000)); 18914 } 18915 18916 /* 18917 * Dispatch (retry or fail) the original command here, 18918 * along with appropriate console messages.... 18919 * 18920 * Must grab the mutex before calling sd_retry_command, 18921 * sd_print_sense_msg and sd_return_failed_command. 18922 */ 18923 mutex_enter(SD_MUTEX(un)); 18924 if (err != SD_CMD_SUCCESS) { 18925 SD_UPDATE_ERRSTATS(un, sd_harderrs); 18926 SD_UPDATE_ERRSTATS(un, sd_rq_nodev_err); 18927 si.ssi_severity = SCSI_ERR_FATAL; 18928 sd_print_sense_msg(un, bp, &si, SD_NO_RETRY_ISSUED); 18929 sd_return_failed_command(un, bp, EIO); 18930 } else { 18931 sd_retry_command(un, bp, SD_RETRIES_NOCHECK, sd_print_sense_msg, 18932 &si, EIO, (clock_t)0, NULL); 18933 } 18934 mutex_exit(SD_MUTEX(un)); 18935 } 18936 18937 18938 18939 /* 18940 * Function: sd_handle_mchange 18941 * 18942 * Description: Perform geometry validation & other recovery when CDROM 18943 * has been removed from drive. 18944 * 18945 * Return Code: 0 for success 18946 * errno-type return code of either sd_send_scsi_DOORLOCK() or 18947 * sd_send_scsi_READ_CAPACITY() 18948 * 18949 * Context: Executes in a taskq() thread context 18950 */ 18951 18952 static int 18953 sd_handle_mchange(struct sd_lun *un) 18954 { 18955 uint64_t capacity; 18956 uint32_t lbasize; 18957 int rval; 18958 sd_ssc_t *ssc; 18959 18960 ASSERT(!mutex_owned(SD_MUTEX(un))); 18961 ASSERT(un->un_f_monitor_media_state); 18962 18963 ssc = sd_ssc_init(un); 18964 rval = sd_send_scsi_READ_CAPACITY(ssc, &capacity, &lbasize, 18965 SD_PATH_DIRECT_PRIORITY); 18966 18967 if (rval != 0) 18968 goto failed; 18969 18970 mutex_enter(SD_MUTEX(un)); 18971 sd_update_block_info(un, lbasize, capacity); 18972 18973 if (un->un_errstats != NULL) { 18974 struct sd_errstats *stp = 18975 (struct sd_errstats *)un->un_errstats->ks_data; 18976 stp->sd_capacity.value.ui64 = (uint64_t) 18977 ((uint64_t)un->un_blockcount * 18978 (uint64_t)un->un_tgt_blocksize); 18979 } 18980 18981 /* 18982 * Check if the media in the device is writable or not 18983 */ 18984 if (ISCD(un)) { 18985 sd_check_for_writable_cd(ssc, SD_PATH_DIRECT_PRIORITY); 18986 } 18987 18988 /* 18989 * Note: Maybe let the strategy/partitioning chain worry about getting 18990 * valid geometry. 18991 */ 18992 mutex_exit(SD_MUTEX(un)); 18993 cmlb_invalidate(un->un_cmlbhandle, (void *)SD_PATH_DIRECT_PRIORITY); 18994 18995 18996 if (cmlb_validate(un->un_cmlbhandle, 0, 18997 (void *)SD_PATH_DIRECT_PRIORITY) != 0) { 18998 sd_ssc_fini(ssc); 18999 return (EIO); 19000 } else { 19001 if (un->un_f_pkstats_enabled) { 19002 sd_set_pstats(un); 19003 SD_TRACE(SD_LOG_IO_PARTITION, un, 19004 "sd_handle_mchange: un:0x%p pstats created and " 19005 "set\n", un); 19006 } 19007 } 19008 19009 /* 19010 * Try to lock the door 19011 */ 19012 rval = sd_send_scsi_DOORLOCK(ssc, SD_REMOVAL_PREVENT, 19013 SD_PATH_DIRECT_PRIORITY); 19014 failed: 19015 if (rval != 0) 19016 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 19017 sd_ssc_fini(ssc); 19018 return (rval); 19019 } 19020 19021 19022 /* 19023 * Function: sd_send_scsi_DOORLOCK 19024 * 19025 * Description: Issue the scsi DOOR LOCK command 19026 * 19027 * Arguments: ssc - ssc contains pointer to driver soft state (unit) 19028 * structure for this target. 
19029 * flag - SD_REMOVAL_ALLOW 19030 * SD_REMOVAL_PREVENT 19031 * path_flag - SD_PATH_DIRECT to use the USCSI "direct" chain and 19032 * the normal command waitq, or SD_PATH_DIRECT_PRIORITY 19033 * to use the USCSI "direct" chain and bypass the normal 19034 * command waitq. SD_PATH_DIRECT_PRIORITY is used when this 19035 * command is issued as part of an error recovery action. 19036 * 19037 * Return Code: 0 - Success 19038 * errno return code from sd_ssc_send() 19039 * 19040 * Context: Can sleep. 19041 */ 19042 19043 static int 19044 sd_send_scsi_DOORLOCK(sd_ssc_t *ssc, int flag, int path_flag) 19045 { 19046 struct scsi_extended_sense sense_buf; 19047 union scsi_cdb cdb; 19048 struct uscsi_cmd ucmd_buf; 19049 int status; 19050 struct sd_lun *un; 19051 19052 ASSERT(ssc != NULL); 19053 un = ssc->ssc_un; 19054 ASSERT(un != NULL); 19055 ASSERT(!mutex_owned(SD_MUTEX(un))); 19056 19057 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_DOORLOCK: entry: un:0x%p\n", un); 19058 19059 /* already determined doorlock is not supported, fake success */ 19060 if (un->un_f_doorlock_supported == FALSE) { 19061 return (0); 19062 } 19063 19064 /* 19065 * If we are ejecting and see an SD_REMOVAL_PREVENT 19066 * ignore the command so we can complete the eject 19067 * operation. 19068 */ 19069 if (flag == SD_REMOVAL_PREVENT) { 19070 mutex_enter(SD_MUTEX(un)); 19071 if (un->un_f_ejecting == TRUE) { 19072 mutex_exit(SD_MUTEX(un)); 19073 return (EAGAIN); 19074 } 19075 mutex_exit(SD_MUTEX(un)); 19076 } 19077 19078 bzero(&cdb, sizeof (cdb)); 19079 bzero(&ucmd_buf, sizeof (ucmd_buf)); 19080 19081 cdb.scc_cmd = SCMD_DOORLOCK; 19082 cdb.cdb_opaque[4] = (uchar_t)flag; 19083 19084 ucmd_buf.uscsi_cdb = (char *)&cdb; 19085 ucmd_buf.uscsi_cdblen = CDB_GROUP0; 19086 ucmd_buf.uscsi_bufaddr = NULL; 19087 ucmd_buf.uscsi_buflen = 0; 19088 ucmd_buf.uscsi_rqbuf = (caddr_t)&sense_buf; 19089 ucmd_buf.uscsi_rqlen = sizeof (sense_buf); 19090 ucmd_buf.uscsi_flags = USCSI_RQENABLE | USCSI_SILENT; 19091 ucmd_buf.uscsi_timeout = 15; 19092 19093 SD_TRACE(SD_LOG_IO, un, 19094 "sd_send_scsi_DOORLOCK: returning sd_ssc_send\n"); 19095 19096 status = sd_ssc_send(ssc, &ucmd_buf, FKIOCTL, 19097 UIO_SYSSPACE, path_flag); 19098 19099 if (status == 0) 19100 sd_ssc_assessment(ssc, SD_FMT_STANDARD); 19101 19102 if ((status == EIO) && (ucmd_buf.uscsi_status == STATUS_CHECK) && 19103 (ucmd_buf.uscsi_rqstatus == STATUS_GOOD) && 19104 (scsi_sense_key((uint8_t *)&sense_buf) == KEY_ILLEGAL_REQUEST)) { 19105 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 19106 19107 /* fake success and skip subsequent doorlock commands */ 19108 un->un_f_doorlock_supported = FALSE; 19109 return (0); 19110 } 19111 19112 return (status); 19113 } 19114 19115 /* 19116 * Function: sd_send_scsi_READ_CAPACITY 19117 * 19118 * Description: This routine uses the scsi READ CAPACITY command to determine 19119 * the device capacity in number of blocks and the device native 19120 * block size. If this function returns a failure, then the 19121 * values in *capp and *lbap are undefined. If the capacity 19122 * returned is 0xffffffff then the lun is too large for a 19123 * normal READ CAPACITY command and the results of a 19124 * READ CAPACITY 16 will be used instead. 19125 * 19126 * Arguments: ssc - ssc contains ptr to soft state struct for the target 19127 * capp - ptr to unsigned 64-bit variable to receive the 19128 * capacity value from the command. 
19129 * lbap - ptr to unsigned 32-bit variable to receive the
19130 * block size value from the command
19131 * path_flag - SD_PATH_DIRECT to use the USCSI "direct" chain and
19132 * the normal command waitq, or SD_PATH_DIRECT_PRIORITY
19133 * to use the USCSI "direct" chain and bypass the normal
19134 * command waitq. SD_PATH_DIRECT_PRIORITY is used when this
19135 * command is issued as part of an error recovery action.
19136 *
19137 * Return Code: 0 - Success
19138 * EIO - IO error
19139 * EACCES - Reservation conflict detected
19140 * EAGAIN - Device is becoming ready
19141 * errno return code from sd_ssc_send()
19142 *
19143 * Context: Can sleep. Blocks until command completes.
19144 */
19145
19146 #define SD_CAPACITY_SIZE sizeof (struct scsi_capacity)
19147
19148 static int
19149 sd_send_scsi_READ_CAPACITY(sd_ssc_t *ssc, uint64_t *capp, uint32_t *lbap,
19150 int path_flag)
19151 {
19152 struct scsi_extended_sense sense_buf;
19153 struct uscsi_cmd ucmd_buf;
19154 union scsi_cdb cdb;
19155 uint32_t *capacity_buf;
19156 uint64_t capacity;
19157 uint32_t lbasize;
19158 int status;
19159 struct sd_lun *un;
19160
19161 ASSERT(ssc != NULL);
19162
19163 un = ssc->ssc_un;
19164 ASSERT(un != NULL);
19165 ASSERT(!mutex_owned(SD_MUTEX(un)));
19166 ASSERT(capp != NULL);
19167 ASSERT(lbap != NULL);
19168
19169 SD_TRACE(SD_LOG_IO, un,
19170 "sd_send_scsi_READ_CAPACITY: entry: un:0x%p\n", un);
19171
19172 /*
19173 * First send a READ_CAPACITY command to the target.
19174 * (This command is mandatory under SCSI-2.)
19175 *
19176 * Set up the CDB for the READ_CAPACITY command. The Partial
19177 * Medium Indicator bit is cleared. The address field must be
19178 * zero if the PMI bit is zero.
19179 */
19180 bzero(&cdb, sizeof (cdb));
19181 bzero(&ucmd_buf, sizeof (ucmd_buf));
19182
19183 capacity_buf = kmem_zalloc(SD_CAPACITY_SIZE, KM_SLEEP);
19184
19185 cdb.scc_cmd = SCMD_READ_CAPACITY;
19186
19187 ucmd_buf.uscsi_cdb = (char *)&cdb;
19188 ucmd_buf.uscsi_cdblen = CDB_GROUP1;
19189 ucmd_buf.uscsi_bufaddr = (caddr_t)capacity_buf;
19190 ucmd_buf.uscsi_buflen = SD_CAPACITY_SIZE;
19191 ucmd_buf.uscsi_rqbuf = (caddr_t)&sense_buf;
19192 ucmd_buf.uscsi_rqlen = sizeof (sense_buf);
19193 ucmd_buf.uscsi_flags = USCSI_RQENABLE | USCSI_READ | USCSI_SILENT;
19194 ucmd_buf.uscsi_timeout = 60;
19195
19196 status = sd_ssc_send(ssc, &ucmd_buf, FKIOCTL,
19197 UIO_SYSSPACE, path_flag);
19198
19199 switch (status) {
19200 case 0:
19201 /* Return failure if we did not get valid capacity data. */
19202 if (ucmd_buf.uscsi_resid != 0) {
19203 sd_ssc_set_info(ssc, SSC_FLAGS_INVALID_DATA,
19204 "sd_send_scsi_READ_CAPACITY received "
19205 "invalid capacity data");
19206 kmem_free(capacity_buf, SD_CAPACITY_SIZE);
19207 return (EIO);
19208 }
19209
19210 /*
19211 * Read capacity and block size from the READ CAPACITY 10 data.
19212 * This data may be adjusted later due to device specific
19213 * issues.
19214 *
19215 * According to the SCSI spec, the READ CAPACITY 10
19216 * command returns the following:
19217 *
19218 * bytes 0-3: Maximum logical block address available.
19219 * (MSB in byte:0 & LSB in byte:3)
19220 *
19221 * bytes 4-7: Block length in bytes
19222 * (MSB in byte:4 & LSB in byte:7)
19223 *
19224 */
19225 capacity = BE_32(capacity_buf[0]);
19226 lbasize = BE_32(capacity_buf[1]);
19227
19228 /*
19229 * Done with capacity_buf
19230 */
19231 kmem_free(capacity_buf, SD_CAPACITY_SIZE);
19232
19233 /*
19234 * if the reported capacity is set to all 0xf's, then
19235 * this disk is too large and requires SBC-2 commands.
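 * (0xffffffff is the READ CAPACITY 10 convention for a capacity that
 * cannot be represented in its 32-bit LBA field.)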
19236 * Reissue the request using READ CAPACITY 16.
19237 */
19238 if (capacity == 0xffffffff) {
19239 sd_ssc_assessment(ssc, SD_FMT_IGNORE);
19240 status = sd_send_scsi_READ_CAPACITY_16(ssc, &capacity,
19241 &lbasize, path_flag);
19242 if (status != 0) {
19243 return (status);
19244 }
19245 }
19246 break; /* Success! */
19247 case EIO:
19248 switch (ucmd_buf.uscsi_status) {
19249 case STATUS_RESERVATION_CONFLICT:
19250 status = EACCES;
19251 break;
19252 case STATUS_CHECK:
19253 /*
19254 * Check condition; look for ASC/ASCQ of 0x04/0x01
19255 * (LOGICAL UNIT IS IN PROCESS OF BECOMING READY)
19256 */
19257 if ((ucmd_buf.uscsi_rqstatus == STATUS_GOOD) &&
19258 (scsi_sense_asc((uint8_t *)&sense_buf) == 0x04) &&
19259 (scsi_sense_ascq((uint8_t *)&sense_buf) == 0x01)) {
19260 kmem_free(capacity_buf, SD_CAPACITY_SIZE);
19261 return (EAGAIN);
19262 }
19263 break;
19264 default:
19265 break;
19266 }
19267 /* FALLTHRU */
19268 default:
19269 kmem_free(capacity_buf, SD_CAPACITY_SIZE);
19270 return (status);
19271 }
19272
19273 /*
19274 * Some ATAPI CD-ROM drives report inaccurate LBA size values
19275 * (2352 and 0 are common) so for these devices always force the value
19276 * to 2048 as required by the ATAPI specs.
19277 */
19278 if ((un->un_f_cfg_is_atapi == TRUE) && (ISCD(un))) {
19279 lbasize = 2048;
19280 }
19281
19282 /*
19283 * Get the maximum LBA value from the READ CAPACITY data.
19284 * Here we assume that the Partial Medium Indicator (PMI) bit
19285 * was cleared when issuing the command. This means that the LBA
19286 * returned from the device is the LBA of the last logical block
19287 * on the logical unit. The actual logical block count will be
19288 * this value plus one.
19289 *
19290 * Currently the capacity is saved in terms of un->un_sys_blocksize,
19291 * so scale the capacity value to reflect this.
19292 */
19293 capacity = (capacity + 1) * (lbasize / un->un_sys_blocksize);
19294
19295 /*
19296 * Copy the values from the READ CAPACITY command into the space
19297 * provided by the caller.
19298 */
19299 *capp = capacity;
19300 *lbap = lbasize;
19301
19302 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_READ_CAPACITY: "
19303 "capacity:0x%llx lbasize:0x%x\n", capacity, lbasize);
19304
19305 /*
19306 * Both the lbasize and capacity from the device must be nonzero,
19307 * otherwise we assume that the values are not valid and return
19308 * failure to the caller. (4203735)
19309 */
19310 if ((capacity == 0) || (lbasize == 0)) {
19311 sd_ssc_set_info(ssc, SSC_FLAGS_INVALID_DATA,
19312 "sd_send_scsi_READ_CAPACITY received invalid value "
19313 "capacity %llu lbasize %d", capacity, lbasize);
19314 return (EIO);
19315 }
19316 sd_ssc_assessment(ssc, SD_FMT_STANDARD);
19317 return (0);
19318 }
19319
19320 /*
19321 * Function: sd_send_scsi_READ_CAPACITY_16
19322 *
19323 * Description: This routine uses the scsi READ CAPACITY 16 command to
19324 * determine the device capacity in number of blocks and the
19325 * device native block size. If this function returns a failure,
19326 * then the values in *capp and *lbap are undefined.
19327 * This routine should only be called by
19328 * sd_send_scsi_READ_CAPACITY, which will apply any device
19329 * specific adjustments to capacity and lbasize.
19330 *
19331 * Arguments: ssc - ssc contains ptr to soft state struct for the target
19332 * capp - ptr to unsigned 64-bit variable to receive the
19333 * capacity value from the command.
19334 * lbap - ptr to unsigned 32-bit variable to receive the
19335 * block size value from the command
19336 * path_flag - SD_PATH_DIRECT to use the USCSI "direct" chain and
19337 * the normal command waitq, or SD_PATH_DIRECT_PRIORITY
19338 * to use the USCSI "direct" chain and bypass the normal
19339 * command waitq. SD_PATH_DIRECT_PRIORITY is used when
19340 * this command is issued as part of an error recovery
19341 * action.
19342 *
19343 * Return Code: 0 - Success
19344 * EIO - IO error
19345 * EACCES - Reservation conflict detected
19346 * EAGAIN - Device is becoming ready
19347 * errno return code from sd_ssc_send()
19348 *
19349 * Context: Can sleep. Blocks until command completes.
19350 */
19351
19352 #define SD_CAPACITY_16_SIZE sizeof (struct scsi_capacity_16)
19353
19354 static int
19355 sd_send_scsi_READ_CAPACITY_16(sd_ssc_t *ssc, uint64_t *capp,
19356 uint32_t *lbap, int path_flag)
19357 {
19358 struct scsi_extended_sense sense_buf;
19359 struct uscsi_cmd ucmd_buf;
19360 union scsi_cdb cdb;
19361 uint64_t *capacity16_buf;
19362 uint64_t capacity;
19363 uint32_t lbasize;
19364 int status;
19365 struct sd_lun *un;
19366
19367 ASSERT(ssc != NULL);
19368
19369 un = ssc->ssc_un;
19370 ASSERT(un != NULL);
19371 ASSERT(!mutex_owned(SD_MUTEX(un)));
19372 ASSERT(capp != NULL);
19373 ASSERT(lbap != NULL);
19374
19375 SD_TRACE(SD_LOG_IO, un,
19376 "sd_send_scsi_READ_CAPACITY_16: entry: un:0x%p\n", un);
19377
19378 /*
19379 * First send a READ_CAPACITY_16 command to the target.
19380 *
19381 * Set up the CDB for the READ_CAPACITY_16 command. The Partial
19382 * Medium Indicator bit is cleared. The address field must be
19383 * zero if the PMI bit is zero.
19384 */
19385 bzero(&cdb, sizeof (cdb));
19386 bzero(&ucmd_buf, sizeof (ucmd_buf));
19387
19388 capacity16_buf = kmem_zalloc(SD_CAPACITY_16_SIZE, KM_SLEEP);
19389
19390 ucmd_buf.uscsi_cdb = (char *)&cdb;
19391 ucmd_buf.uscsi_cdblen = CDB_GROUP4;
19392 ucmd_buf.uscsi_bufaddr = (caddr_t)capacity16_buf;
19393 ucmd_buf.uscsi_buflen = SD_CAPACITY_16_SIZE;
19394 ucmd_buf.uscsi_rqbuf = (caddr_t)&sense_buf;
19395 ucmd_buf.uscsi_rqlen = sizeof (sense_buf);
19396 ucmd_buf.uscsi_flags = USCSI_RQENABLE | USCSI_READ | USCSI_SILENT;
19397 ucmd_buf.uscsi_timeout = 60;
19398
19399 /*
19400 * Read Capacity (16) is a Service Action In command. One
19401 * command byte (0x9E) is overloaded for multiple operations,
19402 * with the second CDB byte specifying the desired operation
19403 */
19404 cdb.scc_cmd = SCMD_SVC_ACTION_IN_G4;
19405 cdb.cdb_opaque[1] = SSVC_ACTION_READ_CAPACITY_G4;
19406
19407 /*
19408 * Fill in allocation length field
19409 */
19410 FORMG4COUNT(&cdb, ucmd_buf.uscsi_buflen);
19411
19412 status = sd_ssc_send(ssc, &ucmd_buf, FKIOCTL,
19413 UIO_SYSSPACE, path_flag);
19414
19415 switch (status) {
19416 case 0:
19417 /* Return failure if we did not get valid capacity data. */
19418 if (ucmd_buf.uscsi_resid > 20) {
19419 sd_ssc_set_info(ssc, SSC_FLAGS_INVALID_DATA,
19420 "sd_send_scsi_READ_CAPACITY_16 received "
19421 "invalid capacity data");
19422 kmem_free(capacity16_buf, SD_CAPACITY_16_SIZE);
19423 return (EIO);
19424 }
19425
19426 /*
19427 * Read capacity and block size from the READ CAPACITY 16 data.
19428 * This data may be adjusted later due to device specific
19429 * issues.
19430 *
19431 * According to the SCSI spec, the READ CAPACITY 16
19432 * command returns the following:
19433 *
19434 * bytes 0-7: Maximum logical block address available.
19435 * (MSB in byte:0 & LSB in byte:7)
19436 *
19437 * bytes 8-11: Block length in bytes
19438 * (MSB in byte:8 & LSB in byte:11)
19439 *
19440 */
19441 capacity = BE_64(capacity16_buf[0]);
19442 lbasize = BE_32(*(uint32_t *)&capacity16_buf[1]);
19443
19444 /*
19445 * Done with capacity16_buf
19446 */
19447 kmem_free(capacity16_buf, SD_CAPACITY_16_SIZE);
19448
19449 /*
19450 * if the reported capacity is set to all 0xf's, then
19451 * this disk is too large. This could only happen with
19452 * a device that supports LBAs larger than 64 bits which
19453 * are not defined by any current T10 standards.
19454 */
19455 if (capacity == 0xffffffffffffffff) {
19456 sd_ssc_set_info(ssc, SSC_FLAGS_INVALID_DATA,
19457 "disk is too large");
19458 return (EIO);
19459 }
19460 break; /* Success! */
19461 case EIO:
19462 switch (ucmd_buf.uscsi_status) {
19463 case STATUS_RESERVATION_CONFLICT:
19464 status = EACCES;
19465 break;
19466 case STATUS_CHECK:
19467 /*
19468 * Check condition; look for ASC/ASCQ of 0x04/0x01
19469 * (LOGICAL UNIT IS IN PROCESS OF BECOMING READY)
19470 */
19471 if ((ucmd_buf.uscsi_rqstatus == STATUS_GOOD) &&
19472 (scsi_sense_asc((uint8_t *)&sense_buf) == 0x04) &&
19473 (scsi_sense_ascq((uint8_t *)&sense_buf) == 0x01)) {
19474 kmem_free(capacity16_buf, SD_CAPACITY_16_SIZE);
19475 return (EAGAIN);
19476 }
19477 break;
19478 default:
19479 break;
19480 }
19481 /* FALLTHRU */
19482 default:
19483 kmem_free(capacity16_buf, SD_CAPACITY_16_SIZE);
19484 return (status);
19485 }
19486
19487 *capp = capacity;
19488 *lbap = lbasize;
19489
19490 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_READ_CAPACITY_16: "
19491 "capacity:0x%llx lbasize:0x%x\n", capacity, lbasize);
19492
19493 return (0);
19494 }
19495
19496
19497 /*
19498 * Function: sd_send_scsi_START_STOP_UNIT
19499 *
19500 * Description: Issue a scsi START STOP UNIT command to the target.
19501 *
19502 * Arguments: ssc - ssc contains pointer to driver soft state (unit)
19503 * structure for this target.
19504 * flag - SD_TARGET_START
19505 * SD_TARGET_STOP
19506 * SD_TARGET_EJECT
19507 * path_flag - SD_PATH_DIRECT to use the USCSI "direct" chain and
19508 * the normal command waitq, or SD_PATH_DIRECT_PRIORITY
19509 * to use the USCSI "direct" chain and bypass the normal
19510 * command waitq. SD_PATH_DIRECT_PRIORITY is used when this
19511 * command is issued as part of an error recovery action.
19512 *
19513 * Return Code: 0 - Success
19514 * EIO - IO error
19515 * EACCES - Reservation conflict detected
19516 * ENXIO - Not Ready, medium not present
19517 * errno return code from sd_ssc_send()
19518 *
19519 * Context: Can sleep.
19520 */
19521
19522 static int
19523 sd_send_scsi_START_STOP_UNIT(sd_ssc_t *ssc, int flag, int path_flag)
19524 {
19525 struct scsi_extended_sense sense_buf;
19526 union scsi_cdb cdb;
19527 struct uscsi_cmd ucmd_buf;
19528 int status;
19529 struct sd_lun *un;
19530
19531 ASSERT(ssc != NULL);
19532 un = ssc->ssc_un;
19533 ASSERT(un != NULL);
19534 ASSERT(!mutex_owned(SD_MUTEX(un)));
19535
19536 SD_TRACE(SD_LOG_IO, un,
19537 "sd_send_scsi_START_STOP_UNIT: entry: un:0x%p\n", un);
19538
19539 if (un->un_f_check_start_stop &&
19540 ((flag == SD_TARGET_START) || (flag == SD_TARGET_STOP)) &&
19541 (un->un_f_start_stop_supported != TRUE)) {
19542 return (0);
19543 }
19544
19545 /*
19546 * If we are performing an eject operation and
19547 * we receive any command other than SD_TARGET_EJECT
19548 * we should immediately return.
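 * (EAGAIN is returned so the caller can back off until the eject
 * completes.)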
19549 */ 19550 if (flag != SD_TARGET_EJECT) { 19551 mutex_enter(SD_MUTEX(un)); 19552 if (un->un_f_ejecting == TRUE) { 19553 mutex_exit(SD_MUTEX(un)); 19554 return (EAGAIN); 19555 } 19556 mutex_exit(SD_MUTEX(un)); 19557 } 19558 19559 bzero(&cdb, sizeof (cdb)); 19560 bzero(&ucmd_buf, sizeof (ucmd_buf)); 19561 bzero(&sense_buf, sizeof (struct scsi_extended_sense)); 19562 19563 cdb.scc_cmd = SCMD_START_STOP; 19564 cdb.cdb_opaque[4] = (uchar_t)flag; 19565 19566 ucmd_buf.uscsi_cdb = (char *)&cdb; 19567 ucmd_buf.uscsi_cdblen = CDB_GROUP0; 19568 ucmd_buf.uscsi_bufaddr = NULL; 19569 ucmd_buf.uscsi_buflen = 0; 19570 ucmd_buf.uscsi_rqbuf = (caddr_t)&sense_buf; 19571 ucmd_buf.uscsi_rqlen = sizeof (struct scsi_extended_sense); 19572 ucmd_buf.uscsi_flags = USCSI_RQENABLE | USCSI_SILENT; 19573 ucmd_buf.uscsi_timeout = 200; 19574 19575 status = sd_ssc_send(ssc, &ucmd_buf, FKIOCTL, 19576 UIO_SYSSPACE, path_flag); 19577 19578 switch (status) { 19579 case 0: 19580 sd_ssc_assessment(ssc, SD_FMT_STANDARD); 19581 break; /* Success! */ 19582 case EIO: 19583 switch (ucmd_buf.uscsi_status) { 19584 case STATUS_RESERVATION_CONFLICT: 19585 status = EACCES; 19586 break; 19587 case STATUS_CHECK: 19588 if (ucmd_buf.uscsi_rqstatus == STATUS_GOOD) { 19589 switch (scsi_sense_key( 19590 (uint8_t *)&sense_buf)) { 19591 case KEY_ILLEGAL_REQUEST: 19592 status = ENOTSUP; 19593 break; 19594 case KEY_NOT_READY: 19595 if (scsi_sense_asc( 19596 (uint8_t *)&sense_buf) 19597 == 0x3A) { 19598 status = ENXIO; 19599 } 19600 break; 19601 default: 19602 break; 19603 } 19604 } 19605 break; 19606 default: 19607 break; 19608 } 19609 break; 19610 default: 19611 break; 19612 } 19613 19614 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_START_STOP_UNIT: exit\n"); 19615 19616 return (status); 19617 } 19618 19619 19620 /* 19621 * Function: sd_start_stop_unit_callback 19622 * 19623 * Description: timeout(9F) callback to begin recovery process for a 19624 * device that has spun down. 19625 * 19626 * Arguments: arg - pointer to associated softstate struct. 19627 * 19628 * Context: Executes in a timeout(9F) thread context 19629 */ 19630 19631 static void 19632 sd_start_stop_unit_callback(void *arg) 19633 { 19634 struct sd_lun *un = arg; 19635 ASSERT(un != NULL); 19636 ASSERT(!mutex_owned(SD_MUTEX(un))); 19637 19638 SD_TRACE(SD_LOG_IO, un, "sd_start_stop_unit_callback: entry\n"); 19639 19640 (void) taskq_dispatch(sd_tq, sd_start_stop_unit_task, un, KM_NOSLEEP); 19641 } 19642 19643 19644 /* 19645 * Function: sd_start_stop_unit_task 19646 * 19647 * Description: Recovery procedure when a drive is spun down. 19648 * 19649 * Arguments: arg - pointer to associated softstate struct. 19650 * 19651 * Context: Executes in a taskq() thread context 19652 */ 19653 19654 static void 19655 sd_start_stop_unit_task(void *arg) 19656 { 19657 struct sd_lun *un = arg; 19658 sd_ssc_t *ssc; 19659 int rval; 19660 19661 ASSERT(un != NULL); 19662 ASSERT(!mutex_owned(SD_MUTEX(un))); 19663 19664 SD_TRACE(SD_LOG_IO, un, "sd_start_stop_unit_task: entry\n"); 19665 19666 /* 19667 * Some unformatted drives report not ready error, no need to 19668 * restart if format has been initiated. 19669 */ 19670 mutex_enter(SD_MUTEX(un)); 19671 if (un->un_f_format_in_progress == TRUE) { 19672 mutex_exit(SD_MUTEX(un)); 19673 return; 19674 } 19675 mutex_exit(SD_MUTEX(un)); 19676 19677 /* 19678 * When a START STOP command is issued from here, it is part of a 19679 * failure recovery operation and must be issued before any other 19680 * commands, including any pending retries. 
Thus it must be sent
19681 * using SD_PATH_DIRECT_PRIORITY. It doesn't matter if the spin up
19682 * succeeds or not; we will start I/O after the attempt.
19683 */
19684 ssc = sd_ssc_init(un);
19685 rval = sd_send_scsi_START_STOP_UNIT(ssc, SD_TARGET_START,
19686 SD_PATH_DIRECT_PRIORITY);
19687 if (rval != 0)
19688 sd_ssc_assessment(ssc, SD_FMT_IGNORE);
19689 sd_ssc_fini(ssc);
19690 /*
19691 * The above call blocks until the START_STOP_UNIT command completes.
19692 * Now that it has completed, we must re-try the original IO that
19693 * received the NOT READY condition in the first place. There are
19694 * three possible conditions here:
19695 *
19696 * (1) The original IO is on un_retry_bp.
19697 * (2) The original IO is on the regular wait queue, and un_retry_bp
19698 * is NULL.
19699 * (3) The original IO is on the regular wait queue, and un_retry_bp
19700 * points to some other, unrelated bp.
19701 *
19702 * For each case, we must call sd_start_cmds() with un_retry_bp
19703 * as the argument. If un_retry_bp is NULL, this will initiate
19704 * processing of the regular wait queue. If un_retry_bp is not NULL,
19705 * then this will process the bp on un_retry_bp. That may or may not
19706 * be the original IO, but that does not matter: the important thing
19707 * is to keep the IO processing going at this point.
19708 *
19709 * Note: This is a very specific error recovery sequence associated
19710 * with a drive that is not spun up. We attempt a START_STOP_UNIT and
19711 * serialize the I/O with completion of the spin-up.
19712 */
19713 mutex_enter(SD_MUTEX(un));
19714 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
19715 "sd_start_stop_unit_task: un:0x%p starting bp:0x%p\n",
19716 un, un->un_retry_bp);
19717 un->un_startstop_timeid = NULL; /* Timeout is no longer pending */
19718 sd_start_cmds(un, un->un_retry_bp);
19719 mutex_exit(SD_MUTEX(un));
19720
19721 SD_TRACE(SD_LOG_IO, un, "sd_start_stop_unit_task: exit\n");
19722 }
19723
19724
19725 /*
19726 * Function: sd_send_scsi_INQUIRY
19727 *
19728 * Description: Issue the scsi INQUIRY command.
19729 *
19730 * Arguments: ssc - ssc contains pointer to driver soft state (unit)
19731 * structure for this target.
19732 * bufaddr - buffer to receive the inquiry data
19733 * buflen - size of bufaddr, in bytes
19734 * evpd - EVPD bit (0x01 requests a vital product data page)
19735 * page_code - VPD page to request when evpd is set
19736 * residp - ptr to receive the command residual, or NULL
19737 *
19738 * Return Code: 0 - Success
19739 * errno return code from sd_ssc_send()
19740 *
19741 * Context: Can sleep. Does not return until command is completed.
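 *
 *		Example (illustrative only): a caller might issue a standard
 *		inquiry as
 *		    status = sd_send_scsi_INQUIRY(ssc, (uchar_t *)&inq,
 *			sizeof (inq), 0, 0, NULL);
 *		and request a VPD page by passing evpd = 0x01 together with
 *		the desired page_code.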
19742 */
19743
19744 static int
19745 sd_send_scsi_INQUIRY(sd_ssc_t *ssc, uchar_t *bufaddr, size_t buflen,
19746 uchar_t evpd, uchar_t page_code, size_t *residp)
19747 {
19748 union scsi_cdb cdb;
19749 struct uscsi_cmd ucmd_buf;
19750 int status;
19751 struct sd_lun *un;
19752
19753 ASSERT(ssc != NULL);
19754 un = ssc->ssc_un;
19755 ASSERT(un != NULL);
19756 ASSERT(!mutex_owned(SD_MUTEX(un)));
19757 ASSERT(bufaddr != NULL);
19758
19759 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_INQUIRY: entry: un:0x%p\n", un);
19760
19761 bzero(&cdb, sizeof (cdb));
19762 bzero(&ucmd_buf, sizeof (ucmd_buf));
19763 bzero(bufaddr, buflen);
19764
19765 cdb.scc_cmd = SCMD_INQUIRY;
19766 cdb.cdb_opaque[1] = evpd;
19767 cdb.cdb_opaque[2] = page_code;
19768 FORMG0COUNT(&cdb, buflen);
19769
19770 ucmd_buf.uscsi_cdb = (char *)&cdb;
19771 ucmd_buf.uscsi_cdblen = CDB_GROUP0;
19772 ucmd_buf.uscsi_bufaddr = (caddr_t)bufaddr;
19773 ucmd_buf.uscsi_buflen = buflen;
19774 ucmd_buf.uscsi_rqbuf = NULL;
19775 ucmd_buf.uscsi_rqlen = 0;
19776 ucmd_buf.uscsi_flags = USCSI_READ | USCSI_SILENT;
19777 ucmd_buf.uscsi_timeout = 200; /* Excessive legacy value */
19778
19779 status = sd_ssc_send(ssc, &ucmd_buf, FKIOCTL,
19780 UIO_SYSSPACE, SD_PATH_DIRECT);
19781
19782 /*
19783 * Only make an assessment when status == 0; the upper-level
19784 * caller will apply its own assessment based on the context.
19785 */
19786 if (status == 0)
19787 sd_ssc_assessment(ssc, SD_FMT_STANDARD);
19788
19789 if ((status == 0) && (residp != NULL)) {
19790 *residp = ucmd_buf.uscsi_resid;
19791 }
19792
19793 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_INQUIRY: exit\n");
19794
19795 return (status);
19796 }
19797
19798
19799 /*
19800 * Function: sd_send_scsi_TEST_UNIT_READY
19801 *
19802 * Description: Issue the scsi TEST UNIT READY command.
19803 * This routine can be told to set the flag USCSI_DIAGNOSE to
19804 * prevent retrying failed commands. Use this when the intent
19805 * is either to check for device readiness, to clear a Unit
19806 * Attention, or to clear any outstanding sense data.
19807 * However under specific conditions the expected behavior
19808 * is for retries to bring a device ready, so use the flag
19809 * with caution.
19810 *
19811 * Arguments: ssc - ssc contains pointer to driver soft state (unit)
19812 * structure for this target.
19813 * flag: SD_CHECK_FOR_MEDIA: return ENXIO if no media present
19814 * SD_DONT_RETRY_TUR: include uscsi flag USCSI_DIAGNOSE.
19815 * 0: don't check for media present; do retries on cmd.
19816 *
19817 * Return Code: 0 - Success
19818 * EIO - IO error
19819 * EACCES - Reservation conflict detected
19820 * ENXIO - Not Ready, medium not present
19821 * errno return code from sd_ssc_send()
19822 *
19823 * Context: Can sleep. Does not return until command is completed.
19824 */
19825
19826 static int
19827 sd_send_scsi_TEST_UNIT_READY(sd_ssc_t *ssc, int flag)
19828 {
19829 struct scsi_extended_sense sense_buf;
19830 union scsi_cdb cdb;
19831 struct uscsi_cmd ucmd_buf;
19832 int status;
19833 struct sd_lun *un;
19834
19835 ASSERT(ssc != NULL);
19836 un = ssc->ssc_un;
19837 ASSERT(un != NULL);
19838 ASSERT(!mutex_owned(SD_MUTEX(un)));
19839
19840 SD_TRACE(SD_LOG_IO, un,
19841 "sd_send_scsi_TEST_UNIT_READY: entry: un:0x%p\n", un);
19842
19843 /*
19844 * Some Seagate elite1 TQ devices get hung with disconnect/reconnect
19845 * timeouts when they receive a TUR and the queue is not empty.
Check
19846 * the configuration flag set during attach (indicating the drive has
19847 * this firmware bug) and un_ncmds_in_transport before issuing the
19848 * TUR. If there are
19849 * pending commands, return success; this is a bit arbitrary but is ok
19850 * for non-removables (i.e. the eliteI disks) and non-clustering
19851 * configurations.
19852 */
19853 if (un->un_f_cfg_tur_check == TRUE) {
19854 mutex_enter(SD_MUTEX(un));
19855 if (un->un_ncmds_in_transport != 0) {
19856 mutex_exit(SD_MUTEX(un));
19857 return (0);
19858 }
19859 mutex_exit(SD_MUTEX(un));
19860 }
19861
19862 bzero(&cdb, sizeof (cdb));
19863 bzero(&ucmd_buf, sizeof (ucmd_buf));
19864 bzero(&sense_buf, sizeof (struct scsi_extended_sense));
19865
19866 cdb.scc_cmd = SCMD_TEST_UNIT_READY;
19867
19868 ucmd_buf.uscsi_cdb = (char *)&cdb;
19869 ucmd_buf.uscsi_cdblen = CDB_GROUP0;
19870 ucmd_buf.uscsi_bufaddr = NULL;
19871 ucmd_buf.uscsi_buflen = 0;
19872 ucmd_buf.uscsi_rqbuf = (caddr_t)&sense_buf;
19873 ucmd_buf.uscsi_rqlen = sizeof (struct scsi_extended_sense);
19874 ucmd_buf.uscsi_flags = USCSI_RQENABLE | USCSI_SILENT;
19875
19876 /* Use flag USCSI_DIAGNOSE to prevent retries if it fails. */
19877 if ((flag & SD_DONT_RETRY_TUR) != 0) {
19878 ucmd_buf.uscsi_flags |= USCSI_DIAGNOSE;
19879 }
19880 ucmd_buf.uscsi_timeout = 60;
19881
19882 status = sd_ssc_send(ssc, &ucmd_buf, FKIOCTL,
19883 UIO_SYSSPACE, ((flag & SD_BYPASS_PM) ? SD_PATH_DIRECT :
19884 SD_PATH_STANDARD));
19885
19886 switch (status) {
19887 case 0:
19888 sd_ssc_assessment(ssc, SD_FMT_STANDARD);
19889 break; /* Success! */
19890 case EIO:
19891 switch (ucmd_buf.uscsi_status) {
19892 case STATUS_RESERVATION_CONFLICT:
19893 status = EACCES;
19894 break;
19895 case STATUS_CHECK:
19896 if ((flag & SD_CHECK_FOR_MEDIA) == 0) {
19897 break;
19898 }
19899 if ((ucmd_buf.uscsi_rqstatus == STATUS_GOOD) &&
19900 (scsi_sense_key((uint8_t *)&sense_buf) ==
19901 KEY_NOT_READY) &&
19902 (scsi_sense_asc((uint8_t *)&sense_buf) == 0x3A)) {
19903 status = ENXIO;
19904 }
19905 break;
19906 default:
19907 break;
19908 }
19909 break;
19910 default:
19911 break;
19912 }
19913
19914 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_TEST_UNIT_READY: exit\n");
19915
19916 return (status);
19917 }
19918
19919 /*
19920 * Function: sd_send_scsi_PERSISTENT_RESERVE_IN
19921 *
19922 * Description: Issue the scsi PERSISTENT RESERVE IN command.
19923 *
19924 * Arguments: ssc - ssc contains pointer to driver soft state (unit)
19925 * structure for this target.
19926 *
19927 * Return Code: 0 - Success
19928 * EACCES
19929 * ENOTSUP
19930 * errno return code from sd_ssc_send()
19931 *
19932 * Context: Can sleep. Does not return until command is completed.
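 *
 *		Note: usr_cmd must be SD_READ_KEYS or SD_READ_RESV. If
 *		data_bufp is NULL (and data_len is 0), a scratch buffer of
 *		MHIOC_RESV_KEY_SIZE bytes is allocated and freed internally.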
19933 */ 19934 19935 static int 19936 sd_send_scsi_PERSISTENT_RESERVE_IN(sd_ssc_t *ssc, uchar_t usr_cmd, 19937 uint16_t data_len, uchar_t *data_bufp) 19938 { 19939 struct scsi_extended_sense sense_buf; 19940 union scsi_cdb cdb; 19941 struct uscsi_cmd ucmd_buf; 19942 int status; 19943 int no_caller_buf = FALSE; 19944 struct sd_lun *un; 19945 19946 ASSERT(ssc != NULL); 19947 un = ssc->ssc_un; 19948 ASSERT(un != NULL); 19949 ASSERT(!mutex_owned(SD_MUTEX(un))); 19950 ASSERT((usr_cmd == SD_READ_KEYS) || (usr_cmd == SD_READ_RESV)); 19951 19952 SD_TRACE(SD_LOG_IO, un, 19953 "sd_send_scsi_PERSISTENT_RESERVE_IN: entry: un:0x%p\n", un); 19954 19955 bzero(&cdb, sizeof (cdb)); 19956 bzero(&ucmd_buf, sizeof (ucmd_buf)); 19957 bzero(&sense_buf, sizeof (struct scsi_extended_sense)); 19958 if (data_bufp == NULL) { 19959 /* Allocate a default buf if the caller did not give one */ 19960 ASSERT(data_len == 0); 19961 data_len = MHIOC_RESV_KEY_SIZE; 19962 data_bufp = kmem_zalloc(MHIOC_RESV_KEY_SIZE, KM_SLEEP); 19963 no_caller_buf = TRUE; 19964 } 19965 19966 cdb.scc_cmd = SCMD_PERSISTENT_RESERVE_IN; 19967 cdb.cdb_opaque[1] = usr_cmd; 19968 FORMG1COUNT(&cdb, data_len); 19969 19970 ucmd_buf.uscsi_cdb = (char *)&cdb; 19971 ucmd_buf.uscsi_cdblen = CDB_GROUP1; 19972 ucmd_buf.uscsi_bufaddr = (caddr_t)data_bufp; 19973 ucmd_buf.uscsi_buflen = data_len; 19974 ucmd_buf.uscsi_rqbuf = (caddr_t)&sense_buf; 19975 ucmd_buf.uscsi_rqlen = sizeof (struct scsi_extended_sense); 19976 ucmd_buf.uscsi_flags = USCSI_RQENABLE | USCSI_READ | USCSI_SILENT; 19977 ucmd_buf.uscsi_timeout = 60; 19978 19979 status = sd_ssc_send(ssc, &ucmd_buf, FKIOCTL, 19980 UIO_SYSSPACE, SD_PATH_STANDARD); 19981 19982 switch (status) { 19983 case 0: 19984 sd_ssc_assessment(ssc, SD_FMT_STANDARD); 19985 19986 break; /* Success! */ 19987 case EIO: 19988 switch (ucmd_buf.uscsi_status) { 19989 case STATUS_RESERVATION_CONFLICT: 19990 status = EACCES; 19991 break; 19992 case STATUS_CHECK: 19993 if ((ucmd_buf.uscsi_rqstatus == STATUS_GOOD) && 19994 (scsi_sense_key((uint8_t *)&sense_buf) == 19995 KEY_ILLEGAL_REQUEST)) { 19996 status = ENOTSUP; 19997 } 19998 break; 19999 default: 20000 break; 20001 } 20002 break; 20003 default: 20004 break; 20005 } 20006 20007 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_PERSISTENT_RESERVE_IN: exit\n"); 20008 20009 if (no_caller_buf == TRUE) { 20010 kmem_free(data_bufp, data_len); 20011 } 20012 20013 return (status); 20014 } 20015 20016 20017 /* 20018 * Function: sd_send_scsi_PERSISTENT_RESERVE_OUT 20019 * 20020 * Description: This routine is the driver entry point for handling CD-ROM 20021 * multi-host persistent reservation requests (MHIOCGRP_INKEYS, 20022 * MHIOCGRP_INRESV) by sending the SCSI-3 PROUT commands to the 20023 * device. 20024 * 20025 * Arguments: ssc - ssc contains un - pointer to soft state struct 20026 * for the target. 20027 * usr_cmd SCSI-3 reservation facility command (one of 20028 * SD_SCSI3_REGISTER, SD_SCSI3_RESERVE, SD_SCSI3_RELEASE, 20029 * SD_SCSI3_PREEMPTANDABORT) 20030 * usr_bufp - user provided pointer register, reserve descriptor or 20031 * preempt and abort structure (mhioc_register_t, 20032 * mhioc_resv_desc_t, mhioc_preemptandabort_t) 20033 * 20034 * Return Code: 0 - Success 20035 * EACCES 20036 * ENOTSUP 20037 * errno return code from sd_ssc_send() 20038 * 20039 * Context: Can sleep. Does not return until command is completed. 
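 *
 * Usage sketch (hypothetical caller; it mirrors how sdioctl() forwards
 * MHIOCGRP_REGISTER requests below):
 *
 *	mhioc_register_t reg;	(filled in by the caller)
 *
 *	rval = sd_send_scsi_PERSISTENT_RESERVE_OUT(ssc,
 *	    SD_SCSI3_REGISTER, (uchar_t *)&reg);
 *
 * A NULL usr_bufp is rejected with EINVAL before any command is built.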
20040 */ 20041 20042 static int 20043 sd_send_scsi_PERSISTENT_RESERVE_OUT(sd_ssc_t *ssc, uchar_t usr_cmd, 20044 uchar_t *usr_bufp) 20045 { 20046 struct scsi_extended_sense sense_buf; 20047 union scsi_cdb cdb; 20048 struct uscsi_cmd ucmd_buf; 20049 int status; 20050 uchar_t data_len = sizeof (sd_prout_t); 20051 sd_prout_t *prp; 20052 struct sd_lun *un; 20053 20054 ASSERT(ssc != NULL); 20055 un = ssc->ssc_un; 20056 ASSERT(un != NULL); 20057 ASSERT(!mutex_owned(SD_MUTEX(un))); 20058 ASSERT(data_len == 24); /* required by scsi spec */ 20059 20060 SD_TRACE(SD_LOG_IO, un, 20061 "sd_send_scsi_PERSISTENT_RESERVE_OUT: entry: un:0x%p\n", un); 20062 20063 if (usr_bufp == NULL) { 20064 return (EINVAL); 20065 } 20066 20067 bzero(&cdb, sizeof (cdb)); 20068 bzero(&ucmd_buf, sizeof (ucmd_buf)); 20069 bzero(&sense_buf, sizeof (struct scsi_extended_sense)); 20070 prp = kmem_zalloc(data_len, KM_SLEEP); 20071 20072 cdb.scc_cmd = SCMD_PERSISTENT_RESERVE_OUT; 20073 cdb.cdb_opaque[1] = usr_cmd; 20074 FORMG1COUNT(&cdb, data_len); 20075 20076 ucmd_buf.uscsi_cdb = (char *)&cdb; 20077 ucmd_buf.uscsi_cdblen = CDB_GROUP1; 20078 ucmd_buf.uscsi_bufaddr = (caddr_t)prp; 20079 ucmd_buf.uscsi_buflen = data_len; 20080 ucmd_buf.uscsi_rqbuf = (caddr_t)&sense_buf; 20081 ucmd_buf.uscsi_rqlen = sizeof (struct scsi_extended_sense); 20082 ucmd_buf.uscsi_flags = USCSI_RQENABLE | USCSI_WRITE | USCSI_SILENT; 20083 ucmd_buf.uscsi_timeout = 60; 20084 20085 switch (usr_cmd) { 20086 case SD_SCSI3_REGISTER: { 20087 mhioc_register_t *ptr = (mhioc_register_t *)usr_bufp; 20088 20089 bcopy(ptr->oldkey.key, prp->res_key, MHIOC_RESV_KEY_SIZE); 20090 bcopy(ptr->newkey.key, prp->service_key, 20091 MHIOC_RESV_KEY_SIZE); 20092 prp->aptpl = ptr->aptpl; 20093 break; 20094 } 20095 case SD_SCSI3_RESERVE: 20096 case SD_SCSI3_RELEASE: { 20097 mhioc_resv_desc_t *ptr = (mhioc_resv_desc_t *)usr_bufp; 20098 20099 bcopy(ptr->key.key, prp->res_key, MHIOC_RESV_KEY_SIZE); 20100 prp->scope_address = BE_32(ptr->scope_specific_addr); 20101 cdb.cdb_opaque[2] = ptr->type; 20102 break; 20103 } 20104 case SD_SCSI3_PREEMPTANDABORT: { 20105 mhioc_preemptandabort_t *ptr = 20106 (mhioc_preemptandabort_t *)usr_bufp; 20107 20108 bcopy(ptr->resvdesc.key.key, prp->res_key, MHIOC_RESV_KEY_SIZE); 20109 bcopy(ptr->victim_key.key, prp->service_key, 20110 MHIOC_RESV_KEY_SIZE); 20111 prp->scope_address = BE_32(ptr->resvdesc.scope_specific_addr); 20112 cdb.cdb_opaque[2] = ptr->resvdesc.type; 20113 ucmd_buf.uscsi_flags |= USCSI_HEAD; 20114 break; 20115 } 20116 case SD_SCSI3_REGISTERANDIGNOREKEY: 20117 { 20118 mhioc_registerandignorekey_t *ptr; 20119 ptr = (mhioc_registerandignorekey_t *)usr_bufp; 20120 bcopy(ptr->newkey.key, 20121 prp->service_key, MHIOC_RESV_KEY_SIZE); 20122 prp->aptpl = ptr->aptpl; 20123 break; 20124 } 20125 default: 20126 ASSERT(FALSE); 20127 break; 20128 } 20129 20130 status = sd_ssc_send(ssc, &ucmd_buf, FKIOCTL, 20131 UIO_SYSSPACE, SD_PATH_STANDARD); 20132 20133 switch (status) { 20134 case 0: 20135 sd_ssc_assessment(ssc, SD_FMT_STANDARD); 20136 break; /* Success! 
*/ 20137 case EIO: 20138 switch (ucmd_buf.uscsi_status) { 20139 case STATUS_RESERVATION_CONFLICT: 20140 status = EACCES; 20141 break; 20142 case STATUS_CHECK: 20143 if ((ucmd_buf.uscsi_rqstatus == STATUS_GOOD) && 20144 (scsi_sense_key((uint8_t *)&sense_buf) == 20145 KEY_ILLEGAL_REQUEST)) { 20146 status = ENOTSUP; 20147 } 20148 break; 20149 default: 20150 break; 20151 } 20152 break; 20153 default: 20154 break; 20155 } 20156 20157 kmem_free(prp, data_len); 20158 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_PERSISTENT_RESERVE_OUT: exit\n"); 20159 return (status); 20160 } 20161 20162 20163 /* 20164 * Function: sd_send_scsi_SYNCHRONIZE_CACHE 20165 * 20166 * Description: Issues a scsi SYNCHRONIZE CACHE command to the target 20167 * 20168 * Arguments: un - pointer to the target's soft state struct 20169 * dkc - pointer to the callback structure 20170 * 20171 * Return Code: 0 - success 20172 * errno-type error code 20173 * 20174 * Context: kernel thread context only. 20175 * 20176 * _______________________________________________________________ 20177 * | dkc_flag & | dkc_callback | DKIOCFLUSHWRITECACHE | 20178 * |FLUSH_VOLATILE| | operation | 20179 * |______________|______________|_________________________________| 20180 * | 0 | NULL | Synchronous flush on both | 20181 * | | | volatile and non-volatile cache | 20182 * |______________|______________|_________________________________| 20183 * | 1 | NULL | Synchronous flush on volatile | 20184 * | | | cache; disk drivers may suppress| 20185 * | | | flush if disk table indicates | 20186 * | | | non-volatile cache | 20187 * |______________|______________|_________________________________| 20188 * | 0 | !NULL | Asynchronous flush on both | 20189 * | | | volatile and non-volatile cache;| 20190 * |______________|______________|_________________________________| 20191 * | 1 | !NULL | Asynchronous flush on volatile | 20192 * | | | cache; disk drivers may suppress| 20193 * | | | flush if disk table indicates | 20194 * | | | non-volatile cache | 20195 * |______________|______________|_________________________________| 20196 * 20197 */ 20198 20199 static int 20200 sd_send_scsi_SYNCHRONIZE_CACHE(struct sd_lun *un, struct dk_callback *dkc) 20201 { 20202 struct sd_uscsi_info *uip; 20203 struct uscsi_cmd *uscmd; 20204 union scsi_cdb *cdb; 20205 struct buf *bp; 20206 int rval = 0; 20207 int is_async; 20208 20209 SD_TRACE(SD_LOG_IO, un, 20210 "sd_send_scsi_SYNCHRONIZE_CACHE: entry: un:0x%p\n", un); 20211 20212 ASSERT(un != NULL); 20213 ASSERT(!mutex_owned(SD_MUTEX(un))); 20214 20215 if (dkc == NULL || dkc->dkc_callback == NULL) { 20216 is_async = FALSE; 20217 } else { 20218 is_async = TRUE; 20219 } 20220 20221 mutex_enter(SD_MUTEX(un)); 20222 /* check whether cache flush should be suppressed */ 20223 if (un->un_f_suppress_cache_flush == TRUE) { 20224 mutex_exit(SD_MUTEX(un)); 20225 /* 20226 * suppress the cache flush if the device is told to do 20227 * so by sd.conf or disk table 20228 */ 20229 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_SYNCHRONIZE_CACHE: \ 20230 skip the cache flush since suppress_cache_flush is %d!\n", 20231 un->un_f_suppress_cache_flush); 20232 20233 if (is_async == TRUE) { 20234 /* invoke callback for asynchronous flush */ 20235 (*dkc->dkc_callback)(dkc->dkc_cookie, 0); 20236 } 20237 return (rval); 20238 } 20239 mutex_exit(SD_MUTEX(un)); 20240 20241 /* 20242 * check dkc_flag & FLUSH_VOLATILE so SYNC_NV bit can be 20243 * set properly 20244 */ 20245 cdb = kmem_zalloc(CDB_GROUP1, KM_SLEEP); 20246 cdb->scc_cmd = SCMD_SYNCHRONIZE_CACHE; 20247 20248 
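	/*
	 * The block below decides whether to set the SYNC_NV bit in the
	 * CDB.  As background (per the SBC-2 definition of SYNCHRONIZE
	 * CACHE(10), stated here as an assumption rather than taken from
	 * this file): with SYNC_NV set the device may flush only its
	 * volatile cache; with it clear the device must flush the
	 * non-volatile cache as well.
	 */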
mutex_enter(SD_MUTEX(un)); 20249 if (dkc != NULL && un->un_f_sync_nv_supported && 20250 (dkc->dkc_flag & FLUSH_VOLATILE)) { 20251 /* 20252 * if the device supports SYNC_NV bit, turn on 20253 * the SYNC_NV bit to only flush volatile cache 20254 */ 20255 cdb->cdb_un.tag |= SD_SYNC_NV_BIT; 20256 } 20257 mutex_exit(SD_MUTEX(un)); 20258 20259 /* 20260 * First get some memory for the uscsi_cmd struct and cdb 20261 * and initialize for SYNCHRONIZE_CACHE cmd. 20262 */ 20263 uscmd = kmem_zalloc(sizeof (struct uscsi_cmd), KM_SLEEP); 20264 uscmd->uscsi_cdblen = CDB_GROUP1; 20265 uscmd->uscsi_cdb = (caddr_t)cdb; 20266 uscmd->uscsi_bufaddr = NULL; 20267 uscmd->uscsi_buflen = 0; 20268 uscmd->uscsi_rqbuf = kmem_zalloc(SENSE_LENGTH, KM_SLEEP); 20269 uscmd->uscsi_rqlen = SENSE_LENGTH; 20270 uscmd->uscsi_rqresid = SENSE_LENGTH; 20271 uscmd->uscsi_flags = USCSI_RQENABLE | USCSI_SILENT; 20272 uscmd->uscsi_timeout = sd_io_time; 20273 20274 /* 20275 * Allocate an sd_uscsi_info struct and fill it with the info 20276 * needed by sd_initpkt_for_uscsi(). Then put the pointer into 20277 * b_private in the buf for sd_initpkt_for_uscsi(). Note that 20278 * since we allocate the buf here in this function, we do not 20279 * need to preserve the prior contents of b_private. 20280 * The sd_uscsi_info struct is also used by sd_uscsi_strategy() 20281 */ 20282 uip = kmem_zalloc(sizeof (struct sd_uscsi_info), KM_SLEEP); 20283 uip->ui_flags = SD_PATH_DIRECT; 20284 uip->ui_cmdp = uscmd; 20285 20286 bp = getrbuf(KM_SLEEP); 20287 bp->b_private = uip; 20288 20289 /* 20290 * Setup buffer to carry uscsi request. 20291 */ 20292 bp->b_flags = B_BUSY; 20293 bp->b_bcount = 0; 20294 bp->b_blkno = 0; 20295 20296 if (is_async == TRUE) { 20297 bp->b_iodone = sd_send_scsi_SYNCHRONIZE_CACHE_biodone; 20298 uip->ui_dkc = *dkc; 20299 } 20300 20301 bp->b_edev = SD_GET_DEV(un); 20302 bp->b_dev = cmpdev(bp->b_edev); /* maybe unnecessary? */ 20303 20304 /* 20305 * Unset un_f_sync_cache_required flag 20306 */ 20307 mutex_enter(SD_MUTEX(un)); 20308 un->un_f_sync_cache_required = FALSE; 20309 mutex_exit(SD_MUTEX(un)); 20310 20311 (void) sd_uscsi_strategy(bp); 20312 20313 /* 20314 * If synchronous request, wait for completion 20315 * If async just return and let b_iodone callback 20316 * cleanup. 20317 * NOTE: On return, u_ncmds_in_driver will be decremented, 20318 * but it was also incremented in sd_uscsi_strategy(), so 20319 * we should be ok. 20320 */ 20321 if (is_async == FALSE) { 20322 (void) biowait(bp); 20323 rval = sd_send_scsi_SYNCHRONIZE_CACHE_biodone(bp); 20324 } 20325 20326 return (rval); 20327 } 20328 20329 20330 static int 20331 sd_send_scsi_SYNCHRONIZE_CACHE_biodone(struct buf *bp) 20332 { 20333 struct sd_uscsi_info *uip; 20334 struct uscsi_cmd *uscmd; 20335 uint8_t *sense_buf; 20336 struct sd_lun *un; 20337 int status; 20338 union scsi_cdb *cdb; 20339 20340 uip = (struct sd_uscsi_info *)(bp->b_private); 20341 ASSERT(uip != NULL); 20342 20343 uscmd = uip->ui_cmdp; 20344 ASSERT(uscmd != NULL); 20345 20346 sense_buf = (uint8_t *)uscmd->uscsi_rqbuf; 20347 ASSERT(sense_buf != NULL); 20348 20349 un = ddi_get_soft_state(sd_state, SD_GET_INSTANCE_FROM_BUF(bp)); 20350 ASSERT(un != NULL); 20351 20352 cdb = (union scsi_cdb *)uscmd->uscsi_cdb; 20353 20354 status = geterror(bp); 20355 switch (status) { 20356 case 0: 20357 break; /* Success! 
*/ 20358 case EIO: 20359 switch (uscmd->uscsi_status) { 20360 case STATUS_RESERVATION_CONFLICT: 20361 /* Ignore reservation conflict */ 20362 status = 0; 20363 goto done; 20364 20365 case STATUS_CHECK: 20366 if ((uscmd->uscsi_rqstatus == STATUS_GOOD) && 20367 (scsi_sense_key(sense_buf) == 20368 KEY_ILLEGAL_REQUEST)) { 20369 /* Ignore Illegal Request error */ 20370 if (cdb->cdb_un.tag&SD_SYNC_NV_BIT) { 20371 mutex_enter(SD_MUTEX(un)); 20372 un->un_f_sync_nv_supported = FALSE; 20373 mutex_exit(SD_MUTEX(un)); 20374 status = 0; 20375 SD_TRACE(SD_LOG_IO, un, 20376 "un_f_sync_nv_supported \ 20377 is set to false.\n"); 20378 goto done; 20379 } 20380 20381 mutex_enter(SD_MUTEX(un)); 20382 un->un_f_sync_cache_supported = FALSE; 20383 mutex_exit(SD_MUTEX(un)); 20384 SD_TRACE(SD_LOG_IO, un, 20385 "sd_send_scsi_SYNCHRONIZE_CACHE_biodone: \ 20386 un_f_sync_cache_supported set to false \ 20387 with asc = %x, ascq = %x\n", 20388 scsi_sense_asc(sense_buf), 20389 scsi_sense_ascq(sense_buf)); 20390 status = ENOTSUP; 20391 goto done; 20392 } 20393 break; 20394 default: 20395 break; 20396 } 20397 /* FALLTHRU */ 20398 default: 20399 /* 20400 * Turn on the un_f_sync_cache_required flag 20401 * since the SYNC CACHE command failed 20402 */ 20403 mutex_enter(SD_MUTEX(un)); 20404 un->un_f_sync_cache_required = TRUE; 20405 mutex_exit(SD_MUTEX(un)); 20406 20407 /* 20408 * Don't log an error message if this device 20409 * has removable media. 20410 */ 20411 if (!un->un_f_has_removable_media) { 20412 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 20413 "SYNCHRONIZE CACHE command failed (%d)\n", status); 20414 } 20415 break; 20416 } 20417 20418 done: 20419 if (uip->ui_dkc.dkc_callback != NULL) { 20420 (*uip->ui_dkc.dkc_callback)(uip->ui_dkc.dkc_cookie, status); 20421 } 20422 20423 ASSERT((bp->b_flags & B_REMAPPED) == 0); 20424 freerbuf(bp); 20425 kmem_free(uip, sizeof (struct sd_uscsi_info)); 20426 kmem_free(uscmd->uscsi_rqbuf, SENSE_LENGTH); 20427 kmem_free(uscmd->uscsi_cdb, (size_t)uscmd->uscsi_cdblen); 20428 kmem_free(uscmd, sizeof (struct uscsi_cmd)); 20429 20430 return (status); 20431 } 20432 20433 20434 /* 20435 * Function: sd_send_scsi_GET_CONFIGURATION 20436 * 20437 * Description: Issues the get configuration command to the device. 20438 * Called from sd_check_for_writable_cd & sd_get_media_info 20439 * caller needs to ensure that buflen = SD_PROFILE_HEADER_LEN 20440 * Arguments: ssc 20441 * ucmdbuf 20442 * rqbuf 20443 * rqbuflen 20444 * bufaddr 20445 * buflen 20446 * path_flag 20447 * 20448 * Return Code: 0 - Success 20449 * errno return code from sd_ssc_send() 20450 * 20451 * Context: Can sleep. Does not return until command is completed. 20452 * 20453 */ 20454 20455 static int 20456 sd_send_scsi_GET_CONFIGURATION(sd_ssc_t *ssc, struct uscsi_cmd *ucmdbuf, 20457 uchar_t *rqbuf, uint_t rqbuflen, uchar_t *bufaddr, uint_t buflen, 20458 int path_flag) 20459 { 20460 char cdb[CDB_GROUP1]; 20461 int status; 20462 struct sd_lun *un; 20463 20464 ASSERT(ssc != NULL); 20465 un = ssc->ssc_un; 20466 ASSERT(un != NULL); 20467 ASSERT(!mutex_owned(SD_MUTEX(un))); 20468 ASSERT(bufaddr != NULL); 20469 ASSERT(ucmdbuf != NULL); 20470 ASSERT(rqbuf != NULL); 20471 20472 SD_TRACE(SD_LOG_IO, un, 20473 "sd_send_scsi_GET_CONFIGURATION: entry: un:0x%p\n", un); 20474 20475 bzero(cdb, sizeof (cdb)); 20476 bzero(ucmdbuf, sizeof (struct uscsi_cmd)); 20477 bzero(rqbuf, rqbuflen); 20478 bzero(bufaddr, buflen); 20479 20480 /* 20481 * Set up cdb field for the get configuration command. 
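	 * As a sketch of the resulting 10-byte CDB (byte meanings per
	 * the MMC GET CONFIGURATION definition, stated as background):
	 *	cdb[0]	opcode (SCMD_GET_CONFIGURATION)
	 *	cdb[1]	"Requested Type" (RT) field = 0x02
	 *	cdb[8]	allocation length = SD_PROFILE_HEADER_LEN
	 * All other bytes remain zero from the bzero() above.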
20482 */ 20483 cdb[0] = SCMD_GET_CONFIGURATION; 20484 cdb[1] = 0x02; /* Requested Type */ 20485 cdb[8] = SD_PROFILE_HEADER_LEN; 20486 ucmdbuf->uscsi_cdb = cdb; 20487 ucmdbuf->uscsi_cdblen = CDB_GROUP1; 20488 ucmdbuf->uscsi_bufaddr = (caddr_t)bufaddr; 20489 ucmdbuf->uscsi_buflen = buflen; 20490 ucmdbuf->uscsi_timeout = sd_io_time; 20491 ucmdbuf->uscsi_rqbuf = (caddr_t)rqbuf; 20492 ucmdbuf->uscsi_rqlen = rqbuflen; 20493 ucmdbuf->uscsi_flags = USCSI_RQENABLE|USCSI_SILENT|USCSI_READ; 20494 20495 status = sd_ssc_send(ssc, ucmdbuf, FKIOCTL, 20496 UIO_SYSSPACE, path_flag); 20497 20498 switch (status) { 20499 case 0: 20500 sd_ssc_assessment(ssc, SD_FMT_STANDARD); 20501 break; /* Success! */ 20502 case EIO: 20503 switch (ucmdbuf->uscsi_status) { 20504 case STATUS_RESERVATION_CONFLICT: 20505 status = EACCES; 20506 break; 20507 default: 20508 break; 20509 } 20510 break; 20511 default: 20512 break; 20513 } 20514 20515 if (status == 0) { 20516 SD_DUMP_MEMORY(un, SD_LOG_IO, 20517 "sd_send_scsi_GET_CONFIGURATION: data", 20518 (uchar_t *)bufaddr, SD_PROFILE_HEADER_LEN, SD_LOG_HEX); 20519 } 20520 20521 SD_TRACE(SD_LOG_IO, un, 20522 "sd_send_scsi_GET_CONFIGURATION: exit\n"); 20523 20524 return (status); 20525 } 20526 20527 /* 20528 * Function: sd_send_scsi_feature_GET_CONFIGURATION 20529 * 20530 * Description: Issues the get configuration command to the device to 20531 * retrieve a specific feature. Called from 20532 * sd_check_for_writable_cd & sd_set_mmc_caps. 20533 * Arguments: ssc 20534 * ucmdbuf 20535 * rqbuf 20536 * rqbuflen 20537 * bufaddr 20538 * buflen 20539 * feature 20540 * 20541 * Return Code: 0 - Success 20542 * errno return code from sd_ssc_send() 20543 * 20544 * Context: Can sleep. Does not return until command is completed. 20545 * 20546 */ 20547 static int 20548 sd_send_scsi_feature_GET_CONFIGURATION(sd_ssc_t *ssc, 20549 struct uscsi_cmd *ucmdbuf, uchar_t *rqbuf, uint_t rqbuflen, 20550 uchar_t *bufaddr, uint_t buflen, char feature, int path_flag) 20551 { 20552 char cdb[CDB_GROUP1]; 20553 int status; 20554 struct sd_lun *un; 20555 20556 ASSERT(ssc != NULL); 20557 un = ssc->ssc_un; 20558 ASSERT(un != NULL); 20559 ASSERT(!mutex_owned(SD_MUTEX(un))); 20560 ASSERT(bufaddr != NULL); 20561 ASSERT(ucmdbuf != NULL); 20562 ASSERT(rqbuf != NULL); 20563 20564 SD_TRACE(SD_LOG_IO, un, 20565 "sd_send_scsi_feature_GET_CONFIGURATION: entry: un:0x%p\n", un); 20566 20567 bzero(cdb, sizeof (cdb)); 20568 bzero(ucmdbuf, sizeof (struct uscsi_cmd)); 20569 bzero(rqbuf, rqbuflen); 20570 bzero(bufaddr, buflen); 20571 20572 /* 20573 * Set up cdb field for the get configuration command. 20574 */ 20575 cdb[0] = SCMD_GET_CONFIGURATION; 20576 cdb[1] = 0x02; /* Requested Type */ 20577 cdb[3] = feature; 20578 cdb[8] = buflen; 20579 ucmdbuf->uscsi_cdb = cdb; 20580 ucmdbuf->uscsi_cdblen = CDB_GROUP1; 20581 ucmdbuf->uscsi_bufaddr = (caddr_t)bufaddr; 20582 ucmdbuf->uscsi_buflen = buflen; 20583 ucmdbuf->uscsi_timeout = sd_io_time; 20584 ucmdbuf->uscsi_rqbuf = (caddr_t)rqbuf; 20585 ucmdbuf->uscsi_rqlen = rqbuflen; 20586 ucmdbuf->uscsi_flags = USCSI_RQENABLE|USCSI_SILENT|USCSI_READ; 20587 20588 status = sd_ssc_send(ssc, ucmdbuf, FKIOCTL, 20589 UIO_SYSSPACE, path_flag); 20590 20591 switch (status) { 20592 case 0: 20593 20594 break; /* Success! 
*/ 20595 case EIO: 20596 switch (ucmdbuf->uscsi_status) { 20597 case STATUS_RESERVATION_CONFLICT: 20598 status = EACCES; 20599 break; 20600 default: 20601 break; 20602 } 20603 break; 20604 default: 20605 break; 20606 } 20607 20608 if (status == 0) { 20609 SD_DUMP_MEMORY(un, SD_LOG_IO, 20610 "sd_send_scsi_feature_GET_CONFIGURATION: data", 20611 (uchar_t *)bufaddr, SD_PROFILE_HEADER_LEN, SD_LOG_HEX); 20612 } 20613 20614 SD_TRACE(SD_LOG_IO, un, 20615 "sd_send_scsi_feature_GET_CONFIGURATION: exit\n"); 20616 20617 return (status); 20618 } 20619 20620 20621 /* 20622 * Function: sd_send_scsi_MODE_SENSE 20623 * 20624 * Description: Utility function for issuing a scsi MODE SENSE command. 20625 * Note: This routine uses a consistent implementation for Group0, 20626 * Group1, and Group2 commands across all platforms. ATAPI devices 20627 * use Group 1 Read/Write commands and Group 2 Mode Sense/Select 20628 * 20629 * Arguments: ssc - ssc contains pointer to driver soft state (unit) 20630 * structure for this target. 20631 * cdbsize - size CDB to be used (CDB_GROUP0 (6 byte), or 20632 * CDB_GROUP[1|2] (10 byte). 20633 * bufaddr - buffer for page data retrieved from the target. 20634 * buflen - size of page to be retrieved. 20635 * page_code - page code of data to be retrieved from the target. 20636 * path_flag - SD_PATH_DIRECT to use the USCSI "direct" chain and 20637 * the normal command waitq, or SD_PATH_DIRECT_PRIORITY 20638 * to use the USCSI "direct" chain and bypass the normal 20639 * command waitq. 20640 * 20641 * Return Code: 0 - Success 20642 * errno return code from sd_ssc_send() 20643 * 20644 * Context: Can sleep. Does not return until command is completed. 20645 */ 20646 20647 static int 20648 sd_send_scsi_MODE_SENSE(sd_ssc_t *ssc, int cdbsize, uchar_t *bufaddr, 20649 size_t buflen, uchar_t page_code, int path_flag) 20650 { 20651 struct scsi_extended_sense sense_buf; 20652 union scsi_cdb cdb; 20653 struct uscsi_cmd ucmd_buf; 20654 int status; 20655 int headlen; 20656 struct sd_lun *un; 20657 20658 ASSERT(ssc != NULL); 20659 un = ssc->ssc_un; 20660 ASSERT(un != NULL); 20661 ASSERT(!mutex_owned(SD_MUTEX(un))); 20662 ASSERT(bufaddr != NULL); 20663 ASSERT((cdbsize == CDB_GROUP0) || (cdbsize == CDB_GROUP1) || 20664 (cdbsize == CDB_GROUP2)); 20665 20666 SD_TRACE(SD_LOG_IO, un, 20667 "sd_send_scsi_MODE_SENSE: entry: un:0x%p\n", un); 20668 20669 bzero(&cdb, sizeof (cdb)); 20670 bzero(&ucmd_buf, sizeof (ucmd_buf)); 20671 bzero(&sense_buf, sizeof (struct scsi_extended_sense)); 20672 bzero(bufaddr, buflen); 20673 20674 if (cdbsize == CDB_GROUP0) { 20675 cdb.scc_cmd = SCMD_MODE_SENSE; 20676 cdb.cdb_opaque[2] = page_code; 20677 FORMG0COUNT(&cdb, buflen); 20678 headlen = MODE_HEADER_LENGTH; 20679 } else { 20680 cdb.scc_cmd = SCMD_MODE_SENSE_G1; 20681 cdb.cdb_opaque[2] = page_code; 20682 FORMG1COUNT(&cdb, buflen); 20683 headlen = MODE_HEADER_LENGTH_GRP2; 20684 } 20685 20686 ASSERT(headlen <= buflen); 20687 SD_FILL_SCSI1_LUN_CDB(un, &cdb); 20688 20689 ucmd_buf.uscsi_cdb = (char *)&cdb; 20690 ucmd_buf.uscsi_cdblen = (uchar_t)cdbsize; 20691 ucmd_buf.uscsi_bufaddr = (caddr_t)bufaddr; 20692 ucmd_buf.uscsi_buflen = buflen; 20693 ucmd_buf.uscsi_rqbuf = (caddr_t)&sense_buf; 20694 ucmd_buf.uscsi_rqlen = sizeof (struct scsi_extended_sense); 20695 ucmd_buf.uscsi_flags = USCSI_RQENABLE | USCSI_READ | USCSI_SILENT; 20696 ucmd_buf.uscsi_timeout = 60; 20697 20698 status = sd_ssc_send(ssc, &ucmd_buf, FKIOCTL, 20699 UIO_SYSSPACE, path_flag); 20700 20701 switch (status) { 20702 case 0: 20703 /* 20704 * sr_check_wp() uses 
0x3f page code and checks the header of the
		 * mode page to determine whether the target device is
		 * write-protected. But some USB devices return 0 bytes for
		 * the 0x3f page code. For this case, make sure that at
		 * least the mode page header is returned.
		 */
		if (buflen - ucmd_buf.uscsi_resid < headlen) {
			status = EIO;
			sd_ssc_set_info(ssc, SSC_FLAGS_INVALID_DATA,
			    "mode page header is not returned");
		}
		break;	/* Success! */
	case EIO:
		switch (ucmd_buf.uscsi_status) {
		case STATUS_RESERVATION_CONFLICT:
			status = EACCES;
			break;
		default:
			break;
		}
		break;
	default:
		break;
	}

	if (status == 0) {
		SD_DUMP_MEMORY(un, SD_LOG_IO, "sd_send_scsi_MODE_SENSE: data",
		    (uchar_t *)bufaddr, buflen, SD_LOG_HEX);
	}
	SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_MODE_SENSE: exit\n");

	return (status);
}


/*
 * Function: sd_send_scsi_MODE_SELECT
 *
 * Description: Utility function for issuing a scsi MODE SELECT command.
 *		Note: This routine uses a consistent implementation for Group0,
 *		Group1, and Group2 commands across all platforms. ATAPI devices
 *		use Group 1 Read/Write commands and Group 2 Mode Sense/Select.
 *
 * Arguments: ssc - ssc contains pointer to driver soft state (unit)
 *			structure for this target.
 *		cdbsize - size of CDB to be used (CDB_GROUP0 (6 byte) or
 *			CDB_GROUP[1|2] (10 byte)).
 *		bufaddr - buffer containing the page data to be sent to
 *			the target.
 *		buflen - size of the page to be transferred.
 *		save_page - boolean to determine whether the SP (save page)
 *			bit should be set.
 *		path_flag - SD_PATH_DIRECT to use the USCSI "direct" chain and
 *			the normal command waitq, or SD_PATH_DIRECT_PRIORITY
 *			to use the USCSI "direct" chain and bypass the normal
 *			command waitq.
 *
 * Return Code: 0 - Success
 *		errno return code from sd_ssc_send()
 *
 * Context: Can sleep. Does not return until command is completed.
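 *
 * Usage sketch (hypothetical caller): writing back a page image that
 * was previously fetched with sd_send_scsi_MODE_SENSE() might look
 * like:
 *
 *	rval = sd_send_scsi_MODE_SELECT(ssc, CDB_GROUP1, mbuf, mbuflen,
 *	    SD_SAVE_PAGE, SD_PATH_STANDARD);
 *
 * where mbuf/mbuflen (names assumed) hold the mode header and page.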
 */

static int
sd_send_scsi_MODE_SELECT(sd_ssc_t *ssc, int cdbsize, uchar_t *bufaddr,
	size_t buflen, uchar_t save_page, int path_flag)
{
	struct scsi_extended_sense	sense_buf;
	union scsi_cdb		cdb;
	struct uscsi_cmd	ucmd_buf;
	int			status;
	struct sd_lun		*un;

	ASSERT(ssc != NULL);
	un = ssc->ssc_un;
	ASSERT(un != NULL);
	ASSERT(!mutex_owned(SD_MUTEX(un)));
	ASSERT(bufaddr != NULL);
	ASSERT((cdbsize == CDB_GROUP0) || (cdbsize == CDB_GROUP1) ||
	    (cdbsize == CDB_GROUP2));

	SD_TRACE(SD_LOG_IO, un,
	    "sd_send_scsi_MODE_SELECT: entry: un:0x%p\n", un);

	bzero(&cdb, sizeof (cdb));
	bzero(&ucmd_buf, sizeof (ucmd_buf));
	bzero(&sense_buf, sizeof (struct scsi_extended_sense));

	/* Set the PF bit for many third party drives */
	cdb.cdb_opaque[1] = 0x10;

	/* Set the savepage (SP) bit if given */
	if (save_page == SD_SAVE_PAGE) {
		cdb.cdb_opaque[1] |= 0x01;
	}

	if (cdbsize == CDB_GROUP0) {
		cdb.scc_cmd = SCMD_MODE_SELECT;
		FORMG0COUNT(&cdb, buflen);
	} else {
		cdb.scc_cmd = SCMD_MODE_SELECT_G1;
		FORMG1COUNT(&cdb, buflen);
	}

	SD_FILL_SCSI1_LUN_CDB(un, &cdb);

	ucmd_buf.uscsi_cdb = (char *)&cdb;
	ucmd_buf.uscsi_cdblen = (uchar_t)cdbsize;
	ucmd_buf.uscsi_bufaddr = (caddr_t)bufaddr;
	ucmd_buf.uscsi_buflen = buflen;
	ucmd_buf.uscsi_rqbuf = (caddr_t)&sense_buf;
	ucmd_buf.uscsi_rqlen = sizeof (struct scsi_extended_sense);
	ucmd_buf.uscsi_flags = USCSI_RQENABLE | USCSI_WRITE | USCSI_SILENT;
	ucmd_buf.uscsi_timeout = 60;

	status = sd_ssc_send(ssc, &ucmd_buf, FKIOCTL,
	    UIO_SYSSPACE, path_flag);

	switch (status) {
	case 0:
		sd_ssc_assessment(ssc, SD_FMT_STANDARD);
		break;	/* Success! */
	case EIO:
		switch (ucmd_buf.uscsi_status) {
		case STATUS_RESERVATION_CONFLICT:
			status = EACCES;
			break;
		default:
			break;
		}
		break;
	default:
		break;
	}

	if (status == 0) {
		SD_DUMP_MEMORY(un, SD_LOG_IO, "sd_send_scsi_MODE_SELECT: data",
		    (uchar_t *)bufaddr, buflen, SD_LOG_HEX);
	}
	SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_MODE_SELECT: exit\n");

	return (status);
}


/*
 * Function: sd_send_scsi_RDWR
 *
 * Description: Issue a scsi READ or WRITE command with the given parameters.
 *
 * Arguments: ssc - ssc contains pointer to driver soft state (unit)
 *			structure for this target.
 *		cmd:	SCMD_READ or SCMD_WRITE
 *		bufaddr: Address of caller's buffer for the RDWR data.
 *		buflen:  Length of caller's buffer for the RDWR data.
 *		start_block: Block number for the start of the RDWR operation.
 *			(Assumes target-native block size.)
 *		path_flag - SD_PATH_DIRECT to use the USCSI "direct" chain and
 *			the normal command waitq, or SD_PATH_DIRECT_PRIORITY
 *			to use the USCSI "direct" chain and bypass the normal
 *			command waitq.
 *
 * Return Code: 0 - Success
 *		errno return code from sd_ssc_send()
 *
 * Context: Can sleep. Does not return until command is completed.
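 *
 * Usage sketch (hypothetical caller): reading a single target block
 * into a scratch buffer by way of this routine:
 *
 *	rval = sd_send_scsi_RDWR(ssc, SCMD_READ, tmpbuf,
 *	    un->un_tgt_blocksize, (daddr_t)0, SD_PATH_STANDARD);
 *
 * tmpbuf (name assumed) must be at least un_tgt_blocksize bytes, and
 * un_f_tgt_blocksize_is_valid must be TRUE or EINVAL is returned.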
20870 */ 20871 20872 static int 20873 sd_send_scsi_RDWR(sd_ssc_t *ssc, uchar_t cmd, void *bufaddr, 20874 size_t buflen, daddr_t start_block, int path_flag) 20875 { 20876 struct scsi_extended_sense sense_buf; 20877 union scsi_cdb cdb; 20878 struct uscsi_cmd ucmd_buf; 20879 uint32_t block_count; 20880 int status; 20881 int cdbsize; 20882 uchar_t flag; 20883 struct sd_lun *un; 20884 20885 ASSERT(ssc != NULL); 20886 un = ssc->ssc_un; 20887 ASSERT(un != NULL); 20888 ASSERT(!mutex_owned(SD_MUTEX(un))); 20889 ASSERT(bufaddr != NULL); 20890 ASSERT((cmd == SCMD_READ) || (cmd == SCMD_WRITE)); 20891 20892 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_RDWR: entry: un:0x%p\n", un); 20893 20894 if (un->un_f_tgt_blocksize_is_valid != TRUE) { 20895 return (EINVAL); 20896 } 20897 20898 mutex_enter(SD_MUTEX(un)); 20899 block_count = SD_BYTES2TGTBLOCKS(un, buflen); 20900 mutex_exit(SD_MUTEX(un)); 20901 20902 flag = (cmd == SCMD_READ) ? USCSI_READ : USCSI_WRITE; 20903 20904 SD_INFO(SD_LOG_IO, un, "sd_send_scsi_RDWR: " 20905 "bufaddr:0x%p buflen:0x%x start_block:0x%p block_count:0x%x\n", 20906 bufaddr, buflen, start_block, block_count); 20907 20908 bzero(&cdb, sizeof (cdb)); 20909 bzero(&ucmd_buf, sizeof (ucmd_buf)); 20910 bzero(&sense_buf, sizeof (struct scsi_extended_sense)); 20911 20912 /* Compute CDB size to use */ 20913 if (start_block > 0xffffffff) 20914 cdbsize = CDB_GROUP4; 20915 else if ((start_block & 0xFFE00000) || 20916 (un->un_f_cfg_is_atapi == TRUE)) 20917 cdbsize = CDB_GROUP1; 20918 else 20919 cdbsize = CDB_GROUP0; 20920 20921 switch (cdbsize) { 20922 case CDB_GROUP0: /* 6-byte CDBs */ 20923 cdb.scc_cmd = cmd; 20924 FORMG0ADDR(&cdb, start_block); 20925 FORMG0COUNT(&cdb, block_count); 20926 break; 20927 case CDB_GROUP1: /* 10-byte CDBs */ 20928 cdb.scc_cmd = cmd | SCMD_GROUP1; 20929 FORMG1ADDR(&cdb, start_block); 20930 FORMG1COUNT(&cdb, block_count); 20931 break; 20932 case CDB_GROUP4: /* 16-byte CDBs */ 20933 cdb.scc_cmd = cmd | SCMD_GROUP4; 20934 FORMG4LONGADDR(&cdb, (uint64_t)start_block); 20935 FORMG4COUNT(&cdb, block_count); 20936 break; 20937 case CDB_GROUP5: /* 12-byte CDBs (currently unsupported) */ 20938 default: 20939 /* All others reserved */ 20940 return (EINVAL); 20941 } 20942 20943 /* Set LUN bit(s) in CDB if this is a SCSI-1 device */ 20944 SD_FILL_SCSI1_LUN_CDB(un, &cdb); 20945 20946 ucmd_buf.uscsi_cdb = (char *)&cdb; 20947 ucmd_buf.uscsi_cdblen = (uchar_t)cdbsize; 20948 ucmd_buf.uscsi_bufaddr = bufaddr; 20949 ucmd_buf.uscsi_buflen = buflen; 20950 ucmd_buf.uscsi_rqbuf = (caddr_t)&sense_buf; 20951 ucmd_buf.uscsi_rqlen = sizeof (struct scsi_extended_sense); 20952 ucmd_buf.uscsi_flags = flag | USCSI_RQENABLE | USCSI_SILENT; 20953 ucmd_buf.uscsi_timeout = 60; 20954 status = sd_ssc_send(ssc, &ucmd_buf, FKIOCTL, 20955 UIO_SYSSPACE, path_flag); 20956 20957 switch (status) { 20958 case 0: 20959 sd_ssc_assessment(ssc, SD_FMT_STANDARD); 20960 break; /* Success! */ 20961 case EIO: 20962 switch (ucmd_buf.uscsi_status) { 20963 case STATUS_RESERVATION_CONFLICT: 20964 status = EACCES; 20965 break; 20966 default: 20967 break; 20968 } 20969 break; 20970 default: 20971 break; 20972 } 20973 20974 if (status == 0) { 20975 SD_DUMP_MEMORY(un, SD_LOG_IO, "sd_send_scsi_RDWR: data", 20976 (uchar_t *)bufaddr, buflen, SD_LOG_HEX); 20977 } 20978 20979 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_RDWR: exit\n"); 20980 20981 return (status); 20982 } 20983 20984 20985 /* 20986 * Function: sd_send_scsi_LOG_SENSE 20987 * 20988 * Description: Issue a scsi LOG_SENSE command with the given parameters. 
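 *		(bufaddr/buflen describe the caller's return buffer;
 *		page_code and page_control are combined into CDB byte 2;
 *		param_ptr becomes the parameter pointer in CDB bytes 5-6;
 *		path_flag is used as in sd_send_scsi_MODE_SENSE above.)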
20989 * 20990 * Arguments: ssc - ssc contains pointer to driver soft state (unit) 20991 * structure for this target. 20992 * 20993 * Return Code: 0 - Success 20994 * errno return code from sd_ssc_send() 20995 * 20996 * Context: Can sleep. Does not return until command is completed. 20997 */ 20998 20999 static int 21000 sd_send_scsi_LOG_SENSE(sd_ssc_t *ssc, uchar_t *bufaddr, uint16_t buflen, 21001 uchar_t page_code, uchar_t page_control, uint16_t param_ptr, 21002 int path_flag) 21003 21004 { 21005 struct scsi_extended_sense sense_buf; 21006 union scsi_cdb cdb; 21007 struct uscsi_cmd ucmd_buf; 21008 int status; 21009 struct sd_lun *un; 21010 21011 ASSERT(ssc != NULL); 21012 un = ssc->ssc_un; 21013 ASSERT(un != NULL); 21014 ASSERT(!mutex_owned(SD_MUTEX(un))); 21015 21016 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_LOG_SENSE: entry: un:0x%p\n", un); 21017 21018 bzero(&cdb, sizeof (cdb)); 21019 bzero(&ucmd_buf, sizeof (ucmd_buf)); 21020 bzero(&sense_buf, sizeof (struct scsi_extended_sense)); 21021 21022 cdb.scc_cmd = SCMD_LOG_SENSE_G1; 21023 cdb.cdb_opaque[2] = (page_control << 6) | page_code; 21024 cdb.cdb_opaque[5] = (uchar_t)((param_ptr & 0xFF00) >> 8); 21025 cdb.cdb_opaque[6] = (uchar_t)(param_ptr & 0x00FF); 21026 FORMG1COUNT(&cdb, buflen); 21027 21028 ucmd_buf.uscsi_cdb = (char *)&cdb; 21029 ucmd_buf.uscsi_cdblen = CDB_GROUP1; 21030 ucmd_buf.uscsi_bufaddr = (caddr_t)bufaddr; 21031 ucmd_buf.uscsi_buflen = buflen; 21032 ucmd_buf.uscsi_rqbuf = (caddr_t)&sense_buf; 21033 ucmd_buf.uscsi_rqlen = sizeof (struct scsi_extended_sense); 21034 ucmd_buf.uscsi_flags = USCSI_RQENABLE | USCSI_READ | USCSI_SILENT; 21035 ucmd_buf.uscsi_timeout = 60; 21036 21037 status = sd_ssc_send(ssc, &ucmd_buf, FKIOCTL, 21038 UIO_SYSSPACE, path_flag); 21039 21040 switch (status) { 21041 case 0: 21042 break; 21043 case EIO: 21044 switch (ucmd_buf.uscsi_status) { 21045 case STATUS_RESERVATION_CONFLICT: 21046 status = EACCES; 21047 break; 21048 case STATUS_CHECK: 21049 if ((ucmd_buf.uscsi_rqstatus == STATUS_GOOD) && 21050 (scsi_sense_key((uint8_t *)&sense_buf) == 21051 KEY_ILLEGAL_REQUEST) && 21052 (scsi_sense_asc((uint8_t *)&sense_buf) == 0x24)) { 21053 /* 21054 * ASC 0x24: INVALID FIELD IN CDB 21055 */ 21056 switch (page_code) { 21057 case START_STOP_CYCLE_PAGE: 21058 /* 21059 * The start stop cycle counter is 21060 * implemented as page 0x31 in earlier 21061 * generation disks. In new generation 21062 * disks the start stop cycle counter is 21063 * implemented as page 0xE. To properly 21064 * handle this case if an attempt for 21065 * log page 0xE is made and fails we 21066 * will try again using page 0x31. 21067 * 21068 * Network storage BU committed to 21069 * maintain the page 0x31 for this 21070 * purpose and will not have any other 21071 * page implemented with page code 0x31 21072 * until all disks transition to the 21073 * standard page. 
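				 * In sketch form, the retry below simply
				 * rewrites the page code field of the CDB
				 * (cdb_opaque[2]) from 0x0E to the vendor
				 * unique value 0x31 and re-sends the same
				 * uscsi command once.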
21074 */ 21075 mutex_enter(SD_MUTEX(un)); 21076 un->un_start_stop_cycle_page = 21077 START_STOP_CYCLE_VU_PAGE; 21078 cdb.cdb_opaque[2] = 21079 (char)(page_control << 6) | 21080 un->un_start_stop_cycle_page; 21081 mutex_exit(SD_MUTEX(un)); 21082 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 21083 status = sd_ssc_send( 21084 ssc, &ucmd_buf, FKIOCTL, 21085 UIO_SYSSPACE, path_flag); 21086 21087 break; 21088 case TEMPERATURE_PAGE: 21089 status = ENOTTY; 21090 break; 21091 default: 21092 break; 21093 } 21094 } 21095 break; 21096 default: 21097 break; 21098 } 21099 break; 21100 default: 21101 break; 21102 } 21103 21104 if (status == 0) { 21105 sd_ssc_assessment(ssc, SD_FMT_STANDARD); 21106 SD_DUMP_MEMORY(un, SD_LOG_IO, "sd_send_scsi_LOG_SENSE: data", 21107 (uchar_t *)bufaddr, buflen, SD_LOG_HEX); 21108 } 21109 21110 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_LOG_SENSE: exit\n"); 21111 21112 return (status); 21113 } 21114 21115 21116 /* 21117 * Function: sdioctl 21118 * 21119 * Description: Driver's ioctl(9e) entry point function. 21120 * 21121 * Arguments: dev - device number 21122 * cmd - ioctl operation to be performed 21123 * arg - user argument, contains data to be set or reference 21124 * parameter for get 21125 * flag - bit flag, indicating open settings, 32/64 bit type 21126 * cred_p - user credential pointer 21127 * rval_p - calling process return value (OPT) 21128 * 21129 * Return Code: EINVAL 21130 * ENOTTY 21131 * ENXIO 21132 * EIO 21133 * EFAULT 21134 * ENOTSUP 21135 * EPERM 21136 * 21137 * Context: Called from the device switch at normal priority. 21138 */ 21139 21140 static int 21141 sdioctl(dev_t dev, int cmd, intptr_t arg, int flag, cred_t *cred_p, int *rval_p) 21142 { 21143 struct sd_lun *un = NULL; 21144 int err = 0; 21145 int i = 0; 21146 cred_t *cr; 21147 int tmprval = EINVAL; 21148 int is_valid; 21149 sd_ssc_t *ssc; 21150 21151 /* 21152 * All device accesses go thru sdstrategy where we check on suspend 21153 * status 21154 */ 21155 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 21156 return (ENXIO); 21157 } 21158 21159 ASSERT(!mutex_owned(SD_MUTEX(un))); 21160 21161 /* Initialize sd_ssc_t for internal uscsi commands */ 21162 ssc = sd_ssc_init(un); 21163 21164 is_valid = SD_IS_VALID_LABEL(un); 21165 21166 /* 21167 * Moved this wait from sd_uscsi_strategy to here for 21168 * reasons of deadlock prevention. Internal driver commands, 21169 * specifically those to change a devices power level, result 21170 * in a call to sd_uscsi_strategy. 21171 */ 21172 mutex_enter(SD_MUTEX(un)); 21173 while ((un->un_state == SD_STATE_SUSPENDED) || 21174 (un->un_state == SD_STATE_PM_CHANGING)) { 21175 cv_wait(&un->un_suspend_cv, SD_MUTEX(un)); 21176 } 21177 /* 21178 * Twiddling the counter here protects commands from now 21179 * through to the top of sd_uscsi_strategy. Without the 21180 * counter inc. a power down, for example, could get in 21181 * after the above check for state is made and before 21182 * execution gets to the top of sd_uscsi_strategy. 21183 * That would cause problems. 
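 * The pattern, in sketch form:
 *
 *	mutex_enter(SD_MUTEX(un));
 *	while (state is SUSPENDED or PM_CHANGING)
 *		cv_wait(&un->un_suspend_cv, SD_MUTEX(un));
 *	un->un_ncmds_in_driver++;	(counted under the same mutex hold)
 *	mutex_exit(SD_MUTEX(un));
 *
 * so no power transition can slip in between the state check and the
 * increment.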
21184 */ 21185 un->un_ncmds_in_driver++; 21186 21187 if (!is_valid && 21188 (flag & (FNDELAY | FNONBLOCK))) { 21189 switch (cmd) { 21190 case DKIOCGGEOM: /* SD_PATH_DIRECT */ 21191 case DKIOCGVTOC: 21192 case DKIOCGEXTVTOC: 21193 case DKIOCGAPART: 21194 case DKIOCPARTINFO: 21195 case DKIOCEXTPARTINFO: 21196 case DKIOCSGEOM: 21197 case DKIOCSAPART: 21198 case DKIOCGETEFI: 21199 case DKIOCPARTITION: 21200 case DKIOCSVTOC: 21201 case DKIOCSEXTVTOC: 21202 case DKIOCSETEFI: 21203 case DKIOCGMBOOT: 21204 case DKIOCSMBOOT: 21205 case DKIOCG_PHYGEOM: 21206 case DKIOCG_VIRTGEOM: 21207 /* let cmlb handle it */ 21208 goto skip_ready_valid; 21209 21210 case CDROMPAUSE: 21211 case CDROMRESUME: 21212 case CDROMPLAYMSF: 21213 case CDROMPLAYTRKIND: 21214 case CDROMREADTOCHDR: 21215 case CDROMREADTOCENTRY: 21216 case CDROMSTOP: 21217 case CDROMSTART: 21218 case CDROMVOLCTRL: 21219 case CDROMSUBCHNL: 21220 case CDROMREADMODE2: 21221 case CDROMREADMODE1: 21222 case CDROMREADOFFSET: 21223 case CDROMSBLKMODE: 21224 case CDROMGBLKMODE: 21225 case CDROMGDRVSPEED: 21226 case CDROMSDRVSPEED: 21227 case CDROMCDDA: 21228 case CDROMCDXA: 21229 case CDROMSUBCODE: 21230 if (!ISCD(un)) { 21231 un->un_ncmds_in_driver--; 21232 ASSERT(un->un_ncmds_in_driver >= 0); 21233 mutex_exit(SD_MUTEX(un)); 21234 err = ENOTTY; 21235 goto done_without_assess; 21236 } 21237 break; 21238 case FDEJECT: 21239 case DKIOCEJECT: 21240 case CDROMEJECT: 21241 if (!un->un_f_eject_media_supported) { 21242 un->un_ncmds_in_driver--; 21243 ASSERT(un->un_ncmds_in_driver >= 0); 21244 mutex_exit(SD_MUTEX(un)); 21245 err = ENOTTY; 21246 goto done_without_assess; 21247 } 21248 break; 21249 case DKIOCFLUSHWRITECACHE: 21250 mutex_exit(SD_MUTEX(un)); 21251 err = sd_send_scsi_TEST_UNIT_READY(ssc, 0); 21252 if (err != 0) { 21253 mutex_enter(SD_MUTEX(un)); 21254 un->un_ncmds_in_driver--; 21255 ASSERT(un->un_ncmds_in_driver >= 0); 21256 mutex_exit(SD_MUTEX(un)); 21257 err = EIO; 21258 goto done_quick_assess; 21259 } 21260 mutex_enter(SD_MUTEX(un)); 21261 /* FALLTHROUGH */ 21262 case DKIOCREMOVABLE: 21263 case DKIOCHOTPLUGGABLE: 21264 case DKIOCINFO: 21265 case DKIOCGMEDIAINFO: 21266 case MHIOCENFAILFAST: 21267 case MHIOCSTATUS: 21268 case MHIOCTKOWN: 21269 case MHIOCRELEASE: 21270 case MHIOCGRP_INKEYS: 21271 case MHIOCGRP_INRESV: 21272 case MHIOCGRP_REGISTER: 21273 case MHIOCGRP_RESERVE: 21274 case MHIOCGRP_PREEMPTANDABORT: 21275 case MHIOCGRP_REGISTERANDIGNOREKEY: 21276 case CDROMCLOSETRAY: 21277 case USCSICMD: 21278 goto skip_ready_valid; 21279 default: 21280 break; 21281 } 21282 21283 mutex_exit(SD_MUTEX(un)); 21284 err = sd_ready_and_valid(ssc, SDPART(dev)); 21285 mutex_enter(SD_MUTEX(un)); 21286 21287 if (err != SD_READY_VALID) { 21288 switch (cmd) { 21289 case DKIOCSTATE: 21290 case CDROMGDRVSPEED: 21291 case CDROMSDRVSPEED: 21292 case FDEJECT: /* for eject command */ 21293 case DKIOCEJECT: 21294 case CDROMEJECT: 21295 case DKIOCREMOVABLE: 21296 case DKIOCHOTPLUGGABLE: 21297 break; 21298 default: 21299 if (un->un_f_has_removable_media) { 21300 err = ENXIO; 21301 } else { 21302 /* Do not map SD_RESERVED_BY_OTHERS to EIO */ 21303 if (err == SD_RESERVED_BY_OTHERS) { 21304 err = EACCES; 21305 } else { 21306 err = EIO; 21307 } 21308 } 21309 un->un_ncmds_in_driver--; 21310 ASSERT(un->un_ncmds_in_driver >= 0); 21311 mutex_exit(SD_MUTEX(un)); 21312 21313 goto done_without_assess; 21314 } 21315 } 21316 } 21317 21318 skip_ready_valid: 21319 mutex_exit(SD_MUTEX(un)); 21320 21321 switch (cmd) { 21322 case DKIOCINFO: 21323 SD_TRACE(SD_LOG_IOCTL, un, "DKIOCINFO\n"); 
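		/*
		 * DKIOCINFO returns a dk_cinfo describing the controller
		 * and unit; sd_dkio_ctrl_info() fills one in and copies
		 * it out to the user address in arg, honoring the
		 * 32-/64-bit data model carried in flag.
		 */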
21324 err = sd_dkio_ctrl_info(dev, (caddr_t)arg, flag); 21325 break; 21326 21327 case DKIOCGMEDIAINFO: 21328 SD_TRACE(SD_LOG_IOCTL, un, "DKIOCGMEDIAINFO\n"); 21329 err = sd_get_media_info(dev, (caddr_t)arg, flag); 21330 break; 21331 21332 case DKIOCGGEOM: 21333 case DKIOCGVTOC: 21334 case DKIOCGEXTVTOC: 21335 case DKIOCGAPART: 21336 case DKIOCPARTINFO: 21337 case DKIOCEXTPARTINFO: 21338 case DKIOCSGEOM: 21339 case DKIOCSAPART: 21340 case DKIOCGETEFI: 21341 case DKIOCPARTITION: 21342 case DKIOCSVTOC: 21343 case DKIOCSEXTVTOC: 21344 case DKIOCSETEFI: 21345 case DKIOCGMBOOT: 21346 case DKIOCSMBOOT: 21347 case DKIOCG_PHYGEOM: 21348 case DKIOCG_VIRTGEOM: 21349 SD_TRACE(SD_LOG_IOCTL, un, "DKIOC %d\n", cmd); 21350 21351 /* TUR should spin up */ 21352 21353 if (un->un_f_has_removable_media) 21354 err = sd_send_scsi_TEST_UNIT_READY(ssc, 21355 SD_CHECK_FOR_MEDIA); 21356 21357 else 21358 err = sd_send_scsi_TEST_UNIT_READY(ssc, 0); 21359 21360 if (err != 0) 21361 goto done_with_assess; 21362 21363 err = cmlb_ioctl(un->un_cmlbhandle, dev, 21364 cmd, arg, flag, cred_p, rval_p, (void *)SD_PATH_DIRECT); 21365 21366 if ((err == 0) && 21367 ((cmd == DKIOCSETEFI) || 21368 (un->un_f_pkstats_enabled) && 21369 (cmd == DKIOCSAPART || cmd == DKIOCSVTOC))) { 21370 21371 tmprval = cmlb_validate(un->un_cmlbhandle, CMLB_SILENT, 21372 (void *)SD_PATH_DIRECT); 21373 if ((tmprval == 0) && un->un_f_pkstats_enabled) { 21374 sd_set_pstats(un); 21375 SD_TRACE(SD_LOG_IO_PARTITION, un, 21376 "sd_ioctl: un:0x%p pstats created and " 21377 "set\n", un); 21378 } 21379 } 21380 21381 if ((cmd == DKIOCSVTOC) || 21382 ((cmd == DKIOCSETEFI) && (tmprval == 0))) { 21383 21384 mutex_enter(SD_MUTEX(un)); 21385 if (un->un_f_devid_supported && 21386 (un->un_f_opt_fab_devid == TRUE)) { 21387 if (un->un_devid == NULL) { 21388 sd_register_devid(ssc, SD_DEVINFO(un), 21389 SD_TARGET_IS_UNRESERVED); 21390 } else { 21391 /* 21392 * The device id for this disk 21393 * has been fabricated. The 21394 * device id must be preserved 21395 * by writing it back out to 21396 * disk. 21397 */ 21398 if (sd_write_deviceid(ssc) != 0) { 21399 ddi_devid_free(un->un_devid); 21400 un->un_devid = NULL; 21401 } 21402 } 21403 } 21404 mutex_exit(SD_MUTEX(un)); 21405 } 21406 21407 break; 21408 21409 case DKIOCLOCK: 21410 SD_TRACE(SD_LOG_IOCTL, un, "DKIOCLOCK\n"); 21411 err = sd_send_scsi_DOORLOCK(ssc, SD_REMOVAL_PREVENT, 21412 SD_PATH_STANDARD); 21413 goto done_with_assess; 21414 21415 case DKIOCUNLOCK: 21416 SD_TRACE(SD_LOG_IOCTL, un, "DKIOCUNLOCK\n"); 21417 err = sd_send_scsi_DOORLOCK(ssc, SD_REMOVAL_ALLOW, 21418 SD_PATH_STANDARD); 21419 goto done_with_assess; 21420 21421 case DKIOCSTATE: { 21422 enum dkio_state state; 21423 SD_TRACE(SD_LOG_IOCTL, un, "DKIOCSTATE\n"); 21424 21425 if (ddi_copyin((void *)arg, &state, sizeof (int), flag) != 0) { 21426 err = EFAULT; 21427 } else { 21428 err = sd_check_media(dev, state); 21429 if (err == 0) { 21430 if (ddi_copyout(&un->un_mediastate, (void *)arg, 21431 sizeof (int), flag) != 0) 21432 err = EFAULT; 21433 } 21434 } 21435 break; 21436 } 21437 21438 case DKIOCREMOVABLE: 21439 SD_TRACE(SD_LOG_IOCTL, un, "DKIOCREMOVABLE\n"); 21440 i = un->un_f_has_removable_media ? 1 : 0; 21441 if (ddi_copyout(&i, (void *)arg, sizeof (int), flag) != 0) { 21442 err = EFAULT; 21443 } else { 21444 err = 0; 21445 } 21446 break; 21447 21448 case DKIOCHOTPLUGGABLE: 21449 SD_TRACE(SD_LOG_IOCTL, un, "DKIOCHOTPLUGGABLE\n"); 21450 i = un->un_f_is_hotpluggable ? 
1 : 0; 21451 if (ddi_copyout(&i, (void *)arg, sizeof (int), flag) != 0) { 21452 err = EFAULT; 21453 } else { 21454 err = 0; 21455 } 21456 break; 21457 21458 case DKIOCGTEMPERATURE: 21459 SD_TRACE(SD_LOG_IOCTL, un, "DKIOCGTEMPERATURE\n"); 21460 err = sd_dkio_get_temp(dev, (caddr_t)arg, flag); 21461 break; 21462 21463 case MHIOCENFAILFAST: 21464 SD_TRACE(SD_LOG_IOCTL, un, "MHIOCENFAILFAST\n"); 21465 if ((err = drv_priv(cred_p)) == 0) { 21466 err = sd_mhdioc_failfast(dev, (caddr_t)arg, flag); 21467 } 21468 break; 21469 21470 case MHIOCTKOWN: 21471 SD_TRACE(SD_LOG_IOCTL, un, "MHIOCTKOWN\n"); 21472 if ((err = drv_priv(cred_p)) == 0) { 21473 err = sd_mhdioc_takeown(dev, (caddr_t)arg, flag); 21474 } 21475 break; 21476 21477 case MHIOCRELEASE: 21478 SD_TRACE(SD_LOG_IOCTL, un, "MHIOCRELEASE\n"); 21479 if ((err = drv_priv(cred_p)) == 0) { 21480 err = sd_mhdioc_release(dev); 21481 } 21482 break; 21483 21484 case MHIOCSTATUS: 21485 SD_TRACE(SD_LOG_IOCTL, un, "MHIOCSTATUS\n"); 21486 if ((err = drv_priv(cred_p)) == 0) { 21487 switch (sd_send_scsi_TEST_UNIT_READY(ssc, 0)) { 21488 case 0: 21489 err = 0; 21490 break; 21491 case EACCES: 21492 *rval_p = 1; 21493 err = 0; 21494 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 21495 break; 21496 default: 21497 err = EIO; 21498 goto done_with_assess; 21499 } 21500 } 21501 break; 21502 21503 case MHIOCQRESERVE: 21504 SD_TRACE(SD_LOG_IOCTL, un, "MHIOCQRESERVE\n"); 21505 if ((err = drv_priv(cred_p)) == 0) { 21506 err = sd_reserve_release(dev, SD_RESERVE); 21507 } 21508 break; 21509 21510 case MHIOCREREGISTERDEVID: 21511 SD_TRACE(SD_LOG_IOCTL, un, "MHIOCREREGISTERDEVID\n"); 21512 if (drv_priv(cred_p) == EPERM) { 21513 err = EPERM; 21514 } else if (!un->un_f_devid_supported) { 21515 err = ENOTTY; 21516 } else { 21517 err = sd_mhdioc_register_devid(dev); 21518 } 21519 break; 21520 21521 case MHIOCGRP_INKEYS: 21522 SD_TRACE(SD_LOG_IOCTL, un, "MHIOCGRP_INKEYS\n"); 21523 if (((err = drv_priv(cred_p)) != EPERM) && arg != NULL) { 21524 if (un->un_reservation_type == SD_SCSI2_RESERVATION) { 21525 err = ENOTSUP; 21526 } else { 21527 err = sd_mhdioc_inkeys(dev, (caddr_t)arg, 21528 flag); 21529 } 21530 } 21531 break; 21532 21533 case MHIOCGRP_INRESV: 21534 SD_TRACE(SD_LOG_IOCTL, un, "MHIOCGRP_INRESV\n"); 21535 if (((err = drv_priv(cred_p)) != EPERM) && arg != NULL) { 21536 if (un->un_reservation_type == SD_SCSI2_RESERVATION) { 21537 err = ENOTSUP; 21538 } else { 21539 err = sd_mhdioc_inresv(dev, (caddr_t)arg, flag); 21540 } 21541 } 21542 break; 21543 21544 case MHIOCGRP_REGISTER: 21545 SD_TRACE(SD_LOG_IOCTL, un, "MHIOCGRP_REGISTER\n"); 21546 if ((err = drv_priv(cred_p)) != EPERM) { 21547 if (un->un_reservation_type == SD_SCSI2_RESERVATION) { 21548 err = ENOTSUP; 21549 } else if (arg != NULL) { 21550 mhioc_register_t reg; 21551 if (ddi_copyin((void *)arg, ®, 21552 sizeof (mhioc_register_t), flag) != 0) { 21553 err = EFAULT; 21554 } else { 21555 err = 21556 sd_send_scsi_PERSISTENT_RESERVE_OUT( 21557 ssc, SD_SCSI3_REGISTER, 21558 (uchar_t *)®); 21559 if (err != 0) 21560 goto done_with_assess; 21561 } 21562 } 21563 } 21564 break; 21565 21566 case MHIOCGRP_RESERVE: 21567 SD_TRACE(SD_LOG_IOCTL, un, "MHIOCGRP_RESERVE\n"); 21568 if ((err = drv_priv(cred_p)) != EPERM) { 21569 if (un->un_reservation_type == SD_SCSI2_RESERVATION) { 21570 err = ENOTSUP; 21571 } else if (arg != NULL) { 21572 mhioc_resv_desc_t resv_desc; 21573 if (ddi_copyin((void *)arg, &resv_desc, 21574 sizeof (mhioc_resv_desc_t), flag) != 0) { 21575 err = EFAULT; 21576 } else { 21577 err = 21578 
sd_send_scsi_PERSISTENT_RESERVE_OUT( 21579 ssc, SD_SCSI3_RESERVE, 21580 (uchar_t *)&resv_desc); 21581 if (err != 0) 21582 goto done_with_assess; 21583 } 21584 } 21585 } 21586 break; 21587 21588 case MHIOCGRP_PREEMPTANDABORT: 21589 SD_TRACE(SD_LOG_IOCTL, un, "MHIOCGRP_PREEMPTANDABORT\n"); 21590 if ((err = drv_priv(cred_p)) != EPERM) { 21591 if (un->un_reservation_type == SD_SCSI2_RESERVATION) { 21592 err = ENOTSUP; 21593 } else if (arg != NULL) { 21594 mhioc_preemptandabort_t preempt_abort; 21595 if (ddi_copyin((void *)arg, &preempt_abort, 21596 sizeof (mhioc_preemptandabort_t), 21597 flag) != 0) { 21598 err = EFAULT; 21599 } else { 21600 err = 21601 sd_send_scsi_PERSISTENT_RESERVE_OUT( 21602 ssc, SD_SCSI3_PREEMPTANDABORT, 21603 (uchar_t *)&preempt_abort); 21604 if (err != 0) 21605 goto done_with_assess; 21606 } 21607 } 21608 } 21609 break; 21610 21611 case MHIOCGRP_REGISTERANDIGNOREKEY: 21612 SD_TRACE(SD_LOG_IOCTL, un, "MHIOCGRP_REGISTERANDIGNOREKEY\n"); 21613 if ((err = drv_priv(cred_p)) != EPERM) { 21614 if (un->un_reservation_type == SD_SCSI2_RESERVATION) { 21615 err = ENOTSUP; 21616 } else if (arg != NULL) { 21617 mhioc_registerandignorekey_t r_and_i; 21618 if (ddi_copyin((void *)arg, (void *)&r_and_i, 21619 sizeof (mhioc_registerandignorekey_t), 21620 flag) != 0) { 21621 err = EFAULT; 21622 } else { 21623 err = 21624 sd_send_scsi_PERSISTENT_RESERVE_OUT( 21625 ssc, SD_SCSI3_REGISTERANDIGNOREKEY, 21626 (uchar_t *)&r_and_i); 21627 if (err != 0) 21628 goto done_with_assess; 21629 } 21630 } 21631 } 21632 break; 21633 21634 case USCSICMD: 21635 SD_TRACE(SD_LOG_IOCTL, un, "USCSICMD\n"); 21636 cr = ddi_get_cred(); 21637 if ((drv_priv(cred_p) != 0) && (drv_priv(cr) != 0)) { 21638 err = EPERM; 21639 } else { 21640 enum uio_seg uioseg; 21641 21642 uioseg = (flag & FKIOCTL) ? 
UIO_SYSSPACE : 21643 UIO_USERSPACE; 21644 if (un->un_f_format_in_progress == TRUE) { 21645 err = EAGAIN; 21646 break; 21647 } 21648 21649 err = sd_ssc_send(ssc, 21650 (struct uscsi_cmd *)arg, 21651 flag, uioseg, SD_PATH_STANDARD); 21652 if (err != 0) 21653 goto done_with_assess; 21654 else 21655 sd_ssc_assessment(ssc, SD_FMT_STANDARD); 21656 } 21657 break; 21658 21659 case CDROMPAUSE: 21660 case CDROMRESUME: 21661 SD_TRACE(SD_LOG_IOCTL, un, "PAUSE-RESUME\n"); 21662 if (!ISCD(un)) { 21663 err = ENOTTY; 21664 } else { 21665 err = sr_pause_resume(dev, cmd); 21666 } 21667 break; 21668 21669 case CDROMPLAYMSF: 21670 SD_TRACE(SD_LOG_IOCTL, un, "CDROMPLAYMSF\n"); 21671 if (!ISCD(un)) { 21672 err = ENOTTY; 21673 } else { 21674 err = sr_play_msf(dev, (caddr_t)arg, flag); 21675 } 21676 break; 21677 21678 case CDROMPLAYTRKIND: 21679 SD_TRACE(SD_LOG_IOCTL, un, "CDROMPLAYTRKIND\n"); 21680 #if defined(__i386) || defined(__amd64) 21681 /* 21682 * not supported on ATAPI CD drives, use CDROMPLAYMSF instead 21683 */ 21684 if (!ISCD(un) || (un->un_f_cfg_is_atapi == TRUE)) { 21685 #else 21686 if (!ISCD(un)) { 21687 #endif 21688 err = ENOTTY; 21689 } else { 21690 err = sr_play_trkind(dev, (caddr_t)arg, flag); 21691 } 21692 break; 21693 21694 case CDROMREADTOCHDR: 21695 SD_TRACE(SD_LOG_IOCTL, un, "CDROMREADTOCHDR\n"); 21696 if (!ISCD(un)) { 21697 err = ENOTTY; 21698 } else { 21699 err = sr_read_tochdr(dev, (caddr_t)arg, flag); 21700 } 21701 break; 21702 21703 case CDROMREADTOCENTRY: 21704 SD_TRACE(SD_LOG_IOCTL, un, "CDROMREADTOCENTRY\n"); 21705 if (!ISCD(un)) { 21706 err = ENOTTY; 21707 } else { 21708 err = sr_read_tocentry(dev, (caddr_t)arg, flag); 21709 } 21710 break; 21711 21712 case CDROMSTOP: 21713 SD_TRACE(SD_LOG_IOCTL, un, "CDROMSTOP\n"); 21714 if (!ISCD(un)) { 21715 err = ENOTTY; 21716 } else { 21717 err = sd_send_scsi_START_STOP_UNIT(ssc, SD_TARGET_STOP, 21718 SD_PATH_STANDARD); 21719 goto done_with_assess; 21720 } 21721 break; 21722 21723 case CDROMSTART: 21724 SD_TRACE(SD_LOG_IOCTL, un, "CDROMSTART\n"); 21725 if (!ISCD(un)) { 21726 err = ENOTTY; 21727 } else { 21728 err = sd_send_scsi_START_STOP_UNIT(ssc, SD_TARGET_START, 21729 SD_PATH_STANDARD); 21730 goto done_with_assess; 21731 } 21732 break; 21733 21734 case CDROMCLOSETRAY: 21735 SD_TRACE(SD_LOG_IOCTL, un, "CDROMCLOSETRAY\n"); 21736 if (!ISCD(un)) { 21737 err = ENOTTY; 21738 } else { 21739 err = sd_send_scsi_START_STOP_UNIT(ssc, SD_TARGET_CLOSE, 21740 SD_PATH_STANDARD); 21741 goto done_with_assess; 21742 } 21743 break; 21744 21745 case FDEJECT: /* for eject command */ 21746 case DKIOCEJECT: 21747 case CDROMEJECT: 21748 SD_TRACE(SD_LOG_IOCTL, un, "EJECT\n"); 21749 if (!un->un_f_eject_media_supported) { 21750 err = ENOTTY; 21751 } else { 21752 err = sr_eject(dev); 21753 } 21754 break; 21755 21756 case CDROMVOLCTRL: 21757 SD_TRACE(SD_LOG_IOCTL, un, "CDROMVOLCTRL\n"); 21758 if (!ISCD(un)) { 21759 err = ENOTTY; 21760 } else { 21761 err = sr_volume_ctrl(dev, (caddr_t)arg, flag); 21762 } 21763 break; 21764 21765 case CDROMSUBCHNL: 21766 SD_TRACE(SD_LOG_IOCTL, un, "CDROMSUBCHNL\n"); 21767 if (!ISCD(un)) { 21768 err = ENOTTY; 21769 } else { 21770 err = sr_read_subchannel(dev, (caddr_t)arg, flag); 21771 } 21772 break; 21773 21774 case CDROMREADMODE2: 21775 SD_TRACE(SD_LOG_IOCTL, un, "CDROMREADMODE2\n"); 21776 if (!ISCD(un)) { 21777 err = ENOTTY; 21778 } else if (un->un_f_cfg_is_atapi == TRUE) { 21779 /* 21780 * If the drive supports READ CD, use that instead of 21781 * switching the LBA size via a MODE SELECT 21782 * Block Descriptor 21783 */ 21784 err = 
sr_read_cd_mode2(dev, (caddr_t)arg, flag);
		} else {
			err = sr_read_mode2(dev, (caddr_t)arg, flag);
		}
		break;

	case CDROMREADMODE1:
		SD_TRACE(SD_LOG_IOCTL, un, "CDROMREADMODE1\n");
		if (!ISCD(un)) {
			err = ENOTTY;
		} else {
			err = sr_read_mode1(dev, (caddr_t)arg, flag);
		}
		break;

	case CDROMREADOFFSET:
		SD_TRACE(SD_LOG_IOCTL, un, "CDROMREADOFFSET\n");
		if (!ISCD(un)) {
			err = ENOTTY;
		} else {
			err = sr_read_sony_session_offset(dev, (caddr_t)arg,
			    flag);
		}
		break;

	case CDROMSBLKMODE:
		SD_TRACE(SD_LOG_IOCTL, un, "CDROMSBLKMODE\n");
		/*
		 * There is no means of changing the block size on ATAPI
		 * drives, so return ENOTTY if the drive type is ATAPI.
		 */
		if (!ISCD(un) || (un->un_f_cfg_is_atapi == TRUE)) {
			err = ENOTTY;
		} else if (un->un_f_mmc_cap == TRUE) {

			/*
			 * MMC devices do not support changing the
			 * logical block size.
			 *
			 * Note: EINVAL is being returned instead of ENOTTY to
			 * maintain consistency with the original mmc
			 * driver update.
			 */
			err = EINVAL;
		} else {
			mutex_enter(SD_MUTEX(un));
			if ((!(un->un_exclopen & (1 << SDPART(dev)))) ||
			    (un->un_ncmds_in_transport > 0)) {
				mutex_exit(SD_MUTEX(un));
				err = EINVAL;
			} else {
				mutex_exit(SD_MUTEX(un));
				err = sr_change_blkmode(dev, cmd, arg, flag);
			}
		}
		break;

	case CDROMGBLKMODE:
		SD_TRACE(SD_LOG_IOCTL, un, "CDROMGBLKMODE\n");
		if (!ISCD(un)) {
			err = ENOTTY;
		} else if ((un->un_f_cfg_is_atapi != FALSE) &&
		    (un->un_f_blockcount_is_valid != FALSE)) {
			/*
			 * Drive is an ATAPI drive so return the target block
			 * size for ATAPI drives since we cannot change the
			 * blocksize on ATAPI drives. Used primarily to detect
			 * if an ATAPI cdrom is present.
			 */
			if (ddi_copyout(&un->un_tgt_blocksize, (void *)arg,
			    sizeof (int), flag) != 0) {
				err = EFAULT;
			} else {
				err = 0;
			}

		} else {
			/*
			 * Drive supports changing block sizes via a Mode
			 * Select.
			 */
			err = sr_change_blkmode(dev, cmd, arg, flag);
		}
		break;

	case CDROMGDRVSPEED:
	case CDROMSDRVSPEED:
		SD_TRACE(SD_LOG_IOCTL, un, "CDROMXDRVSPEED\n");
		if (!ISCD(un)) {
			err = ENOTTY;
		} else if (un->un_f_mmc_cap == TRUE) {
			/*
			 * Note: In the future the driver implementation for
			 * getting and setting cd speed should entail:
			 * 1) If non-mmc try the Toshiba mode page
			 *    (sr_change_speed)
			 * 2) If mmc but no support for Real Time Streaming try
			 *    the SET CD SPEED (0xBB) command
			 *    (sr_atapi_change_speed)
			 * 3) If mmc and support for Real Time Streaming
			 *    try the GET PERFORMANCE and SET STREAMING
			 *    commands (not yet implemented, 4380808)
			 */
			/*
			 * As per recent MMC spec, CD-ROM speed is variable
			 * and changes with LBA. Since there is no such
			 * thing as drive speed now, fail this ioctl.
			 *
			 * Note: EINVAL is returned for consistency with the
			 * original implementation, which included support for
			 * getting the drive speed of mmc devices but not
			 * setting the drive speed. Thus EINVAL would be
			 * returned if a set request was made for an mmc
			 * device.
21898 * We no longer support get or set speed for 21899 * mmc but need to remain consistent with regard 21900 * to the error code returned. 21901 */ 21902 err = EINVAL; 21903 } else if (un->un_f_cfg_is_atapi == TRUE) { 21904 err = sr_atapi_change_speed(dev, cmd, arg, flag); 21905 } else { 21906 err = sr_change_speed(dev, cmd, arg, flag); 21907 } 21908 break; 21909 21910 case CDROMCDDA: 21911 SD_TRACE(SD_LOG_IOCTL, un, "CDROMCDDA\n"); 21912 if (!ISCD(un)) { 21913 err = ENOTTY; 21914 } else { 21915 err = sr_read_cdda(dev, (void *)arg, flag); 21916 } 21917 break; 21918 21919 case CDROMCDXA: 21920 SD_TRACE(SD_LOG_IOCTL, un, "CDROMCDXA\n"); 21921 if (!ISCD(un)) { 21922 err = ENOTTY; 21923 } else { 21924 err = sr_read_cdxa(dev, (caddr_t)arg, flag); 21925 } 21926 break; 21927 21928 case CDROMSUBCODE: 21929 SD_TRACE(SD_LOG_IOCTL, un, "CDROMSUBCODE\n"); 21930 if (!ISCD(un)) { 21931 err = ENOTTY; 21932 } else { 21933 err = sr_read_all_subcodes(dev, (caddr_t)arg, flag); 21934 } 21935 break; 21936 21937 21938 #ifdef SDDEBUG 21939 /* RESET/ABORTS testing ioctls */ 21940 case DKIOCRESET: { 21941 int reset_level; 21942 21943 if (ddi_copyin((void *)arg, &reset_level, sizeof (int), flag)) { 21944 err = EFAULT; 21945 } else { 21946 SD_INFO(SD_LOG_IOCTL, un, "sdioctl: DKIOCRESET: " 21947 "reset_level = 0x%x\n", reset_level); 21948 if (scsi_reset(SD_ADDRESS(un), reset_level)) { 21949 err = 0; 21950 } else { 21951 err = EIO; 21952 } 21953 } 21954 break; 21955 } 21956 21957 case DKIOCABORT: 21958 SD_INFO(SD_LOG_IOCTL, un, "sdioctl: DKIOCABORT:\n"); 21959 if (scsi_abort(SD_ADDRESS(un), NULL)) { 21960 err = 0; 21961 } else { 21962 err = EIO; 21963 } 21964 break; 21965 #endif 21966 21967 #ifdef SD_FAULT_INJECTION 21968 /* SDIOC FaultInjection testing ioctls */ 21969 case SDIOCSTART: 21970 case SDIOCSTOP: 21971 case SDIOCINSERTPKT: 21972 case SDIOCINSERTXB: 21973 case SDIOCINSERTUN: 21974 case SDIOCINSERTARQ: 21975 case SDIOCPUSH: 21976 case SDIOCRETRIEVE: 21977 case SDIOCRUN: 21978 SD_INFO(SD_LOG_SDTEST, un, "sdioctl: " 21979 "SDIOC detected cmd:0x%X:\n", cmd); 21980 /* call error generator */ 21981 sd_faultinjection_ioctl(cmd, arg, un); 21982 err = 0; 21983 break; 21984 21985 #endif /* SD_FAULT_INJECTION */ 21986 21987 case DKIOCFLUSHWRITECACHE: 21988 { 21989 struct dk_callback *dkc = (struct dk_callback *)arg; 21990 21991 mutex_enter(SD_MUTEX(un)); 21992 if (!un->un_f_sync_cache_supported || 21993 !un->un_f_write_cache_enabled) { 21994 err = un->un_f_sync_cache_supported ? 21995 0 : ENOTSUP; 21996 mutex_exit(SD_MUTEX(un)); 21997 if ((flag & FKIOCTL) && dkc != NULL && 21998 dkc->dkc_callback != NULL) { 21999 (*dkc->dkc_callback)(dkc->dkc_cookie, 22000 err); 22001 /* 22002 * Did the callback and reported the error. 22003 * Since we did a callback, the ioctl 22004 * should return 0.
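 *
 * Illustrative sketch (not part of this driver): a kernel client
 * holding an LDI handle could request the asynchronous form of this
 * flush roughly as follows; the callback, cookie and handle names
 * here are hypothetical.
 *
 *	static void
 *	my_flush_done(void *cookie, int error)
 *	{
 *		... record completion of the cache flush ...
 *	}
 *
 *	struct dk_callback dkc;
 *	int rv;
 *
 *	dkc.dkc_callback = my_flush_done;
 *	dkc.dkc_cookie = my_cookie;
 *	(void) ldi_ioctl(lh, DKIOCFLUSHWRITECACHE, (intptr_t)&dkc,
 *	    FKIOCTL | FWRITE, kcred, &rv);
 *
 * Without FKIOCTL, or with no callback supplied, the request is
 * handled synchronously, as in the code below.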
22005 */ 22006 err = 0; 22007 } 22008 break; 22009 } 22010 mutex_exit(SD_MUTEX(un)); 22011 22012 if ((flag & FKIOCTL) && dkc != NULL && 22013 dkc->dkc_callback != NULL) { 22014 /* async SYNC CACHE request */ 22015 err = sd_send_scsi_SYNCHRONIZE_CACHE(un, dkc); 22016 } else { 22017 /* synchronous SYNC CACHE request */ 22018 err = sd_send_scsi_SYNCHRONIZE_CACHE(un, NULL); 22019 } 22020 } 22021 break; 22022 22023 case DKIOCGETWCE: { 22024 22025 int wce; 22026 22027 if ((err = sd_get_write_cache_enabled(ssc, &wce)) != 0) { 22028 break; 22029 } 22030 22031 if (ddi_copyout(&wce, (void *)arg, sizeof (wce), flag)) { 22032 err = EFAULT; 22033 } 22034 break; 22035 } 22036 22037 case DKIOCSETWCE: { 22038 22039 int wce, sync_supported; 22040 22041 if (ddi_copyin((void *)arg, &wce, sizeof (wce), flag)) { 22042 err = EFAULT; 22043 break; 22044 } 22045 22046 /* 22047 * Synchronize multiple threads trying to enable 22048 * or disable the cache via the un_f_wcc_cv 22049 * condition variable. 22050 */ 22051 mutex_enter(SD_MUTEX(un)); 22052 22053 /* 22054 * Don't allow the cache to be enabled if the 22055 * config file has it disabled. 22056 */ 22057 if (un->un_f_opt_disable_cache && wce) { 22058 mutex_exit(SD_MUTEX(un)); 22059 err = EINVAL; 22060 break; 22061 } 22062 22063 /* 22064 * Wait for write cache change in progress 22065 * bit to be clear before proceeding. 22066 */ 22067 while (un->un_f_wcc_inprog) 22068 cv_wait(&un->un_wcc_cv, SD_MUTEX(un)); 22069 22070 un->un_f_wcc_inprog = 1; 22071 22072 if (un->un_f_write_cache_enabled && wce == 0) { 22073 /* 22074 * Disable the write cache. Don't clear 22075 * un_f_write_cache_enabled until after 22076 * the mode select and flush are complete. 22077 */ 22078 sync_supported = un->un_f_sync_cache_supported; 22079 22080 /* 22081 * If cache flush is suppressed, we assume that the 22082 * controller firmware will take care of managing the 22083 * write cache for us: no need to explicitly 22084 * disable it. 22085 */ 22086 if (!un->un_f_suppress_cache_flush) { 22087 mutex_exit(SD_MUTEX(un)); 22088 if ((err = sd_cache_control(ssc, 22089 SD_CACHE_NOCHANGE, 22090 SD_CACHE_DISABLE)) == 0 && 22091 sync_supported) { 22092 err = sd_send_scsi_SYNCHRONIZE_CACHE(un, 22093 NULL); 22094 } 22095 } else { 22096 mutex_exit(SD_MUTEX(un)); 22097 } 22098 22099 mutex_enter(SD_MUTEX(un)); 22100 if (err == 0) { 22101 un->un_f_write_cache_enabled = 0; 22102 } 22103 22104 } else if (!un->un_f_write_cache_enabled && wce != 0) { 22105 /* 22106 * Set un_f_write_cache_enabled first, so there is 22107 * no window where the cache is enabled, but the 22108 * bit says it isn't. 22109 */ 22110 un->un_f_write_cache_enabled = 1; 22111 22112 /* 22113 * If cache flush is suppressed, we assume that the 22114 * controller firmware will take care of managing the 22115 * write cache for us: no need to explicitly 22116 * enable it. 
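 *
 * From user level, toggling the cache is just an int passed by
 * reference (an illustrative sketch; fd is assumed open on the
 * raw device):
 *
 *	int wce = 1;
 *	if (ioctl(fd, DKIOCSETWCE, &wce) != 0)
 *		... request failed, e.g. EINVAL when enabling is
 *		    disallowed by the config file, as checked above ...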
22117 */ 22118 if (!un->un_f_suppress_cache_flush) { 22119 mutex_exit(SD_MUTEX(un)); 22120 err = sd_cache_control(ssc, SD_CACHE_NOCHANGE, 22121 SD_CACHE_ENABLE); 22122 } else { 22123 mutex_exit(SD_MUTEX(un)); 22124 } 22125 22126 mutex_enter(SD_MUTEX(un)); 22127 22128 if (err) { 22129 un->un_f_write_cache_enabled = 0; 22130 } 22131 } 22132 22133 un->un_f_wcc_inprog = 0; 22134 cv_broadcast(&un->un_wcc_cv); 22135 mutex_exit(SD_MUTEX(un)); 22136 break; 22137 } 22138 22139 default: 22140 err = ENOTTY; 22141 break; 22142 } 22143 mutex_enter(SD_MUTEX(un)); 22144 un->un_ncmds_in_driver--; 22145 ASSERT(un->un_ncmds_in_driver >= 0); 22146 mutex_exit(SD_MUTEX(un)); 22147 22148 22149 done_without_assess: 22150 sd_ssc_fini(ssc); 22151 22152 SD_TRACE(SD_LOG_IOCTL, un, "sdioctl: exit: %d\n", err); 22153 return (err); 22154 22155 done_with_assess: 22156 mutex_enter(SD_MUTEX(un)); 22157 un->un_ncmds_in_driver--; 22158 ASSERT(un->un_ncmds_in_driver >= 0); 22159 mutex_exit(SD_MUTEX(un)); 22160 22161 done_quick_assess: 22162 if (err != 0) 22163 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 22164 /* Uninitialize sd_ssc_t pointer */ 22165 sd_ssc_fini(ssc); 22166 22167 SD_TRACE(SD_LOG_IOCTL, un, "sdioctl: exit: %d\n", err); 22168 return (err); 22169 } 22170 22171 22172 /* 22173 * Function: sd_dkio_ctrl_info 22174 * 22175 * Description: This routine is the driver entry point for handling controller 22176 * information ioctl requests (DKIOCINFO). 22177 * 22178 * Arguments: dev - the device number 22179 * arg - pointer to user provided dk_cinfo structure 22180 * specifying the controller type and attributes. 22181 * flag - this argument is a pass through to ddi_copyxxx() 22182 * directly from the mode argument of ioctl(). 22183 * 22184 * Return Code: 0 22185 * EFAULT 22186 * ENXIO 22187 */ 22188 22189 static int 22190 sd_dkio_ctrl_info(dev_t dev, caddr_t arg, int flag) 22191 { 22192 struct sd_lun *un = NULL; 22193 struct dk_cinfo *info; 22194 dev_info_t *pdip; 22195 int lun, tgt; 22196 22197 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 22198 return (ENXIO); 22199 } 22200 22201 info = (struct dk_cinfo *) 22202 kmem_zalloc(sizeof (struct dk_cinfo), KM_SLEEP); 22203 22204 switch (un->un_ctype) { 22205 case CTYPE_CDROM: 22206 info->dki_ctype = DKC_CDROM; 22207 break; 22208 default: 22209 info->dki_ctype = DKC_SCSI_CCS; 22210 break; 22211 } 22212 pdip = ddi_get_parent(SD_DEVINFO(un)); 22213 info->dki_cnum = ddi_get_instance(pdip); 22214 if (strlen(ddi_get_name(pdip)) < DK_DEVLEN) { 22215 (void) strcpy(info->dki_cname, ddi_get_name(pdip)); 22216 } else { 22217 (void) strncpy(info->dki_cname, ddi_node_name(pdip), 22218 DK_DEVLEN - 1); 22219 } 22220 22221 lun = ddi_prop_get_int(DDI_DEV_T_ANY, SD_DEVINFO(un), 22222 DDI_PROP_DONTPASS, SCSI_ADDR_PROP_LUN, 0); 22223 tgt = ddi_prop_get_int(DDI_DEV_T_ANY, SD_DEVINFO(un), 22224 DDI_PROP_DONTPASS, SCSI_ADDR_PROP_TARGET, 0); 22225 22226 /* Unit Information */ 22227 info->dki_unit = ddi_get_instance(SD_DEVINFO(un)); 22228 info->dki_slave = ((tgt << 3) | lun); 22229 (void) strncpy(info->dki_dname, ddi_driver_name(SD_DEVINFO(un)), 22230 DK_DEVLEN - 1); 22231 info->dki_flags = DKI_FMTVOL; 22232 info->dki_partition = SDPART(dev); 22233 22234 /* Max Transfer size of this device in blocks */ 22235 info->dki_maxtransfer = un->un_max_xfer_size / un->un_sys_blocksize; 22236 info->dki_addr = 0; 22237 info->dki_space = 0; 22238 info->dki_prio = 0; 22239 info->dki_vec = 0; 22240 22241 if (ddi_copyout(info, arg, sizeof (struct dk_cinfo), flag) != 0) { 22242 kmem_free(info, sizeof 
(struct dk_cinfo)); 22243 return (EFAULT); 22244 } else { 22245 kmem_free(info, sizeof (struct dk_cinfo)); 22246 return (0); 22247 } 22248 } 22249 22250 22251 /* 22252 * Function: sd_get_media_info 22253 * 22254 * Description: This routine is the driver entry point for handling ioctl 22255 * requests for the media type or command set profile used by the 22256 * drive to operate on the media (DKIOCGMEDIAINFO). 22257 * 22258 * Arguments: dev - the device number 22259 * arg - pointer to user provided dk_minfo structure 22260 * specifying the media type, logical block size and 22261 * drive capacity. 22262 * flag - this argument is a pass through to ddi_copyxxx() 22263 * directly from the mode argument of ioctl(). 22264 * 22265 * Return Code: 0 22266 * EACCES 22267 * EFAULT 22268 * ENXIO 22269 * EIO 22270 */ 22271 22272 static int 22273 sd_get_media_info(dev_t dev, caddr_t arg, int flag) 22274 { 22275 struct sd_lun *un = NULL; 22276 struct uscsi_cmd com; 22277 struct scsi_inquiry *sinq; 22278 struct dk_minfo media_info; 22279 u_longlong_t media_capacity; 22280 uint64_t capacity; 22281 uint_t lbasize; 22282 uchar_t *out_data; 22283 uchar_t *rqbuf; 22284 int rval = 0; 22285 int rtn; 22286 sd_ssc_t *ssc; 22287 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL || 22288 (un->un_state == SD_STATE_OFFLINE)) { 22289 return (ENXIO); 22290 } 22291 22292 SD_TRACE(SD_LOG_IOCTL_DKIO, un, "sd_get_media_info: entry\n"); 22293 22294 out_data = kmem_zalloc(SD_PROFILE_HEADER_LEN, KM_SLEEP); 22295 rqbuf = kmem_zalloc(SENSE_LENGTH, KM_SLEEP); 22296 22297 /* Issue a TUR to determine if the drive is ready with media present */ 22298 ssc = sd_ssc_init(un); 22299 rval = sd_send_scsi_TEST_UNIT_READY(ssc, SD_CHECK_FOR_MEDIA); 22300 if (rval == ENXIO) { 22301 goto done; 22302 } else if (rval != 0) { 22303 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 22304 } 22305 22306 /* Now get configuration data */ 22307 if (ISCD(un)) { 22308 media_info.dki_media_type = DK_CDROM; 22309 22310 /* Allow SCMD_GET_CONFIGURATION to MMC devices only */ 22311 if (un->un_f_mmc_cap == TRUE) { 22312 rtn = sd_send_scsi_GET_CONFIGURATION(ssc, &com, rqbuf, 22313 SENSE_LENGTH, out_data, SD_PROFILE_HEADER_LEN, 22314 SD_PATH_STANDARD); 22315 22316 if (rtn) { 22317 /* 22318 * We ignore all failures for CD and need to 22319 * put the assessment before the processing code 22320 * to avoid missing an assessment for FMA.
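 *
 * (For reference: on success, the feature header parsed in the
 * else-branch below carries the device's current profile in
 * bytes 6-7, per the MMC GET CONFIGURATION header layout; that
 * 16-bit value becomes dki_media_type.)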
22321 */ 22322 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 22323 /* 22324 * Failed for other than an illegal request 22325 * or command not supported 22326 */ 22327 if ((com.uscsi_status == STATUS_CHECK) && 22328 (com.uscsi_rqstatus == STATUS_GOOD)) { 22329 if ((rqbuf[2] != KEY_ILLEGAL_REQUEST) || 22330 (rqbuf[12] != 0x20)) { 22331 rval = EIO; 22332 goto no_assessment; 22333 } 22334 } 22335 } else { 22336 /* 22337 * The GET CONFIGURATION command succeeded 22338 * so set the media type according to the 22339 * returned data 22340 */ 22341 media_info.dki_media_type = out_data[6]; 22342 media_info.dki_media_type <<= 8; 22343 media_info.dki_media_type |= out_data[7]; 22344 } 22345 } 22346 } else { 22347 /* 22348 * The profile list is not available, so we attempt to identify 22349 * the media type based on the inquiry data 22350 */ 22351 sinq = un->un_sd->sd_inq; 22352 if ((sinq->inq_dtype == DTYPE_DIRECT) || 22353 (sinq->inq_dtype == DTYPE_OPTICAL)) { 22354 /* This is a direct access device or optical disk */ 22355 media_info.dki_media_type = DK_FIXED_DISK; 22356 22357 if ((bcmp(sinq->inq_vid, "IOMEGA", 6) == 0) || 22358 (bcmp(sinq->inq_vid, "iomega", 6) == 0)) { 22359 if ((bcmp(sinq->inq_pid, "ZIP", 3) == 0)) { 22360 media_info.dki_media_type = DK_ZIP; 22361 } else if ( 22362 (bcmp(sinq->inq_pid, "jaz", 3) == 0)) { 22363 media_info.dki_media_type = DK_JAZ; 22364 } 22365 } 22366 } else { 22367 /* 22368 * Not a CD, direct access or optical disk so return 22369 * unknown media 22370 */ 22371 media_info.dki_media_type = DK_UNKNOWN; 22372 } 22373 } 22374 22375 /* Now read the capacity so we can provide the lbasize and capacity */ 22376 rval = sd_send_scsi_READ_CAPACITY(ssc, &capacity, &lbasize, 22377 SD_PATH_DIRECT); 22378 switch (rval) { 22379 case 0: 22380 break; 22381 case EACCES: 22382 rval = EACCES; 22383 goto done; 22384 default: 22385 rval = EIO; 22386 goto done; 22387 } 22388 22389 /* 22390 * If the LUN has been expanded dynamically, update the un structure. 22391 */ 22392 mutex_enter(SD_MUTEX(un)); 22393 if ((un->un_f_blockcount_is_valid == TRUE) && 22394 (un->un_f_tgt_blocksize_is_valid == TRUE) && 22395 (capacity > un->un_blockcount)) { 22396 sd_update_block_info(un, lbasize, capacity); 22397 } 22398 mutex_exit(SD_MUTEX(un)); 22399 22400 media_info.dki_lbsize = lbasize; 22401 media_capacity = capacity; 22402 22403 /* 22404 * sd_send_scsi_READ_CAPACITY() reports capacity in 22405 * un->un_sys_blocksize chunks. So we need to convert it into 22406 * dki_lbsize (lbasize) chunks. 22407 */ 22408 media_capacity *= un->un_sys_blocksize; 22409 media_capacity /= lbasize; 22410 media_info.dki_capacity = media_capacity; 22411 22412 if (ddi_copyout(&media_info, arg, sizeof (struct dk_minfo), flag)) { 22413 rval = EFAULT; 22414 /* Keep the goto in case code is added below in the future */ 22415 goto no_assessment; 22416 } 22417 done: 22418 if (rval != 0) { 22419 if (rval == EIO) 22420 sd_ssc_assessment(ssc, SD_FMT_STATUS_CHECK); 22421 else 22422 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 22423 } 22424 no_assessment: 22425 sd_ssc_fini(ssc); 22426 kmem_free(out_data, SD_PROFILE_HEADER_LEN); 22427 kmem_free(rqbuf, SENSE_LENGTH); 22428 return (rval); 22429 } 22430 22431 22432 /* 22433 * Function: sd_check_media 22434 * 22435 * Description: This utility routine implements the functionality for the 22436 * DKIOCSTATE ioctl. This ioctl blocks the user thread until the 22437 * driver state changes from that specified by the user 22438 * (inserted or ejected).
For example, if the user specifies 22439 * DKIO_EJECTED and the current media state is inserted this 22440 * routine will immediately return DKIO_INSERTED. However, if the 22441 * current media state is not inserted the user thread will be 22442 * blocked until the drive state changes. If DKIO_NONE is specified 22443 * the user thread will block until a drive state change occurs. 22444 * 22445 * Arguments: dev - the device number 22446 * state - user pointer to a dkio_state, updated with the current 22447 * drive state at return. 22448 * 22449 * Return Code: ENXIO 22450 * EIO 22451 * EAGAIN 22452 * EINTR 22453 */ 22454 22455 static int 22456 sd_check_media(dev_t dev, enum dkio_state state) 22457 { 22458 struct sd_lun *un = NULL; 22459 enum dkio_state prev_state; 22460 opaque_t token = NULL; 22461 int rval = 0; 22462 sd_ssc_t *ssc; 22463 22464 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 22465 return (ENXIO); 22466 } 22467 22468 SD_TRACE(SD_LOG_COMMON, un, "sd_check_media: entry\n"); 22469 22470 ssc = sd_ssc_init(un); 22471 22472 mutex_enter(SD_MUTEX(un)); 22473 22474 SD_TRACE(SD_LOG_COMMON, un, "sd_check_media: " 22475 "state=%x, mediastate=%x\n", state, un->un_mediastate); 22476 22477 prev_state = un->un_mediastate; 22478 22479 /* is there anything to do? */ 22480 if (state == un->un_mediastate || un->un_mediastate == DKIO_NONE) { 22481 /* 22482 * submit the request to the scsi_watch service; 22483 * scsi_media_watch_cb() does the real work 22484 */ 22485 mutex_exit(SD_MUTEX(un)); 22486 22487 /* 22488 * This change handles the case where a scsi watch request is 22489 * added to a device that is powered down. To accomplish this 22490 * we power up the device before adding the scsi watch request, 22491 * since the scsi watch sends a TUR directly to the device 22492 * which the device cannot handle if it is powered down. 22493 */ 22494 if (sd_pm_entry(un) != DDI_SUCCESS) { 22495 mutex_enter(SD_MUTEX(un)); 22496 goto done; 22497 } 22498 22499 token = scsi_watch_request_submit(SD_SCSI_DEVP(un), 22500 sd_check_media_time, SENSE_LENGTH, sd_media_watch_cb, 22501 (caddr_t)dev); 22502 22503 sd_pm_exit(un); 22504 22505 mutex_enter(SD_MUTEX(un)); 22506 if (token == NULL) { 22507 rval = EAGAIN; 22508 goto done; 22509 } 22510 22511 /* 22512 * This is a special case IOCTL that doesn't return 22513 * until the media state changes. Routine sdpower 22514 * knows about and handles this so don't count it 22515 * as an active cmd in the driver, which would 22516 * keep the device busy to the pm framework. 22517 * If the count isn't decremented the device can't 22518 * be powered down. 22519 */ 22520 un->un_ncmds_in_driver--; 22521 ASSERT(un->un_ncmds_in_driver >= 0); 22522 22523 /* 22524 * if a prior request had been made, this will be the same 22525 * token, as scsi_watch was designed that way. 
22526 */ 22527 un->un_swr_token = token; 22528 un->un_specified_mediastate = state; 22529 22530 /* 22531 * now wait for the media change; 22532 * we will not be signalled unless mediastate == state, but it is 22533 * still better to test for this condition, since there is a 22534 * 2 sec cv_broadcast delay when mediastate == DKIO_INSERTED 22535 */ 22536 SD_TRACE(SD_LOG_COMMON, un, 22537 "sd_check_media: waiting for media state change\n"); 22538 while (un->un_mediastate == state) { 22539 if (cv_wait_sig(&un->un_state_cv, SD_MUTEX(un)) == 0) { 22540 SD_TRACE(SD_LOG_COMMON, un, 22541 "sd_check_media: waiting for media state " 22542 "was interrupted\n"); 22543 un->un_ncmds_in_driver++; 22544 rval = EINTR; 22545 goto done; 22546 } 22547 SD_TRACE(SD_LOG_COMMON, un, 22548 "sd_check_media: received signal, state=%x\n", 22549 un->un_mediastate); 22550 } 22551 /* 22552 * Increment the counter to indicate that the device once again 22553 * has an active outstanding cmd. 22554 */ 22555 un->un_ncmds_in_driver++; 22556 } 22557 22558 /* invalidate geometry */ 22559 if (prev_state == DKIO_INSERTED && un->un_mediastate == DKIO_EJECTED) { 22560 sr_ejected(un); 22561 } 22562 22563 if (un->un_mediastate == DKIO_INSERTED && prev_state != DKIO_INSERTED) { 22564 uint64_t capacity; 22565 uint_t lbasize; 22566 22567 SD_TRACE(SD_LOG_COMMON, un, "sd_check_media: media inserted\n"); 22568 mutex_exit(SD_MUTEX(un)); 22569 /* 22570 * Since the following routines use SD_PATH_DIRECT, we must 22571 * call PM directly before the upcoming disk accesses. This 22572 * may cause the disk to be powered up and spun up. 22573 */ 22574 22575 if (sd_pm_entry(un) == DDI_SUCCESS) { 22576 rval = sd_send_scsi_READ_CAPACITY(ssc, 22577 &capacity, &lbasize, SD_PATH_DIRECT); 22578 if (rval != 0) { 22579 sd_pm_exit(un); 22580 if (rval == EIO) 22581 sd_ssc_assessment(ssc, 22582 SD_FMT_STATUS_CHECK); 22583 else 22584 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 22585 mutex_enter(SD_MUTEX(un)); 22586 goto done; 22587 } 22588 } else { 22589 rval = EIO; 22590 mutex_enter(SD_MUTEX(un)); 22591 goto done; 22592 } 22593 mutex_enter(SD_MUTEX(un)); 22594 22595 sd_update_block_info(un, lbasize, capacity); 22596 22597 /* 22598 * Check if the media in the device is writable or not 22599 */ 22600 if (ISCD(un)) { 22601 sd_check_for_writable_cd(ssc, SD_PATH_DIRECT); 22602 } 22603 22604 mutex_exit(SD_MUTEX(un)); 22605 cmlb_invalidate(un->un_cmlbhandle, (void *)SD_PATH_DIRECT); 22606 if ((cmlb_validate(un->un_cmlbhandle, 0, 22607 (void *)SD_PATH_DIRECT) == 0) && un->un_f_pkstats_enabled) { 22608 sd_set_pstats(un); 22609 SD_TRACE(SD_LOG_IO_PARTITION, un, 22610 "sd_check_media: un:0x%p pstats created and " 22611 "set\n", un); 22612 } 22613 22614 rval = sd_send_scsi_DOORLOCK(ssc, SD_REMOVAL_PREVENT, 22615 SD_PATH_DIRECT); 22616 22617 sd_pm_exit(un); 22618 22619 if (rval != 0) { 22620 if (rval == EIO) 22621 sd_ssc_assessment(ssc, SD_FMT_STATUS_CHECK); 22622 else 22623 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 22624 } 22625 22626 mutex_enter(SD_MUTEX(un)); 22627 } 22628 done: 22629 sd_ssc_fini(ssc); 22630 un->un_f_watcht_stopped = FALSE; 22631 /* 22632 * Use of this local token and the mutex ensures that we avoid 22633 * some race conditions associated with terminating the 22634 * scsi watch.
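 *
 * A typical user-level consumer of DKIOCSTATE (volume management
 * style; an illustrative sketch only, with a hypothetical device
 * path and no error handling):
 *
 *	enum dkio_state state = DKIO_NONE;
 *	int fd = open("/dev/rdsk/c0t2d0s2", O_RDONLY | O_NDELAY);
 *	for (;;) {
 *		if (ioctl(fd, DKIOCSTATE, &state) < 0)
 *			break;
 *		... state is now DKIO_INSERTED, DKIO_EJECTED, ...
 *	}
 *
 * Each ioctl call blocks in this routine until the media state
 * differs from the state value passed in.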
22635 */ 22636 if (token) { 22637 un->un_swr_token = (opaque_t)NULL; 22638 mutex_exit(SD_MUTEX(un)); 22639 (void) scsi_watch_request_terminate(token, 22640 SCSI_WATCH_TERMINATE_WAIT); 22641 mutex_enter(SD_MUTEX(un)); 22642 } 22643 22644 /* 22645 * Update the capacity kstat value if no media was previously 22646 * present (capacity kstat is 0) and media has been inserted 22647 * (un_f_blockcount_is_valid == TRUE) 22648 */ 22649 if (un->un_errstats) { 22650 struct sd_errstats *stp = NULL; 22651 22652 stp = (struct sd_errstats *)un->un_errstats->ks_data; 22653 if ((stp->sd_capacity.value.ui64 == 0) && 22654 (un->un_f_blockcount_is_valid == TRUE)) { 22655 stp->sd_capacity.value.ui64 = 22656 (uint64_t)((uint64_t)un->un_blockcount * 22657 un->un_sys_blocksize); 22658 } 22659 } 22660 mutex_exit(SD_MUTEX(un)); 22661 SD_TRACE(SD_LOG_COMMON, un, "sd_check_media: done\n"); 22662 return (rval); 22663 } 22664 22665 22666 /* 22667 * Function: sd_delayed_cv_broadcast 22668 * 22669 * Description: Delayed cv_broadcast to allow the target to recover from media 22670 * insertion. 22671 * 22672 * Arguments: arg - driver soft state (unit) structure 22673 */ 22674 22675 static void 22676 sd_delayed_cv_broadcast(void *arg) 22677 { 22678 struct sd_lun *un = arg; 22679 22680 SD_TRACE(SD_LOG_COMMON, un, "sd_delayed_cv_broadcast\n"); 22681 22682 mutex_enter(SD_MUTEX(un)); 22683 un->un_dcvb_timeid = NULL; 22684 cv_broadcast(&un->un_state_cv); 22685 mutex_exit(SD_MUTEX(un)); 22686 } 22687 22688 22689 /* 22690 * Function: sd_media_watch_cb 22691 * 22692 * Description: Callback routine used for support of the DKIOCSTATE ioctl. This 22693 * routine processes the TUR sense data and updates the driver 22694 * state if a transition has occurred. The user thread 22695 * (sd_check_media) is then signalled.
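 *
 * The transitions applied below can be summarized as follows (a
 * condensed sketch of the checks that follow, not an exhaustive
 * decoding of the sense data):
 *
 *	pkt_reason CMD_DEV_GONE			-> DKIO_DEV_GONE
 *	status GOOD, command complete		-> DKIO_INSERTED
 *	UNIT ATTENTION, ASC 0x28		-> DKIO_INSERTED
 *	NOT READY, ASC/ASCQ 06/00		-> DKIO_INSERTED
 *	NOT READY, ASC 0x3a			-> DKIO_EJECTED
 *	NOT READY, ASC 0x04, ASCQ 02/07/08	-> DKIO_INSERTED
 *	NO SENSE, ASC/ASCQ 00/00		-> ignored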
22696 * 22697 * Arguments: arg - the device 'dev_t' is used for context to discriminate 22698 * among multiple watches that share this callback function 22699 * resultp - scsi watch facility result packet containing scsi 22700 * packet, status byte and sense data 22701 * 22702 * Return Code: 0 for success, -1 for failure 22703 */ 22704 22705 static int 22706 sd_media_watch_cb(caddr_t arg, struct scsi_watch_result *resultp) 22707 { 22708 struct sd_lun *un; 22709 struct scsi_status *statusp = resultp->statusp; 22710 uint8_t *sensep = (uint8_t *)resultp->sensep; 22711 enum dkio_state state = DKIO_NONE; 22712 dev_t dev = (dev_t)arg; 22713 uchar_t actual_sense_length; 22714 uint8_t skey, asc, ascq; 22715 22716 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 22717 return (-1); 22718 } 22719 actual_sense_length = resultp->actual_sense_length; 22720 22721 mutex_enter(SD_MUTEX(un)); 22722 SD_TRACE(SD_LOG_COMMON, un, 22723 "sd_media_watch_cb: status=%x, sensep=%p, len=%x\n", 22724 *((char *)statusp), (void *)sensep, actual_sense_length); 22725 22726 if (resultp->pkt->pkt_reason == CMD_DEV_GONE) { 22727 un->un_mediastate = DKIO_DEV_GONE; 22728 cv_broadcast(&un->un_state_cv); 22729 mutex_exit(SD_MUTEX(un)); 22730 22731 return (0); 22732 } 22733 22734 /* 22735 * If there was a check condition, then sensep points to valid sense data. 22736 * If the status was not a check condition but a reservation or busy status, 22737 * then the new state is DKIO_NONE. 22738 */ 22739 if (sensep != NULL) { 22740 skey = scsi_sense_key(sensep); 22741 asc = scsi_sense_asc(sensep); 22742 ascq = scsi_sense_ascq(sensep); 22743 22744 SD_INFO(SD_LOG_COMMON, un, 22745 "sd_media_watch_cb: sense KEY=%x, ASC=%x, ASCQ=%x\n", 22746 skey, asc, ascq); 22747 /* This routine only uses up to 13 bytes of sense data. */ 22748 if (actual_sense_length >= 13) { 22749 if (skey == KEY_UNIT_ATTENTION) { 22750 if (asc == 0x28) { 22751 state = DKIO_INSERTED; 22752 } 22753 } else if (skey == KEY_NOT_READY) { 22754 /* 22755 * Sense data of 02/06/00 means that the 22756 * drive could not read the media (No 22757 * reference position found). In this case, 22758 * to prevent a hang on the DKIOCSTATE IOCTL, 22759 * we set the media state to DKIO_INSERTED. 22760 */ 22761 if (asc == 0x06 && ascq == 0x00) 22762 state = DKIO_INSERTED; 22763 22764 /* 22765 * Sense data of 02/04/02 means that the host 22766 * should send a start command. Explicitly 22767 * leave the media state as is 22768 * (inserted), as the media is inserted 22769 * and the host has stopped the device for PM 22770 * reasons. The next true read/write 22771 * to this media will bring the 22772 * device to the right state for 22773 * media access. 22774 */ 22775 if (asc == 0x3a) { 22776 state = DKIO_EJECTED; 22777 } else { 22778 /* 22779 * If the drive is busy with an 22780 * operation or long write, keep the 22781 * media in an inserted state. 22782 */ 22783 22784 if ((asc == 0x04) && 22785 ((ascq == 0x02) || 22786 (ascq == 0x07) || 22787 (ascq == 0x08))) { 22788 state = DKIO_INSERTED; 22789 } 22790 } 22791 } else if (skey == KEY_NO_SENSE) { 22792 if ((asc == 0x00) && (ascq == 0x00)) { 22793 /* 22794 * Sense Data 00/00/00 does not provide 22795 * any information about the state of 22796 * the media. Ignore it.
22797 */ 22798 mutex_exit(SD_MUTEX(un)); 22799 return (0); 22800 } 22801 } 22802 } 22803 } else if ((*((char *)statusp) == STATUS_GOOD) && 22804 (resultp->pkt->pkt_reason == CMD_CMPLT)) { 22805 state = DKIO_INSERTED; 22806 } 22807 22808 SD_TRACE(SD_LOG_COMMON, un, 22809 "sd_media_watch_cb: state=%x, specified=%x\n", 22810 state, un->un_specified_mediastate); 22811 22812 /* 22813 * now signal the waiting thread if this is *not* the specified state; 22814 * delay the signal if the state is DKIO_INSERTED to allow the target 22815 * to recover 22816 */ 22817 if (state != un->un_specified_mediastate) { 22818 un->un_mediastate = state; 22819 if (state == DKIO_INSERTED) { 22820 /* 22821 * delay the signal to give the drive a chance 22822 * to do what it apparently needs to do 22823 */ 22824 SD_TRACE(SD_LOG_COMMON, un, 22825 "sd_media_watch_cb: delayed cv_broadcast\n"); 22826 if (un->un_dcvb_timeid == NULL) { 22827 un->un_dcvb_timeid = 22828 timeout(sd_delayed_cv_broadcast, un, 22829 drv_usectohz((clock_t)MEDIA_ACCESS_DELAY)); 22830 } 22831 } else { 22832 SD_TRACE(SD_LOG_COMMON, un, 22833 "sd_media_watch_cb: immediate cv_broadcast\n"); 22834 cv_broadcast(&un->un_state_cv); 22835 } 22836 } 22837 mutex_exit(SD_MUTEX(un)); 22838 return (0); 22839 } 22840 22841 22842 /* 22843 * Function: sd_dkio_get_temp 22844 * 22845 * Description: This routine is the driver entry point for handling ioctl 22846 * requests to get the disk temperature. 22847 * 22848 * Arguments: dev - the device number 22849 * arg - pointer to user provided dk_temperature structure. 22850 * flag - this argument is a pass through to ddi_copyxxx() 22851 * directly from the mode argument of ioctl(). 22852 * 22853 * Return Code: 0 22854 * EFAULT 22855 * ENXIO 22856 * EAGAIN 22857 */ 22858 22859 static int 22860 sd_dkio_get_temp(dev_t dev, caddr_t arg, int flag) 22861 { 22862 struct sd_lun *un = NULL; 22863 struct dk_temperature *dktemp = NULL; 22864 uchar_t *temperature_page; 22865 int rval = 0; 22866 int path_flag = SD_PATH_STANDARD; 22867 sd_ssc_t *ssc; 22868 22869 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 22870 return (ENXIO); 22871 } 22872 22873 ssc = sd_ssc_init(un); 22874 dktemp = kmem_zalloc(sizeof (struct dk_temperature), KM_SLEEP); 22875 22876 /* copyin the disk temp argument to get the user flags */ 22877 if (ddi_copyin((void *)arg, dktemp, 22878 sizeof (struct dk_temperature), flag) != 0) { 22879 rval = EFAULT; 22880 goto done; 22881 } 22882 22883 /* Initialize the temperature to invalid. */ 22884 dktemp->dkt_cur_temp = (short)DKT_INVALID_TEMP; 22885 dktemp->dkt_ref_temp = (short)DKT_INVALID_TEMP; 22886 22887 /* 22888 * Note: Investigate removing the "bypass pm" semantic. 22889 * Can we just bypass PM always? 22890 */ 22891 if (dktemp->dkt_flags & DKT_BYPASS_PM) { 22892 path_flag = SD_PATH_DIRECT; 22893 ASSERT(!mutex_owned(&un->un_pm_mutex)); 22894 mutex_enter(&un->un_pm_mutex); 22895 if (SD_DEVICE_IS_IN_LOW_POWER(un)) { 22896 /* 22897 * If DKT_BYPASS_PM is set, and the drive happens to be 22898 * in low power mode, we cannot wake it up; we need to 22899 * return EAGAIN. 22900 */ 22901 mutex_exit(&un->un_pm_mutex); 22902 rval = EAGAIN; 22903 goto done; 22904 } else { 22905 /* 22906 * Indicate to PM that the device is busy. This is required 22907 * to avoid a race - i.e. the ioctl is issuing a 22908 * command and the pm framework brings down the device 22909 * to low power mode (possible power cut-off on some 22910 * platforms).
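 *
 * For reference, a user-level caller of this ioctl (an
 * illustrative sketch; fd is assumed open on the raw device):
 *
 *	struct dk_temperature dkt;
 *
 *	bzero(&dkt, sizeof (dkt));
 *	dkt.dkt_flags = DKT_BYPASS_PM;
 *	if (ioctl(fd, DKIOCGTEMPERATURE, &dkt) == 0 &&
 *	    dkt.dkt_cur_temp != DKT_INVALID_TEMP)
 *		... dkt.dkt_cur_temp is in degrees Celsius ...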
22911 */ 22912 mutex_exit(&un->un_pm_mutex); 22913 if (sd_pm_entry(un) != DDI_SUCCESS) { 22914 rval = EAGAIN; 22915 goto done; 22916 } 22917 } 22918 } 22919 22920 temperature_page = kmem_zalloc(TEMPERATURE_PAGE_SIZE, KM_SLEEP); 22921 22922 rval = sd_send_scsi_LOG_SENSE(ssc, temperature_page, 22923 TEMPERATURE_PAGE_SIZE, TEMPERATURE_PAGE, 1, 0, path_flag); 22924 if (rval != 0) 22925 goto done2; 22926 22927 /* 22928 * For the current temperature verify that the parameter length is 0x02 22929 * and the parameter code is 0x00 22930 */ 22931 if ((temperature_page[7] == 0x02) && (temperature_page[4] == 0x00) && 22932 (temperature_page[5] == 0x00)) { 22933 if (temperature_page[9] == 0xFF) { 22934 dktemp->dkt_cur_temp = (short)DKT_INVALID_TEMP; 22935 } else { 22936 dktemp->dkt_cur_temp = (short)(temperature_page[9]); 22937 } 22938 } 22939 22940 /* 22941 * For the reference temperature verify that the parameter 22942 * length is 0x02 and the parameter code is 0x01 22943 */ 22944 if ((temperature_page[13] == 0x02) && (temperature_page[10] == 0x00) && 22945 (temperature_page[11] == 0x01)) { 22946 if (temperature_page[15] == 0xFF) { 22947 dktemp->dkt_ref_temp = (short)DKT_INVALID_TEMP; 22948 } else { 22949 dktemp->dkt_ref_temp = (short)(temperature_page[15]); 22950 } 22951 } 22952 22953 /* Do the copyout regardless of the temperature command's status. */ 22954 if (ddi_copyout(dktemp, (void *)arg, sizeof (struct dk_temperature), 22955 flag) != 0) { 22956 rval = EFAULT; 22957 goto done1; 22958 } 22959 22960 done2: 22961 if (rval != 0) { 22962 if (rval == EIO) 22963 sd_ssc_assessment(ssc, SD_FMT_STATUS_CHECK); 22964 else 22965 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 22966 } 22967 done1: 22968 if (path_flag == SD_PATH_DIRECT) { 22969 sd_pm_exit(un); 22970 } 22971 22972 kmem_free(temperature_page, TEMPERATURE_PAGE_SIZE); 22973 done: 22974 sd_ssc_fini(ssc); 22975 if (dktemp != NULL) { 22976 kmem_free(dktemp, sizeof (struct dk_temperature)); 22977 } 22978 22979 return (rval); 22980 } 22981 22982 22983 /* 22984 * Function: sd_log_page_supported 22985 * 22986 * Description: This routine uses sd_send_scsi_LOG_SENSE to find the list of 22987 * supported log pages. 22988 * 22989 * Arguments: ssc - ssc contains pointer to driver soft state (unit) 22990 * structure for this target. 22991 * log_page - the log page code to look for in the list of supported pages. 22992 * 22993 * Return Code: -1 - on error (log sense is optional and may not be supported). 22994 * 0 - log page not found. 22995 * 1 - log page found. 22996 */ 22997 22998 static int 22999 sd_log_page_supported(sd_ssc_t *ssc, int log_page) 23000 { 23001 uchar_t *log_page_data; 23002 int i; 23003 int match = 0; 23004 int log_size; 23005 int status = 0; 23006 struct sd_lun *un; 23007 23008 ASSERT(ssc != NULL); 23009 un = ssc->ssc_un; 23010 ASSERT(un != NULL); 23011 23012 log_page_data = kmem_zalloc(0xFF, KM_SLEEP); 23013 23014 status = sd_send_scsi_LOG_SENSE(ssc, log_page_data, 0xFF, 0, 0x01, 0, 23015 SD_PATH_DIRECT); 23016 23017 if (status != 0) { 23018 if (status == EIO) { 23019 /* 23020 * Some disks do not support log sense; we 23021 * should ignore this kind of error (sense key is 23022 * 0x5 - illegal request).
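 *
 * (For reference, the page 0x00 response parsed below has the
 * standard SPC log page list format: byte 0 is the page code,
 * bytes 2-3 are the page length, and the supported page codes
 * are listed one per byte starting at byte 4; hence log_size is
 * read from byte 3 and the scan starts at index 4.)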
23023 */ 23024 uint8_t *sensep; 23025 int senlen; 23026 23027 sensep = (uint8_t *)ssc->ssc_uscsi_cmd->uscsi_rqbuf; 23028 senlen = (int)(ssc->ssc_uscsi_cmd->uscsi_rqlen - 23029 ssc->ssc_uscsi_cmd->uscsi_rqresid); 23030 23031 if (senlen > 0 && 23032 scsi_sense_key(sensep) == KEY_ILLEGAL_REQUEST) { 23033 sd_ssc_assessment(ssc, 23034 SD_FMT_IGNORE_COMPROMISE); 23035 } else { 23036 sd_ssc_assessment(ssc, SD_FMT_STATUS_CHECK); 23037 } 23038 } else { 23039 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 23040 } 23041 23042 SD_ERROR(SD_LOG_COMMON, un, 23043 "sd_log_page_supported: failed log page retrieval\n"); 23044 kmem_free(log_page_data, 0xFF); 23045 return (-1); 23046 } 23047 23048 log_size = log_page_data[3]; 23049 23050 /* 23051 * The list of supported log pages starts from the fourth byte. Check 23052 * until we run out of log pages or a match is found. 23053 */ 23054 for (i = 4; (i < (log_size + 4)) && !match; i++) { 23055 if (log_page_data[i] == log_page) { 23056 match++; 23057 } 23058 } 23059 kmem_free(log_page_data, 0xFF); 23060 return (match); 23061 } 23062 23063 23064 /* 23065 * Function: sd_mhdioc_failfast 23066 * 23067 * Description: This routine is the driver entry point for handling ioctl 23068 * requests to enable/disable the multihost failfast option. 23069 * (MHIOCENFAILFAST) 23070 * 23071 * Arguments: dev - the device number 23072 * arg - user specified probing interval. 23073 * flag - this argument is a pass through to ddi_copyxxx() 23074 * directly from the mode argument of ioctl(). 23075 * 23076 * Return Code: 0 23077 * EFAULT 23078 * ENXIO 23079 */ 23080 23081 static int 23082 sd_mhdioc_failfast(dev_t dev, caddr_t arg, int flag) 23083 { 23084 struct sd_lun *un = NULL; 23085 int mh_time; 23086 int rval = 0; 23087 23088 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 23089 return (ENXIO); 23090 } 23091 23092 if (ddi_copyin((void *)arg, &mh_time, sizeof (int), flag)) 23093 return (EFAULT); 23094 23095 if (mh_time) { 23096 mutex_enter(SD_MUTEX(un)); 23097 un->un_resvd_status |= SD_FAILFAST; 23098 mutex_exit(SD_MUTEX(un)); 23099 /* 23100 * If mh_time is INT_MAX, then this ioctl is being used for 23101 * SCSI-3 PGR purposes, and we don't need to spawn a watch thread. 23102 */ 23103 if (mh_time != INT_MAX) { 23104 rval = sd_check_mhd(dev, mh_time); 23105 } 23106 } else { 23107 (void) sd_check_mhd(dev, 0); 23108 mutex_enter(SD_MUTEX(un)); 23109 un->un_resvd_status &= ~SD_FAILFAST; 23110 mutex_exit(SD_MUTEX(un)); 23111 } 23112 return (rval); 23113 } 23114 23115 23116 /* 23117 * Function: sd_mhdioc_takeown 23118 * 23119 * Description: This routine is the driver entry point for handling ioctl 23120 * requests to forcefully acquire exclusive access rights to the 23121 * multihost disk (MHIOCTKOWN). 23122 * 23123 * Arguments: dev - the device number 23124 * arg - user provided structure specifying the delay 23125 * parameters in milliseconds 23126 * flag - this argument is a pass through to ddi_copyxxx() 23127 * directly from the mode argument of ioctl().
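 *
 * Illustrative user-level sequence (cluster framework style; a
 * sketch only, with error handling omitted): take ownership,
 * then enable failfast with a one second probe interval:
 *
 *	struct mhioctkown tkown;
 *	int mh_time = 1000;	(interval in milliseconds)
 *
 *	bzero(&tkown, sizeof (tkown));	(use the driver defaults)
 *	ioctl(fd, MHIOCTKOWN, &tkown);
 *	ioctl(fd, MHIOCENFAILFAST, &mh_time);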
23128 * 23129 * Return Code: 0 23130 * EFAULT 23131 * ENXIO 23132 */ 23133 23134 static int 23135 sd_mhdioc_takeown(dev_t dev, caddr_t arg, int flag) 23136 { 23137 struct sd_lun *un = NULL; 23138 struct mhioctkown *tkown = NULL; 23139 int rval = 0; 23140 23141 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 23142 return (ENXIO); 23143 } 23144 23145 if (arg != NULL) { 23146 tkown = (struct mhioctkown *) 23147 kmem_zalloc(sizeof (struct mhioctkown), KM_SLEEP); 23148 rval = ddi_copyin(arg, tkown, sizeof (struct mhioctkown), flag); 23149 if (rval != 0) { 23150 rval = EFAULT; 23151 goto error; 23152 } 23153 } 23154 23155 rval = sd_take_ownership(dev, tkown); 23156 mutex_enter(SD_MUTEX(un)); 23157 if (rval == 0) { 23158 un->un_resvd_status |= SD_RESERVE; 23159 if (tkown != NULL && tkown->reinstate_resv_delay != 0) { 23160 sd_reinstate_resv_delay = 23161 tkown->reinstate_resv_delay * 1000; 23162 } else { 23163 sd_reinstate_resv_delay = SD_REINSTATE_RESV_DELAY; 23164 } 23165 /* 23166 * Give the scsi_watch routine interval set by 23167 * the MHIOCENFAILFAST ioctl precedence here. 23168 */ 23169 if ((un->un_resvd_status & SD_FAILFAST) == 0) { 23170 mutex_exit(SD_MUTEX(un)); 23171 (void) sd_check_mhd(dev, sd_reinstate_resv_delay/1000); 23172 SD_TRACE(SD_LOG_IOCTL_MHD, un, 23173 "sd_mhdioc_takeown : %d\n", 23174 sd_reinstate_resv_delay); 23175 } else { 23176 mutex_exit(SD_MUTEX(un)); 23177 } 23178 (void) scsi_reset_notify(SD_ADDRESS(un), SCSI_RESET_NOTIFY, 23179 sd_mhd_reset_notify_cb, (caddr_t)un); 23180 } else { 23181 un->un_resvd_status &= ~SD_RESERVE; 23182 mutex_exit(SD_MUTEX(un)); 23183 } 23184 23185 error: 23186 if (tkown != NULL) { 23187 kmem_free(tkown, sizeof (struct mhioctkown)); 23188 } 23189 return (rval); 23190 } 23191 23192 23193 /* 23194 * Function: sd_mhdioc_release 23195 * 23196 * Description: This routine is the driver entry point for handling ioctl 23197 * requests to release exclusive access rights to the multihost 23198 * disk (MHIOCRELEASE). 23199 * 23200 * Arguments: dev - the device number 23201 * 23202 * Return Code: 0 23203 * ENXIO 23204 */ 23205 23206 static int 23207 sd_mhdioc_release(dev_t dev) 23208 { 23209 struct sd_lun *un = NULL; 23210 timeout_id_t resvd_timeid_save; 23211 int resvd_status_save; 23212 int rval = 0; 23213 23214 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 23215 return (ENXIO); 23216 } 23217 23218 mutex_enter(SD_MUTEX(un)); 23219 resvd_status_save = un->un_resvd_status; 23220 un->un_resvd_status &= 23221 ~(SD_RESERVE | SD_LOST_RESERVE | SD_WANT_RESERVE); 23222 if (un->un_resvd_timeid) { 23223 resvd_timeid_save = un->un_resvd_timeid; 23224 un->un_resvd_timeid = NULL; 23225 mutex_exit(SD_MUTEX(un)); 23226 (void) untimeout(resvd_timeid_save); 23227 } else { 23228 mutex_exit(SD_MUTEX(un)); 23229 } 23230 23231 /* 23232 * destroy any pending timeout thread that may be attempting to 23233 * reinstate reservation on this device. 
23234 */ 23235 sd_rmv_resv_reclaim_req(dev); 23236 23237 if ((rval = sd_reserve_release(dev, SD_RELEASE)) == 0) { 23238 mutex_enter(SD_MUTEX(un)); 23239 if ((un->un_mhd_token) && 23240 ((un->un_resvd_status & SD_FAILFAST) == 0)) { 23241 mutex_exit(SD_MUTEX(un)); 23242 (void) sd_check_mhd(dev, 0); 23243 } else { 23244 mutex_exit(SD_MUTEX(un)); 23245 } 23246 (void) scsi_reset_notify(SD_ADDRESS(un), SCSI_RESET_CANCEL, 23247 sd_mhd_reset_notify_cb, (caddr_t)un); 23248 } else { 23249 /* 23250 * sd_mhd_watch_cb will restart the resvd recover timeout thread 23251 */ 23252 mutex_enter(SD_MUTEX(un)); 23253 un->un_resvd_status = resvd_status_save; 23254 mutex_exit(SD_MUTEX(un)); 23255 } 23256 return (rval); 23257 } 23258 23259 23260 /* 23261 * Function: sd_mhdioc_register_devid 23262 * 23263 * Description: This routine is the driver entry point for handling ioctl 23264 * requests to register the device id (MHIOCREREGISTERDEVID). 23265 * 23266 * Note: The implementation for this ioctl has been updated to 23267 * be consistent with the original PSARC case (1999/357) 23268 * (4375899, 4241671, 4220005) 23269 * 23270 * Arguments: dev - the device number 23271 * 23272 * Return Code: 0 23273 * ENXIO 23274 */ 23275 23276 static int 23277 sd_mhdioc_register_devid(dev_t dev) 23278 { 23279 struct sd_lun *un = NULL; 23280 int rval = 0; 23281 sd_ssc_t *ssc; 23282 23283 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 23284 return (ENXIO); 23285 } 23286 23287 ASSERT(!mutex_owned(SD_MUTEX(un))); 23288 23289 mutex_enter(SD_MUTEX(un)); 23290 23291 /* If a devid already exists, de-register it */ 23292 if (un->un_devid != NULL) { 23293 ddi_devid_unregister(SD_DEVINFO(un)); 23294 /* 23295 * After unregistering the devid, free the devid memory. 23296 */ 23297 ddi_devid_free(un->un_devid); 23298 un->un_devid = NULL; 23299 } 23300 23301 /* Check for reservation conflict */ 23302 mutex_exit(SD_MUTEX(un)); 23303 ssc = sd_ssc_init(un); 23304 rval = sd_send_scsi_TEST_UNIT_READY(ssc, 0); 23305 mutex_enter(SD_MUTEX(un)); 23306 23307 switch (rval) { 23308 case 0: 23309 sd_register_devid(ssc, SD_DEVINFO(un), SD_TARGET_IS_UNRESERVED); 23310 break; 23311 case EACCES: 23312 break; 23313 default: 23314 rval = EIO; 23315 } 23316 23317 mutex_exit(SD_MUTEX(un)); 23318 if (rval != 0) { 23319 if (rval == EIO) 23320 sd_ssc_assessment(ssc, SD_FMT_STATUS_CHECK); 23321 else 23322 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 23323 } 23324 sd_ssc_fini(ssc); 23325 return (rval); 23326 } 23327 23328 23329 /* 23330 * Function: sd_mhdioc_inkeys 23331 * 23332 * Description: This routine is the driver entry point for handling ioctl 23333 * requests to issue the SCSI-3 Persistent In Read Keys command 23334 * to the device (MHIOCGRP_INKEYS). 23335 * 23336 * Arguments: dev - the device number 23337 * arg - user provided in_keys structure 23338 * flag - this argument is a pass through to ddi_copyxxx() 23339 * directly from the mode argument of ioctl().
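 *
 * Illustrative caller-side setup (a sketch only; field names are
 * assumed to follow sys/mhd.h, and the buffer size is arbitrary):
 *
 *	mhioc_resv_key_t keys[16];
 *	mhioc_key_list_t kl;
 *	mhioc_inkeys_t ik;
 *
 *	kl.listsize = 16;
 *	kl.list = keys;
 *	ik.li = &kl;
 *	ioctl(fd, MHIOCGRP_INKEYS, &ik);
 *	(on return, ik.generation and kl.listlen are filled in;
 *	kl.listlen may exceed listsize if the buffer was too small)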
23340 * 23341 * Return Code: code returned by sd_persistent_reservation_in_read_keys() 23342 * ENXIO 23343 * EFAULT 23344 */ 23345 23346 static int 23347 sd_mhdioc_inkeys(dev_t dev, caddr_t arg, int flag) 23348 { 23349 struct sd_lun *un; 23350 mhioc_inkeys_t inkeys; 23351 int rval = 0; 23352 23353 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 23354 return (ENXIO); 23355 } 23356 23357 #ifdef _MULTI_DATAMODEL 23358 switch (ddi_model_convert_from(flag & FMODELS)) { 23359 case DDI_MODEL_ILP32: { 23360 struct mhioc_inkeys32 inkeys32; 23361 23362 if (ddi_copyin(arg, &inkeys32, 23363 sizeof (struct mhioc_inkeys32), flag) != 0) { 23364 return (EFAULT); 23365 } 23366 inkeys.li = (mhioc_key_list_t *)(uintptr_t)inkeys32.li; 23367 if ((rval = sd_persistent_reservation_in_read_keys(un, 23368 &inkeys, flag)) != 0) { 23369 return (rval); 23370 } 23371 inkeys32.generation = inkeys.generation; 23372 if (ddi_copyout(&inkeys32, arg, sizeof (struct mhioc_inkeys32), 23373 flag) != 0) { 23374 return (EFAULT); 23375 } 23376 break; 23377 } 23378 case DDI_MODEL_NONE: 23379 if (ddi_copyin(arg, &inkeys, sizeof (mhioc_inkeys_t), 23380 flag) != 0) { 23381 return (EFAULT); 23382 } 23383 if ((rval = sd_persistent_reservation_in_read_keys(un, 23384 &inkeys, flag)) != 0) { 23385 return (rval); 23386 } 23387 if (ddi_copyout(&inkeys, arg, sizeof (mhioc_inkeys_t), 23388 flag) != 0) { 23389 return (EFAULT); 23390 } 23391 break; 23392 } 23393 23394 #else /* ! _MULTI_DATAMODEL */ 23395 23396 if (ddi_copyin(arg, &inkeys, sizeof (mhioc_inkeys_t), flag) != 0) { 23397 return (EFAULT); 23398 } 23399 rval = sd_persistent_reservation_in_read_keys(un, &inkeys, flag); 23400 if (rval != 0) { 23401 return (rval); 23402 } 23403 if (ddi_copyout(&inkeys, arg, sizeof (mhioc_inkeys_t), flag) != 0) { 23404 return (EFAULT); 23405 } 23406 23407 #endif /* _MULTI_DATAMODEL */ 23408 23409 return (rval); 23410 } 23411 23412 23413 /* 23414 * Function: sd_mhdioc_inresv 23415 * 23416 * Description: This routine is the driver entry point for handling ioctl 23417 * requests to issue the SCSI-3 Persistent In Read Reservations 23418 * command to the device (MHIOCGRP_INRESV). 23419 * 23420 * Arguments: dev - the device number 23421 * arg - user provided in_resv structure 23422 * flag - this argument is a pass through to ddi_copyxxx() 23423 * directly from the mode argument of ioctl().
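 *
 * (Caller-side setup mirrors MHIOCGRP_INKEYS above, except that
 * the embedded list is a mhioc_resv_desc_list_t of
 * mhioc_resv_desc_t entries rather than a key list.)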
23424 * 23425 * Return Code: code returned by sd_persistent_reservation_in_read_resv() 23426 * ENXIO 23427 * EFAULT 23428 */ 23429 23430 static int 23431 sd_mhdioc_inresv(dev_t dev, caddr_t arg, int flag) 23432 { 23433 struct sd_lun *un; 23434 mhioc_inresvs_t inresvs; 23435 int rval = 0; 23436 23437 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 23438 return (ENXIO); 23439 } 23440 23441 #ifdef _MULTI_DATAMODEL 23442 23443 switch (ddi_model_convert_from(flag & FMODELS)) { 23444 case DDI_MODEL_ILP32: { 23445 struct mhioc_inresvs32 inresvs32; 23446 23447 if (ddi_copyin(arg, &inresvs32, 23448 sizeof (struct mhioc_inresvs32), flag) != 0) { 23449 return (EFAULT); 23450 } 23451 inresvs.li = (mhioc_resv_desc_list_t *)(uintptr_t)inresvs32.li; 23452 if ((rval = sd_persistent_reservation_in_read_resv(un, 23453 &inresvs, flag)) != 0) { 23454 return (rval); 23455 } 23456 inresvs32.generation = inresvs.generation; 23457 if (ddi_copyout(&inresvs32, arg, 23458 sizeof (struct mhioc_inresvs32), flag) != 0) { 23459 return (EFAULT); 23460 } 23461 break; 23462 } 23463 case DDI_MODEL_NONE: 23464 if (ddi_copyin(arg, &inresvs, 23465 sizeof (mhioc_inresvs_t), flag) != 0) { 23466 return (EFAULT); 23467 } 23468 if ((rval = sd_persistent_reservation_in_read_resv(un, 23469 &inresvs, flag)) != 0) { 23470 return (rval); 23471 } 23472 if (ddi_copyout(&inresvs, arg, 23473 sizeof (mhioc_inresvs_t), flag) != 0) { 23474 return (EFAULT); 23475 } 23476 break; 23477 } 23478 23479 #else /* ! _MULTI_DATAMODEL */ 23480 23481 if (ddi_copyin(arg, &inresvs, sizeof (mhioc_inresvs_t), flag) != 0) { 23482 return (EFAULT); 23483 } 23484 rval = sd_persistent_reservation_in_read_resv(un, &inresvs, flag); 23485 if (rval != 0) { 23486 return (rval); 23487 } 23488 if (ddi_copyout(&inresvs, arg, sizeof (mhioc_inresvs_t), flag)) { 23489 return (EFAULT); 23490 } 23491 23492 #endif /* ! _MULTI_DATAMODEL */ 23493 23494 return (rval); 23495 } 23496 23497 23498 /* 23499 * The following routines support the clustering functionality described below 23500 * and implement lost reservation reclaim functionality. 23501 * 23502 * Clustering 23503 * ---------- 23504 * The clustering code uses two different, independent forms of SCSI 23505 * reservation: traditional SCSI-2 Reserve/Release and the newer SCSI-3 23506 * Persistent Group Reservations. For any particular disk, it will use either 23507 * SCSI-2 or SCSI-3 PGR but never both at the same time for the same disk. 23508 * 23509 * SCSI-2 23510 * The cluster software takes ownership of a multi-hosted disk by issuing the 23511 * MHIOCTKOWN ioctl to the disk driver. It releases ownership by issuing the 23512 * MHIOCRELEASE ioctl. Closely related is the MHIOCENFAILFAST ioctl: a 23513 * cluster, just after taking ownership of the disk with the MHIOCTKOWN ioctl, 23514 * then issues the MHIOCENFAILFAST ioctl. This ioctl "enables failfast" in the 23515 * driver. The meaning of failfast is that if the driver (on this host) ever 23516 * encounters the scsi error return code RESERVATION_CONFLICT from the device, 23517 * it should immediately panic the host. The motivation for this ioctl is that 23518 * if this host does encounter reservation conflict, the underlying cause is 23519 * that some other host of the cluster has decided that this host is no longer 23520 * in the cluster and has seized control of the disks for itself. Since this 23521 * host is no longer in the cluster, it ought to panic itself.
The 23522 * MHIOCENFAILFAST ioctl does two things: 23523 * (a) it sets a flag that will cause any returned RESERVATION_CONFLICT 23524 * error to panic the host 23525 * (b) it sets up a periodic timer to test whether this host still has 23526 * "access" (in that no other host has reserved the device): if the 23527 * periodic timer gets RESERVATION_CONFLICT, the host is panicked. The 23528 * purpose of that periodic timer is to handle scenarios where the host is 23529 * otherwise temporarily quiescent, temporarily doing no real i/o. 23530 * The MHIOCTKOWN ioctl will "break" a reservation that is held by another host 23531 * by issuing a SCSI Bus Device Reset. It will then issue a SCSI Reserve for 23532 * the device itself. 23533 * 23534 * SCSI-3 PGR 23535 * A direct semantic implementation of the SCSI-3 Persistent Reservation 23536 * facility is supported through the shared multihost disk ioctls 23537 * (MHIOCGRP_INKEYS, MHIOCGRP_INRESV, MHIOCGRP_REGISTER, MHIOCGRP_RESERVE, 23538 * MHIOCGRP_PREEMPTANDABORT) 23539 * 23540 * Reservation Reclaim: 23541 * -------------------- 23542 * To support the lost reservation reclaim operations this driver creates a 23543 * single thread to handle reinstating reservations on all devices that have 23544 * lost reservations. sd_resv_reclaim_requests are logged for all devices that 23545 * have LOST RESERVATIONS when the scsi watch facility calls back 23546 * sd_mhd_watch_cb, and the reservation reclaim thread loops through the 23547 * requests to regain the lost reservations. 23548 */ 23549 23550 /* 23551 * Function: sd_check_mhd() 23552 * 23553 * Description: This function sets up and submits a scsi watch request or 23554 * terminates an existing watch request. This routine is used in 23555 * support of reservation reclaim. 23556 * 23557 * Arguments: dev - the device 'dev_t' is used for context to discriminate 23558 * among multiple watches that share the callback function 23559 * interval - the number of milliseconds specifying the watch 23560 * interval for issuing TEST UNIT READY commands. If 23561 * set to 0 the watch should be terminated. If the 23562 * interval is set to 0 and if the device is required 23563 * to hold reservation while disabling failfast, the 23564 * watch is restarted with an interval of 23565 * reinstate_resv_delay. 23566 * 23567 * Return Code: 0 - Successful submit/terminate of scsi watch request 23568 * ENXIO - Indicates an invalid device was specified 23569 * EAGAIN - Unable to submit the scsi watch request 23570 */ 23571 23572 static int 23573 sd_check_mhd(dev_t dev, int interval) 23574 { 23575 struct sd_lun *un; 23576 opaque_t token; 23577 23578 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 23579 return (ENXIO); 23580 } 23581 23582 /* is this a watch termination request? */ 23583 if (interval == 0) { 23584 mutex_enter(SD_MUTEX(un)); 23585 /* if there is an existing watch task then terminate it */ 23586 if (un->un_mhd_token) { 23587 token = un->un_mhd_token; 23588 un->un_mhd_token = NULL; 23589 mutex_exit(SD_MUTEX(un)); 23590 (void) scsi_watch_request_terminate(token, 23591 SCSI_WATCH_TERMINATE_ALL_WAIT); 23592 mutex_enter(SD_MUTEX(un)); 23593 } else { 23594 mutex_exit(SD_MUTEX(un)); 23595 /* 23596 * Note: If we return here we don't check for the 23597 * failfast case. This is the original legacy 23598 * implementation, but perhaps we should be checking 23599 * the failfast case.
23600 */ 23601 return (0); 23602 } 23603 /* 23604 * If the device is required to hold reservation while 23605 * disabling failfast, we need to restart the scsi_watch 23606 * routine with an interval of reinstate_resv_delay. 23607 */ 23608 if (un->un_resvd_status & SD_RESERVE) { 23609 interval = sd_reinstate_resv_delay/1000; 23610 } else { 23611 /* no failfast so bail */ 23612 mutex_exit(SD_MUTEX(un)); 23613 return (0); 23614 } 23615 mutex_exit(SD_MUTEX(un)); 23616 } 23617 23618 /* 23619 * adjust minimum time interval to 1 second, 23620 * and convert from msecs to usecs 23621 */ 23622 if (interval > 0 && interval < 1000) { 23623 interval = 1000; 23624 } 23625 interval *= 1000; 23626 23627 /* 23628 * submit the request to the scsi_watch service 23629 */ 23630 token = scsi_watch_request_submit(SD_SCSI_DEVP(un), interval, 23631 SENSE_LENGTH, sd_mhd_watch_cb, (caddr_t)dev); 23632 if (token == NULL) { 23633 return (EAGAIN); 23634 } 23635 23636 /* 23637 * save token for termination later on 23638 */ 23639 mutex_enter(SD_MUTEX(un)); 23640 un->un_mhd_token = token; 23641 mutex_exit(SD_MUTEX(un)); 23642 return (0); 23643 } 23644 23645 23646 /* 23647 * Function: sd_mhd_watch_cb() 23648 * 23649 * Description: This function is the callback function used by the scsi watch 23650 * facility. The scsi watch facility sends the "Test Unit Ready" 23651 * and processes the status. If applicable (i.e. a "Unit Attention" 23652 * status and automatic "Request Sense" not used) the scsi watch 23653 * facility will send a "Request Sense" and retrieve the sense data 23654 * to be passed to this callback function. In either case, whether 23655 * the "Request Sense" was automatic or submitted by the facility, 23656 * this callback is passed the status and sense data. 23657 * 23658 * Arguments: arg - the device 'dev_t' is used for context to discriminate 23659 * among multiple watches that share this callback function 23660 * resultp - scsi watch facility result packet containing scsi 23661 * packet, status byte and sense data 23662 * 23663 * Return Code: 0 - continue the watch task 23664 * non-zero - terminate the watch task 23665 */ 23666 23667 static int 23668 sd_mhd_watch_cb(caddr_t arg, struct scsi_watch_result *resultp) 23669 { 23670 struct sd_lun *un; 23671 struct scsi_status *statusp; 23672 uint8_t *sensep; 23673 struct scsi_pkt *pkt; 23674 uchar_t actual_sense_length; 23675 dev_t dev = (dev_t)arg; 23676 23677 ASSERT(resultp != NULL); 23678 statusp = resultp->statusp; 23679 sensep = (uint8_t *)resultp->sensep; 23680 pkt = resultp->pkt; 23681 actual_sense_length = resultp->actual_sense_length; 23682 23683 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 23684 return (ENXIO); 23685 } 23686 23687 SD_TRACE(SD_LOG_IOCTL_MHD, un, 23688 "sd_mhd_watch_cb: reason '%s', status '%s'\n", 23689 scsi_rname(pkt->pkt_reason), sd_sname(*((unsigned char *)statusp))); 23690 23691 /* Begin processing of the status and/or sense data */ 23692 if (pkt->pkt_reason != CMD_CMPLT) { 23693 /* Handle the incomplete packet */ 23694 sd_mhd_watch_incomplete(un, pkt); 23695 return (0); 23696 } else if (*((unsigned char *)statusp) != STATUS_GOOD) { 23697 if (*((unsigned char *)statusp) 23698 == STATUS_RESERVATION_CONFLICT) { 23699 /* 23700 * Handle a reservation conflict by panicking if 23701 * configured for failfast or by logging the conflict 23702 * and updating the reservation status 23703 */ 23704 mutex_enter(SD_MUTEX(un)); 23705 if ((un->un_resvd_status & SD_FAILFAST) && 23706 (sd_failfast_enable)) { 23707
sd_panic_for_res_conflict(un); 23708 /*NOTREACHED*/ 23709 } 23710 SD_INFO(SD_LOG_IOCTL_MHD, un, 23711 "sd_mhd_watch_cb: Reservation Conflict\n"); 23712 un->un_resvd_status |= SD_RESERVATION_CONFLICT; 23713 mutex_exit(SD_MUTEX(un)); 23714 } 23715 } 23716 23717 if (sensep != NULL) { 23718 if (actual_sense_length >= (SENSE_LENGTH - 2)) { 23719 mutex_enter(SD_MUTEX(un)); 23720 if ((scsi_sense_asc(sensep) == 23721 SD_SCSI_RESET_SENSE_CODE) && 23722 (un->un_resvd_status & SD_RESERVE)) { 23723 /* 23724 * The additional sense code indicates a power 23725 * on or bus device reset has occurred; update 23726 * the reservation status. 23727 */ 23728 un->un_resvd_status |= 23729 (SD_LOST_RESERVE | SD_WANT_RESERVE); 23730 SD_INFO(SD_LOG_IOCTL_MHD, un, 23731 "sd_mhd_watch_cb: Lost Reservation\n"); 23732 } 23733 } else { 23734 return (0); 23735 } 23736 } else { 23737 mutex_enter(SD_MUTEX(un)); 23738 } 23739 23740 if ((un->un_resvd_status & SD_RESERVE) && 23741 (un->un_resvd_status & SD_LOST_RESERVE)) { 23742 if (un->un_resvd_status & SD_WANT_RESERVE) { 23743 /* 23744 * A reset occurred in between the last probe and this 23745 * one, so if a timeout is pending cancel it. 23746 */ 23747 if (un->un_resvd_timeid) { 23748 timeout_id_t temp_id = un->un_resvd_timeid; 23749 un->un_resvd_timeid = NULL; 23750 mutex_exit(SD_MUTEX(un)); 23751 (void) untimeout(temp_id); 23752 mutex_enter(SD_MUTEX(un)); 23753 } 23754 un->un_resvd_status &= ~SD_WANT_RESERVE; 23755 } 23756 if (un->un_resvd_timeid == 0) { 23757 /* Schedule a timeout to handle the lost reservation */ 23758 un->un_resvd_timeid = timeout(sd_mhd_resvd_recover, 23759 (void *)dev, 23760 drv_usectohz(sd_reinstate_resv_delay)); 23761 } 23762 } 23763 mutex_exit(SD_MUTEX(un)); 23764 return (0); 23765 } 23766 23767 23768 /* 23769 * Function: sd_mhd_watch_incomplete() 23770 * 23771 * Description: This function is used to find out why a scsi pkt sent by the 23772 * scsi watch facility was not completed. Under some scenarios this 23773 * routine will return. Otherwise it will send a bus reset to see 23774 * if the drive is still online. 23775 * 23776 * Arguments: un - driver soft state (unit) structure 23777 * pkt - incomplete scsi pkt 23778 */ 23779 23780 static void 23781 sd_mhd_watch_incomplete(struct sd_lun *un, struct scsi_pkt *pkt) 23782 { 23783 int be_chatty; 23784 int perr; 23785 23786 ASSERT(pkt != NULL); 23787 ASSERT(un != NULL); 23788 be_chatty = (!(pkt->pkt_flags & FLAG_SILENT)); 23789 perr = (pkt->pkt_statistics & STAT_PERR); 23790 23791 mutex_enter(SD_MUTEX(un)); 23792 if (un->un_state == SD_STATE_DUMPING) { 23793 mutex_exit(SD_MUTEX(un)); 23794 return; 23795 } 23796 23797 switch (pkt->pkt_reason) { 23798 case CMD_UNX_BUS_FREE: 23799 /* 23800 * If we had a parity error that caused the target to drop BSY*, 23801 * don't be chatty about it. 23802 */ 23803 if (perr && be_chatty) { 23804 be_chatty = 0; 23805 } 23806 break; 23807 case CMD_TAG_REJECT: 23808 /* 23809 * The SCSI-2 spec states that a tag reject will be sent by the 23810 * target if tagged queuing is not supported. A tag reject may 23811 * also be sent during certain initialization periods or to 23812 * control internal resources. For the latter case the target 23813 * may also return Queue Full. 23814 * 23815 * If this driver receives a tag reject from a target that is 23816 * going through an init period or controlling internal 23817 * resources, tagged queuing will be disabled.
23818		 * than optimal behavior, but the driver is unable to determine
23819		 * the target state and assumes tagged queuing is not supported.
23820		 */
23821		pkt->pkt_flags = 0;
23822		un->un_tagflags = 0;
23823	
23824		if (un->un_f_opt_queueing == TRUE) {
23825			un->un_throttle = min(un->un_throttle, 3);
23826		} else {
23827			un->un_throttle = 1;
23828		}
23829		mutex_exit(SD_MUTEX(un));
23830		(void) scsi_ifsetcap(SD_ADDRESS(un), "tagged-qing", 0, 1);
23831		mutex_enter(SD_MUTEX(un));
23832		break;
23833	case CMD_INCOMPLETE:
23834		/*
23835		 * The transport stopped with an abnormal state; fall through
23836		 * and reset the target and/or bus unless selection did not
23837		 * complete (indicated by STATE_GOT_BUS), in which case we
23838		 * don't want to go through a target/bus reset.
23839		 */
23840		if (pkt->pkt_state == STATE_GOT_BUS) {
23841			break;
23842		}
23843		/*FALLTHROUGH*/
23844	
23845	case CMD_TIMEOUT:
23846	default:
23847		/*
23848		 * The lun may still be running the command, so a lun reset
23849		 * should be attempted. If the lun reset fails or cannot be
23850		 * issued, then try a target reset. Lastly, try a bus reset.
23851		 */
23852		if ((pkt->pkt_statistics &
23853		    (STAT_BUS_RESET|STAT_DEV_RESET|STAT_ABORTED)) == 0) {
23854			int reset_retval = 0;
23855			mutex_exit(SD_MUTEX(un));
23856			if (un->un_f_allow_bus_device_reset == TRUE) {
23857				if (un->un_f_lun_reset_enabled == TRUE) {
23858					reset_retval =
23859					    scsi_reset(SD_ADDRESS(un),
23860					    RESET_LUN);
23861				}
23862				if (reset_retval == 0) {
23863					reset_retval =
23864					    scsi_reset(SD_ADDRESS(un),
23865					    RESET_TARGET);
23866				}
23867			}
23868			if (reset_retval == 0) {
23869				(void) scsi_reset(SD_ADDRESS(un), RESET_ALL);
23870			}
23871			mutex_enter(SD_MUTEX(un));
23872		}
23873		break;
23874	}
23875	
23876		/* A device/bus reset has occurred; update the reservation status. */
23877		if ((pkt->pkt_reason == CMD_RESET) || (pkt->pkt_statistics &
23878		    (STAT_BUS_RESET | STAT_DEV_RESET))) {
23879			if ((un->un_resvd_status & SD_RESERVE) == SD_RESERVE) {
23880				un->un_resvd_status |=
23881				    (SD_LOST_RESERVE | SD_WANT_RESERVE);
23882				SD_INFO(SD_LOG_IOCTL_MHD, un,
23883				    "sd_mhd_watch_incomplete: Lost Reservation\n");
23884			}
23885		}
23886	
23887		/*
23888		 * The disk has been turned off; update the device state.
23889		 *
23890		 * Note: Should we be offlining the disk here?
23891		 */
23892		if (pkt->pkt_state == STATE_GOT_BUS) {
23893			SD_INFO(SD_LOG_IOCTL_MHD, un, "sd_mhd_watch_incomplete: "
23894			    "Disk not responding to selection\n");
23895			if (un->un_state != SD_STATE_OFFLINE) {
23896				New_state(un, SD_STATE_OFFLINE);
23897			}
23898		} else if (be_chatty) {
23899			/*
23900			 * suppress messages if they are all the same pkt reason;
23901			 * with TQ, many (up to 256) are returned with the same
23902			 * pkt_reason
23903			 */
23904			if (pkt->pkt_reason != un->un_last_pkt_reason) {
23905				SD_ERROR(SD_LOG_IOCTL_MHD, un,
23906				    "sd_mhd_watch_incomplete: "
23907				    "SCSI transport failed: reason '%s'\n",
23908				    scsi_rname(pkt->pkt_reason));
23909			}
23910		}
23911		un->un_last_pkt_reason = pkt->pkt_reason;
23912		mutex_exit(SD_MUTEX(un));
23913	}
23914	
23915	
23916	/*
23917	 * Function:	sd_sname()
23918	 *
23919	 * Description:	This is a simple little routine to return a string containing
23920	 *		a printable description of a command status byte for use in
23921	 *		logging.
23922	 *
23923	 * Arguments:	status - the status byte (passed by value)
23924	 *
23925	 * Return Code:	char * - string containing the status description.
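 *
 *		Usage sketch (illustrative only, not part of the driver):
 *
 *			uchar_t st = STATUS_CHECK;
 *			cmn_err(CE_NOTE, "cmd status: %s", sd_sname(st));
 *
 *		would log "cmd status: check condition".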
23926 */ 23927 23928 static char * 23929 sd_sname(uchar_t status) 23930 { 23931 switch (status & STATUS_MASK) { 23932 case STATUS_GOOD: 23933 return ("good status"); 23934 case STATUS_CHECK: 23935 return ("check condition"); 23936 case STATUS_MET: 23937 return ("condition met"); 23938 case STATUS_BUSY: 23939 return ("busy"); 23940 case STATUS_INTERMEDIATE: 23941 return ("intermediate"); 23942 case STATUS_INTERMEDIATE_MET: 23943 return ("intermediate - condition met"); 23944 case STATUS_RESERVATION_CONFLICT: 23945 return ("reservation_conflict"); 23946 case STATUS_TERMINATED: 23947 return ("command terminated"); 23948 case STATUS_QFULL: 23949 return ("queue full"); 23950 default: 23951 return ("<unknown status>"); 23952 } 23953 } 23954 23955 23956 /* 23957 * Function: sd_mhd_resvd_recover() 23958 * 23959 * Description: This function adds a reservation entry to the 23960 * sd_resv_reclaim_request list and signals the reservation 23961 * reclaim thread that there is work pending. If the reservation 23962 * reclaim thread has not been previously created this function 23963 * will kick it off. 23964 * 23965 * Arguments: arg - the device 'dev_t' is used for context to discriminate 23966 * among multiple watches that share this callback function 23967 * 23968 * Context: This routine is called by timeout() and is run in interrupt 23969 * context. It must not sleep or call other functions which may 23970 * sleep. 23971 */ 23972 23973 static void 23974 sd_mhd_resvd_recover(void *arg) 23975 { 23976 dev_t dev = (dev_t)arg; 23977 struct sd_lun *un; 23978 struct sd_thr_request *sd_treq = NULL; 23979 struct sd_thr_request *sd_cur = NULL; 23980 struct sd_thr_request *sd_prev = NULL; 23981 int already_there = 0; 23982 23983 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 23984 return; 23985 } 23986 23987 mutex_enter(SD_MUTEX(un)); 23988 un->un_resvd_timeid = NULL; 23989 if (un->un_resvd_status & SD_WANT_RESERVE) { 23990 /* 23991 * There was a reset so don't issue the reserve, allow the 23992 * sd_mhd_watch_cb callback function to notice this and 23993 * reschedule the timeout for reservation. 23994 */ 23995 mutex_exit(SD_MUTEX(un)); 23996 return; 23997 } 23998 mutex_exit(SD_MUTEX(un)); 23999 24000 /* 24001 * Add this device to the sd_resv_reclaim_request list and the 24002 * sd_resv_reclaim_thread should take care of the rest. 24003 * 24004 * Note: We can't sleep in this context so if the memory allocation 24005 * fails allow the sd_mhd_watch_cb callback function to notice this and 24006 * reschedule the timeout for reservation. 
(4378460)
24007	 */
24008		sd_treq = (struct sd_thr_request *)
24009		    kmem_zalloc(sizeof (struct sd_thr_request), KM_NOSLEEP);
24010		if (sd_treq == NULL) {
24011			return;
24012		}
24013	
24014		sd_treq->sd_thr_req_next = NULL;
24015		sd_treq->dev = dev;
24016		mutex_enter(&sd_tr.srq_resv_reclaim_mutex);
24017		if (sd_tr.srq_thr_req_head == NULL) {
24018			sd_tr.srq_thr_req_head = sd_treq;
24019		} else {
24020			sd_cur = sd_prev = sd_tr.srq_thr_req_head;
24021			for (; sd_cur != NULL; sd_cur = sd_cur->sd_thr_req_next) {
24022				if (sd_cur->dev == dev) {
24023					/*
24024					 * already in the queue, so don't log
24025					 * another request for this device
24026					 */
24027					already_there = 1;
24028					break;
24029				}
24030				sd_prev = sd_cur;
24031			}
24032			if (!already_there) {
24033				SD_INFO(SD_LOG_IOCTL_MHD, un, "sd_mhd_resvd_recover: "
24034				    "logging request for %lx\n", dev);
24035				sd_prev->sd_thr_req_next = sd_treq;
24036			} else {
24037				kmem_free(sd_treq, sizeof (struct sd_thr_request));
24038			}
24039		}
24040	
24041		/*
24042		 * Create a kernel thread to do the reservation reclaim and free up
24043		 * this thread; we cannot block this (timeout callback) thread while
24044		 * we go away to do the reservation reclaim.
24045		 */
24046		if (sd_tr.srq_resv_reclaim_thread == NULL)
24047			sd_tr.srq_resv_reclaim_thread = thread_create(NULL, 0,
24048			    sd_resv_reclaim_thread, NULL,
24049			    0, &p0, TS_RUN, v.v_maxsyspri - 2);
24050	
24051		/* Tell the reservation reclaim thread that it has work to do */
24052		cv_signal(&sd_tr.srq_resv_reclaim_cv);
24053		mutex_exit(&sd_tr.srq_resv_reclaim_mutex);
24054	}
24055	
24056	/*
24057	 * Function:	sd_resv_reclaim_thread()
24058	 *
24059	 * Description:	This function implements the reservation reclaim operations.
24060	 *
24061	 * Arguments:	none; reclaim requests are taken from the global
24062	 *		sd_tr.srq_thr_req_head queue rather than passed in.
24063	 */
24064	
24065	static void
24066	sd_resv_reclaim_thread()
24067	{
24068		struct sd_lun *un;
24069		struct sd_thr_request *sd_mhreq;
24070	
24071		/* Wait for work */
24072		mutex_enter(&sd_tr.srq_resv_reclaim_mutex);
24073		if (sd_tr.srq_thr_req_head == NULL) {
24074			cv_wait(&sd_tr.srq_resv_reclaim_cv,
24075			    &sd_tr.srq_resv_reclaim_mutex);
24076		}
24077	
24078		/* Loop while we have work */
24079		while ((sd_tr.srq_thr_cur_req = sd_tr.srq_thr_req_head) != NULL) {
24080			un = ddi_get_soft_state(sd_state,
24081			    SDUNIT(sd_tr.srq_thr_cur_req->dev));
24082			if (un == NULL) {
24083				/*
24084				 * softstate structure is NULL, so just
24085				 * dequeue the request and continue
24086				 */
24087				sd_tr.srq_thr_req_head =
24088				    sd_tr.srq_thr_cur_req->sd_thr_req_next;
24089				kmem_free(sd_tr.srq_thr_cur_req,
24090				    sizeof (struct sd_thr_request));
24091				continue;
24092			}
24093	
24094			/* dequeue the request */
24095			sd_mhreq = sd_tr.srq_thr_cur_req;
24096			sd_tr.srq_thr_req_head =
24097			    sd_tr.srq_thr_cur_req->sd_thr_req_next;
24098			mutex_exit(&sd_tr.srq_resv_reclaim_mutex);
24099	
24100			/*
24101			 * Reclaim reservation only if SD_RESERVE is still set. There
24102			 * may have been a call to MHIOCRELEASE before we got here.
24103			 */
24104			mutex_enter(SD_MUTEX(un));
24105			if ((un->un_resvd_status & SD_RESERVE) == SD_RESERVE) {
24106				/*
24107				 * Note: The SD_LOST_RESERVE flag is cleared before
24108				 * reclaiming the reservation. If this is done after the
24109				 * call to sd_reserve_release, a reservation loss in the
24110				 * window between pkt completion of the reserve cmd and
24111				 * the mutex_enter below may not be recognized.
24112				 */
24113				un->un_resvd_status &= ~SD_LOST_RESERVE;
24114				mutex_exit(SD_MUTEX(un));
24115	
24116				if (sd_reserve_release(sd_mhreq->dev,
24117				    SD_RESERVE) == 0) {
24118					mutex_enter(SD_MUTEX(un));
24119					un->un_resvd_status |= SD_RESERVE;
24120					mutex_exit(SD_MUTEX(un));
24121					SD_INFO(SD_LOG_IOCTL_MHD, un,
24122					    "sd_resv_reclaim_thread: "
24123					    "Reservation Recovered\n");
24124				} else {
24125					mutex_enter(SD_MUTEX(un));
24126					un->un_resvd_status |= SD_LOST_RESERVE;
24127					mutex_exit(SD_MUTEX(un));
24128					SD_INFO(SD_LOG_IOCTL_MHD, un,
24129					    "sd_resv_reclaim_thread: Failed "
24130					    "Reservation Recovery\n");
24131				}
24132			} else {
24133				mutex_exit(SD_MUTEX(un));
24134			}
24135			mutex_enter(&sd_tr.srq_resv_reclaim_mutex);
24136			ASSERT(sd_mhreq == sd_tr.srq_thr_cur_req);
24137			kmem_free(sd_mhreq, sizeof (struct sd_thr_request));
24138			sd_mhreq = sd_tr.srq_thr_cur_req = NULL;
24139			/*
24140			 * wake up the destroy thread if anyone is waiting on
24141			 * us to complete.
24142			 */
24143			cv_signal(&sd_tr.srq_inprocess_cv);
24144			SD_TRACE(SD_LOG_IOCTL_MHD, un,
24145			    "sd_resv_reclaim_thread: cv_signalling current request \n");
24146		}
24147	
24148		/*
24149		 * clean up the sd_tr structure now that this thread will no longer exist
24150		 */
24151		ASSERT(sd_tr.srq_thr_req_head == NULL);
24152		ASSERT(sd_tr.srq_thr_cur_req == NULL);
24153		sd_tr.srq_resv_reclaim_thread = NULL;
24154		mutex_exit(&sd_tr.srq_resv_reclaim_mutex);
24155		thread_exit();
24156	}
24157	
24158	
24159	/*
24160	 * Function:	sd_rmv_resv_reclaim_req()
24161	 *
24162	 * Description:	This function removes any pending reservation reclaim requests
24163	 *		for the specified device.
24164	 *
24165	 * Arguments:	dev - the device 'dev_t'
24166	 */
24167	
24168	static void
24169	sd_rmv_resv_reclaim_req(dev_t dev)
24170	{
24171		struct sd_thr_request *sd_mhreq;
24172		struct sd_thr_request *sd_prev;
24173	
24174		/* Remove a reservation reclaim request from the list */
24175		mutex_enter(&sd_tr.srq_resv_reclaim_mutex);
24176		if (sd_tr.srq_thr_cur_req && sd_tr.srq_thr_cur_req->dev == dev) {
24177			/*
24178			 * We are attempting to reinstate reservation for
24179			 * this device. We wait for sd_reserve_release()
24180			 * to return before we return.
24181			 */
24182			cv_wait(&sd_tr.srq_inprocess_cv,
24183			    &sd_tr.srq_resv_reclaim_mutex);
24184		} else {
24185			sd_prev = sd_mhreq = sd_tr.srq_thr_req_head;
24186			if (sd_mhreq && sd_mhreq->dev == dev) {
24187				sd_tr.srq_thr_req_head = sd_mhreq->sd_thr_req_next;
24188				kmem_free(sd_mhreq, sizeof (struct sd_thr_request));
24189				mutex_exit(&sd_tr.srq_resv_reclaim_mutex);
24190				return;
24191			}
24192			for (; sd_mhreq != NULL; sd_mhreq = sd_mhreq->sd_thr_req_next) {
24193				if (sd_mhreq && sd_mhreq->dev == dev) {
24194					break;
24195				}
24196				sd_prev = sd_mhreq;
24197			}
24198			if (sd_mhreq != NULL) {
24199				sd_prev->sd_thr_req_next = sd_mhreq->sd_thr_req_next;
24200				kmem_free(sd_mhreq, sizeof (struct sd_thr_request));
24201			}
24202		}
24203		mutex_exit(&sd_tr.srq_resv_reclaim_mutex);
24204	}
24205	
24206	
24207	/*
24208	 * Function:	sd_mhd_reset_notify_cb()
24209	 *
24210	 * Description:	This is a callback function for scsi_reset_notify. This
24211	 *		function updates the softstate reserved status and logs the
24212	 *		reset. The driver scsi watch facility callback function
24213	 *		(sd_mhd_watch_cb) and reservation reclaim thread functionality
24214	 *		will reclaim the reservation.
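 *
 *		Illustrative sketch (hedged; the driver's actual registration
 *		is done elsewhere, as part of reservation setup) of how such
 *		a callback is hooked up with the transport:
 *
 *			(void) scsi_reset_notify(SD_ADDRESS(un),
 *			    SCSI_RESET_NOTIFY, sd_mhd_reset_notify_cb,
 *			    (caddr_t)un);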
24215	 *
24216	 * Arguments:	arg - driver soft state (unit) structure
24217	 */
24218	
24219	static void
24220	sd_mhd_reset_notify_cb(caddr_t arg)
24221	{
24222		struct sd_lun *un = (struct sd_lun *)arg;
24223	
24224		mutex_enter(SD_MUTEX(un));
24225		if ((un->un_resvd_status & SD_RESERVE) == SD_RESERVE) {
24226			un->un_resvd_status |= (SD_LOST_RESERVE | SD_WANT_RESERVE);
24227			SD_INFO(SD_LOG_IOCTL_MHD, un,
24228			    "sd_mhd_reset_notify_cb: Lost Reservation\n");
24229		}
24230		mutex_exit(SD_MUTEX(un));
24231	}
24232	
24233	
24234	/*
24235	 * Function:	sd_take_ownership()
24236	 *
24237	 * Description:	This routine implements an algorithm to achieve a stable
24238	 *		reservation on disks which don't implement priority reserve,
24239	 *		and makes sure that other hosts lose any re-reservation attempts.
24240	 *		This algorithm consists of a loop that keeps issuing the RESERVE
24241	 *		for some period of time (min_ownership_delay, default 6 seconds).
24242	 *		During that loop, it looks to see if there has been a bus device
24243	 *		reset or bus reset (both of which cause an existing reservation
24244	 *		to be lost). If the reservation is lost, reissue the RESERVE until
24245	 *		a period of min_ownership_delay with no resets has gone by, or
24246	 *		until max_ownership_delay has expired. This loop ensures that
24247	 *		the host really did manage to reserve the device, in spite of
24248	 *		resets. The looping for min_ownership_delay (default six
24249	 *		seconds) is important to early generation clustering products,
24250	 *		Solstice HA 1.x and Sun Cluster 2.x. Those products use an
24251	 *		MHIOCENFAILFAST periodic timer of two seconds. By having
24252	 *		MHIOCTKOWN issue Reserves in a loop for six seconds, and having
24253	 *		MHIOCENFAILFAST poll every two seconds, the idea is that by the
24254	 *		time the MHIOCTKOWN ioctl returns, the other host (if any) will
24255	 *		have already noticed, via the MHIOCENFAILFAST polling, that it
24256	 *		no longer "owns" the disk and will have panicked itself. Thus,
24257	 *		the host issuing the MHIOCTKOWN is assured (with timing
24258	 *		dependencies) that by the time it actually starts to use the
24259	 *		disk for real work, the old owner is no longer accessing it.
24260	 *
24261	 *		min_ownership_delay is the minimum amount of time for which the
24262	 *		disk must be reserved continuously devoid of resets before the
24263	 *		MHIOCTKOWN ioctl will return success.
24264	 *
24265	 *		max_ownership_delay indicates the amount of time by which the
24266	 *		take ownership should succeed or time out with an error.
24267	 *
24268	 * Arguments:	dev - the device 'dev_t'
24269	 *		*p - struct containing timing info.
24270	 *
24271	 * Return Code:	0 for success or error code
24272	 */
24273	
24274	static int
24275	sd_take_ownership(dev_t dev, struct mhioctkown *p)
24276	{
24277		struct sd_lun *un;
24278		int rval;
24279		int err;
24280		int reservation_count = 0;
24281		int min_ownership_delay = 6000000; /* in usec */
24282		int max_ownership_delay = 30000000; /* in usec */
24283		clock_t start_time;	/* starting time of this algorithm */
24284		clock_t end_time;	/* time limit for giving up */
24285		clock_t ownership_time;	/* time limit for stable ownership */
24286		clock_t current_time;
24287		clock_t previous_current_time;
24288	
24289		if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) {
24290			return (ENXIO);
24291		}
24292	
24293		/*
24294		 * Attempt a device reservation. A priority reservation is requested.
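	 *
	 * Illustrative timing, assuming the defaults above: the loop below
	 * sleeps 500000 usec (0.5 sec) per pass, so min_ownership_delay
	 * (6 sec) corresponds to roughly 12 conflict-free passes (with at
	 * least reservation_count >= 4) before ownership is considered
	 * stable, while max_ownership_delay (30 sec) caps the whole attempt
	 * at about 60 passes.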
24295 */ 24296 if ((rval = sd_reserve_release(dev, SD_PRIORITY_RESERVE)) 24297 != SD_SUCCESS) { 24298 SD_ERROR(SD_LOG_IOCTL_MHD, un, 24299 "sd_take_ownership: return(1)=%d\n", rval); 24300 return (rval); 24301 } 24302 24303 /* Update the softstate reserved status to indicate the reservation */ 24304 mutex_enter(SD_MUTEX(un)); 24305 un->un_resvd_status |= SD_RESERVE; 24306 un->un_resvd_status &= 24307 ~(SD_LOST_RESERVE | SD_WANT_RESERVE | SD_RESERVATION_CONFLICT); 24308 mutex_exit(SD_MUTEX(un)); 24309 24310 if (p != NULL) { 24311 if (p->min_ownership_delay != 0) { 24312 min_ownership_delay = p->min_ownership_delay * 1000; 24313 } 24314 if (p->max_ownership_delay != 0) { 24315 max_ownership_delay = p->max_ownership_delay * 1000; 24316 } 24317 } 24318 SD_INFO(SD_LOG_IOCTL_MHD, un, 24319 "sd_take_ownership: min, max delays: %d, %d\n", 24320 min_ownership_delay, max_ownership_delay); 24321 24322 start_time = ddi_get_lbolt(); 24323 current_time = start_time; 24324 ownership_time = current_time + drv_usectohz(min_ownership_delay); 24325 end_time = start_time + drv_usectohz(max_ownership_delay); 24326 24327 while (current_time - end_time < 0) { 24328 delay(drv_usectohz(500000)); 24329 24330 if ((err = sd_reserve_release(dev, SD_RESERVE)) != 0) { 24331 if ((sd_reserve_release(dev, SD_RESERVE)) != 0) { 24332 mutex_enter(SD_MUTEX(un)); 24333 rval = (un->un_resvd_status & 24334 SD_RESERVATION_CONFLICT) ? EACCES : EIO; 24335 mutex_exit(SD_MUTEX(un)); 24336 break; 24337 } 24338 } 24339 previous_current_time = current_time; 24340 current_time = ddi_get_lbolt(); 24341 mutex_enter(SD_MUTEX(un)); 24342 if (err || (un->un_resvd_status & SD_LOST_RESERVE)) { 24343 ownership_time = ddi_get_lbolt() + 24344 drv_usectohz(min_ownership_delay); 24345 reservation_count = 0; 24346 } else { 24347 reservation_count++; 24348 } 24349 un->un_resvd_status |= SD_RESERVE; 24350 un->un_resvd_status &= ~(SD_LOST_RESERVE | SD_WANT_RESERVE); 24351 mutex_exit(SD_MUTEX(un)); 24352 24353 SD_INFO(SD_LOG_IOCTL_MHD, un, 24354 "sd_take_ownership: ticks for loop iteration=%ld, " 24355 "reservation=%s\n", (current_time - previous_current_time), 24356 reservation_count ? 
"ok" : "reclaimed"); 24357 24358 if (current_time - ownership_time >= 0 && 24359 reservation_count >= 4) { 24360 rval = 0; /* Achieved a stable ownership */ 24361 break; 24362 } 24363 if (current_time - end_time >= 0) { 24364 rval = EACCES; /* No ownership in max possible time */ 24365 break; 24366 } 24367 } 24368 SD_TRACE(SD_LOG_IOCTL_MHD, un, 24369 "sd_take_ownership: return(2)=%d\n", rval); 24370 return (rval); 24371 } 24372 24373 24374 /* 24375 * Function: sd_reserve_release() 24376 * 24377 * Description: This function builds and sends scsi RESERVE, RELEASE, and 24378 * PRIORITY RESERVE commands based on a user specified command type 24379 * 24380 * Arguments: dev - the device 'dev_t' 24381 * cmd - user specified command type; one of SD_PRIORITY_RESERVE, 24382 * SD_RESERVE, SD_RELEASE 24383 * 24384 * Return Code: 0 or Error Code 24385 */ 24386 24387 static int 24388 sd_reserve_release(dev_t dev, int cmd) 24389 { 24390 struct uscsi_cmd *com = NULL; 24391 struct sd_lun *un = NULL; 24392 char cdb[CDB_GROUP0]; 24393 int rval; 24394 24395 ASSERT((cmd == SD_RELEASE) || (cmd == SD_RESERVE) || 24396 (cmd == SD_PRIORITY_RESERVE)); 24397 24398 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 24399 return (ENXIO); 24400 } 24401 24402 /* instantiate and initialize the command and cdb */ 24403 com = kmem_zalloc(sizeof (*com), KM_SLEEP); 24404 bzero(cdb, CDB_GROUP0); 24405 com->uscsi_flags = USCSI_SILENT; 24406 com->uscsi_timeout = un->un_reserve_release_time; 24407 com->uscsi_cdblen = CDB_GROUP0; 24408 com->uscsi_cdb = cdb; 24409 if (cmd == SD_RELEASE) { 24410 cdb[0] = SCMD_RELEASE; 24411 } else { 24412 cdb[0] = SCMD_RESERVE; 24413 } 24414 24415 /* Send the command. */ 24416 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_SYSSPACE, 24417 SD_PATH_STANDARD); 24418 24419 /* 24420 * "break" a reservation that is held by another host, by issuing a 24421 * reset if priority reserve is desired, and we could not get the 24422 * device. 24423 */ 24424 if ((cmd == SD_PRIORITY_RESERVE) && 24425 (rval != 0) && (com->uscsi_status == STATUS_RESERVATION_CONFLICT)) { 24426 /* 24427 * First try to reset the LUN. If we cannot, then try a target 24428 * reset, followed by a bus reset if the target reset fails. 24429 */ 24430 int reset_retval = 0; 24431 if (un->un_f_lun_reset_enabled == TRUE) { 24432 reset_retval = scsi_reset(SD_ADDRESS(un), RESET_LUN); 24433 } 24434 if (reset_retval == 0) { 24435 /* The LUN reset either failed or was not issued */ 24436 reset_retval = scsi_reset(SD_ADDRESS(un), RESET_TARGET); 24437 } 24438 if ((reset_retval == 0) && 24439 (scsi_reset(SD_ADDRESS(un), RESET_ALL) == 0)) { 24440 rval = EIO; 24441 kmem_free(com, sizeof (*com)); 24442 return (rval); 24443 } 24444 24445 bzero(com, sizeof (struct uscsi_cmd)); 24446 com->uscsi_flags = USCSI_SILENT; 24447 com->uscsi_cdb = cdb; 24448 com->uscsi_cdblen = CDB_GROUP0; 24449 com->uscsi_timeout = 5; 24450 24451 /* 24452 * Reissue the last reserve command, this time without request 24453 * sense. Assume that it is just a regular reserve command. 24454 */ 24455 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_SYSSPACE, 24456 SD_PATH_STANDARD); 24457 } 24458 24459 /* Return an error if still getting a reservation conflict. 
	 */
24460	if ((rval != 0) && (com->uscsi_status == STATUS_RESERVATION_CONFLICT)) {
24461		rval = EACCES;
24462	}
24463	
24464		kmem_free(com, sizeof (*com));
24465		return (rval);
24466	}
24467	
24468	
24469	#define	SD_NDUMP_RETRIES	12
24470	/*
24471	 * System Crash Dump routine
24472	 */
24473	
24474	static int
24475	sddump(dev_t dev, caddr_t addr, daddr_t blkno, int nblk)
24476	{
24477		int instance;
24478		int partition;
24479		int i;
24480		int err;
24481		struct sd_lun *un;
24482		struct scsi_pkt *wr_pktp;
24483		struct buf *wr_bp;
24484		struct buf wr_buf;
24485		daddr_t tgt_byte_offset; /* rmw - byte offset for target */
24486		daddr_t tgt_blkno; /* rmw - blkno for target */
24487		size_t tgt_byte_count; /* rmw - # of bytes to xfer */
24488		size_t tgt_nblk; /* rmw - # of tgt blks to xfer */
24489		size_t io_start_offset;
24490		int doing_rmw = FALSE;
24491		int rval;
24492		ssize_t dma_resid;
24493		daddr_t oblkno;
24494		diskaddr_t nblks = 0;
24495		diskaddr_t start_block;
24496	
24497		instance = SDUNIT(dev);
24498		if (((un = ddi_get_soft_state(sd_state, instance)) == NULL) ||
24499		    !SD_IS_VALID_LABEL(un) || ISCD(un)) {
24500			return (ENXIO);
24501		}
24502	
24503		_NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*un))
24504	
24505		SD_TRACE(SD_LOG_DUMP, un, "sddump: entry\n");
24506	
24507		partition = SDPART(dev);
24508		SD_INFO(SD_LOG_DUMP, un, "sddump: partition = %d\n", partition);
24509	
24510		/* Validate the blocks to dump against the partition size. */
24511	
24512		(void) cmlb_partinfo(un->un_cmlbhandle, partition,
24513		    &nblks, &start_block, NULL, NULL, (void *)SD_PATH_DIRECT);
24514	
24515		if ((blkno + nblk) > nblks) {
24516			SD_TRACE(SD_LOG_DUMP, un,
24517			    "sddump: dump range larger than partition: "
24518			    "blkno = 0x%x, nblk = 0x%x, dkl_nblk = 0x%x\n",
24519			    blkno, nblk, nblks);
24520			return (EINVAL);
24521		}
24522	
24523		mutex_enter(&un->un_pm_mutex);
24524		if (SD_DEVICE_IS_IN_LOW_POWER(un)) {
24525			struct scsi_pkt *start_pktp;
24526	
24527			mutex_exit(&un->un_pm_mutex);
24528	
24529			/*
24530			 * use the pm framework to power on the HBA first
24531			 */
24532			(void) pm_raise_power(SD_DEVINFO(un), 0, SD_SPINDLE_ON);
24533	
24534			/*
24535			 * Dump no longer uses sdpower to power on a device; it's
24536			 * done in-line here so it can be done in polled mode.
24537			 */
24538	
24539			SD_INFO(SD_LOG_DUMP, un, "sddump: starting device\n");
24540	
24541			start_pktp = scsi_init_pkt(SD_ADDRESS(un), NULL, NULL,
24542			    CDB_GROUP0, un->un_status_len, 0, 0, NULL_FUNC, NULL);
24543	
24544			if (start_pktp == NULL) {
24545				/* We were not given a SCSI packet, fail. */
24546				return (EIO);
24547			}
24548			bzero(start_pktp->pkt_cdbp, CDB_GROUP0);
24549			start_pktp->pkt_cdbp[0] = SCMD_START_STOP;
24550			start_pktp->pkt_cdbp[4] = SD_TARGET_START;
24551			start_pktp->pkt_flags = FLAG_NOINTR;
24552	
24553			mutex_enter(SD_MUTEX(un));
24554			SD_FILL_SCSI1_LUN(un, start_pktp);
24555			mutex_exit(SD_MUTEX(un));
24556			/*
24557			 * Scsi_poll returns 0 (success) if the command completes and
24558			 * the status block is STATUS_GOOD.
24559			 */
24560			if (sd_scsi_poll(un, start_pktp) != 0) {
24561				scsi_destroy_pkt(start_pktp);
24562				return (EIO);
24563			}
24564			scsi_destroy_pkt(start_pktp);
24565			(void) sd_ddi_pm_resume(un);
24566		} else {
24567			mutex_exit(&un->un_pm_mutex);
24568		}
24569	
24570		mutex_enter(SD_MUTEX(un));
24571		un->un_throttle = 0;
24572	
24573		/*
24574		 * The first time through, reset the specific target device.
24575		 * However, when cpr calls sddump we know that sd is in a
24576		 * good state, so no bus reset is required.
24577		 * Clear sense data via Request Sense cmd.
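	 *
	 * Illustrative note: everything issued from sddump() runs polled
	 * (FLAG_NOINTR packets via sd_scsi_poll()/sd_send_polled_RQS()),
	 * since interrupts cannot be relied upon at crash-dump time.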
24578	 * In sddump we don't care about allow_bus_device_reset anymore.
24579	 */
24580	
24581		if ((un->un_state != SD_STATE_SUSPENDED) &&
24582		    (un->un_state != SD_STATE_DUMPING)) {
24583	
24584			New_state(un, SD_STATE_DUMPING);
24585	
24586			if (un->un_f_is_fibre == FALSE) {
24587				mutex_exit(SD_MUTEX(un));
24588				/*
24589				 * Attempt a bus reset for parallel SCSI.
24590				 *
24591				 * Note: A bus reset is required because on some host
24592				 * systems (e.g. E420R) a bus device reset is
24593				 * insufficient to reset the state of the target.
24594				 *
24595				 * Note: Don't issue the reset for fibre-channel,
24596				 * because this tends to hang the bus (loop) for
24597				 * too long while everyone is logging out and in
24598				 * and the deadman timer for dumping will fire
24599				 * before the dump is complete.
24600				 */
24601				if (scsi_reset(SD_ADDRESS(un), RESET_ALL) == 0) {
24602					mutex_enter(SD_MUTEX(un));
24603					Restore_state(un);
24604					mutex_exit(SD_MUTEX(un));
24605					return (EIO);
24606				}
24607	
24608				/* Delay to give the device some recovery time. */
24609				drv_usecwait(10000);
24610	
24611				if (sd_send_polled_RQS(un) == SD_FAILURE) {
24612					SD_INFO(SD_LOG_DUMP, un,
24613					    "sddump: sd_send_polled_RQS failed\n");
24614				}
24615				mutex_enter(SD_MUTEX(un));
24616			}
24617		}
24618	
24619		/*
24620		 * Convert the partition-relative block number to a
24621		 * disk physical block number.
24622		 */
24623		blkno += start_block;
24624	
24625		SD_INFO(SD_LOG_DUMP, un, "sddump: disk blkno = 0x%x\n", blkno);
24626	
24627	
24628		/*
24629		 * Check if the device has a non-512 block size.
24630		 */
24631		wr_bp = NULL;
24632		if (NOT_DEVBSIZE(un)) {
24633			tgt_byte_offset = blkno * un->un_sys_blocksize;
24634			tgt_byte_count = nblk * un->un_sys_blocksize;
24635			if ((tgt_byte_offset % un->un_tgt_blocksize) ||
24636			    (tgt_byte_count % un->un_tgt_blocksize)) {
24637				doing_rmw = TRUE;
24638				/*
24639				 * Calculate the block number and the number of blocks
24640				 * in terms of the media block size.
24641				 */
24642				tgt_blkno = tgt_byte_offset / un->un_tgt_blocksize;
24643				tgt_nblk =
24644				    ((tgt_byte_offset + tgt_byte_count +
24645				    (un->un_tgt_blocksize - 1)) /
24646				    un->un_tgt_blocksize) - tgt_blkno;
24647	
24648				/*
24649				 * Invoke the routine which is going to do the read
24650				 * part of the read-modify-write.
24651				 * Note that this routine returns a pointer to
24652				 * a valid bp in wr_bp.
24653				 */
24654				err = sddump_do_read_of_rmw(un, tgt_blkno, tgt_nblk,
24655				    &wr_bp);
24656				if (err) {
24657					mutex_exit(SD_MUTEX(un));
24658					return (err);
24659				}
24660				/*
24661				 * Offset is being calculated as -
24662				 * (original block # * system block size) -
24663				 * (new block # * target block size)
24664				 */
24665				io_start_offset =
24666				    ((uint64_t)(blkno * un->un_sys_blocksize)) -
24667				    ((uint64_t)(tgt_blkno * un->un_tgt_blocksize));
24668	
24669				ASSERT((io_start_offset >= 0) &&
24670				    (io_start_offset < un->un_tgt_blocksize));
24671				/*
24672				 * Do the modify portion of the read-modify-write.
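			 *
			 * Worked example (illustrative): with un_sys_blocksize
			 * = 512, un_tgt_blocksize = 2048, blkno = 3 and
			 * nblk = 1: tgt_byte_offset = 1536, tgt_blkno = 0,
			 * tgt_nblk = 1, and io_start_offset = 3*512 - 0*2048
			 * = 1536, so the caller's 512 bytes land at offset
			 * 1536 of the read buffer before the whole 2048-byte
			 * block is written back.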
24673 */ 24674 bcopy(addr, &wr_bp->b_un.b_addr[io_start_offset], 24675 (size_t)nblk * un->un_sys_blocksize); 24676 } else { 24677 doing_rmw = FALSE; 24678 tgt_blkno = tgt_byte_offset / un->un_tgt_blocksize; 24679 tgt_nblk = tgt_byte_count / un->un_tgt_blocksize; 24680 } 24681 24682 /* Convert blkno and nblk to target blocks */ 24683 blkno = tgt_blkno; 24684 nblk = tgt_nblk; 24685 } else { 24686 wr_bp = &wr_buf; 24687 bzero(wr_bp, sizeof (struct buf)); 24688 wr_bp->b_flags = B_BUSY; 24689 wr_bp->b_un.b_addr = addr; 24690 wr_bp->b_bcount = nblk << DEV_BSHIFT; 24691 wr_bp->b_resid = 0; 24692 } 24693 24694 mutex_exit(SD_MUTEX(un)); 24695 24696 /* 24697 * Obtain a SCSI packet for the write command. 24698 * It should be safe to call the allocator here without 24699 * worrying about being locked for DVMA mapping because 24700 * the address we're passed is already a DVMA mapping 24701 * 24702 * We are also not going to worry about semaphore ownership 24703 * in the dump buffer. Dumping is single threaded at present. 24704 */ 24705 24706 wr_pktp = NULL; 24707 24708 dma_resid = wr_bp->b_bcount; 24709 oblkno = blkno; 24710 24711 while (dma_resid != 0) { 24712 24713 for (i = 0; i < SD_NDUMP_RETRIES; i++) { 24714 wr_bp->b_flags &= ~B_ERROR; 24715 24716 if (un->un_partial_dma_supported == 1) { 24717 blkno = oblkno + 24718 ((wr_bp->b_bcount - dma_resid) / 24719 un->un_tgt_blocksize); 24720 nblk = dma_resid / un->un_tgt_blocksize; 24721 24722 if (wr_pktp) { 24723 /* 24724 * Partial DMA transfers after initial transfer 24725 */ 24726 rval = sd_setup_next_rw_pkt(un, wr_pktp, wr_bp, 24727 blkno, nblk); 24728 } else { 24729 /* Initial transfer */ 24730 rval = sd_setup_rw_pkt(un, &wr_pktp, wr_bp, 24731 un->un_pkt_flags, NULL_FUNC, NULL, 24732 blkno, nblk); 24733 } 24734 } else { 24735 rval = sd_setup_rw_pkt(un, &wr_pktp, wr_bp, 24736 0, NULL_FUNC, NULL, blkno, nblk); 24737 } 24738 24739 if (rval == 0) { 24740 /* We were given a SCSI packet, continue. 
*/ 24741 break; 24742 } 24743 24744 if (i == 0) { 24745 if (wr_bp->b_flags & B_ERROR) { 24746 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 24747 "no resources for dumping; " 24748 "error code: 0x%x, retrying", 24749 geterror(wr_bp)); 24750 } else { 24751 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 24752 "no resources for dumping; retrying"); 24753 } 24754 } else if (i != (SD_NDUMP_RETRIES - 1)) { 24755 if (wr_bp->b_flags & B_ERROR) { 24756 scsi_log(SD_DEVINFO(un), sd_label, CE_CONT, 24757 "no resources for dumping; error code: " 24758 "0x%x, retrying\n", geterror(wr_bp)); 24759 } 24760 } else { 24761 if (wr_bp->b_flags & B_ERROR) { 24762 scsi_log(SD_DEVINFO(un), sd_label, CE_CONT, 24763 "no resources for dumping; " 24764 "error code: 0x%x, retries failed, " 24765 "giving up.\n", geterror(wr_bp)); 24766 } else { 24767 scsi_log(SD_DEVINFO(un), sd_label, CE_CONT, 24768 "no resources for dumping; " 24769 "retries failed, giving up.\n"); 24770 } 24771 mutex_enter(SD_MUTEX(un)); 24772 Restore_state(un); 24773 if (NOT_DEVBSIZE(un) && (doing_rmw == TRUE)) { 24774 mutex_exit(SD_MUTEX(un)); 24775 scsi_free_consistent_buf(wr_bp); 24776 } else { 24777 mutex_exit(SD_MUTEX(un)); 24778 } 24779 return (EIO); 24780 } 24781 drv_usecwait(10000); 24782 } 24783 24784 if (un->un_partial_dma_supported == 1) { 24785 /* 24786 * save the resid from PARTIAL_DMA 24787 */ 24788 dma_resid = wr_pktp->pkt_resid; 24789 if (dma_resid != 0) 24790 nblk -= SD_BYTES2TGTBLOCKS(un, dma_resid); 24791 wr_pktp->pkt_resid = 0; 24792 } else { 24793 dma_resid = 0; 24794 } 24795 24796 /* SunBug 1222170 */ 24797 wr_pktp->pkt_flags = FLAG_NOINTR; 24798 24799 err = EIO; 24800 for (i = 0; i < SD_NDUMP_RETRIES; i++) { 24801 24802 /* 24803 * Scsi_poll returns 0 (success) if the command completes and 24804 * the status block is STATUS_GOOD. We should only check 24805 * errors if this condition is not true. Even then we should 24806 * send our own request sense packet only if we have a check 24807 * condition and auto request sense has not been performed by 24808 * the hba. 24809 */ 24810 SD_TRACE(SD_LOG_DUMP, un, "sddump: sending write\n"); 24811 24812 if ((sd_scsi_poll(un, wr_pktp) == 0) && 24813 (wr_pktp->pkt_resid == 0)) { 24814 err = SD_SUCCESS; 24815 break; 24816 } 24817 24818 /* 24819 * Check CMD_DEV_GONE 1st, give up if device is gone. 
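		 * (Illustrative summary of the retry ladder below: a CHECK
		 * condition triggers a polled REQUEST SENSE unless the HBA
		 * already did auto request sense; BUSY triggers a LUN reset
		 * and then a target reset; any other failing status resets
		 * the target; and at the halfway retry mark a full bus reset
		 * is attempted before retrying.)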
24820 */ 24821 if (wr_pktp->pkt_reason == CMD_DEV_GONE) { 24822 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 24823 "Error while dumping state...Device is gone\n"); 24824 break; 24825 } 24826 24827 if (SD_GET_PKT_STATUS(wr_pktp) == STATUS_CHECK) { 24828 SD_INFO(SD_LOG_DUMP, un, 24829 "sddump: write failed with CHECK, try # %d\n", i); 24830 if (((wr_pktp->pkt_state & STATE_ARQ_DONE) == 0)) { 24831 (void) sd_send_polled_RQS(un); 24832 } 24833 24834 continue; 24835 } 24836 24837 if (SD_GET_PKT_STATUS(wr_pktp) == STATUS_BUSY) { 24838 int reset_retval = 0; 24839 24840 SD_INFO(SD_LOG_DUMP, un, 24841 "sddump: write failed with BUSY, try # %d\n", i); 24842 24843 if (un->un_f_lun_reset_enabled == TRUE) { 24844 reset_retval = scsi_reset(SD_ADDRESS(un), 24845 RESET_LUN); 24846 } 24847 if (reset_retval == 0) { 24848 (void) scsi_reset(SD_ADDRESS(un), RESET_TARGET); 24849 } 24850 (void) sd_send_polled_RQS(un); 24851 24852 } else { 24853 SD_INFO(SD_LOG_DUMP, un, 24854 "sddump: write failed with 0x%x, try # %d\n", 24855 SD_GET_PKT_STATUS(wr_pktp), i); 24856 mutex_enter(SD_MUTEX(un)); 24857 sd_reset_target(un, wr_pktp); 24858 mutex_exit(SD_MUTEX(un)); 24859 } 24860 24861 /* 24862 * If we are not getting anywhere with lun/target resets, 24863 * let's reset the bus. 24864 */ 24865 if (i == SD_NDUMP_RETRIES/2) { 24866 (void) scsi_reset(SD_ADDRESS(un), RESET_ALL); 24867 (void) sd_send_polled_RQS(un); 24868 } 24869 } 24870 } 24871 24872 scsi_destroy_pkt(wr_pktp); 24873 mutex_enter(SD_MUTEX(un)); 24874 if ((NOT_DEVBSIZE(un)) && (doing_rmw == TRUE)) { 24875 mutex_exit(SD_MUTEX(un)); 24876 scsi_free_consistent_buf(wr_bp); 24877 } else { 24878 mutex_exit(SD_MUTEX(un)); 24879 } 24880 SD_TRACE(SD_LOG_DUMP, un, "sddump: exit: err = %d\n", err); 24881 return (err); 24882 } 24883 24884 /* 24885 * Function: sd_scsi_poll() 24886 * 24887 * Description: This is a wrapper for the scsi_poll call. 24888 * 24889 * Arguments: sd_lun - The unit structure 24890 * scsi_pkt - The scsi packet being sent to the device. 24891 * 24892 * Return Code: 0 - Command completed successfully with good status 24893 * -1 - Command failed. This could indicate a check condition 24894 * or other status value requiring recovery action. 24895 * 24896 * NOTE: This code is only called off sddump(). 24897 */ 24898 24899 static int 24900 sd_scsi_poll(struct sd_lun *un, struct scsi_pkt *pktp) 24901 { 24902 int status; 24903 24904 ASSERT(un != NULL); 24905 ASSERT(!mutex_owned(SD_MUTEX(un))); 24906 ASSERT(pktp != NULL); 24907 24908 status = SD_SUCCESS; 24909 24910 if (scsi_ifgetcap(&pktp->pkt_address, "tagged-qing", 1) == 1) { 24911 pktp->pkt_flags |= un->un_tagflags; 24912 pktp->pkt_flags &= ~FLAG_NODISCON; 24913 } 24914 24915 status = sd_ddi_scsi_poll(pktp); 24916 /* 24917 * Scsi_poll returns 0 (success) if the command completes and the 24918 * status block is STATUS_GOOD. We should only check errors if this 24919 * condition is not true. Even then we should send our own request 24920 * sense packet only if we have a check condition and auto 24921 * request sense has not been performed by the hba. 24922 * Don't get RQS data if pkt_reason is CMD_DEV_GONE. 24923 */ 24924 if ((status != SD_SUCCESS) && 24925 (SD_GET_PKT_STATUS(pktp) == STATUS_CHECK) && 24926 (pktp->pkt_state & STATE_ARQ_DONE) == 0 && 24927 (pktp->pkt_reason != CMD_DEV_GONE)) 24928 (void) sd_send_polled_RQS(un); 24929 24930 return (status); 24931 } 24932 24933 /* 24934 * Function: sd_send_polled_RQS() 24935 * 24936 * Description: This sends the request sense command to a device. 
24937 * 24938 * Arguments: sd_lun - The unit structure 24939 * 24940 * Return Code: 0 - Command completed successfully with good status 24941 * -1 - Command failed. 24942 * 24943 */ 24944 24945 static int 24946 sd_send_polled_RQS(struct sd_lun *un) 24947 { 24948 int ret_val; 24949 struct scsi_pkt *rqs_pktp; 24950 struct buf *rqs_bp; 24951 24952 ASSERT(un != NULL); 24953 ASSERT(!mutex_owned(SD_MUTEX(un))); 24954 24955 ret_val = SD_SUCCESS; 24956 24957 rqs_pktp = un->un_rqs_pktp; 24958 rqs_bp = un->un_rqs_bp; 24959 24960 mutex_enter(SD_MUTEX(un)); 24961 24962 if (un->un_sense_isbusy) { 24963 ret_val = SD_FAILURE; 24964 mutex_exit(SD_MUTEX(un)); 24965 return (ret_val); 24966 } 24967 24968 /* 24969 * If the request sense buffer (and packet) is not in use, 24970 * let's set the un_sense_isbusy and send our packet 24971 */ 24972 un->un_sense_isbusy = 1; 24973 rqs_pktp->pkt_resid = 0; 24974 rqs_pktp->pkt_reason = 0; 24975 rqs_pktp->pkt_flags |= FLAG_NOINTR; 24976 bzero(rqs_bp->b_un.b_addr, SENSE_LENGTH); 24977 24978 mutex_exit(SD_MUTEX(un)); 24979 24980 SD_INFO(SD_LOG_COMMON, un, "sd_send_polled_RQS: req sense buf at" 24981 " 0x%p\n", rqs_bp->b_un.b_addr); 24982 24983 /* 24984 * Can't send this to sd_scsi_poll, we wrap ourselves around the 24985 * axle - it has a call into us! 24986 */ 24987 if ((ret_val = sd_ddi_scsi_poll(rqs_pktp)) != 0) { 24988 SD_INFO(SD_LOG_COMMON, un, 24989 "sd_send_polled_RQS: RQS failed\n"); 24990 } 24991 24992 SD_DUMP_MEMORY(un, SD_LOG_COMMON, "sd_send_polled_RQS:", 24993 (uchar_t *)rqs_bp->b_un.b_addr, SENSE_LENGTH, SD_LOG_HEX); 24994 24995 mutex_enter(SD_MUTEX(un)); 24996 un->un_sense_isbusy = 0; 24997 mutex_exit(SD_MUTEX(un)); 24998 24999 return (ret_val); 25000 } 25001 25002 /* 25003 * Defines needed for localized version of the scsi_poll routine. 25004 */ 25005 #define CSEC 10000 /* usecs */ 25006 #define SEC_TO_CSEC (1000000/CSEC) 25007 25008 /* 25009 * Function: sd_ddi_scsi_poll() 25010 * 25011 * Description: Localized version of the scsi_poll routine. The purpose is to 25012 * send a scsi_pkt to a device as a polled command. This version 25013 * is to ensure more robust handling of transport errors. 25014 * Specifically this routine cures not ready, coming ready 25015 * transition for power up and reset of sonoma's. This can take 25016 * up to 45 seconds for power-on and 20 seconds for reset of a 25017 * sonoma lun. 25018 * 25019 * Arguments: scsi_pkt - The scsi_pkt being sent to a device 25020 * 25021 * Return Code: 0 - Command completed successfully with good status 25022 * -1 - Command failed. 25023 * 25024 * NOTE: This code is almost identical to scsi_poll, however before 6668774 can 25025 * be fixed (removing this code), we need to determine how to handle the 25026 * KEY_UNIT_ATTENTION condition below in conditions not as limited as sddump(). 25027 * 25028 * NOTE: This code is only called off sddump(). 25029 */ 25030 static int 25031 sd_ddi_scsi_poll(struct scsi_pkt *pkt) 25032 { 25033 int rval = -1; 25034 int savef; 25035 long savet; 25036 void (*savec)(); 25037 int timeout; 25038 int busy_count; 25039 int poll_delay; 25040 int rc; 25041 uint8_t *sensep; 25042 struct scsi_arq_status *arqstat; 25043 extern int do_polled_io; 25044 25045 ASSERT(pkt->pkt_scbp); 25046 25047 /* 25048 * save old flags.. 
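	 *
	 * (Illustrative arithmetic for the poll loop below: CSEC is 10000
	 * usec, i.e. 10 msec, and SEC_TO_CSEC is 100, so a pkt_time of 60
	 * seconds gives busy_count a budget of 6000 10-msec slots; the
	 * "retry every second" cases consume 100 slots per retry, 99 added
	 * explicitly plus the loop increment.)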
25049 */ 25050 savef = pkt->pkt_flags; 25051 savec = pkt->pkt_comp; 25052 savet = pkt->pkt_time; 25053 25054 pkt->pkt_flags |= FLAG_NOINTR; 25055 25056 /* 25057 * XXX there is nothing in the SCSA spec that states that we should not 25058 * do a callback for polled cmds; however, removing this will break sd 25059 * and probably other target drivers 25060 */ 25061 pkt->pkt_comp = NULL; 25062 25063 /* 25064 * we don't like a polled command without timeout. 25065 * 60 seconds seems long enough. 25066 */ 25067 if (pkt->pkt_time == 0) 25068 pkt->pkt_time = SCSI_POLL_TIMEOUT; 25069 25070 /* 25071 * Send polled cmd. 25072 * 25073 * We do some error recovery for various errors. Tran_busy, 25074 * queue full, and non-dispatched commands are retried every 10 msec. 25075 * as they are typically transient failures. Busy status and Not 25076 * Ready are retried every second as this status takes a while to 25077 * change. 25078 */ 25079 timeout = pkt->pkt_time * SEC_TO_CSEC; 25080 25081 for (busy_count = 0; busy_count < timeout; busy_count++) { 25082 /* 25083 * Initialize pkt status variables. 25084 */ 25085 *pkt->pkt_scbp = pkt->pkt_reason = pkt->pkt_state = 0; 25086 25087 if ((rc = scsi_transport(pkt)) != TRAN_ACCEPT) { 25088 if (rc != TRAN_BUSY) { 25089 /* Transport failed - give up. */ 25090 break; 25091 } else { 25092 /* Transport busy - try again. */ 25093 poll_delay = 1 * CSEC; /* 10 msec. */ 25094 } 25095 } else { 25096 /* 25097 * Transport accepted - check pkt status. 25098 */ 25099 rc = (*pkt->pkt_scbp) & STATUS_MASK; 25100 if ((pkt->pkt_reason == CMD_CMPLT) && 25101 (rc == STATUS_CHECK) && 25102 (pkt->pkt_state & STATE_ARQ_DONE)) { 25103 arqstat = 25104 (struct scsi_arq_status *)(pkt->pkt_scbp); 25105 sensep = (uint8_t *)&arqstat->sts_sensedata; 25106 } else { 25107 sensep = NULL; 25108 } 25109 25110 if ((pkt->pkt_reason == CMD_CMPLT) && 25111 (rc == STATUS_GOOD)) { 25112 /* No error - we're done */ 25113 rval = 0; 25114 break; 25115 25116 } else if (pkt->pkt_reason == CMD_DEV_GONE) { 25117 /* Lost connection - give up */ 25118 break; 25119 25120 } else if ((pkt->pkt_reason == CMD_INCOMPLETE) && 25121 (pkt->pkt_state == 0)) { 25122 /* Pkt not dispatched - try again. */ 25123 poll_delay = 1 * CSEC; /* 10 msec. */ 25124 25125 } else if ((pkt->pkt_reason == CMD_CMPLT) && 25126 (rc == STATUS_QFULL)) { 25127 /* Queue full - try again. */ 25128 poll_delay = 1 * CSEC; /* 10 msec. */ 25129 25130 } else if ((pkt->pkt_reason == CMD_CMPLT) && 25131 (rc == STATUS_BUSY)) { 25132 /* Busy - try again. */ 25133 poll_delay = 100 * CSEC; /* 1 sec. */ 25134 busy_count += (SEC_TO_CSEC - 1); 25135 25136 } else if ((sensep != NULL) && 25137 (scsi_sense_key(sensep) == KEY_UNIT_ATTENTION)) { 25138 /* 25139 * Unit Attention - try again. 25140 * Pretend it took 1 sec. 25141 * NOTE: 'continue' avoids poll_delay 25142 */ 25143 busy_count += (SEC_TO_CSEC - 1); 25144 continue; 25145 25146 } else if ((sensep != NULL) && 25147 (scsi_sense_key(sensep) == KEY_NOT_READY) && 25148 (scsi_sense_asc(sensep) == 0x04) && 25149 (scsi_sense_ascq(sensep) == 0x01)) { 25150 /* 25151 * Not ready -> ready - try again. 25152 * 04h/01h: LUN IS IN PROCESS OF BECOMING READY 25153 * ...same as STATUS_BUSY 25154 */ 25155 poll_delay = 100 * CSEC; /* 1 sec. */ 25156 busy_count += (SEC_TO_CSEC - 1); 25157 25158 } else { 25159 /* BAD status - give up. 
*/ 25160 break; 25161 } 25162 } 25163 25164 if (((curthread->t_flag & T_INTR_THREAD) == 0) && 25165 !do_polled_io) { 25166 delay(drv_usectohz(poll_delay)); 25167 } else { 25168 /* we busy wait during cpr_dump or interrupt threads */ 25169 drv_usecwait(poll_delay); 25170 } 25171 } 25172 25173 pkt->pkt_flags = savef; 25174 pkt->pkt_comp = savec; 25175 pkt->pkt_time = savet; 25176 25177 /* return on error */ 25178 if (rval) 25179 return (rval); 25180 25181 /* 25182 * This is not a performance critical code path. 25183 * 25184 * As an accommodation for scsi_poll callers, to avoid ddi_dma_sync() 25185 * issues associated with looking at DMA memory prior to 25186 * scsi_pkt_destroy(), we scsi_sync_pkt() prior to return. 25187 */ 25188 scsi_sync_pkt(pkt); 25189 return (0); 25190 } 25191 25192 25193 25194 /* 25195 * Function: sd_persistent_reservation_in_read_keys 25196 * 25197 * Description: This routine is the driver entry point for handling CD-ROM 25198 * multi-host persistent reservation requests (MHIOCGRP_INKEYS) 25199 * by sending the SCSI-3 PRIN commands to the device. 25200 * Processes the read keys command response by copying the 25201 * reservation key information into the user provided buffer. 25202 * Support for the 32/64 bit _MULTI_DATAMODEL is implemented. 25203 * 25204 * Arguments: un - Pointer to soft state struct for the target. 25205 * usrp - user provided pointer to multihost Persistent In Read 25206 * Keys structure (mhioc_inkeys_t) 25207 * flag - this argument is a pass through to ddi_copyxxx() 25208 * directly from the mode argument of ioctl(). 25209 * 25210 * Return Code: 0 - Success 25211 * EACCES 25212 * ENOTSUP 25213 * errno return code from sd_send_scsi_cmd() 25214 * 25215 * Context: Can sleep. Does not return until command is completed. 25216 */ 25217 25218 static int 25219 sd_persistent_reservation_in_read_keys(struct sd_lun *un, 25220 mhioc_inkeys_t *usrp, int flag) 25221 { 25222 #ifdef _MULTI_DATAMODEL 25223 struct mhioc_key_list32 li32; 25224 #endif 25225 sd_prin_readkeys_t *in; 25226 mhioc_inkeys_t *ptr; 25227 mhioc_key_list_t li; 25228 uchar_t *data_bufp; 25229 int data_len; 25230 int rval = 0; 25231 size_t copysz; 25232 sd_ssc_t *ssc; 25233 25234 if ((ptr = (mhioc_inkeys_t *)usrp) == NULL) { 25235 return (EINVAL); 25236 } 25237 bzero(&li, sizeof (mhioc_key_list_t)); 25238 25239 ssc = sd_ssc_init(un); 25240 25241 /* 25242 * Get the listsize from user 25243 */ 25244 #ifdef _MULTI_DATAMODEL 25245 25246 switch (ddi_model_convert_from(flag & FMODELS)) { 25247 case DDI_MODEL_ILP32: 25248 copysz = sizeof (struct mhioc_key_list32); 25249 if (ddi_copyin(ptr->li, &li32, copysz, flag)) { 25250 SD_ERROR(SD_LOG_IOCTL_MHD, un, 25251 "sd_persistent_reservation_in_read_keys: " 25252 "failed ddi_copyin: mhioc_key_list32_t\n"); 25253 rval = EFAULT; 25254 goto done; 25255 } 25256 li.listsize = li32.listsize; 25257 li.list = (mhioc_resv_key_t *)(uintptr_t)li32.list; 25258 break; 25259 25260 case DDI_MODEL_NONE: 25261 copysz = sizeof (mhioc_key_list_t); 25262 if (ddi_copyin(ptr->li, &li, copysz, flag)) { 25263 SD_ERROR(SD_LOG_IOCTL_MHD, un, 25264 "sd_persistent_reservation_in_read_keys: " 25265 "failed ddi_copyin: mhioc_key_list_t\n"); 25266 rval = EFAULT; 25267 goto done; 25268 } 25269 break; 25270 } 25271 25272 #else /* ! 
_MULTI_DATAMODEL */ 25273 copysz = sizeof (mhioc_key_list_t); 25274 if (ddi_copyin(ptr->li, &li, copysz, flag)) { 25275 SD_ERROR(SD_LOG_IOCTL_MHD, un, 25276 "sd_persistent_reservation_in_read_keys: " 25277 "failed ddi_copyin: mhioc_key_list_t\n"); 25278 rval = EFAULT; 25279 goto done; 25280 } 25281 #endif 25282 25283 data_len = li.listsize * MHIOC_RESV_KEY_SIZE; 25284 data_len += (sizeof (sd_prin_readkeys_t) - sizeof (caddr_t)); 25285 data_bufp = kmem_zalloc(data_len, KM_SLEEP); 25286 25287 rval = sd_send_scsi_PERSISTENT_RESERVE_IN(ssc, SD_READ_KEYS, 25288 data_len, data_bufp); 25289 if (rval != 0) { 25290 if (rval == EIO) 25291 sd_ssc_assessment(ssc, SD_FMT_IGNORE_COMPROMISE); 25292 else 25293 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 25294 goto done; 25295 } 25296 in = (sd_prin_readkeys_t *)data_bufp; 25297 ptr->generation = BE_32(in->generation); 25298 li.listlen = BE_32(in->len) / MHIOC_RESV_KEY_SIZE; 25299 25300 /* 25301 * Return the min(listsize, listlen) keys 25302 */ 25303 #ifdef _MULTI_DATAMODEL 25304 25305 switch (ddi_model_convert_from(flag & FMODELS)) { 25306 case DDI_MODEL_ILP32: 25307 li32.listlen = li.listlen; 25308 if (ddi_copyout(&li32, ptr->li, copysz, flag)) { 25309 SD_ERROR(SD_LOG_IOCTL_MHD, un, 25310 "sd_persistent_reservation_in_read_keys: " 25311 "failed ddi_copyout: mhioc_key_list32_t\n"); 25312 rval = EFAULT; 25313 goto done; 25314 } 25315 break; 25316 25317 case DDI_MODEL_NONE: 25318 if (ddi_copyout(&li, ptr->li, copysz, flag)) { 25319 SD_ERROR(SD_LOG_IOCTL_MHD, un, 25320 "sd_persistent_reservation_in_read_keys: " 25321 "failed ddi_copyout: mhioc_key_list_t\n"); 25322 rval = EFAULT; 25323 goto done; 25324 } 25325 break; 25326 } 25327 25328 #else /* ! _MULTI_DATAMODEL */ 25329 25330 if (ddi_copyout(&li, ptr->li, copysz, flag)) { 25331 SD_ERROR(SD_LOG_IOCTL_MHD, un, 25332 "sd_persistent_reservation_in_read_keys: " 25333 "failed ddi_copyout: mhioc_key_list_t\n"); 25334 rval = EFAULT; 25335 goto done; 25336 } 25337 25338 #endif /* _MULTI_DATAMODEL */ 25339 25340 copysz = min(li.listlen * MHIOC_RESV_KEY_SIZE, 25341 li.listsize * MHIOC_RESV_KEY_SIZE); 25342 if (ddi_copyout(&in->keylist, li.list, copysz, flag)) { 25343 SD_ERROR(SD_LOG_IOCTL_MHD, un, 25344 "sd_persistent_reservation_in_read_keys: " 25345 "failed ddi_copyout: keylist\n"); 25346 rval = EFAULT; 25347 } 25348 done: 25349 sd_ssc_fini(ssc); 25350 kmem_free(data_bufp, data_len); 25351 return (rval); 25352 } 25353 25354 25355 /* 25356 * Function: sd_persistent_reservation_in_read_resv 25357 * 25358 * Description: This routine is the driver entry point for handling CD-ROM 25359 * multi-host persistent reservation requests (MHIOCGRP_INRESV) 25360 * by sending the SCSI-3 PRIN commands to the device. 25361 * Process the read persistent reservations command response by 25362 * copying the reservation information into the user provided 25363 * buffer. Support for the 32/64 _MULTI_DATAMODEL is implemented. 25364 * 25365 * Arguments: un - Pointer to soft state struct for the target. 25366 * usrp - user provided pointer to multihost Persistent In Read 25367 * Keys structure (mhioc_inkeys_t) 25368 * flag - this argument is a pass through to ddi_copyxxx() 25369 * directly from the mode argument of ioctl(). 25370 * 25371 * Return Code: 0 - Success 25372 * EACCES 25373 * ENOTSUP 25374 * errno return code from sd_send_scsi_cmd() 25375 * 25376 * Context: Can sleep. Does not return until command is completed. 
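 *
 *		Illustrative note: as in the read-keys handler above, the
 *		_MULTI_DATAMODEL paths exist because a 32-bit caller passes
 *		a struct mhioc_resv_desc_list32 whose embedded list pointer
 *		is 32 bits wide and must be widened via (uintptr_t) before
 *		the kernel can use it.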
25377	 */
25378	
25379	static int
25380	sd_persistent_reservation_in_read_resv(struct sd_lun *un,
25381	    mhioc_inresvs_t *usrp, int flag)
25382	{
25383	#ifdef _MULTI_DATAMODEL
25384		struct mhioc_resv_desc_list32 resvlist32;
25385	#endif
25386		sd_prin_readresv_t *in;
25387		mhioc_inresvs_t *ptr;
25388		sd_readresv_desc_t *readresv_ptr;
25389		mhioc_resv_desc_list_t resvlist;
25390		mhioc_resv_desc_t resvdesc;
25391		uchar_t *data_bufp = NULL;
25392		int data_len;
25393		int rval = 0;
25394		int i;
25395		size_t copysz;
25396		mhioc_resv_desc_t *bufp;
25397		sd_ssc_t *ssc;
25398	
25399		if ((ptr = usrp) == NULL) {
25400			return (EINVAL);
25401		}
25402	
25403		ssc = sd_ssc_init(un);
25404	
25405		/*
25406		 * Get the listsize from user
25407		 */
25408	#ifdef _MULTI_DATAMODEL
25409		switch (ddi_model_convert_from(flag & FMODELS)) {
25410		case DDI_MODEL_ILP32:
25411			copysz = sizeof (struct mhioc_resv_desc_list32);
25412			if (ddi_copyin(ptr->li, &resvlist32, copysz, flag)) {
25413				SD_ERROR(SD_LOG_IOCTL_MHD, un,
25414				    "sd_persistent_reservation_in_read_resv: "
25415				    "failed ddi_copyin: mhioc_resv_desc_list_t\n");
25416				rval = EFAULT;
25417				goto done;
25418			}
25419			resvlist.listsize = resvlist32.listsize;
25420			resvlist.list = (mhioc_resv_desc_t *)(uintptr_t)resvlist32.list;
25421			break;
25422	
25423		case DDI_MODEL_NONE:
25424			copysz = sizeof (mhioc_resv_desc_list_t);
25425			if (ddi_copyin(ptr->li, &resvlist, copysz, flag)) {
25426				SD_ERROR(SD_LOG_IOCTL_MHD, un,
25427				    "sd_persistent_reservation_in_read_resv: "
25428				    "failed ddi_copyin: mhioc_resv_desc_list_t\n");
25429				rval = EFAULT;
25430				goto done;
25431			}
25432			break;
25433		}
25434	#else /* ! _MULTI_DATAMODEL */
25435		copysz = sizeof (mhioc_resv_desc_list_t);
25436		if (ddi_copyin(ptr->li, &resvlist, copysz, flag)) {
25437			SD_ERROR(SD_LOG_IOCTL_MHD, un,
25438			    "sd_persistent_reservation_in_read_resv: "
25439			    "failed ddi_copyin: mhioc_resv_desc_list_t\n");
25440			rval = EFAULT;
25441			goto done;
25442		}
25443	#endif /* ! _MULTI_DATAMODEL */
25444	
25445		data_len = resvlist.listsize * SCSI3_RESV_DESC_LEN;
25446		data_len += (sizeof (sd_prin_readresv_t) - sizeof (caddr_t));
25447		data_bufp = kmem_zalloc(data_len, KM_SLEEP);
25448	
25449		rval = sd_send_scsi_PERSISTENT_RESERVE_IN(ssc, SD_READ_RESV,
25450		    data_len, data_bufp);
25451		if (rval != 0) {
25452			if (rval == EIO)
25453				sd_ssc_assessment(ssc, SD_FMT_IGNORE_COMPROMISE);
25454			else
25455				sd_ssc_assessment(ssc, SD_FMT_IGNORE);
25456			goto done;
25457		}
25458		in = (sd_prin_readresv_t *)data_bufp;
25459		ptr->generation = BE_32(in->generation);
25460		resvlist.listlen = BE_32(in->len) / SCSI3_RESV_DESC_LEN;
25461	
25462		/*
25463		 * Return the min(listsize, listlen) keys
25464		 */
25465	#ifdef _MULTI_DATAMODEL
25466	
25467		switch (ddi_model_convert_from(flag & FMODELS)) {
25468		case DDI_MODEL_ILP32:
25469			resvlist32.listlen = resvlist.listlen;
25470			if (ddi_copyout(&resvlist32, ptr->li, copysz, flag)) {
25471				SD_ERROR(SD_LOG_IOCTL_MHD, un,
25472				    "sd_persistent_reservation_in_read_resv: "
25473				    "failed ddi_copyout: mhioc_resv_desc_list_t\n");
25474				rval = EFAULT;
25475				goto done;
25476			}
25477			break;
25478	
25479		case DDI_MODEL_NONE:
25480			if (ddi_copyout(&resvlist, ptr->li, copysz, flag)) {
25481				SD_ERROR(SD_LOG_IOCTL_MHD, un,
25482				    "sd_persistent_reservation_in_read_resv: "
25483				    "failed ddi_copyout: mhioc_resv_desc_list_t\n");
25484				rval = EFAULT;
25485				goto done;
25486			}
25487			break;
25488		}
25489	
25490	#else /* ! _MULTI_DATAMODEL */
25491	
25492		if (ddi_copyout(&resvlist, ptr->li, copysz, flag)) {
25493			SD_ERROR(SD_LOG_IOCTL_MHD, un,
25494			    "sd_persistent_reservation_in_read_resv: "
25495			    "failed ddi_copyout: mhioc_resv_desc_list_t\n");
25496			rval = EFAULT;
25497			goto done;
25498		}
25499	
25500	#endif /* ! _MULTI_DATAMODEL */
25501	
25502		readresv_ptr = (sd_readresv_desc_t *)&in->readresv_desc;
25503		bufp = resvlist.list;
25504		copysz = sizeof (mhioc_resv_desc_t);
25505		for (i = 0; i < min(resvlist.listlen, resvlist.listsize);
25506		    i++, readresv_ptr++, bufp++) {
25507	
25508			bcopy(&readresv_ptr->resvkey, &resvdesc.key,
25509			    MHIOC_RESV_KEY_SIZE);
25510			resvdesc.type = readresv_ptr->type;
25511			resvdesc.scope = readresv_ptr->scope;
25512			resvdesc.scope_specific_addr =
25513			    BE_32(readresv_ptr->scope_specific_addr);
25514	
25515			if (ddi_copyout(&resvdesc, bufp, copysz, flag)) {
25516				SD_ERROR(SD_LOG_IOCTL_MHD, un,
25517				    "sd_persistent_reservation_in_read_resv: "
25518				    "failed ddi_copyout: resvlist\n");
25519				rval = EFAULT;
25520				goto done;
25521			}
25522		}
25523	done:
25524		sd_ssc_fini(ssc);
25525		/* free data_bufp only if it was actually allocated */
25526		if (data_bufp) {
25527			kmem_free(data_bufp, data_len);
25528		}
25529		return (rval);
25530	}
25531	
25532	
25533	/*
25534	 * Function:	sr_change_blkmode()
25535	 *
25536	 * Description:	This routine is the driver entry point for handling CD-ROM
25537	 *		block mode ioctl requests. Support for returning and changing
25538	 *		the current block size in use by the device is implemented. The
25539	 *		LBA size is changed via a MODE SELECT Block Descriptor.
25540	 *
25541	 *		This routine issues a mode sense with an allocation length of
25542	 *		12 bytes for the mode page header and a single block descriptor.
25543	 *
25544	 * Arguments:	dev - the device 'dev_t'
25545	 *		cmd - the request type; one of CDROMGBLKMODE (get) or
25546	 *		CDROMSBLKMODE (set)
25547	 *		data - current block size or requested block size
25548	 *		flag - this argument is a pass through to ddi_copyxxx() directly
25549	 *		from the mode argument of ioctl().
25550	 *
25551	 * Return Code:	the code returned by sd_send_scsi_cmd()
25552	 *		EINVAL if invalid arguments are provided
25553	 *		EFAULT if ddi_copyxxx() fails
25554	 *		ENXIO if ddi_get_soft_state fails
25555	 *		EIO if the mode sense block descriptor length is invalid
25556	 *
25557	 */
25558	
25559	static int
25560	sr_change_blkmode(dev_t dev, int cmd, intptr_t data, int flag)
25561	{
25562		struct sd_lun *un = NULL;
25563		struct mode_header *sense_mhp, *select_mhp;
25564		struct block_descriptor *sense_desc, *select_desc;
25565		int current_bsize;
25566		int rval = EINVAL;
25567		uchar_t *sense = NULL;
25568		uchar_t *select = NULL;
25569		sd_ssc_t *ssc;
25570	
25571		ASSERT((cmd == CDROMGBLKMODE) || (cmd == CDROMSBLKMODE));
25572	
25573		if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) {
25574			return (ENXIO);
25575		}
25576	
25577		/*
25578		 * The block length is changed via the Mode Select block descriptor;
25579		 * the "Read/Write Error Recovery" mode page (0x1) contents are not
25580		 * actually required as part of this routine. Therefore the mode sense
25581		 * allocation length is specified to be the length of a mode page
25582		 * header and a block descriptor.
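	 *
	 * Illustrative layout of that 12-byte allocation: bytes 0-3 hold
	 * the struct mode_header (the 4-byte header of the 6-byte MODE
	 * SENSE form) and bytes 4-11 the single struct block_descriptor,
	 * whose blksize_hi/mid/lo fields carry the 24-bit logical block
	 * size.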
25583	 */
25584		sense = kmem_zalloc(BUFLEN_CHG_BLK_MODE, KM_SLEEP);
25585	
25586		ssc = sd_ssc_init(un);
25587		rval = sd_send_scsi_MODE_SENSE(ssc, CDB_GROUP0, sense,
25588		    BUFLEN_CHG_BLK_MODE, MODEPAGE_ERR_RECOV, SD_PATH_STANDARD);
25589		sd_ssc_fini(ssc);
25590		if (rval != 0) {
25591			scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
25592			    "sr_change_blkmode: Mode Sense Failed\n");
25593			kmem_free(sense, BUFLEN_CHG_BLK_MODE);
25594			return (rval);
25595		}
25596	
25597		/* Check the block descriptor len to handle only 1 block descriptor */
25598		sense_mhp = (struct mode_header *)sense;
25599		if ((sense_mhp->bdesc_length == 0) ||
25600		    (sense_mhp->bdesc_length > MODE_BLK_DESC_LENGTH)) {
25601			scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
25602			    "sr_change_blkmode: Mode Sense returned invalid block"
25603			    " descriptor length\n");
25604			kmem_free(sense, BUFLEN_CHG_BLK_MODE);
25605			return (EIO);
25606		}
25607		sense_desc = (struct block_descriptor *)(sense + MODE_HEADER_LENGTH);
25608		current_bsize = ((sense_desc->blksize_hi << 16) |
25609		    (sense_desc->blksize_mid << 8) | sense_desc->blksize_lo);
25610	
25611		/* Process command */
25612		switch (cmd) {
25613		case CDROMGBLKMODE:
25614			/* Return the block size obtained during the mode sense */
25615			if (ddi_copyout(&current_bsize, (void *)data,
25616			    sizeof (int), flag) != 0)
25617				rval = EFAULT;
25618			break;
25619		case CDROMSBLKMODE:
25620			/* Validate the requested block size */
25621			switch (data) {
25622			case CDROM_BLK_512:
25623			case CDROM_BLK_1024:
25624			case CDROM_BLK_2048:
25625			case CDROM_BLK_2056:
25626			case CDROM_BLK_2336:
25627			case CDROM_BLK_2340:
25628			case CDROM_BLK_2352:
25629			case CDROM_BLK_2368:
25630			case CDROM_BLK_2448:
25631			case CDROM_BLK_2646:
25632			case CDROM_BLK_2647:
25633				break;
25634			default:
25635				scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
25636				    "sr_change_blkmode: "
25637				    "Block Size '%ld' Not Supported\n", data);
25638				kmem_free(sense, BUFLEN_CHG_BLK_MODE);
25639				return (EINVAL);
25640			}
25641	
25642			/*
25643			 * The current block size matches the requested block size so
25644			 * there is no need to send the mode select to change the size
25645			 */
25646			if (current_bsize == data) {
25647				break;
25648			}
25649	
25650			/* Build the select data for the requested block size */
25651			select = kmem_zalloc(BUFLEN_CHG_BLK_MODE, KM_SLEEP);
25652			select_mhp = (struct mode_header *)select;
25653			select_desc =
25654			    (struct block_descriptor *)(select + MODE_HEADER_LENGTH);
25655			/*
25656			 * The LBA size is changed via the block descriptor, so the
25657			 * descriptor is built according to the user data
25658			 */
25659			select_mhp->bdesc_length = MODE_BLK_DESC_LENGTH;
25660			select_desc->blksize_hi = (char)(((data) & 0x00ff0000) >> 16);
25661			select_desc->blksize_mid = (char)(((data) & 0x0000ff00) >> 8);
25662			select_desc->blksize_lo = (char)((data) & 0x000000ff);
25663	
25664			/* Send the mode select for the requested block size */
25665			ssc = sd_ssc_init(un);
25666			rval = sd_send_scsi_MODE_SELECT(ssc, CDB_GROUP0,
25667			    select, BUFLEN_CHG_BLK_MODE, SD_DONTSAVE_PAGE,
25668			    SD_PATH_STANDARD);
25669			sd_ssc_fini(ssc);
25670			if (rval != 0) {
25671				scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
25672				    "sr_change_blkmode: Mode Select Failed\n");
25673				/*
25674				 * The mode select failed for the requested block size,
25675				 * so reset the data for the original block size and
25676				 * send it to the target. The error is indicated by the
25677				 * return value for the failed mode select.
25678 */ 25679 select_desc->blksize_hi = sense_desc->blksize_hi; 25680 select_desc->blksize_mid = sense_desc->blksize_mid; 25681 select_desc->blksize_lo = sense_desc->blksize_lo; 25682 ssc = sd_ssc_init(un); 25683 (void) sd_send_scsi_MODE_SELECT(ssc, CDB_GROUP0, 25684 select, BUFLEN_CHG_BLK_MODE, SD_DONTSAVE_PAGE, 25685 SD_PATH_STANDARD); 25686 sd_ssc_fini(ssc); 25687 } else { 25688 ASSERT(!mutex_owned(SD_MUTEX(un))); 25689 mutex_enter(SD_MUTEX(un)); 25690 sd_update_block_info(un, (uint32_t)data, 0); 25691 mutex_exit(SD_MUTEX(un)); 25692 } 25693 break; 25694 default: 25695 /* should not reach here, but check anyway */ 25696 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 25697 "sr_change_blkmode: Command '%x' Not Supported\n", cmd); 25698 rval = EINVAL; 25699 break; 25700 } 25701 25702 if (select) { 25703 kmem_free(select, BUFLEN_CHG_BLK_MODE); 25704 } 25705 if (sense) { 25706 kmem_free(sense, BUFLEN_CHG_BLK_MODE); 25707 } 25708 return (rval); 25709 } 25710 25711 25712 /* 25713 * Note: The following sr_change_speed() and sr_atapi_change_speed() routines 25714 * implement driver support for getting and setting the CD speed. The command 25715 * set used will be based on the device type. If the device has not been 25716 * identified as MMC the Toshiba vendor specific mode page will be used. If 25717 * the device is MMC but does not support the Real Time Streaming feature 25718 * the SET CD SPEED command will be used to set speed and mode page 0x2A will 25719 * be used to read the speed. 25720 */ 25721 25722 /* 25723 * Function: sr_change_speed() 25724 * 25725 * Description: This routine is the driver entry point for handling CD-ROM 25726 * drive speed ioctl requests for devices supporting the Toshiba 25727 * vendor specific drive speed mode page. Support for returning 25728 * and changing the current drive speed in use by the device is 25729 * implemented. 25730 * 25731 * Arguments: dev - the device 'dev_t' 25732 * cmd - the request type; one of CDROMGDRVSPEED (get) or 25733 * CDROMSDRVSPEED (set) 25734 * data - current drive speed or requested drive speed 25735 * flag - this argument is a pass through to ddi_copyxxx() directly 25736 * from the mode argument of ioctl(). 25737 * 25738 * Return Code: the code returned by sd_send_scsi_cmd() 25739 * EINVAL if invalid arguments are provided 25740 * EFAULT if ddi_copyxxx() fails 25741 * ENXIO if fail ddi_get_soft_state 25742 * EIO if invalid mode sense block descriptor length 25743 */ 25744 25745 static int 25746 sr_change_speed(dev_t dev, int cmd, intptr_t data, int flag) 25747 { 25748 struct sd_lun *un = NULL; 25749 struct mode_header *sense_mhp, *select_mhp; 25750 struct mode_speed *sense_page, *select_page; 25751 int current_speed; 25752 int rval = EINVAL; 25753 int bd_len; 25754 uchar_t *sense = NULL; 25755 uchar_t *select = NULL; 25756 sd_ssc_t *ssc; 25757 25758 ASSERT((cmd == CDROMGDRVSPEED) || (cmd == CDROMSDRVSPEED)); 25759 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 25760 return (ENXIO); 25761 } 25762 25763 /* 25764 * Note: The drive speed is being modified here according to a Toshiba 25765 * vendor specific mode page (0x31). 
 */
	sense = kmem_zalloc(BUFLEN_MODE_CDROM_SPEED, KM_SLEEP);

	ssc = sd_ssc_init(un);
	rval = sd_send_scsi_MODE_SENSE(ssc, CDB_GROUP0, sense,
	    BUFLEN_MODE_CDROM_SPEED, CDROM_MODE_SPEED,
	    SD_PATH_STANDARD);
	sd_ssc_fini(ssc);
	if (rval != 0) {
		scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
		    "sr_change_speed: Mode Sense Failed\n");
		kmem_free(sense, BUFLEN_MODE_CDROM_SPEED);
		return (rval);
	}
	sense_mhp = (struct mode_header *)sense;

	/* Check the block descriptor len to handle only 1 block descriptor */
	bd_len = sense_mhp->bdesc_length;
	if (bd_len > MODE_BLK_DESC_LENGTH) {
		scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
		    "sr_change_speed: Mode Sense returned invalid block "
		    "descriptor length\n");
		kmem_free(sense, BUFLEN_MODE_CDROM_SPEED);
		return (EIO);
	}

	sense_page = (struct mode_speed *)
	    (sense + MODE_HEADER_LENGTH + sense_mhp->bdesc_length);
	current_speed = sense_page->speed;

	/* Process command */
	switch (cmd) {
	case CDROMGDRVSPEED:
		/* Return the drive speed obtained during the mode sense */
		if (current_speed == 0x2) {
			current_speed = CDROM_TWELVE_SPEED;
		}
		if (ddi_copyout(&current_speed, (void *)data,
		    sizeof (int), flag) != 0) {
			rval = EFAULT;
		}
		break;
	case CDROMSDRVSPEED:
		/* Validate the requested drive speed */
		switch ((uchar_t)data) {
		case CDROM_TWELVE_SPEED:
			data = 0x2;
			/*FALLTHROUGH*/
		case CDROM_NORMAL_SPEED:
		case CDROM_DOUBLE_SPEED:
		case CDROM_QUAD_SPEED:
		case CDROM_MAXIMUM_SPEED:
			break;
		default:
			scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
			    "sr_change_speed: "
			    "Drive Speed '%d' Not Supported\n", (uchar_t)data);
			kmem_free(sense, BUFLEN_MODE_CDROM_SPEED);
			return (EINVAL);
		}

		/*
		 * The current drive speed matches the requested drive speed,
		 * so there is no need to send the mode select to change it.
		 */
		if (current_speed == data) {
			break;
		}

		/* Build the select data for the requested drive speed */
		select = kmem_zalloc(BUFLEN_MODE_CDROM_SPEED, KM_SLEEP);
		select_mhp = (struct mode_header *)select;
		select_mhp->bdesc_length = 0;
		select_page =
		    (struct mode_speed *)(select + MODE_HEADER_LENGTH);
		select_page->mode_page.code = CDROM_MODE_SPEED;
		select_page->mode_page.length = 2;
		select_page->speed = (uchar_t)data;

		/* Send the mode select for the requested drive speed */
		ssc = sd_ssc_init(un);
		rval = sd_send_scsi_MODE_SELECT(ssc, CDB_GROUP0, select,
		    MODEPAGE_CDROM_SPEED_LEN + MODE_HEADER_LENGTH,
		    SD_DONTSAVE_PAGE, SD_PATH_STANDARD);
		sd_ssc_fini(ssc);
		if (rval != 0) {
			/*
			 * The mode select failed for the requested drive
			 * speed, so reset the data for the original drive
			 * speed and send it to the target. The error is
			 * indicated by the return value for the failed mode
			 * select.
			 */
			scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
			    "sr_change_speed: Mode Select Failed\n");
			select_page->speed = sense_page->speed;
			ssc = sd_ssc_init(un);
			(void) sd_send_scsi_MODE_SELECT(ssc, CDB_GROUP0,
			    select,
			    MODEPAGE_CDROM_SPEED_LEN + MODE_HEADER_LENGTH,
			    SD_DONTSAVE_PAGE, SD_PATH_STANDARD);
			sd_ssc_fini(ssc);
		}
		break;
	default:
		/* should not reach here, but check anyway */
		scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
		    "sr_change_speed: Command '%x' Not Supported\n", cmd);
		rval = EINVAL;
		break;
	}

	if (select) {
		kmem_free(select, BUFLEN_MODE_CDROM_SPEED);
	}
	if (sense) {
		kmem_free(sense, BUFLEN_MODE_CDROM_SPEED);
	}

	return (rval);
}
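
/*
 * From user level, the two speed ioctls handled above are reached through
 * open(2)/ioctl(2) on the raw device. A minimal user-level sketch (the
 * device path is only an example, and error handling is reduced to
 * perror()):
 */
#if 0	/* illustrative user-level sketch, not compiled */
#include <sys/cdio.h>
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int
main(void)
{
	int fd, speed;

	if ((fd = open("/dev/rdsk/c0t2d0s2", O_RDONLY)) < 0) {
		perror("open");
		return (1);
	}
	/* CDROMGDRVSPEED copies the current speed out through the pointer */
	if (ioctl(fd, CDROMGDRVSPEED, &speed) < 0)
		perror("CDROMGDRVSPEED");
	else
		printf("current speed code: %d\n", speed);
	/* CDROMSDRVSPEED passes the requested speed code by value */
	if (ioctl(fd, CDROMSDRVSPEED, CDROM_DOUBLE_SPEED) < 0)
		perror("CDROMSDRVSPEED");
	return (0);
}
#endif
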
/*
 * Function: sr_atapi_change_speed()
 *
 * Description: This routine is the driver entry point for handling CD-ROM
 *		drive speed ioctl requests for MMC devices that do not support
 *		the Real Time Streaming feature (0x107).
 *
 *		Note: This routine will use the SET SPEED command which may not
 *		be supported by all devices.
 *
 * Arguments: dev - the device 'dev_t'
 *	      cmd - the request type; one of CDROMGDRVSPEED (get) or
 *		    CDROMSDRVSPEED (set)
 *	      data - current drive speed or requested drive speed
 *	      flag - this argument is a pass through to ddi_copyxxx() directly
 *		     from the mode argument of ioctl().
 *
 * Return Code: the code returned by sd_send_scsi_cmd()
 *		EINVAL if invalid arguments are provided
 *		EFAULT if ddi_copyxxx() fails
 *		ENXIO if fail ddi_get_soft_state
 *		EIO if invalid mode sense block descriptor length
 */

static int
sr_atapi_change_speed(dev_t dev, int cmd, intptr_t data, int flag)
{
	struct sd_lun			*un;
	struct uscsi_cmd		*com = NULL;
	struct mode_header_grp2		*sense_mhp;
	uchar_t				*sense_page;
	uchar_t				*sense = NULL;
	char				cdb[CDB_GROUP5];
	int				bd_len;
	int				current_speed = 0;
	int				max_speed = 0;
	int				rval;
	sd_ssc_t			*ssc;

	ASSERT((cmd == CDROMGDRVSPEED) || (cmd == CDROMSDRVSPEED));

	if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) {
		return (ENXIO);
	}

	sense = kmem_zalloc(BUFLEN_MODE_CDROM_CAP, KM_SLEEP);

	ssc = sd_ssc_init(un);
	rval = sd_send_scsi_MODE_SENSE(ssc, CDB_GROUP1, sense,
	    BUFLEN_MODE_CDROM_CAP, MODEPAGE_CDROM_CAP,
	    SD_PATH_STANDARD);
	sd_ssc_fini(ssc);
	if (rval != 0) {
		scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
		    "sr_atapi_change_speed: Mode Sense Failed\n");
		kmem_free(sense, BUFLEN_MODE_CDROM_CAP);
		return (rval);
	}

	/* Check the block descriptor len to handle only 1 block descriptor */
	sense_mhp = (struct mode_header_grp2 *)sense;
	bd_len = (sense_mhp->bdesc_length_hi << 8) | sense_mhp->bdesc_length_lo;
	if (bd_len > MODE_BLK_DESC_LENGTH) {
		scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
		    "sr_atapi_change_speed: Mode Sense returned invalid "
		    "block descriptor length\n");
		kmem_free(sense, BUFLEN_MODE_CDROM_CAP);
		return (EIO);
	}

	/* Calculate the current and maximum drive speeds */
	sense_page = (uchar_t *)(sense + MODE_HEADER_LENGTH_GRP2 + bd_len);
	current_speed = (sense_page[14] << 8) | sense_page[15];
	max_speed = (sense_page[8] << 8) | sense_page[9];

	/* Process the command */
	switch (cmd) {
	case CDROMGDRVSPEED:
		current_speed /= SD_SPEED_1X;
		if (ddi_copyout(&current_speed, (void *)data,
		    sizeof (int), flag) != 0)
			rval = EFAULT;
		break;
	case CDROMSDRVSPEED:
		/* Convert the speed code to KB/sec */
		switch ((uchar_t)data) {
		case CDROM_NORMAL_SPEED:
			current_speed = SD_SPEED_1X;
			break;
		case CDROM_DOUBLE_SPEED:
			current_speed = 2 * SD_SPEED_1X;
			break;
		case CDROM_QUAD_SPEED:
			current_speed = 4 * SD_SPEED_1X;
			break;
		case CDROM_TWELVE_SPEED:
			current_speed = 12 * SD_SPEED_1X;
			break;
		case CDROM_MAXIMUM_SPEED:
			current_speed = 0xffff;
			break;
		default:
			scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
			    "sr_atapi_change_speed: invalid drive speed %d\n",
			    (uchar_t)data);
			kmem_free(sense, BUFLEN_MODE_CDROM_CAP);
			return (EINVAL);
		}

		/* Check the request against the drive's max speed. */
		if (current_speed != 0xffff) {
			if (current_speed > max_speed) {
				kmem_free(sense, BUFLEN_MODE_CDROM_CAP);
				return (EINVAL);
			}
		}

		/*
		 * Build and send the SET SPEED command
		 *
		 * Note: The SET SPEED (0xBB) command used in this routine is
		 * obsolete per the SCSI MMC spec but still supported in the
		 * MT FUJI vendor spec. Most equipment adheres to MT FUJI, so
		 * the command is still implemented in this routine.
		 */
		bzero(cdb, sizeof (cdb));
		cdb[0] = (char)SCMD_SET_CDROM_SPEED;
		cdb[2] = (uchar_t)(current_speed >> 8);
		cdb[3] = (uchar_t)current_speed;
		com = kmem_zalloc(sizeof (*com), KM_SLEEP);
		com->uscsi_cdb = (caddr_t)cdb;
		com->uscsi_cdblen = CDB_GROUP5;
		com->uscsi_bufaddr = NULL;
		com->uscsi_buflen = 0;
		com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT;
		rval = sd_send_scsi_cmd(dev, com, FKIOCTL, 0,
		    SD_PATH_STANDARD);
		break;
	default:
		scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
		    "sr_atapi_change_speed: Command '%x' Not Supported\n", cmd);
		rval = EINVAL;
	}

	if (sense) {
		kmem_free(sense, BUFLEN_MODE_CDROM_CAP);
	}
	if (com) {
		kmem_free(com, sizeof (*com));
	}
	return (rval);
}
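
/*
 * The speed fields consumed above come from the CD Capabilities and
 * Mechanical Status mode page (0x2A): relative to the start of the page
 * data, bytes 8-9 carry the maximum read speed and bytes 14-15 the
 * current read speed, both big-endian and in KB/sec (1x CD-ROM is about
 * 176 KB/sec, which is what SD_SPEED_1X represents). A minimal decoding
 * sketch, assuming 'page' points at the page data as computed above
 * (illustrative only, not part of the driver):
 */
#if 0	/* illustrative sketch, not compiled */
static void
example_decode_cap_page(const uchar_t *page, int *cur_kbps, int *max_kbps)
{
	/* big-endian 16-bit fields, per the byte extraction used above */
	*max_kbps = (page[8] << 8) | page[9];
	*cur_kbps = (page[14] << 8) | page[15];
}
#endif
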
/*
 * Function: sr_pause_resume()
 *
 * Description: This routine is the driver entry point for handling CD-ROM
 *		pause/resume ioctl requests. This only affects the audio play
 *		operation.
 *
 * Arguments: dev - the device 'dev_t'
 *	      cmd - the request type; one of CDROMPAUSE or CDROMRESUME, used
 *		    for setting the resume bit of the cdb.
 *
 * Return Code: the code returned by sd_send_scsi_cmd()
 *		EINVAL if invalid mode specified
 *		ENXIO if fail ddi_get_soft_state
 *
 */

static int
sr_pause_resume(dev_t dev, int cmd)
{
	struct sd_lun	*un;
	struct uscsi_cmd *com;
	char		cdb[CDB_GROUP1];
	int		rval;

	if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) {
		return (ENXIO);
	}

	com = kmem_zalloc(sizeof (*com), KM_SLEEP);
	bzero(cdb, CDB_GROUP1);
	cdb[0] = SCMD_PAUSE_RESUME;
	switch (cmd) {
	case CDROMRESUME:
		cdb[8] = 1;
		break;
	case CDROMPAUSE:
		cdb[8] = 0;
		break;
	default:
		scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, "sr_pause_resume:"
		    " Command '%x' Not Supported\n", cmd);
		rval = EINVAL;
		goto done;
	}

	com->uscsi_cdb = cdb;
	com->uscsi_cdblen = CDB_GROUP1;
	com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT;

	rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_SYSSPACE,
	    SD_PATH_STANDARD);

done:
	kmem_free(com, sizeof (*com));
	return (rval);
}
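
/*
 * PAUSE/RESUME (0x4B) carries a single control bit: byte 8 bit 0 is the
 * Resume bit, so the same CDB either pauses (0) or resumes (1) audio
 * playback, which is why sr_pause_resume() only has to vary cdb[8].
 * From user level the request is simply ioctl(fd, CDROMPAUSE) or
 * ioctl(fd, CDROMRESUME) with no argument (sketch only; 'fd' is an open
 * descriptor for the raw CD device).
 */
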
/*
 * Function: sr_play_msf()
 *
 * Description: This routine is the driver entry point for handling CD-ROM
 *		ioctl requests to output the audio signals at the specified
 *		starting address and continue the audio play until the
 *		specified ending address (CDROMPLAYMSF). The address is in
 *		Minute Second Frame (MSF) format.
 *
 * Arguments: dev - the device 'dev_t'
 *	      data - pointer to user provided audio msf structure,
 *		     specifying start/end addresses.
 *	      flag - this argument is a pass through to ddi_copyxxx()
 *		     directly from the mode argument of ioctl().
26114 * 26115 * Return Code: the code returned by sd_send_scsi_cmd() 26116 * EFAULT if ddi_copyxxx() fails 26117 * ENXIO if fail ddi_get_soft_state 26118 * EINVAL if data pointer is NULL 26119 */ 26120 26121 static int 26122 sr_play_msf(dev_t dev, caddr_t data, int flag) 26123 { 26124 struct sd_lun *un; 26125 struct uscsi_cmd *com; 26126 struct cdrom_msf msf_struct; 26127 struct cdrom_msf *msf = &msf_struct; 26128 char cdb[CDB_GROUP1]; 26129 int rval; 26130 26131 if (data == NULL) { 26132 return (EINVAL); 26133 } 26134 26135 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 26136 return (ENXIO); 26137 } 26138 26139 if (ddi_copyin(data, msf, sizeof (struct cdrom_msf), flag)) { 26140 return (EFAULT); 26141 } 26142 26143 com = kmem_zalloc(sizeof (*com), KM_SLEEP); 26144 bzero(cdb, CDB_GROUP1); 26145 cdb[0] = SCMD_PLAYAUDIO_MSF; 26146 if (un->un_f_cfg_playmsf_bcd == TRUE) { 26147 cdb[3] = BYTE_TO_BCD(msf->cdmsf_min0); 26148 cdb[4] = BYTE_TO_BCD(msf->cdmsf_sec0); 26149 cdb[5] = BYTE_TO_BCD(msf->cdmsf_frame0); 26150 cdb[6] = BYTE_TO_BCD(msf->cdmsf_min1); 26151 cdb[7] = BYTE_TO_BCD(msf->cdmsf_sec1); 26152 cdb[8] = BYTE_TO_BCD(msf->cdmsf_frame1); 26153 } else { 26154 cdb[3] = msf->cdmsf_min0; 26155 cdb[4] = msf->cdmsf_sec0; 26156 cdb[5] = msf->cdmsf_frame0; 26157 cdb[6] = msf->cdmsf_min1; 26158 cdb[7] = msf->cdmsf_sec1; 26159 cdb[8] = msf->cdmsf_frame1; 26160 } 26161 com->uscsi_cdb = cdb; 26162 com->uscsi_cdblen = CDB_GROUP1; 26163 com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT; 26164 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_SYSSPACE, 26165 SD_PATH_STANDARD); 26166 kmem_free(com, sizeof (*com)); 26167 return (rval); 26168 } 26169 26170 26171 /* 26172 * Function: sr_play_trkind() 26173 * 26174 * Description: This routine is the driver entry point for handling CD-ROM 26175 * ioctl requests to output the audio signals at the specified 26176 * starting address and continue the audio play until the specified 26177 * ending address (CDROMPLAYTRKIND). The address is in Track Index 26178 * format. 26179 * 26180 * Arguments: dev - the device 'dev_t' 26181 * data - pointer to user provided audio track/index structure, 26182 * specifying start/end addresses. 26183 * flag - this argument is a pass through to ddi_copyxxx() 26184 * directly from the mode argument of ioctl(). 
26185 * 26186 * Return Code: the code returned by sd_send_scsi_cmd() 26187 * EFAULT if ddi_copyxxx() fails 26188 * ENXIO if fail ddi_get_soft_state 26189 * EINVAL if data pointer is NULL 26190 */ 26191 26192 static int 26193 sr_play_trkind(dev_t dev, caddr_t data, int flag) 26194 { 26195 struct cdrom_ti ti_struct; 26196 struct cdrom_ti *ti = &ti_struct; 26197 struct uscsi_cmd *com = NULL; 26198 char cdb[CDB_GROUP1]; 26199 int rval; 26200 26201 if (data == NULL) { 26202 return (EINVAL); 26203 } 26204 26205 if (ddi_copyin(data, ti, sizeof (struct cdrom_ti), flag)) { 26206 return (EFAULT); 26207 } 26208 26209 com = kmem_zalloc(sizeof (*com), KM_SLEEP); 26210 bzero(cdb, CDB_GROUP1); 26211 cdb[0] = SCMD_PLAYAUDIO_TI; 26212 cdb[4] = ti->cdti_trk0; 26213 cdb[5] = ti->cdti_ind0; 26214 cdb[7] = ti->cdti_trk1; 26215 cdb[8] = ti->cdti_ind1; 26216 com->uscsi_cdb = cdb; 26217 com->uscsi_cdblen = CDB_GROUP1; 26218 com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT; 26219 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_SYSSPACE, 26220 SD_PATH_STANDARD); 26221 kmem_free(com, sizeof (*com)); 26222 return (rval); 26223 } 26224 26225 26226 /* 26227 * Function: sr_read_all_subcodes() 26228 * 26229 * Description: This routine is the driver entry point for handling CD-ROM 26230 * ioctl requests to return raw subcode data while the target is 26231 * playing audio (CDROMSUBCODE). 26232 * 26233 * Arguments: dev - the device 'dev_t' 26234 * data - pointer to user provided cdrom subcode structure, 26235 * specifying the transfer length and address. 26236 * flag - this argument is a pass through to ddi_copyxxx() 26237 * directly from the mode argument of ioctl(). 26238 * 26239 * Return Code: the code returned by sd_send_scsi_cmd() 26240 * EFAULT if ddi_copyxxx() fails 26241 * ENXIO if fail ddi_get_soft_state 26242 * EINVAL if data pointer is NULL 26243 */ 26244 26245 static int 26246 sr_read_all_subcodes(dev_t dev, caddr_t data, int flag) 26247 { 26248 struct sd_lun *un = NULL; 26249 struct uscsi_cmd *com = NULL; 26250 struct cdrom_subcode *subcode = NULL; 26251 int rval; 26252 size_t buflen; 26253 char cdb[CDB_GROUP5]; 26254 26255 #ifdef _MULTI_DATAMODEL 26256 /* To support ILP32 applications in an LP64 world */ 26257 struct cdrom_subcode32 cdrom_subcode32; 26258 struct cdrom_subcode32 *cdsc32 = &cdrom_subcode32; 26259 #endif 26260 if (data == NULL) { 26261 return (EINVAL); 26262 } 26263 26264 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 26265 return (ENXIO); 26266 } 26267 26268 subcode = kmem_zalloc(sizeof (struct cdrom_subcode), KM_SLEEP); 26269 26270 #ifdef _MULTI_DATAMODEL 26271 switch (ddi_model_convert_from(flag & FMODELS)) { 26272 case DDI_MODEL_ILP32: 26273 if (ddi_copyin(data, cdsc32, sizeof (*cdsc32), flag)) { 26274 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 26275 "sr_read_all_subcodes: ddi_copyin Failed\n"); 26276 kmem_free(subcode, sizeof (struct cdrom_subcode)); 26277 return (EFAULT); 26278 } 26279 /* Convert the ILP32 uscsi data from the application to LP64 */ 26280 cdrom_subcode32tocdrom_subcode(cdsc32, subcode); 26281 break; 26282 case DDI_MODEL_NONE: 26283 if (ddi_copyin(data, subcode, 26284 sizeof (struct cdrom_subcode), flag)) { 26285 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 26286 "sr_read_all_subcodes: ddi_copyin Failed\n"); 26287 kmem_free(subcode, sizeof (struct cdrom_subcode)); 26288 return (EFAULT); 26289 } 26290 break; 26291 } 26292 #else /* ! 
_MULTI_DATAMODEL */
	if (ddi_copyin(data, subcode, sizeof (struct cdrom_subcode), flag)) {
		scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
		    "sr_read_all_subcodes: ddi_copyin Failed\n");
		kmem_free(subcode, sizeof (struct cdrom_subcode));
		return (EFAULT);
	}
#endif /* _MULTI_DATAMODEL */

	/*
	 * Since MMC-2 expects max 3 bytes for length, check if the
	 * length input is greater than 3 bytes
	 */
	if ((subcode->cdsc_length & 0xFF000000) != 0) {
		scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
		    "sr_read_all_subcodes: "
		    "cdrom transfer length too large: %d (limit %d)\n",
		    subcode->cdsc_length, 0xFFFFFF);
		kmem_free(subcode, sizeof (struct cdrom_subcode));
		return (EINVAL);
	}

	buflen = CDROM_BLK_SUBCODE * subcode->cdsc_length;
	com = kmem_zalloc(sizeof (*com), KM_SLEEP);
	bzero(cdb, CDB_GROUP5);

	if (un->un_f_mmc_cap == TRUE) {
		cdb[0] = (char)SCMD_READ_CD;
		cdb[2] = (char)0xff;
		cdb[3] = (char)0xff;
		cdb[4] = (char)0xff;
		cdb[5] = (char)0xff;
		cdb[6] = (((subcode->cdsc_length) & 0x00ff0000) >> 16);
		cdb[7] = (((subcode->cdsc_length) & 0x0000ff00) >> 8);
		cdb[8] = ((subcode->cdsc_length) & 0x000000ff);
		cdb[10] = 1;
	} else {
		/*
		 * Note: A vendor specific command (0xDF) is being used here to
		 * request a read of all subcodes.
		 */
		cdb[0] = (char)SCMD_READ_ALL_SUBCODES;
		cdb[6] = (((subcode->cdsc_length) & 0xff000000) >> 24);
		cdb[7] = (((subcode->cdsc_length) & 0x00ff0000) >> 16);
		cdb[8] = (((subcode->cdsc_length) & 0x0000ff00) >> 8);
		cdb[9] = ((subcode->cdsc_length) & 0x000000ff);
	}
	com->uscsi_cdb = cdb;
	com->uscsi_cdblen = CDB_GROUP5;
	com->uscsi_bufaddr = (caddr_t)subcode->cdsc_addr;
	com->uscsi_buflen = buflen;
	com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT|USCSI_READ;
	rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_USERSPACE,
	    SD_PATH_STANDARD);
	kmem_free(subcode, sizeof (struct cdrom_subcode));
	kmem_free(com, sizeof (*com));
	return (rval);
}


/*
 * Function: sr_read_subchannel()
 *
 * Description: This routine is the driver entry point for handling CD-ROM
 *		ioctl requests to return the Q sub-channel data of the CD
 *		current position block (CDROMSUBCHNL). The data includes the
 *		track number, index number, absolute CD-ROM address (LBA or
 *		MSF format per the user), track relative CD-ROM address (LBA
 *		or MSF format per the user), control data and audio status.
 *
 * Arguments: dev - the device 'dev_t'
 *	      data - pointer to user provided cdrom sub-channel structure
 *	      flag - this argument is a pass through to ddi_copyxxx()
 *		     directly from the mode argument of ioctl().
26366 * 26367 * Return Code: the code returned by sd_send_scsi_cmd() 26368 * EFAULT if ddi_copyxxx() fails 26369 * ENXIO if fail ddi_get_soft_state 26370 * EINVAL if data pointer is NULL 26371 */ 26372 26373 static int 26374 sr_read_subchannel(dev_t dev, caddr_t data, int flag) 26375 { 26376 struct sd_lun *un; 26377 struct uscsi_cmd *com; 26378 struct cdrom_subchnl subchanel; 26379 struct cdrom_subchnl *subchnl = &subchanel; 26380 char cdb[CDB_GROUP1]; 26381 caddr_t buffer; 26382 int rval; 26383 26384 if (data == NULL) { 26385 return (EINVAL); 26386 } 26387 26388 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL || 26389 (un->un_state == SD_STATE_OFFLINE)) { 26390 return (ENXIO); 26391 } 26392 26393 if (ddi_copyin(data, subchnl, sizeof (struct cdrom_subchnl), flag)) { 26394 return (EFAULT); 26395 } 26396 26397 buffer = kmem_zalloc((size_t)16, KM_SLEEP); 26398 bzero(cdb, CDB_GROUP1); 26399 cdb[0] = SCMD_READ_SUBCHANNEL; 26400 /* Set the MSF bit based on the user requested address format */ 26401 cdb[1] = (subchnl->cdsc_format & CDROM_LBA) ? 0 : 0x02; 26402 /* 26403 * Set the Q bit in byte 2 to indicate that Q sub-channel data be 26404 * returned 26405 */ 26406 cdb[2] = 0x40; 26407 /* 26408 * Set byte 3 to specify the return data format. A value of 0x01 26409 * indicates that the CD-ROM current position should be returned. 26410 */ 26411 cdb[3] = 0x01; 26412 cdb[8] = 0x10; 26413 com = kmem_zalloc(sizeof (*com), KM_SLEEP); 26414 com->uscsi_cdb = cdb; 26415 com->uscsi_cdblen = CDB_GROUP1; 26416 com->uscsi_bufaddr = buffer; 26417 com->uscsi_buflen = 16; 26418 com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT|USCSI_READ; 26419 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_SYSSPACE, 26420 SD_PATH_STANDARD); 26421 if (rval != 0) { 26422 kmem_free(buffer, 16); 26423 kmem_free(com, sizeof (*com)); 26424 return (rval); 26425 } 26426 26427 /* Process the returned Q sub-channel data */ 26428 subchnl->cdsc_audiostatus = buffer[1]; 26429 subchnl->cdsc_adr = (buffer[5] & 0xF0); 26430 subchnl->cdsc_ctrl = (buffer[5] & 0x0F); 26431 subchnl->cdsc_trk = buffer[6]; 26432 subchnl->cdsc_ind = buffer[7]; 26433 if (subchnl->cdsc_format & CDROM_LBA) { 26434 subchnl->cdsc_absaddr.lba = 26435 ((uchar_t)buffer[8] << 24) + ((uchar_t)buffer[9] << 16) + 26436 ((uchar_t)buffer[10] << 8) + ((uchar_t)buffer[11]); 26437 subchnl->cdsc_reladdr.lba = 26438 ((uchar_t)buffer[12] << 24) + ((uchar_t)buffer[13] << 16) + 26439 ((uchar_t)buffer[14] << 8) + ((uchar_t)buffer[15]); 26440 } else if (un->un_f_cfg_readsub_bcd == TRUE) { 26441 subchnl->cdsc_absaddr.msf.minute = BCD_TO_BYTE(buffer[9]); 26442 subchnl->cdsc_absaddr.msf.second = BCD_TO_BYTE(buffer[10]); 26443 subchnl->cdsc_absaddr.msf.frame = BCD_TO_BYTE(buffer[11]); 26444 subchnl->cdsc_reladdr.msf.minute = BCD_TO_BYTE(buffer[13]); 26445 subchnl->cdsc_reladdr.msf.second = BCD_TO_BYTE(buffer[14]); 26446 subchnl->cdsc_reladdr.msf.frame = BCD_TO_BYTE(buffer[15]); 26447 } else { 26448 subchnl->cdsc_absaddr.msf.minute = buffer[9]; 26449 subchnl->cdsc_absaddr.msf.second = buffer[10]; 26450 subchnl->cdsc_absaddr.msf.frame = buffer[11]; 26451 subchnl->cdsc_reladdr.msf.minute = buffer[13]; 26452 subchnl->cdsc_reladdr.msf.second = buffer[14]; 26453 subchnl->cdsc_reladdr.msf.frame = buffer[15]; 26454 } 26455 kmem_free(buffer, 16); 26456 kmem_free(com, sizeof (*com)); 26457 if (ddi_copyout(subchnl, data, sizeof (struct cdrom_subchnl), flag) 26458 != 0) { 26459 return (EFAULT); 26460 } 26461 return (rval); 26462 } 26463 26464 26465 /* 26466 * Function: sr_read_tocentry() 26467 * 
 * Description: This routine is the driver entry point for handling CD-ROM
 *		ioctl requests to read from the Table of Contents (TOC)
 *		(CDROMREADTOCENTRY). This routine provides the ADR and CTRL
 *		fields, the starting address (LBA or MSF format per the user)
 *		and the data mode if the user specified track is a data track.
 *
 *		Note: The READ HEADER (0x44) command used in this routine is
 *		obsolete per the SCSI MMC spec but still supported in the
 *		MT FUJI vendor spec. Most equipment adheres to MT FUJI, so
 *		the command is still implemented in this routine.
 *
 * Arguments: dev - the device 'dev_t'
 *	      data - pointer to user provided toc entry structure,
 *		     specifying the track # and the address format
 *		     (LBA or MSF).
 *	      flag - this argument is a pass through to ddi_copyxxx()
 *		     directly from the mode argument of ioctl().
 *
 * Return Code: the code returned by sd_send_scsi_cmd()
 *		EFAULT if ddi_copyxxx() fails
 *		ENXIO if fail ddi_get_soft_state
 *		EINVAL if data pointer is NULL
 */

static int
sr_read_tocentry(dev_t dev, caddr_t data, int flag)
{
	struct sd_lun		*un = NULL;
	struct uscsi_cmd	*com;
	struct cdrom_tocentry	toc_entry;
	struct cdrom_tocentry	*entry = &toc_entry;
	caddr_t			buffer;
	int			rval;
	char			cdb[CDB_GROUP1];

	if (data == NULL) {
		return (EINVAL);
	}

	if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL ||
	    (un->un_state == SD_STATE_OFFLINE)) {
		return (ENXIO);
	}

	if (ddi_copyin(data, entry, sizeof (struct cdrom_tocentry), flag)) {
		return (EFAULT);
	}

	/* Validate the requested track and address format */
	if (!(entry->cdte_format & (CDROM_LBA | CDROM_MSF))) {
		return (EINVAL);
	}

	if (entry->cdte_track == 0) {
		return (EINVAL);
	}

	buffer = kmem_zalloc((size_t)12, KM_SLEEP);
	com = kmem_zalloc(sizeof (*com), KM_SLEEP);
	bzero(cdb, CDB_GROUP1);

	cdb[0] = SCMD_READ_TOC;
	/* Set the MSF bit based on the user requested address format */
	cdb[1] = ((entry->cdte_format & CDROM_LBA) ? 0 : 2);
	if (un->un_f_cfg_read_toc_trk_bcd == TRUE) {
		cdb[6] = BYTE_TO_BCD(entry->cdte_track);
	} else {
		cdb[6] = entry->cdte_track;
	}

	/*
	 * Bytes 7 & 8 are the 12 byte allocation length for a single entry.
	 * (4 byte TOC response header + 8 byte track descriptor)
	 */
	cdb[8] = 12;
	com->uscsi_cdb = cdb;
	com->uscsi_cdblen = CDB_GROUP1;
	com->uscsi_bufaddr = buffer;
	com->uscsi_buflen = 0x0C;
	com->uscsi_flags = (USCSI_DIAGNOSE | USCSI_SILENT | USCSI_READ);
	rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_SYSSPACE,
	    SD_PATH_STANDARD);
	if (rval != 0) {
		kmem_free(buffer, 12);
		kmem_free(com, sizeof (*com));
		return (rval);
	}

	/* Process the toc entry */
	entry->cdte_adr = (buffer[5] & 0xF0) >> 4;
	entry->cdte_ctrl = (buffer[5] & 0x0F);
	if (entry->cdte_format & CDROM_LBA) {
		entry->cdte_addr.lba =
		    ((uchar_t)buffer[8] << 24) + ((uchar_t)buffer[9] << 16) +
		    ((uchar_t)buffer[10] << 8) + ((uchar_t)buffer[11]);
	} else if (un->un_f_cfg_read_toc_addr_bcd == TRUE) {
		entry->cdte_addr.msf.minute = BCD_TO_BYTE(buffer[9]);
		entry->cdte_addr.msf.second = BCD_TO_BYTE(buffer[10]);
		entry->cdte_addr.msf.frame = BCD_TO_BYTE(buffer[11]);
		/*
		 * Send a READ TOC command using the LBA address format to get
		 * the LBA for the track requested so it can be used in the
		 * READ HEADER request
		 *
		 * Note: The MSF bit of the READ HEADER command specifies the
		 * output format. The block address specified in that command
		 * must be in LBA format.
		 */
		cdb[1] = 0;
		rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_SYSSPACE,
		    SD_PATH_STANDARD);
		if (rval != 0) {
			kmem_free(buffer, 12);
			kmem_free(com, sizeof (*com));
			return (rval);
		}
	} else {
		entry->cdte_addr.msf.minute = buffer[9];
		entry->cdte_addr.msf.second = buffer[10];
		entry->cdte_addr.msf.frame = buffer[11];
		/*
		 * Send a READ TOC command using the LBA address format to get
		 * the LBA for the track requested so it can be used in the
		 * READ HEADER request
		 *
		 * Note: The MSF bit of the READ HEADER command specifies the
		 * output format. The block address specified in that command
		 * must be in LBA format.
		 */
		cdb[1] = 0;
		rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_SYSSPACE,
		    SD_PATH_STANDARD);
		if (rval != 0) {
			kmem_free(buffer, 12);
			kmem_free(com, sizeof (*com));
			return (rval);
		}
	}

	/*
	 * Build and send the READ HEADER command to determine the data mode of
	 * the user specified track.
	 */
	if ((entry->cdte_ctrl & CDROM_DATA_TRACK) &&
	    (entry->cdte_track != CDROM_LEADOUT)) {
		bzero(cdb, CDB_GROUP1);
		cdb[0] = SCMD_READ_HEADER;
		cdb[2] = buffer[8];
		cdb[3] = buffer[9];
		cdb[4] = buffer[10];
		cdb[5] = buffer[11];
		cdb[8] = 0x08;
		com->uscsi_buflen = 0x08;
		rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_SYSSPACE,
		    SD_PATH_STANDARD);
		if (rval == 0) {
			entry->cdte_datamode = buffer[0];
		} else {
			/*
			 * The READ HEADER command failed. Since the command is
			 * obsolete in the MMC spec, it is better to return -1
			 * as an invalid data mode for the track so that the
			 * rest of the TOC data can still be received.
			 */
			entry->cdte_datamode = (uchar_t)-1;
		}
	} else {
		entry->cdte_datamode = (uchar_t)-1;
	}

	kmem_free(buffer, 12);
	kmem_free(com, sizeof (*com));
	if (ddi_copyout(entry, data, sizeof (struct cdrom_tocentry), flag) != 0)
		return (EFAULT);

	return (rval);
}
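
/*
 * Several of the TOC paths above convert between binary and BCD because
 * older drives (selected via un_f_cfg_read_toc_trk_bcd and friends)
 * report track numbers and MSF addresses in binary-coded decimal. A
 * minimal sketch of the usual nibble encoding that BYTE_TO_BCD() and
 * BCD_TO_BYTE() implement (illustrative only; the driver uses the macros
 * from the system headers):
 */
#if 0	/* illustrative sketch, not compiled */
static uchar_t
example_byte_to_bcd(uchar_t b)		/* e.g. 42 -> 0x42 */
{
	return (((b / 10) << 4) | (b % 10));
}

static uchar_t
example_bcd_to_byte(uchar_t bcd)	/* e.g. 0x42 -> 42 */
{
	return ((((bcd) >> 4) & 0xF) * 10 + ((bcd) & 0xF));
}
#endif
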
/*
 * Function: sr_read_tochdr()
 *
 * Description: This routine is the driver entry point for handling CD-ROM
 *		ioctl requests to read the Table of Contents (TOC) header
 *		(CDROMREADTOCHDR). The TOC header consists of the disk starting
 *		and ending track numbers.
 *
 * Arguments: dev - the device 'dev_t'
 *	      data - pointer to user provided toc header structure,
 *		     specifying the starting and ending track numbers.
 *	      flag - this argument is a pass through to ddi_copyxxx()
 *		     directly from the mode argument of ioctl().
 *
 * Return Code: the code returned by sd_send_scsi_cmd()
 *		EFAULT if ddi_copyxxx() fails
 *		ENXIO if fail ddi_get_soft_state
 *		EINVAL if data pointer is NULL
 */

static int
sr_read_tochdr(dev_t dev, caddr_t data, int flag)
{
	struct sd_lun		*un;
	struct uscsi_cmd	*com;
	struct cdrom_tochdr	toc_header;
	struct cdrom_tochdr	*hdr = &toc_header;
	char			cdb[CDB_GROUP1];
	int			rval;
	caddr_t			buffer;

	if (data == NULL) {
		return (EINVAL);
	}

	if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL ||
	    (un->un_state == SD_STATE_OFFLINE)) {
		return (ENXIO);
	}

	buffer = kmem_zalloc(4, KM_SLEEP);
	bzero(cdb, CDB_GROUP1);
	cdb[0] = SCMD_READ_TOC;
	/*
	 * Specifying a track number of 0x00 in the READ TOC command indicates
	 * that the TOC header should be returned
	 */
	cdb[6] = 0x00;
	/*
	 * Bytes 7 & 8 are the 4 byte allocation length for TOC header.
	 * (2 byte data len + 1 byte starting track # + 1 byte ending track #)
	 */
	cdb[8] = 0x04;
	com = kmem_zalloc(sizeof (*com), KM_SLEEP);
	com->uscsi_cdb = cdb;
	com->uscsi_cdblen = CDB_GROUP1;
	com->uscsi_bufaddr = buffer;
	com->uscsi_buflen = 0x04;
	com->uscsi_timeout = 300;
	com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT|USCSI_READ;

	rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_SYSSPACE,
	    SD_PATH_STANDARD);
	if (un->un_f_cfg_read_toc_trk_bcd == TRUE) {
		hdr->cdth_trk0 = BCD_TO_BYTE(buffer[2]);
		hdr->cdth_trk1 = BCD_TO_BYTE(buffer[3]);
	} else {
		hdr->cdth_trk0 = buffer[2];
		hdr->cdth_trk1 = buffer[3];
	}
	kmem_free(buffer, 4);
	kmem_free(com, sizeof (*com));
	if (ddi_copyout(hdr, data, sizeof (struct cdrom_tochdr), flag) != 0) {
		return (EFAULT);
	}
	return (rval);
}


/*
 * Note: The following sr_read_mode1(), sr_read_cd_mode2(), sr_read_mode2(),
 * sr_read_cdda(), and sr_read_cdxa() routines implement driver support for
 * handling CDROMREAD ioctl requests for mode 1 user data, mode 2 user data,
 * digital audio and extended architecture digital audio. These modes are
 * defined in the IEC908 (Red Book), ISO10149 (Yellow Book), and the SCSI3
 * MMC specs.
26733 * 26734 * In addition to support for the various data formats these routines also 26735 * include support for devices that implement only the direct access READ 26736 * commands (0x08, 0x28), devices that implement the READ_CD commands 26737 * (0xBE, 0xD4), and devices that implement the vendor unique READ CDDA and 26738 * READ CDXA commands (0xD8, 0xDB) 26739 */ 26740 26741 /* 26742 * Function: sr_read_mode1() 26743 * 26744 * Description: This routine is the driver entry point for handling CD-ROM 26745 * ioctl read mode1 requests (CDROMREADMODE1). 26746 * 26747 * Arguments: dev - the device 'dev_t' 26748 * data - pointer to user provided cd read structure specifying 26749 * the lba buffer address and length. 26750 * flag - this argument is a pass through to ddi_copyxxx() 26751 * directly from the mode argument of ioctl(). 26752 * 26753 * Return Code: the code returned by sd_send_scsi_cmd() 26754 * EFAULT if ddi_copyxxx() fails 26755 * ENXIO if fail ddi_get_soft_state 26756 * EINVAL if data pointer is NULL 26757 */ 26758 26759 static int 26760 sr_read_mode1(dev_t dev, caddr_t data, int flag) 26761 { 26762 struct sd_lun *un; 26763 struct cdrom_read mode1_struct; 26764 struct cdrom_read *mode1 = &mode1_struct; 26765 int rval; 26766 sd_ssc_t *ssc; 26767 26768 #ifdef _MULTI_DATAMODEL 26769 /* To support ILP32 applications in an LP64 world */ 26770 struct cdrom_read32 cdrom_read32; 26771 struct cdrom_read32 *cdrd32 = &cdrom_read32; 26772 #endif /* _MULTI_DATAMODEL */ 26773 26774 if (data == NULL) { 26775 return (EINVAL); 26776 } 26777 26778 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL || 26779 (un->un_state == SD_STATE_OFFLINE)) { 26780 return (ENXIO); 26781 } 26782 26783 SD_TRACE(SD_LOG_ATTACH_DETACH, un, 26784 "sd_read_mode1: entry: un:0x%p\n", un); 26785 26786 #ifdef _MULTI_DATAMODEL 26787 switch (ddi_model_convert_from(flag & FMODELS)) { 26788 case DDI_MODEL_ILP32: 26789 if (ddi_copyin(data, cdrd32, sizeof (*cdrd32), flag) != 0) { 26790 return (EFAULT); 26791 } 26792 /* Convert the ILP32 uscsi data from the application to LP64 */ 26793 cdrom_read32tocdrom_read(cdrd32, mode1); 26794 break; 26795 case DDI_MODEL_NONE: 26796 if (ddi_copyin(data, mode1, sizeof (struct cdrom_read), flag)) { 26797 return (EFAULT); 26798 } 26799 } 26800 #else /* ! _MULTI_DATAMODEL */ 26801 if (ddi_copyin(data, mode1, sizeof (struct cdrom_read), flag)) { 26802 return (EFAULT); 26803 } 26804 #endif /* _MULTI_DATAMODEL */ 26805 26806 ssc = sd_ssc_init(un); 26807 rval = sd_send_scsi_READ(ssc, mode1->cdread_bufaddr, 26808 mode1->cdread_buflen, mode1->cdread_lba, SD_PATH_STANDARD); 26809 sd_ssc_fini(ssc); 26810 26811 SD_TRACE(SD_LOG_ATTACH_DETACH, un, 26812 "sd_read_mode1: exit: un:0x%p\n", un); 26813 26814 return (rval); 26815 } 26816 26817 26818 /* 26819 * Function: sr_read_cd_mode2() 26820 * 26821 * Description: This routine is the driver entry point for handling CD-ROM 26822 * ioctl read mode2 requests (CDROMREADMODE2) for devices that 26823 * support the READ CD (0xBE) command or the 1st generation 26824 * READ CD (0xD4) command. 26825 * 26826 * Arguments: dev - the device 'dev_t' 26827 * data - pointer to user provided cd read structure specifying 26828 * the lba buffer address and length. 26829 * flag - this argument is a pass through to ddi_copyxxx() 26830 * directly from the mode argument of ioctl(). 
26831 * 26832 * Return Code: the code returned by sd_send_scsi_cmd() 26833 * EFAULT if ddi_copyxxx() fails 26834 * ENXIO if fail ddi_get_soft_state 26835 * EINVAL if data pointer is NULL 26836 */ 26837 26838 static int 26839 sr_read_cd_mode2(dev_t dev, caddr_t data, int flag) 26840 { 26841 struct sd_lun *un; 26842 struct uscsi_cmd *com; 26843 struct cdrom_read mode2_struct; 26844 struct cdrom_read *mode2 = &mode2_struct; 26845 uchar_t cdb[CDB_GROUP5]; 26846 int nblocks; 26847 int rval; 26848 #ifdef _MULTI_DATAMODEL 26849 /* To support ILP32 applications in an LP64 world */ 26850 struct cdrom_read32 cdrom_read32; 26851 struct cdrom_read32 *cdrd32 = &cdrom_read32; 26852 #endif /* _MULTI_DATAMODEL */ 26853 26854 if (data == NULL) { 26855 return (EINVAL); 26856 } 26857 26858 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL || 26859 (un->un_state == SD_STATE_OFFLINE)) { 26860 return (ENXIO); 26861 } 26862 26863 #ifdef _MULTI_DATAMODEL 26864 switch (ddi_model_convert_from(flag & FMODELS)) { 26865 case DDI_MODEL_ILP32: 26866 if (ddi_copyin(data, cdrd32, sizeof (*cdrd32), flag) != 0) { 26867 return (EFAULT); 26868 } 26869 /* Convert the ILP32 uscsi data from the application to LP64 */ 26870 cdrom_read32tocdrom_read(cdrd32, mode2); 26871 break; 26872 case DDI_MODEL_NONE: 26873 if (ddi_copyin(data, mode2, sizeof (*mode2), flag) != 0) { 26874 return (EFAULT); 26875 } 26876 break; 26877 } 26878 26879 #else /* ! _MULTI_DATAMODEL */ 26880 if (ddi_copyin(data, mode2, sizeof (*mode2), flag) != 0) { 26881 return (EFAULT); 26882 } 26883 #endif /* _MULTI_DATAMODEL */ 26884 26885 bzero(cdb, sizeof (cdb)); 26886 if (un->un_f_cfg_read_cd_xd4 == TRUE) { 26887 /* Read command supported by 1st generation atapi drives */ 26888 cdb[0] = SCMD_READ_CDD4; 26889 } else { 26890 /* Universal CD Access Command */ 26891 cdb[0] = SCMD_READ_CD; 26892 } 26893 26894 /* 26895 * Set expected sector type to: 2336s byte, Mode 2 Yellow Book 26896 */ 26897 cdb[1] = CDROM_SECTOR_TYPE_MODE2; 26898 26899 /* set the start address */ 26900 cdb[2] = (uchar_t)((mode2->cdread_lba >> 24) & 0XFF); 26901 cdb[3] = (uchar_t)((mode2->cdread_lba >> 16) & 0XFF); 26902 cdb[4] = (uchar_t)((mode2->cdread_lba >> 8) & 0xFF); 26903 cdb[5] = (uchar_t)(mode2->cdread_lba & 0xFF); 26904 26905 /* set the transfer length */ 26906 nblocks = mode2->cdread_buflen / 2336; 26907 cdb[6] = (uchar_t)(nblocks >> 16); 26908 cdb[7] = (uchar_t)(nblocks >> 8); 26909 cdb[8] = (uchar_t)nblocks; 26910 26911 /* set the filter bits */ 26912 cdb[9] = CDROM_READ_CD_USERDATA; 26913 26914 com = kmem_zalloc(sizeof (*com), KM_SLEEP); 26915 com->uscsi_cdb = (caddr_t)cdb; 26916 com->uscsi_cdblen = sizeof (cdb); 26917 com->uscsi_bufaddr = mode2->cdread_bufaddr; 26918 com->uscsi_buflen = mode2->cdread_buflen; 26919 com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT|USCSI_READ; 26920 26921 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_USERSPACE, 26922 SD_PATH_STANDARD); 26923 kmem_free(com, sizeof (*com)); 26924 return (rval); 26925 } 26926 26927 26928 /* 26929 * Function: sr_read_mode2() 26930 * 26931 * Description: This routine is the driver entry point for handling CD-ROM 26932 * ioctl read mode2 requests (CDROMREADMODE2) for devices that 26933 * do not support the READ CD (0xBE) command. 26934 * 26935 * Arguments: dev - the device 'dev_t' 26936 * data - pointer to user provided cd read structure specifying 26937 * the lba buffer address and length. 26938 * flag - this argument is a pass through to ddi_copyxxx() 26939 * directly from the mode argument of ioctl(). 
26940 * 26941 * Return Code: the code returned by sd_send_scsi_cmd() 26942 * EFAULT if ddi_copyxxx() fails 26943 * ENXIO if fail ddi_get_soft_state 26944 * EINVAL if data pointer is NULL 26945 * EIO if fail to reset block size 26946 * EAGAIN if commands are in progress in the driver 26947 */ 26948 26949 static int 26950 sr_read_mode2(dev_t dev, caddr_t data, int flag) 26951 { 26952 struct sd_lun *un; 26953 struct cdrom_read mode2_struct; 26954 struct cdrom_read *mode2 = &mode2_struct; 26955 int rval; 26956 uint32_t restore_blksize; 26957 struct uscsi_cmd *com; 26958 uchar_t cdb[CDB_GROUP0]; 26959 int nblocks; 26960 26961 #ifdef _MULTI_DATAMODEL 26962 /* To support ILP32 applications in an LP64 world */ 26963 struct cdrom_read32 cdrom_read32; 26964 struct cdrom_read32 *cdrd32 = &cdrom_read32; 26965 #endif /* _MULTI_DATAMODEL */ 26966 26967 if (data == NULL) { 26968 return (EINVAL); 26969 } 26970 26971 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL || 26972 (un->un_state == SD_STATE_OFFLINE)) { 26973 return (ENXIO); 26974 } 26975 26976 /* 26977 * Because this routine will update the device and driver block size 26978 * being used we want to make sure there are no commands in progress. 26979 * If commands are in progress the user will have to try again. 26980 * 26981 * We check for 1 instead of 0 because we increment un_ncmds_in_driver 26982 * in sdioctl to protect commands from sdioctl through to the top of 26983 * sd_uscsi_strategy. See sdioctl for details. 26984 */ 26985 mutex_enter(SD_MUTEX(un)); 26986 if (un->un_ncmds_in_driver != 1) { 26987 mutex_exit(SD_MUTEX(un)); 26988 return (EAGAIN); 26989 } 26990 mutex_exit(SD_MUTEX(un)); 26991 26992 SD_TRACE(SD_LOG_ATTACH_DETACH, un, 26993 "sd_read_mode2: entry: un:0x%p\n", un); 26994 26995 #ifdef _MULTI_DATAMODEL 26996 switch (ddi_model_convert_from(flag & FMODELS)) { 26997 case DDI_MODEL_ILP32: 26998 if (ddi_copyin(data, cdrd32, sizeof (*cdrd32), flag) != 0) { 26999 return (EFAULT); 27000 } 27001 /* Convert the ILP32 uscsi data from the application to LP64 */ 27002 cdrom_read32tocdrom_read(cdrd32, mode2); 27003 break; 27004 case DDI_MODEL_NONE: 27005 if (ddi_copyin(data, mode2, sizeof (*mode2), flag) != 0) { 27006 return (EFAULT); 27007 } 27008 break; 27009 } 27010 #else /* ! 
_MULTI_DATAMODEL */ 27011 if (ddi_copyin(data, mode2, sizeof (*mode2), flag)) { 27012 return (EFAULT); 27013 } 27014 #endif /* _MULTI_DATAMODEL */ 27015 27016 /* Store the current target block size for restoration later */ 27017 restore_blksize = un->un_tgt_blocksize; 27018 27019 /* Change the device and soft state target block size to 2336 */ 27020 if (sr_sector_mode(dev, SD_MODE2_BLKSIZE) != 0) { 27021 rval = EIO; 27022 goto done; 27023 } 27024 27025 27026 bzero(cdb, sizeof (cdb)); 27027 27028 /* set READ operation */ 27029 cdb[0] = SCMD_READ; 27030 27031 /* adjust lba for 2kbyte blocks from 512 byte blocks */ 27032 mode2->cdread_lba >>= 2; 27033 27034 /* set the start address */ 27035 cdb[1] = (uchar_t)((mode2->cdread_lba >> 16) & 0X1F); 27036 cdb[2] = (uchar_t)((mode2->cdread_lba >> 8) & 0xFF); 27037 cdb[3] = (uchar_t)(mode2->cdread_lba & 0xFF); 27038 27039 /* set the transfer length */ 27040 nblocks = mode2->cdread_buflen / 2336; 27041 cdb[4] = (uchar_t)nblocks & 0xFF; 27042 27043 /* build command */ 27044 com = kmem_zalloc(sizeof (*com), KM_SLEEP); 27045 com->uscsi_cdb = (caddr_t)cdb; 27046 com->uscsi_cdblen = sizeof (cdb); 27047 com->uscsi_bufaddr = mode2->cdread_bufaddr; 27048 com->uscsi_buflen = mode2->cdread_buflen; 27049 com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT|USCSI_READ; 27050 27051 /* 27052 * Issue SCSI command with user space address for read buffer. 27053 * 27054 * This sends the command through main channel in the driver. 27055 * 27056 * Since this is accessed via an IOCTL call, we go through the 27057 * standard path, so that if the device was powered down, then 27058 * it would be 'awakened' to handle the command. 27059 */ 27060 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_USERSPACE, 27061 SD_PATH_STANDARD); 27062 27063 kmem_free(com, sizeof (*com)); 27064 27065 /* Restore the device and soft state target block size */ 27066 if (sr_sector_mode(dev, restore_blksize) != 0) { 27067 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 27068 "can't do switch back to mode 1\n"); 27069 /* 27070 * If sd_send_scsi_READ succeeded we still need to report 27071 * an error because we failed to reset the block size 27072 */ 27073 if (rval == 0) { 27074 rval = EIO; 27075 } 27076 } 27077 27078 done: 27079 SD_TRACE(SD_LOG_ATTACH_DETACH, un, 27080 "sd_read_mode2: exit: un:0x%p\n", un); 27081 27082 return (rval); 27083 } 27084 27085 27086 /* 27087 * Function: sr_sector_mode() 27088 * 27089 * Description: This utility function is used by sr_read_mode2 to set the target 27090 * block size based on the user specified size. This is a legacy 27091 * implementation based upon a vendor specific mode page 27092 * 27093 * Arguments: dev - the device 'dev_t' 27094 * data - flag indicating if block size is being set to 2336 or 27095 * 512. 
27096 * 27097 * Return Code: the code returned by sd_send_scsi_cmd() 27098 * EFAULT if ddi_copyxxx() fails 27099 * ENXIO if fail ddi_get_soft_state 27100 * EINVAL if data pointer is NULL 27101 */ 27102 27103 static int 27104 sr_sector_mode(dev_t dev, uint32_t blksize) 27105 { 27106 struct sd_lun *un; 27107 uchar_t *sense; 27108 uchar_t *select; 27109 int rval; 27110 sd_ssc_t *ssc; 27111 27112 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL || 27113 (un->un_state == SD_STATE_OFFLINE)) { 27114 return (ENXIO); 27115 } 27116 27117 sense = kmem_zalloc(20, KM_SLEEP); 27118 27119 /* Note: This is a vendor specific mode page (0x81) */ 27120 ssc = sd_ssc_init(un); 27121 rval = sd_send_scsi_MODE_SENSE(ssc, CDB_GROUP0, sense, 20, 0x81, 27122 SD_PATH_STANDARD); 27123 sd_ssc_fini(ssc); 27124 if (rval != 0) { 27125 SD_ERROR(SD_LOG_IOCTL_RMMEDIA, un, 27126 "sr_sector_mode: Mode Sense failed\n"); 27127 kmem_free(sense, 20); 27128 return (rval); 27129 } 27130 select = kmem_zalloc(20, KM_SLEEP); 27131 select[3] = 0x08; 27132 select[10] = ((blksize >> 8) & 0xff); 27133 select[11] = (blksize & 0xff); 27134 select[12] = 0x01; 27135 select[13] = 0x06; 27136 select[14] = sense[14]; 27137 select[15] = sense[15]; 27138 if (blksize == SD_MODE2_BLKSIZE) { 27139 select[14] |= 0x01; 27140 } 27141 27142 ssc = sd_ssc_init(un); 27143 rval = sd_send_scsi_MODE_SELECT(ssc, CDB_GROUP0, select, 20, 27144 SD_DONTSAVE_PAGE, SD_PATH_STANDARD); 27145 sd_ssc_fini(ssc); 27146 if (rval != 0) { 27147 SD_ERROR(SD_LOG_IOCTL_RMMEDIA, un, 27148 "sr_sector_mode: Mode Select failed\n"); 27149 } else { 27150 /* 27151 * Only update the softstate block size if we successfully 27152 * changed the device block mode. 27153 */ 27154 mutex_enter(SD_MUTEX(un)); 27155 sd_update_block_info(un, blksize, 0); 27156 mutex_exit(SD_MUTEX(un)); 27157 } 27158 kmem_free(sense, 20); 27159 kmem_free(select, 20); 27160 return (rval); 27161 } 27162 27163 27164 /* 27165 * Function: sr_read_cdda() 27166 * 27167 * Description: This routine is the driver entry point for handling CD-ROM 27168 * ioctl requests to return CD-DA or subcode data. (CDROMCDDA) If 27169 * the target supports CDDA these requests are handled via a vendor 27170 * specific command (0xD8) If the target does not support CDDA 27171 * these requests are handled via the READ CD command (0xBE). 27172 * 27173 * Arguments: dev - the device 'dev_t' 27174 * data - pointer to user provided CD-DA structure specifying 27175 * the track starting address, transfer length, and 27176 * subcode options. 27177 * flag - this argument is a pass through to ddi_copyxxx() 27178 * directly from the mode argument of ioctl(). 
27179 * 27180 * Return Code: the code returned by sd_send_scsi_cmd() 27181 * EFAULT if ddi_copyxxx() fails 27182 * ENXIO if fail ddi_get_soft_state 27183 * EINVAL if invalid arguments are provided 27184 * ENOTTY 27185 */ 27186 27187 static int 27188 sr_read_cdda(dev_t dev, caddr_t data, int flag) 27189 { 27190 struct sd_lun *un; 27191 struct uscsi_cmd *com; 27192 struct cdrom_cdda *cdda; 27193 int rval; 27194 size_t buflen; 27195 char cdb[CDB_GROUP5]; 27196 27197 #ifdef _MULTI_DATAMODEL 27198 /* To support ILP32 applications in an LP64 world */ 27199 struct cdrom_cdda32 cdrom_cdda32; 27200 struct cdrom_cdda32 *cdda32 = &cdrom_cdda32; 27201 #endif /* _MULTI_DATAMODEL */ 27202 27203 if (data == NULL) { 27204 return (EINVAL); 27205 } 27206 27207 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 27208 return (ENXIO); 27209 } 27210 27211 cdda = kmem_zalloc(sizeof (struct cdrom_cdda), KM_SLEEP); 27212 27213 #ifdef _MULTI_DATAMODEL 27214 switch (ddi_model_convert_from(flag & FMODELS)) { 27215 case DDI_MODEL_ILP32: 27216 if (ddi_copyin(data, cdda32, sizeof (*cdda32), flag)) { 27217 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 27218 "sr_read_cdda: ddi_copyin Failed\n"); 27219 kmem_free(cdda, sizeof (struct cdrom_cdda)); 27220 return (EFAULT); 27221 } 27222 /* Convert the ILP32 uscsi data from the application to LP64 */ 27223 cdrom_cdda32tocdrom_cdda(cdda32, cdda); 27224 break; 27225 case DDI_MODEL_NONE: 27226 if (ddi_copyin(data, cdda, sizeof (struct cdrom_cdda), flag)) { 27227 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 27228 "sr_read_cdda: ddi_copyin Failed\n"); 27229 kmem_free(cdda, sizeof (struct cdrom_cdda)); 27230 return (EFAULT); 27231 } 27232 break; 27233 } 27234 #else /* ! _MULTI_DATAMODEL */ 27235 if (ddi_copyin(data, cdda, sizeof (struct cdrom_cdda), flag)) { 27236 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 27237 "sr_read_cdda: ddi_copyin Failed\n"); 27238 kmem_free(cdda, sizeof (struct cdrom_cdda)); 27239 return (EFAULT); 27240 } 27241 #endif /* _MULTI_DATAMODEL */ 27242 27243 /* 27244 * Since MMC-2 expects max 3 bytes for length, check if the 27245 * length input is greater than 3 bytes 27246 */ 27247 if ((cdda->cdda_length & 0xFF000000) != 0) { 27248 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, "sr_read_cdda: " 27249 "cdrom transfer length too large: %d (limit %d)\n", 27250 cdda->cdda_length, 0xFFFFFF); 27251 kmem_free(cdda, sizeof (struct cdrom_cdda)); 27252 return (EINVAL); 27253 } 27254 27255 switch (cdda->cdda_subcode) { 27256 case CDROM_DA_NO_SUBCODE: 27257 buflen = CDROM_BLK_2352 * cdda->cdda_length; 27258 break; 27259 case CDROM_DA_SUBQ: 27260 buflen = CDROM_BLK_2368 * cdda->cdda_length; 27261 break; 27262 case CDROM_DA_ALL_SUBCODE: 27263 buflen = CDROM_BLK_2448 * cdda->cdda_length; 27264 break; 27265 case CDROM_DA_SUBCODE_ONLY: 27266 buflen = CDROM_BLK_SUBCODE * cdda->cdda_length; 27267 break; 27268 default: 27269 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 27270 "sr_read_cdda: Subcode '0x%x' Not Supported\n", 27271 cdda->cdda_subcode); 27272 kmem_free(cdda, sizeof (struct cdrom_cdda)); 27273 return (EINVAL); 27274 } 27275 27276 /* Build and send the command */ 27277 com = kmem_zalloc(sizeof (*com), KM_SLEEP); 27278 bzero(cdb, CDB_GROUP5); 27279 27280 if (un->un_f_cfg_cdda == TRUE) { 27281 cdb[0] = (char)SCMD_READ_CD; 27282 cdb[1] = 0x04; 27283 cdb[2] = (((cdda->cdda_addr) & 0xff000000) >> 24); 27284 cdb[3] = (((cdda->cdda_addr) & 0x00ff0000) >> 16); 27285 cdb[4] = (((cdda->cdda_addr) & 0x0000ff00) >> 8); 27286 cdb[5] = ((cdda->cdda_addr) & 0x000000ff); 27287 
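		/*
		 * Bytes 6-8 of READ CD carry the transfer length in blocks;
		 * byte 9 (0x10 below) asks for user data only, and byte 10
		 * (set per-subcode in the switch that follows) selects which
		 * sub-channel data, if any, is interleaved into the transfer.
		 */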
		cdb[6] = (((cdda->cdda_length) & 0x00ff0000) >> 16);
		cdb[7] = (((cdda->cdda_length) & 0x0000ff00) >> 8);
		cdb[8] = ((cdda->cdda_length) & 0x000000ff);
		cdb[9] = 0x10;
		switch (cdda->cdda_subcode) {
		case CDROM_DA_NO_SUBCODE:
			cdb[10] = 0x0;
			break;
		case CDROM_DA_SUBQ:
			cdb[10] = 0x2;
			break;
		case CDROM_DA_ALL_SUBCODE:
			cdb[10] = 0x1;
			break;
		case CDROM_DA_SUBCODE_ONLY:
			/* FALLTHROUGH */
		default:
			kmem_free(cdda, sizeof (struct cdrom_cdda));
			kmem_free(com, sizeof (*com));
			return (ENOTTY);
		}
	} else {
		cdb[0] = (char)SCMD_READ_CDDA;
		cdb[2] = (((cdda->cdda_addr) & 0xff000000) >> 24);
		cdb[3] = (((cdda->cdda_addr) & 0x00ff0000) >> 16);
		cdb[4] = (((cdda->cdda_addr) & 0x0000ff00) >> 8);
		cdb[5] = ((cdda->cdda_addr) & 0x000000ff);
		cdb[6] = (((cdda->cdda_length) & 0xff000000) >> 24);
		cdb[7] = (((cdda->cdda_length) & 0x00ff0000) >> 16);
		cdb[8] = (((cdda->cdda_length) & 0x0000ff00) >> 8);
		cdb[9] = ((cdda->cdda_length) & 0x000000ff);
		cdb[10] = cdda->cdda_subcode;
	}

	com->uscsi_cdb = cdb;
	com->uscsi_cdblen = CDB_GROUP5;
	com->uscsi_bufaddr = (caddr_t)cdda->cdda_data;
	com->uscsi_buflen = buflen;
	com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT|USCSI_READ;

	rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_USERSPACE,
	    SD_PATH_STANDARD);

	kmem_free(cdda, sizeof (struct cdrom_cdda));
	kmem_free(com, sizeof (*com));
	return (rval);
}


/*
 * Function: sr_read_cdxa()
 *
 * Description: This routine is the driver entry point for handling CD-ROM
 *		ioctl requests to return CD-XA (Extended Architecture) data.
 *		(CDROMCDXA).
 *
 * Arguments: dev	- the device 'dev_t'
 *	      data	- pointer to user provided CD-XA structure specifying
 *			  the data starting address, transfer length, and
 *			  format
 *	      flag	- this argument is a pass through to ddi_copyxxx()
 *			  directly from the mode argument of ioctl().
 *
 * Return Code: the code returned by sd_send_scsi_cmd()
 *		EFAULT if ddi_copyxxx() fails
 *		ENXIO if fail ddi_get_soft_state
 *		EINVAL if data pointer is NULL
 */

static int
sr_read_cdxa(dev_t dev, caddr_t data, int flag)
{
	struct sd_lun		*un;
	struct uscsi_cmd	*com;
	struct cdrom_cdxa	*cdxa;
	int			rval;
	size_t			buflen;
	char			cdb[CDB_GROUP5];
	uchar_t			read_flags;

#ifdef _MULTI_DATAMODEL
	/* To support ILP32 applications in an LP64 world */
	struct cdrom_cdxa32	cdrom_cdxa32;
	struct cdrom_cdxa32	*cdxa32 = &cdrom_cdxa32;
#endif /* _MULTI_DATAMODEL */

	if (data == NULL) {
		return (EINVAL);
	}

	if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) {
		return (ENXIO);
	}

	cdxa = kmem_zalloc(sizeof (struct cdrom_cdxa), KM_SLEEP);

#ifdef _MULTI_DATAMODEL
	switch (ddi_model_convert_from(flag & FMODELS)) {
	case DDI_MODEL_ILP32:
		if (ddi_copyin(data, cdxa32, sizeof (*cdxa32), flag)) {
			kmem_free(cdxa, sizeof (struct cdrom_cdxa));
			return (EFAULT);
		}
		/*
		 * Convert the ILP32 uscsi data from the
		 * application to LP64 for internal use.
		 */
		cdrom_cdxa32tocdrom_cdxa(cdxa32, cdxa);
		break;
	case DDI_MODEL_NONE:
		if (ddi_copyin(data, cdxa, sizeof (struct cdrom_cdxa), flag)) {
			kmem_free(cdxa, sizeof (struct cdrom_cdxa));
			return (EFAULT);
		}
		break;
	}
#else /* ! _MULTI_DATAMODEL */
	if (ddi_copyin(data, cdxa, sizeof (struct cdrom_cdxa), flag)) {
		kmem_free(cdxa, sizeof (struct cdrom_cdxa));
		return (EFAULT);
	}
#endif /* _MULTI_DATAMODEL */

	/*
	 * Since MMC-2 expects max 3 bytes for length, check if the
	 * length input is greater than 3 bytes
	 */
	if ((cdxa->cdxa_length & 0xFF000000) != 0) {
		scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, "sr_read_cdxa: "
		    "cdrom transfer length too large: %d (limit %d)\n",
		    cdxa->cdxa_length, 0xFFFFFF);
		kmem_free(cdxa, sizeof (struct cdrom_cdxa));
		return (EINVAL);
	}

	switch (cdxa->cdxa_format) {
	case CDROM_XA_DATA:
		buflen = CDROM_BLK_2048 * cdxa->cdxa_length;
		read_flags = 0x10;
		break;
	case CDROM_XA_SECTOR_DATA:
		buflen = CDROM_BLK_2352 * cdxa->cdxa_length;
		read_flags = 0xf8;
		break;
	case CDROM_XA_DATA_W_ERROR:
		buflen = CDROM_BLK_2646 * cdxa->cdxa_length;
		read_flags = 0xfc;
		break;
	default:
		scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
		    "sr_read_cdxa: Format '0x%x' Not Supported\n",
		    cdxa->cdxa_format);
		kmem_free(cdxa, sizeof (struct cdrom_cdxa));
		return (EINVAL);
	}

	com = kmem_zalloc(sizeof (*com), KM_SLEEP);
	bzero(cdb, CDB_GROUP5);
	if (un->un_f_mmc_cap == TRUE) {
		cdb[0] = (char)SCMD_READ_CD;
		cdb[2] = (((cdxa->cdxa_addr) & 0xff000000) >> 24);
		cdb[3] = (((cdxa->cdxa_addr) & 0x00ff0000) >> 16);
		cdb[4] = (((cdxa->cdxa_addr) & 0x0000ff00) >> 8);
		cdb[5] = ((cdxa->cdxa_addr) & 0x000000ff);
		cdb[6] = (((cdxa->cdxa_length) & 0x00ff0000) >> 16);
		cdb[7] = (((cdxa->cdxa_length) & 0x0000ff00) >> 8);
		cdb[8] = ((cdxa->cdxa_length) & 0x000000ff);
		cdb[9] = (char)read_flags;
	} else {
		/*
		 * Note: A vendor specific command (0xDB) is being used here
		 * to request a read of all subcodes.
		 */
		cdb[0] = (char)SCMD_READ_CDXA;
		cdb[2] = (((cdxa->cdxa_addr) & 0xff000000) >> 24);
		cdb[3] = (((cdxa->cdxa_addr) & 0x00ff0000) >> 16);
		cdb[4] = (((cdxa->cdxa_addr) & 0x0000ff00) >> 8);
		cdb[5] = ((cdxa->cdxa_addr) & 0x000000ff);
		cdb[6] = (((cdxa->cdxa_length) & 0xff000000) >> 24);
		cdb[7] = (((cdxa->cdxa_length) & 0x00ff0000) >> 16);
		cdb[8] = (((cdxa->cdxa_length) & 0x0000ff00) >> 8);
		cdb[9] = ((cdxa->cdxa_length) & 0x000000ff);
		cdb[10] = cdxa->cdxa_format;
	}
	com->uscsi_cdb = cdb;
	com->uscsi_cdblen = CDB_GROUP5;
	com->uscsi_bufaddr = (caddr_t)cdxa->cdxa_data;
	com->uscsi_buflen = buflen;
	com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT|USCSI_READ;
	rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_USERSPACE,
	    SD_PATH_STANDARD);
	kmem_free(cdxa, sizeof (struct cdrom_cdxa));
	kmem_free(com, sizeof (*com));
	return (rval);
}


/*
 * Function: sr_eject()
 *
 * Description: This routine is the driver entry point for handling CD-ROM
 *		eject ioctl requests (FDEJECT, DKIOCEJECT, CDROMEJECT).
 *
 * Arguments: dev	- the device 'dev_t'
 *
 * Return Code: the code returned by sd_send_scsi_cmd()
 */

static int
sr_eject(dev_t dev)
{
	struct sd_lun	*un;
	int		rval;
	sd_ssc_t	*ssc;

	if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL ||
	    (un->un_state == SD_STATE_OFFLINE)) {
		return (ENXIO);
	}

	/*
	 * To prevent race conditions with the eject command, keep track
	 * of an eject command as it progresses. If we are already handling
	 * an eject command in the driver for the given unit and another
	 * request to eject is received, immediately return EAGAIN so we
	 * don't lose the command if the current eject command fails.
	 */
	mutex_enter(SD_MUTEX(un));
	if (un->un_f_ejecting == TRUE) {
		mutex_exit(SD_MUTEX(un));
		return (EAGAIN);
	}
	un->un_f_ejecting = TRUE;
	mutex_exit(SD_MUTEX(un));

	ssc = sd_ssc_init(un);
	rval = sd_send_scsi_DOORLOCK(ssc, SD_REMOVAL_ALLOW,
	    SD_PATH_STANDARD);
	sd_ssc_fini(ssc);

	if (rval != 0) {
		mutex_enter(SD_MUTEX(un));
		un->un_f_ejecting = FALSE;
		mutex_exit(SD_MUTEX(un));
		return (rval);
	}

	ssc = sd_ssc_init(un);
	rval = sd_send_scsi_START_STOP_UNIT(ssc, SD_TARGET_EJECT,
	    SD_PATH_STANDARD);
	sd_ssc_fini(ssc);

	if (rval == 0) {
		mutex_enter(SD_MUTEX(un));
		sr_ejected(un);
		un->un_mediastate = DKIO_EJECTED;
		un->un_f_ejecting = FALSE;
		cv_broadcast(&un->un_state_cv);
		mutex_exit(SD_MUTEX(un));
	} else {
		mutex_enter(SD_MUTEX(un));
		un->un_f_ejecting = FALSE;
		mutex_exit(SD_MUTEX(un));
	}
	return (rval);
}


/*
 * Function: sr_ejected()
 *
 * Description: This routine updates the soft state structure to invalidate
 *		the geometry information after the media has been ejected
 *		or a media eject has been detected.
 *
 * Arguments: un - driver soft state (unit) structure
 */

static void
sr_ejected(struct sd_lun *un)
{
	struct sd_errstats *stp;

	ASSERT(un != NULL);
	ASSERT(mutex_owned(SD_MUTEX(un)));

	un->un_f_blockcount_is_valid	= FALSE;
	un->un_f_tgt_blocksize_is_valid	= FALSE;
	mutex_exit(SD_MUTEX(un));
	cmlb_invalidate(un->un_cmlbhandle, (void *)SD_PATH_DIRECT_PRIORITY);
	mutex_enter(SD_MUTEX(un));

	if (un->un_errstats != NULL) {
		stp = (struct sd_errstats *)un->un_errstats->ks_data;
		stp->sd_capacity.value.ui64 = 0;
	}
}


/*
 * Function: sr_check_wp()
 *
 * Description: This routine checks the write protection of a removable
 *	media disk and hotpluggable devices via the write protect bit of
 *	the Mode Page Header device specific field. Some devices choke
 *	on an unsupported mode page; to work around this issue, this
 *	routine uses the 0x3f mode page (request all pages) for all
 *	device types.
 *
 * Arguments: dev		- the device 'dev_t'
 *
 * Return Code: int indicating if the device is write protected (1) or not (0)
 *
 * Context: Kernel thread.
 *
 */

static int
sr_check_wp(dev_t dev)
{
	struct sd_lun	*un;
	uchar_t		device_specific;
	uchar_t		*sense;
	int		hdrlen;
	int		rval = FALSE;
	int		status;
	sd_ssc_t	*ssc;

	/*
	 * Note: The return codes for this routine should be reworked to
	 * properly handle the case of a NULL softstate.
	 */
	if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) {
		return (FALSE);
	}

	if (un->un_f_cfg_is_atapi == TRUE) {
		/*
		 * The mode page contents are not required; set the allocation
		 * length for the mode page header only
		 */
		hdrlen = MODE_HEADER_LENGTH_GRP2;
		sense = kmem_zalloc(hdrlen, KM_SLEEP);
		ssc = sd_ssc_init(un);
		status = sd_send_scsi_MODE_SENSE(ssc, CDB_GROUP1, sense,
		    hdrlen, MODEPAGE_ALLPAGES, SD_PATH_STANDARD);
		sd_ssc_fini(ssc);
		if (status != 0)
			goto err_exit;
		device_specific =
		    ((struct mode_header_grp2 *)sense)->device_specific;
	} else {
		hdrlen = MODE_HEADER_LENGTH;
		sense = kmem_zalloc(hdrlen, KM_SLEEP);
		ssc = sd_ssc_init(un);
		status = sd_send_scsi_MODE_SENSE(ssc, CDB_GROUP0, sense,
		    hdrlen, MODEPAGE_ALLPAGES, SD_PATH_STANDARD);
		sd_ssc_fini(ssc);
		if (status != 0)
			goto err_exit;
		device_specific =
		    ((struct mode_header *)sense)->device_specific;
	}

	/*
	 * Report write protection from the device specific byte. Note that
	 * if the mode sense failed above, we branched to err_exit with rval
	 * still FALSE: not all disks understand this query, and such
	 * devices are assumed to be writable.
	 */
	if (device_specific & WRITE_PROTECT) {
		rval = TRUE;
	}

err_exit:
	kmem_free(sense, hdrlen);
	return (rval);
}

/*
 * Function: sr_volume_ctrl()
 *
 * Description: This routine is the driver entry point for handling CD-ROM
 *		audio output volume ioctl requests. (CDROMVOLCTRL)
 *
 * Arguments: dev	- the device 'dev_t'
 *	      data	- pointer to user audio volume control structure
 *	      flag	- this argument is a pass through to ddi_copyxxx()
 *			  directly from the mode argument of ioctl().
 *
 * Return Code: the code returned by sd_send_scsi_cmd()
 *		EFAULT if ddi_copyxxx() fails
 *		ENXIO if fail ddi_get_soft_state
 *		EINVAL if data pointer is NULL
 *
 */

static int
sr_volume_ctrl(dev_t dev, caddr_t data, int flag)
{
	struct sd_lun		*un;
	struct cdrom_volctrl	volume;
	struct cdrom_volctrl	*vol = &volume;
	uchar_t			*sense_page;
	uchar_t			*select_page;
	uchar_t			*sense;
	uchar_t			*select;
	int			sense_buflen;
	int			select_buflen;
	int			rval;
	sd_ssc_t		*ssc;

	if (data == NULL) {
		return (EINVAL);
	}

	if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL ||
	    (un->un_state == SD_STATE_OFFLINE)) {
		return (ENXIO);
	}

	if (ddi_copyin(data, vol, sizeof (struct cdrom_volctrl), flag)) {
		return (EFAULT);
	}

	if ((un->un_f_cfg_is_atapi == TRUE) || (un->un_f_mmc_cap == TRUE)) {
		struct mode_header_grp2		*sense_mhp;
		struct mode_header_grp2		*select_mhp;
		int				bd_len;

		sense_buflen = MODE_PARAM_LENGTH_GRP2 +
		    MODEPAGE_AUDIO_CTRL_LEN;
		select_buflen = MODE_HEADER_LENGTH_GRP2 +
		    MODEPAGE_AUDIO_CTRL_LEN;
		sense = kmem_zalloc(sense_buflen, KM_SLEEP);
		select = kmem_zalloc(select_buflen, KM_SLEEP);
		ssc = sd_ssc_init(un);
		rval = sd_send_scsi_MODE_SENSE(ssc, CDB_GROUP1, sense,
		    sense_buflen, MODEPAGE_AUDIO_CTRL,
		    SD_PATH_STANDARD);
		sd_ssc_fini(ssc);

		if (rval != 0) {
			SD_ERROR(SD_LOG_IOCTL_RMMEDIA, un,
			    "sr_volume_ctrl: Mode Sense Failed\n");
			kmem_free(sense, sense_buflen);
			kmem_free(select, select_buflen);
			return (rval);
		}
		sense_mhp = (struct mode_header_grp2 *)sense;
		select_mhp = (struct mode_header_grp2 *)select;
		bd_len = (sense_mhp->bdesc_length_hi << 8) |
		    sense_mhp->bdesc_length_lo;
		if (bd_len > MODE_BLK_DESC_LENGTH) {
			scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
			    "sr_volume_ctrl: Mode Sense returned invalid "
			    "block descriptor length\n");
			kmem_free(sense, sense_buflen);
			kmem_free(select, select_buflen);
			return (EIO);
		}
		sense_page = (uchar_t *)
		    (sense + MODE_HEADER_LENGTH_GRP2 + bd_len);
		select_page = (uchar_t *)(select + MODE_HEADER_LENGTH_GRP2);
		select_mhp->length_msb = 0;
		select_mhp->length_lsb = 0;
		select_mhp->bdesc_length_hi = 0;
		select_mhp->bdesc_length_lo = 0;
	} else {
		struct mode_header		*sense_mhp, *select_mhp;

		sense_buflen = MODE_PARAM_LENGTH + MODEPAGE_AUDIO_CTRL_LEN;
		select_buflen = MODE_HEADER_LENGTH + MODEPAGE_AUDIO_CTRL_LEN;
		sense = kmem_zalloc(sense_buflen, KM_SLEEP);
		select = kmem_zalloc(select_buflen, KM_SLEEP);
		ssc = sd_ssc_init(un);
		rval = sd_send_scsi_MODE_SENSE(ssc, CDB_GROUP0, sense,
		    sense_buflen, MODEPAGE_AUDIO_CTRL,
		    SD_PATH_STANDARD);
		sd_ssc_fini(ssc);

		if (rval != 0) {
			scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
			    "sr_volume_ctrl: Mode Sense Failed\n");
			kmem_free(sense, sense_buflen);
			kmem_free(select, select_buflen);
			return (rval);
		}
		sense_mhp  = (struct mode_header *)sense;
		select_mhp = (struct mode_header *)select;
		if (sense_mhp->bdesc_length > MODE_BLK_DESC_LENGTH) {
			scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
			    "sr_volume_ctrl: Mode Sense returned invalid "
			    "block descriptor length\n");
			kmem_free(sense, sense_buflen);
			kmem_free(select, select_buflen);
			return (EIO);
		}
		sense_page = (uchar_t *)
		    (sense + MODE_HEADER_LENGTH + sense_mhp->bdesc_length);
		select_page = (uchar_t *)(select + MODE_HEADER_LENGTH);
		select_mhp->length = 0;
		select_mhp->bdesc_length = 0;
	}
	/*
	 * Note: An audio control data structure could be created and overlaid
	 * on the following in place of the array indexing method implemented.
	 */

	/* Build the select data for the user volume data */
	select_page[0] = MODEPAGE_AUDIO_CTRL;
	select_page[1] = 0xE;
	/* Set the immediate bit */
	select_page[2] = 0x04;
	/* Zero out reserved fields */
	select_page[3] = 0x00;
	select_page[4] = 0x00;
	/* Return sense data for fields not to be modified */
	select_page[5] = sense_page[5];
	select_page[6] = sense_page[6];
	select_page[7] = sense_page[7];
	/* Set the user specified volume levels for channel 0 and 1 */
	select_page[8] = 0x01;
	select_page[9] = vol->channel0;
	select_page[10] = 0x02;
	select_page[11] = vol->channel1;
	/* Channels 2 and 3 are currently unsupported so return sense data */
	select_page[12] = sense_page[12];
	select_page[13] = sense_page[13];
	select_page[14] = sense_page[14];
	select_page[15] = sense_page[15];

	ssc = sd_ssc_init(un);
	if ((un->un_f_cfg_is_atapi == TRUE) || (un->un_f_mmc_cap == TRUE)) {
		rval = sd_send_scsi_MODE_SELECT(ssc, CDB_GROUP1, select,
		    select_buflen, SD_DONTSAVE_PAGE, SD_PATH_STANDARD);
	} else {
		rval = sd_send_scsi_MODE_SELECT(ssc, CDB_GROUP0, select,
		    select_buflen, SD_DONTSAVE_PAGE, SD_PATH_STANDARD);
	}
	sd_ssc_fini(ssc);

	kmem_free(sense, sense_buflen);
	kmem_free(select, select_buflen);
	return (rval);
}


/*
 * Function: sr_read_sony_session_offset()
 *
 * Description: This routine is the driver entry point for handling CD-ROM
 *		ioctl requests for session offset information.
 *		(CDROMREADOFFSET) The address of the first track in the last
 *		session of a multi-session CD-ROM is returned.
 *
 *		Note: This routine uses a vendor specific key value in the
 *		command control field without implementing any vendor check
 *		here or in the ioctl routine.
 *
 * Arguments: dev	- the device 'dev_t'
 *	      data	- pointer to an int to hold the requested address
 *	      flag	- this argument is a pass through to ddi_copyxxx()
 *			  directly from the mode argument of ioctl().
 *
 * Return Code: the code returned by sd_send_scsi_cmd()
 *		EFAULT if ddi_copyxxx() fails
 *		ENXIO if fail ddi_get_soft_state
 *		EINVAL if data pointer is NULL
 */

static int
sr_read_sony_session_offset(dev_t dev, caddr_t data, int flag)
{
	struct sd_lun		*un;
	struct uscsi_cmd	*com;
	caddr_t			buffer;
	char			cdb[CDB_GROUP1];
	int			session_offset = 0;
	int			rval;

	if (data == NULL) {
		return (EINVAL);
	}

	if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL ||
	    (un->un_state == SD_STATE_OFFLINE)) {
		return (ENXIO);
	}

	buffer = kmem_zalloc((size_t)SONY_SESSION_OFFSET_LEN, KM_SLEEP);
	bzero(cdb, CDB_GROUP1);
	cdb[0] = SCMD_READ_TOC;
	/*
	 * Bytes 7 & 8 are the 12 byte allocation length for a single entry.
	 * (4 byte TOC response header + 8 byte response data)
	 */
	cdb[8] = SONY_SESSION_OFFSET_LEN;
	/* Byte 9 is the control byte. A vendor specific value is used */
	cdb[9] = SONY_SESSION_OFFSET_KEY;
	com = kmem_zalloc(sizeof (*com), KM_SLEEP);
	com->uscsi_cdb = cdb;
	com->uscsi_cdblen = CDB_GROUP1;
	com->uscsi_bufaddr = buffer;
	com->uscsi_buflen = SONY_SESSION_OFFSET_LEN;
	com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT|USCSI_READ;

	rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_SYSSPACE,
	    SD_PATH_STANDARD);
	if (rval != 0) {
		kmem_free(buffer, SONY_SESSION_OFFSET_LEN);
		kmem_free(com, sizeof (*com));
		return (rval);
	}
	if (buffer[1] == SONY_SESSION_OFFSET_VALID) {
		session_offset =
		    ((uchar_t)buffer[8] << 24) + ((uchar_t)buffer[9] << 16) +
		    ((uchar_t)buffer[10] << 8) + ((uchar_t)buffer[11]);
		/*
		 * The offset is returned in units of the current lbasize.
		 * Convert it to 2K blocks before returning it to the user.
		 */
		if (un->un_tgt_blocksize == CDROM_BLK_512) {
			session_offset >>= 2;
		} else if (un->un_tgt_blocksize == CDROM_BLK_1024) {
			session_offset >>= 1;
		}
	}

	if (ddi_copyout(&session_offset, data, sizeof (int), flag) != 0) {
		rval = EFAULT;
	}

	kmem_free(buffer, SONY_SESSION_OFFSET_LEN);
	kmem_free(com, sizeof (*com));
	return (rval);
}


/*
 * Function: sd_wm_cache_constructor()
 *
 * Description: Cache Constructor for the wmap cache for the read/modify/write
 *		devices.
 *
 * Arguments: wm	- A pointer to the sd_w_map to be initialized.
 *	      un	- sd_lun structure for the device.
 *	      flag	- the km flags passed to constructor
 *
 * Return Code: 0 on success.
 *		-1 on failure.
 */

/*ARGSUSED*/
static int
sd_wm_cache_constructor(void *wm, void *un, int flags)
{
	bzero(wm, sizeof (struct sd_w_map));
	cv_init(&((struct sd_w_map *)wm)->wm_avail, NULL, CV_DRIVER, NULL);
	return (0);
}


/*
 * Function: sd_wm_cache_destructor()
 *
 * Description: Cache destructor for the wmap cache for the read/modify/write
 *		devices.
 *
 * Arguments: wm	- A pointer to the sd_w_map to be destroyed.
 *	      un	- sd_lun structure for the device.
 */
/*ARGSUSED*/
static void
sd_wm_cache_destructor(void *wm, void *un)
{
	cv_destroy(&((struct sd_w_map *)wm)->wm_avail);
}


/*
 * Function: sd_range_lock()
 *
 * Description: Lock the range of blocks specified as parameter to ensure
 *		that a read-modify-write is atomic and that no other I/O
 *		writes to the same location. The range is specified in
 *		terms of start and end blocks. Block numbers are the actual
 *		media block numbers, not system block numbers.
 *
 * Arguments: un	- sd_lun structure for the device.
 *	      startb	- The starting block number
 *	      endb	- The end block number
 *	      typ	- type of i/o - simple/read_modify_write
 *
 * Return Code: wm	- pointer to the wmap structure.
 *
 * Context: This routine can sleep.
 */

static struct sd_w_map *
sd_range_lock(struct sd_lun *un, daddr_t startb, daddr_t endb, ushort_t typ)
{
	struct sd_w_map *wmp = NULL;
	struct sd_w_map *sl_wmp = NULL;
	struct sd_w_map *tmp_wmp;
	wm_state state = SD_WM_CHK_LIST;


	ASSERT(un != NULL);
	ASSERT(!mutex_owned(SD_MUTEX(un)));

	mutex_enter(SD_MUTEX(un));

	while (state != SD_WM_DONE) {

		switch (state) {
		case SD_WM_CHK_LIST:
			/*
			 * This is the starting state. Check the wmap list
			 * to see if the range is currently available.
			 */
			if (!(typ & SD_WTYPE_RMW) && !(un->un_rmw_count)) {
				/*
				 * If this is a simple write and no rmw
				 * i/o is pending then try to lock the
				 * range as the range should be available.
				 */
				state = SD_WM_LOCK_RANGE;
			} else {
				tmp_wmp = sd_get_range(un, startb, endb);
				if (tmp_wmp != NULL) {
					if ((wmp != NULL) && ONLIST(un, wmp)) {
						/*
						 * Should not keep onlist wmps
						 * while waiting; this macro
						 * will also do wmp = NULL;
						 */
						FREE_ONLIST_WMAP(un, wmp);
					}
					/*
					 * sl_wmp is the wmap on which the
					 * wait is done. Since tmp_wmp points
					 * to the in-use wmap, set sl_wmp to
					 * tmp_wmp and change the state to
					 * sleep.
					 */
					sl_wmp = tmp_wmp;
					state = SD_WM_WAIT_MAP;
				} else {
					state = SD_WM_LOCK_RANGE;
				}

			}
			break;

		case SD_WM_LOCK_RANGE:
			ASSERT(un->un_wm_cache);
			/*
			 * The range needs to be locked; try to get a wmap.
			 * First attempt it with KM_NOSLEEP: we want to avoid
			 * sleeping if possible, as we would have to release
			 * the sd mutex in order to sleep.
			 */
			if (wmp == NULL)
				wmp = kmem_cache_alloc(un->un_wm_cache,
				    KM_NOSLEEP);
			if (wmp == NULL) {
				mutex_exit(SD_MUTEX(un));
				_NOTE(DATA_READABLE_WITHOUT_LOCK
				    (sd_lun::un_wm_cache))
				wmp = kmem_cache_alloc(un->un_wm_cache,
				    KM_SLEEP);
				mutex_enter(SD_MUTEX(un));
				/*
				 * We released the mutex, so recheck by going
				 * back to the check-list state.
				 */
				state = SD_WM_CHK_LIST;
			} else {
				/*
				 * We exit the state machine since we have
				 * the wmap. Do the housekeeping first:
				 * place the wmap on the wmap list if it is
				 * not on it already, and then set the state
				 * to done.
				 */
				wmp->wm_start = startb;
				wmp->wm_end = endb;
				wmp->wm_flags = typ | SD_WM_BUSY;
				if (typ & SD_WTYPE_RMW) {
					un->un_rmw_count++;
				}
				/*
				 * If not already on the list, then link it in.
				 */
				if (!ONLIST(un, wmp)) {
					wmp->wm_next = un->un_wm;
					wmp->wm_prev = NULL;
					if (wmp->wm_next)
						wmp->wm_next->wm_prev = wmp;
					un->un_wm = wmp;
				}
				state = SD_WM_DONE;
			}
			break;

		case SD_WM_WAIT_MAP:
			ASSERT(sl_wmp->wm_flags & SD_WM_BUSY);
			/*
			 * Wait is done on sl_wmp, which is set in the
			 * check_list state.
			 */
			sl_wmp->wm_wanted_count++;
			cv_wait(&sl_wmp->wm_avail, SD_MUTEX(un));
			sl_wmp->wm_wanted_count--;
			/*
			 * We can reuse the memory from the completed sl_wmp
			 * lock range for our new lock, but only if no one is
			 * waiting for it.
			 */
			ASSERT(!(sl_wmp->wm_flags & SD_WM_BUSY));
			if (sl_wmp->wm_wanted_count == 0) {
				if (wmp != NULL)
					CHK_N_FREEWMP(un, wmp);
				wmp = sl_wmp;
			}
			sl_wmp = NULL;
			/*
			 * After waking up, need to recheck for availability of
			 * range.
			 */
			state = SD_WM_CHK_LIST;
			break;

		default:
			panic("sd_range_lock: "
			    "Unknown state %d in sd_range_lock", state);
			/*NOTREACHED*/
		} /* switch(state) */

	} /* while(state != SD_WM_DONE) */

	mutex_exit(SD_MUTEX(un));

	ASSERT(wmp != NULL);

	return (wmp);
}


/*
 * Function: sd_get_range()
 *
 * Description: Find whether there is any I/O overlapping this one.
 *		Returns the write map of the first such I/O, NULL otherwise.
 *
 * Arguments: un	- sd_lun structure for the device.
 *	      startb	- The starting block number
 *	      endb	- The end block number
 *
 * Return Code: wm	- pointer to the wmap structure.
 */

static struct sd_w_map *
sd_get_range(struct sd_lun *un, daddr_t startb, daddr_t endb)
{
	struct sd_w_map *wmp;

	ASSERT(un != NULL);

	for (wmp = un->un_wm; wmp != NULL; wmp = wmp->wm_next) {
		if (!(wmp->wm_flags & SD_WM_BUSY)) {
			continue;
		}
		if ((startb >= wmp->wm_start) && (startb <= wmp->wm_end)) {
			break;
		}
		if ((endb >= wmp->wm_start) && (endb <= wmp->wm_end)) {
			break;
		}
	}

	return (wmp);
}


/*
 * Function: sd_free_inlist_wmap()
 *
 * Description: Unlink and free a write map struct.
 *
 * Arguments: un	- sd_lun structure for the device.
 *	      wmp	- sd_w_map which needs to be unlinked.
 */

static void
sd_free_inlist_wmap(struct sd_lun *un, struct sd_w_map *wmp)
{
	ASSERT(un != NULL);

	if (un->un_wm == wmp) {
		un->un_wm = wmp->wm_next;
	} else {
		wmp->wm_prev->wm_next = wmp->wm_next;
	}

	if (wmp->wm_next) {
		wmp->wm_next->wm_prev = wmp->wm_prev;
	}

	wmp->wm_next = wmp->wm_prev = NULL;

	kmem_cache_free(un->un_wm_cache, wmp);
}


/*
 * Function: sd_range_unlock()
 *
 * Description: Unlock the range locked by wm.
 *		Free the write map if nobody else is waiting on it.
 *
 * Arguments: un	- sd_lun structure for the device.
 *	      wm	- sd_w_map which needs to be unlocked.
 */

static void
sd_range_unlock(struct sd_lun *un, struct sd_w_map *wm)
{
	ASSERT(un != NULL);
	ASSERT(wm != NULL);
	ASSERT(!mutex_owned(SD_MUTEX(un)));

	mutex_enter(SD_MUTEX(un));

	if (wm->wm_flags & SD_WTYPE_RMW) {
		un->un_rmw_count--;
	}

	if (wm->wm_wanted_count) {
		wm->wm_flags = 0;
		/*
		 * Broadcast that the wmap is available now.
		 */
		cv_broadcast(&wm->wm_avail);
	} else {
		/*
		 * If no one is waiting on the map, it should be freed.
		 */
		sd_free_inlist_wmap(un, wm);
	}

	mutex_exit(SD_MUTEX(un));
}


/*
 * Function: sd_read_modify_write_task
 *
 * Description: Called from a taskq thread to initiate the write phase of
 *		a read-modify-write request. This is used for targets where
 *		un->un_sys_blocksize != un->un_tgt_blocksize.
 *
 * Arguments: arg - a pointer to the buf(9S) struct for the write command.
 *
 * Context: Called under taskq thread context.
 */

static void
sd_read_modify_write_task(void *arg)
{
	struct sd_mapblocksize_info	*bsp;
	struct buf	*bp;
	struct sd_xbuf	*xp;
	struct sd_lun	*un;

	bp = arg;	/* The bp is given in arg */
	ASSERT(bp != NULL);

	/* Get the pointer to the layer-private data struct */
	xp = SD_GET_XBUF(bp);
	ASSERT(xp != NULL);
	bsp = xp->xb_private;
	ASSERT(bsp != NULL);

	un = SD_GET_UN(bp);
	ASSERT(un != NULL);
	ASSERT(!mutex_owned(SD_MUTEX(un)));

	SD_TRACE(SD_LOG_IO_RMMEDIA, un,
	    "sd_read_modify_write_task: entry: buf:0x%p\n", bp);

	/*
	 * This is the write phase of a read-modify-write request, called
	 * under the context of a taskq thread in response to the read
	 * portion of the rmw request completing under interrupt context.
	 * The write request must be sent from here down the iostart chain
	 * as if it were being sent from sd_mapblocksize_iostart(), so we
	 * use the layer index saved in the layer-private data area.
	 */
	SD_NEXT_IOSTART(bsp->mbs_layer_index, un, bp);

	SD_TRACE(SD_LOG_IO_RMMEDIA, un,
	    "sd_read_modify_write_task: exit: buf:0x%p\n", bp);
}


/*
 * Function: sddump_do_read_of_rmw()
 *
 * Description: This routine is called from sddump(). If sddump() is called
 *		with an I/O that is not aligned on a device blocksize
 *		boundary, then the write has to be converted to a
 *		read-modify-write. The read part is done here in order to
 *		keep sddump() simple. Note that sd_mutex is held across the
 *		call to this routine.
 *
 * Arguments: un	- sd_lun
 *	      blkno	- block number in terms of media block size.
 *	      nblk	- number of blocks.
 *	      bpp	- pointer to pointer to the buf structure. On return
 *			  from this function, *bpp points to the valid buffer
 *			  to which the write has to be done.
 *
 * Return Code: 0 for success or errno-type return code
 */

static int
sddump_do_read_of_rmw(struct sd_lun *un, uint64_t blkno, uint64_t nblk,
    struct buf **bpp)
{
	int err;
	int i;
	int rval;
	struct buf *bp;
	struct scsi_pkt *pkt = NULL;
	uint32_t target_blocksize;

	ASSERT(un != NULL);
	ASSERT(mutex_owned(SD_MUTEX(un)));

	target_blocksize = un->un_tgt_blocksize;

	mutex_exit(SD_MUTEX(un));

	bp = scsi_alloc_consistent_buf(SD_ADDRESS(un), (struct buf *)NULL,
	    (size_t)(nblk * target_blocksize), B_READ, NULL_FUNC, NULL);
	if (bp == NULL) {
		scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
		    "no resources for dumping; giving up");
		err = ENOMEM;
		goto done;
	}

	rval = sd_setup_rw_pkt(un, &pkt, bp, 0, NULL_FUNC, NULL,
	    blkno, nblk);
	if (rval != 0) {
		scsi_free_consistent_buf(bp);
		scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
		    "no resources for dumping; giving up");
		err = ENOMEM;
		goto done;
	}

	pkt->pkt_flags |= FLAG_NOINTR;

	err = EIO;
	for (i = 0; i < SD_NDUMP_RETRIES; i++) {

		/*
		 * scsi_poll() returns 0 (success) if the command completes and
		 * the status block is STATUS_GOOD. We should only check
		 * errors if this condition is not true. Even then we should
		 * send our own request sense packet only if we have a check
		 * condition and auto request sense has not been performed by
		 * the HBA.
		 */
		SD_TRACE(SD_LOG_DUMP, un, "sddump: sending read\n");

		if ((sd_scsi_poll(un, pkt) == 0) && (pkt->pkt_resid == 0)) {
			err = 0;
			break;
		}

		/*
		 * Check CMD_DEV_GONE first; give up if the device is gone,
		 * no need to read RQS data.
		 */
		if (pkt->pkt_reason == CMD_DEV_GONE) {
			scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
			    "Error while dumping state with rmw..."
			    "Device is gone\n");
			break;
		}

		if (SD_GET_PKT_STATUS(pkt) == STATUS_CHECK) {
			SD_INFO(SD_LOG_DUMP, un,
			    "sddump: read failed with CHECK, try # %d\n", i);
			if (((pkt->pkt_state & STATE_ARQ_DONE) == 0)) {
				(void) sd_send_polled_RQS(un);
			}

			continue;
		}

		if (SD_GET_PKT_STATUS(pkt) == STATUS_BUSY) {
			int reset_retval = 0;

			SD_INFO(SD_LOG_DUMP, un,
			    "sddump: read failed with BUSY, try # %d\n", i);

			if (un->un_f_lun_reset_enabled == TRUE) {
				reset_retval = scsi_reset(SD_ADDRESS(un),
				    RESET_LUN);
			}
			if (reset_retval == 0) {
				(void) scsi_reset(SD_ADDRESS(un),
				    RESET_TARGET);
			}
			(void) sd_send_polled_RQS(un);

		} else {
			SD_INFO(SD_LOG_DUMP, un,
			    "sddump: read failed with 0x%x, try # %d\n",
			    SD_GET_PKT_STATUS(pkt), i);
			mutex_enter(SD_MUTEX(un));
			sd_reset_target(un, pkt);
			mutex_exit(SD_MUTEX(un));
		}

		/*
		 * If we are not getting anywhere with lun/target resets,
		 * let's reset the bus.
		 */
		if (i > SD_NDUMP_RETRIES/2) {
			(void) scsi_reset(SD_ADDRESS(un), RESET_ALL);
			(void) sd_send_polled_RQS(un);
		}

	}
	scsi_destroy_pkt(pkt);

	if (err != 0) {
		scsi_free_consistent_buf(bp);
		*bpp = NULL;
	} else {
		*bpp = bp;
	}

done:
	mutex_enter(SD_MUTEX(un));
	return (err);
}


/*
 * Function: sd_failfast_flushq
 *
 * Description: Take all bp's on the wait queue that have B_FAILFAST set
 *		in b_flags and move them onto the failfast queue, then kick
 *		off a thread to return all bp's on the failfast queue to
 *		their owners with an error set.
 *
 * Arguments: un - pointer to the soft state struct for the instance.
 *
 * Context: may execute in interrupt context.
 */

static void
sd_failfast_flushq(struct sd_lun *un)
{
	struct buf *bp;
	struct buf *next_waitq_bp;
	struct buf *prev_waitq_bp = NULL;

	ASSERT(un != NULL);
	ASSERT(mutex_owned(SD_MUTEX(un)));
	ASSERT(un->un_failfast_state == SD_FAILFAST_ACTIVE);
	ASSERT(un->un_failfast_bp == NULL);

	SD_TRACE(SD_LOG_IO_FAILFAST, un,
	    "sd_failfast_flushq: entry: un:0x%p\n", un);

	/*
	 * Check if we should flush all bufs when entering failfast state, or
	 * just those with B_FAILFAST set.
	 */
	if (sd_failfast_flushctl & SD_FAILFAST_FLUSH_ALL_BUFS) {
		/*
		 * Move *all* bp's on the wait queue to the failfast flush
		 * queue, including those that do NOT have B_FAILFAST set.
		 */
		if (un->un_failfast_headp == NULL) {
			ASSERT(un->un_failfast_tailp == NULL);
			un->un_failfast_headp = un->un_waitq_headp;
		} else {
			ASSERT(un->un_failfast_tailp != NULL);
			un->un_failfast_tailp->av_forw = un->un_waitq_headp;
		}

		un->un_failfast_tailp = un->un_waitq_tailp;

		/* update kstat for each bp moved out of the waitq */
		for (bp = un->un_waitq_headp; bp != NULL; bp = bp->av_forw) {
			SD_UPDATE_KSTATS(un, kstat_waitq_exit, bp);
		}

		/* empty the waitq */
		un->un_waitq_headp = un->un_waitq_tailp = NULL;

	} else {
		/*
		 * Go through the wait queue, pick off all entries with
		 * B_FAILFAST set, and move these onto the failfast queue.
		 */
		for (bp = un->un_waitq_headp; bp != NULL; bp = next_waitq_bp) {
			/*
			 * Save the pointer to the next bp on the wait queue,
			 * so we get to it on the next iteration of this loop.
			 */
			next_waitq_bp = bp->av_forw;

			/*
			 * If this bp from the wait queue does NOT have
			 * B_FAILFAST set, just move on to the next element
			 * in the wait queue. Note, this is the only place
			 * where it is correct to set prev_waitq_bp.
			 */
			if ((bp->b_flags & B_FAILFAST) == 0) {
				prev_waitq_bp = bp;
				continue;
			}

			/*
			 * Remove the bp from the wait queue.
			 */
			if (bp == un->un_waitq_headp) {
				/* The bp is the first element of the waitq. */
				un->un_waitq_headp = next_waitq_bp;
				if (un->un_waitq_headp == NULL) {
					/* The wait queue is now empty */
					un->un_waitq_tailp = NULL;
				}
			} else {
				/*
				 * The bp is either somewhere in the middle
				 * or at the end of the wait queue.
				 */
				ASSERT(un->un_waitq_headp != NULL);
				ASSERT(prev_waitq_bp != NULL);
				ASSERT((prev_waitq_bp->b_flags & B_FAILFAST)
				    == 0);
				if (bp == un->un_waitq_tailp) {
					/* bp is the last entry on the waitq. */
					ASSERT(next_waitq_bp == NULL);
					un->un_waitq_tailp = prev_waitq_bp;
				}
				prev_waitq_bp->av_forw = next_waitq_bp;
			}
			bp->av_forw = NULL;

			/*
			 * update kstat since the bp is moved out of
			 * the waitq
			 */
			SD_UPDATE_KSTATS(un, kstat_waitq_exit, bp);

			/*
			 * Now put the bp onto the failfast queue.
			 */
			if (un->un_failfast_headp == NULL) {
				/* failfast queue is currently empty */
				ASSERT(un->un_failfast_tailp == NULL);
				un->un_failfast_headp =
				    un->un_failfast_tailp = bp;
			} else {
				/* Add the bp to the end of the failfast q */
				ASSERT(un->un_failfast_tailp != NULL);
				ASSERT(un->un_failfast_tailp->b_flags &
				    B_FAILFAST);
				un->un_failfast_tailp->av_forw = bp;
				un->un_failfast_tailp = bp;
			}
		}
	}

	/*
	 * Now return all bp's on the failfast queue to their owners.
	 */
	while ((bp = un->un_failfast_headp) != NULL) {

		un->un_failfast_headp = bp->av_forw;
		if (un->un_failfast_headp == NULL) {
			un->un_failfast_tailp = NULL;
		}

		/*
		 * We want to return the bp with a failure error code, but
		 * we do not want a call to sd_start_cmds() to occur here,
		 * so use sd_return_failed_command_no_restart() instead of
		 * sd_return_failed_command().
		 */
		sd_return_failed_command_no_restart(un, bp, EIO);
	}

	/* Flush the xbuf queues if required. */
	if (sd_failfast_flushctl & SD_FAILFAST_FLUSH_ALL_QUEUES) {
		ddi_xbuf_flushq(un->un_xbuf_attr, sd_failfast_flushq_callback);
	}

	SD_TRACE(SD_LOG_IO_FAILFAST, un,
	    "sd_failfast_flushq: exit: un:0x%p\n", un);
}


/*
 * Function: sd_failfast_flushq_callback
 *
 * Description: Return TRUE if the given bp meets the criteria for failfast
 *		flushing. Used with ddi_xbuf_flushq(9F).
 *
 * Arguments: bp - ptr to buf struct to be examined.
 *
 * Context: Any
 */

static int
sd_failfast_flushq_callback(struct buf *bp)
{
	/*
	 * Return TRUE if (1) we want to flush ALL bufs when the failfast
	 * state is entered; OR (2) the given bp has B_FAILFAST set.
	 */
	return (((sd_failfast_flushctl & SD_FAILFAST_FLUSH_ALL_BUFS) ||
	    (bp->b_flags & B_FAILFAST)) ? TRUE : FALSE);
}



/*
 * Function: sd_setup_next_xfer
 *
 * Description: Prepare next I/O operation using DMA_PARTIAL
 *
 */

static int
sd_setup_next_xfer(struct sd_lun *un, struct buf *bp,
    struct scsi_pkt *pkt, struct sd_xbuf *xp)
{
	ssize_t	num_blks_not_xfered;
	daddr_t	strt_blk_num;
	ssize_t	bytes_not_xfered;
	int	rval;

	ASSERT(pkt->pkt_resid == 0);

	/*
	 * Calculate next block number and amount to be transferred.
	 *
	 * How much data has NOT been transferred to the HBA yet.
	 */
	bytes_not_xfered = xp->xb_dma_resid;

	/*
	 * Figure out how many blocks have NOT been transferred to the
	 * HBA yet.
	 */
	num_blks_not_xfered = SD_BYTES2TGTBLOCKS(un, bytes_not_xfered);

	/*
	 * Set the starting block number to the end of what WAS transferred.
	 */
	strt_blk_num = xp->xb_blkno +
	    SD_BYTES2TGTBLOCKS(un, bp->b_bcount - bytes_not_xfered);

	/*
	 * Move pkt to the next portion of the xfer. sd_setup_next_rw_pkt
	 * will call scsi_init_pkt with NULL_FUNC so we do not have to
	 * release the disk mutex here.
	 */
	rval = sd_setup_next_rw_pkt(un, pkt, bp,
	    strt_blk_num, num_blks_not_xfered);

	if (rval == 0) {

		/*
		 * Success.
		 *
		 * Adjust things if there are still more blocks to be
		 * transferred.
		 */
		xp->xb_dma_resid = pkt->pkt_resid;
		pkt->pkt_resid = 0;

		return (1);
	}

	/*
	 * There's really only one possible return value from
	 * sd_setup_next_rw_pkt which occurs when scsi_init_pkt
	 * returns NULL.
	 */
	ASSERT(rval == SD_PKT_ALLOC_FAILURE);

	bp->b_resid = bp->b_bcount;
	bp->b_flags |= B_ERROR;

	scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
	    "Error setting up next portion of DMA transfer\n");

	return (0);
}

/*
 * Function: sd_panic_for_res_conflict
 *
 * Description: Call panic with a string formatted with "Reservation Conflict"
 *		and a human readable identifier indicating the SD instance
 *		that experienced the reservation conflict.
 *
 * Arguments: un - pointer to the soft state struct for the instance.
 *
 * Context: may execute in interrupt context.
 */

#define	SD_RESV_CONFLICT_FMT_LEN 40
void
sd_panic_for_res_conflict(struct sd_lun *un)
{
	char panic_str[SD_RESV_CONFLICT_FMT_LEN+MAXPATHLEN];
	char path_str[MAXPATHLEN];

	(void) snprintf(panic_str, sizeof (panic_str),
	    "Reservation Conflict\nDisk: %s",
	    ddi_pathname(SD_DEVINFO(un), path_str));

	panic(panic_str);
}

/*
 * Note: The following sd_faultinjection_ioctl() routines implement
 * driver support for fault injection, used for error analysis by
 * causing faults in multiple layers of the driver.
 */

#ifdef SD_FAULT_INJECTION
static uint_t   sd_fault_injection_on = 0;

/*
 * Function: sd_faultinjection_ioctl()
 *
 * Description: This routine is the driver entry point for handling
 *		faultinjection ioctls to inject errors into the
 *		layer model.
 *
 * Arguments: cmd	- the ioctl cmd received
 *	      arg	- the user-supplied argument; also used to return data
 */

static void
sd_faultinjection_ioctl(int cmd, intptr_t arg, struct sd_lun *un)
{
	uint_t i = 0;
	uint_t rval;

	SD_TRACE(SD_LOG_IOERR, un, "sd_faultinjection_ioctl: entry\n");

	mutex_enter(SD_MUTEX(un));

	switch (cmd) {
	case SDIOCRUN:
		/* Allow pushed faults to be injected */
		SD_INFO(SD_LOG_SDTEST, un,
		    "sd_faultinjection_ioctl: Injecting Fault Run\n");

		sd_fault_injection_on = 1;

		SD_INFO(SD_LOG_IOERR, un,
		    "sd_faultinjection_ioctl: run finished\n");
		break;

	case SDIOCSTART:
		/* Start Injection Session */
		SD_INFO(SD_LOG_SDTEST, un,
		    "sd_faultinjection_ioctl: Injecting Fault Start\n");

		sd_fault_injection_on = 0;
		un->sd_injection_mask = 0xFFFFFFFF;
		for (i = 0; i < SD_FI_MAX_ERROR; i++) {
			un->sd_fi_fifo_pkt[i] = NULL;
			un->sd_fi_fifo_xb[i] = NULL;
			un->sd_fi_fifo_un[i] = NULL;
			un->sd_fi_fifo_arq[i] = NULL;
		}
		un->sd_fi_fifo_start = 0;
		un->sd_fi_fifo_end = 0;

		mutex_enter(&(un->un_fi_mutex));
		un->sd_fi_log[0] = '\0';
		un->sd_fi_buf_len = 0;
		mutex_exit(&(un->un_fi_mutex));

		SD_INFO(SD_LOG_IOERR, un,
		    "sd_faultinjection_ioctl: start finished\n");
		break;

	case SDIOCSTOP:
		/* Stop Injection Session */
		SD_INFO(SD_LOG_SDTEST, un,
		    "sd_faultinjection_ioctl: Injecting Fault Stop\n");
		sd_fault_injection_on = 0;
		un->sd_injection_mask = 0x0;

		/* Empty stray or unused structs from the fifo */
		for (i = 0; i < SD_FI_MAX_ERROR; i++) {
			if (un->sd_fi_fifo_pkt[i] != NULL) {
				kmem_free(un->sd_fi_fifo_pkt[i],
				    sizeof (struct sd_fi_pkt));
			}
			if (un->sd_fi_fifo_xb[i] != NULL) {
				kmem_free(un->sd_fi_fifo_xb[i],
				    sizeof (struct sd_fi_xb));
			}
			if (un->sd_fi_fifo_un[i] != NULL) {
				kmem_free(un->sd_fi_fifo_un[i],
				    sizeof (struct sd_fi_un));
			}
			if (un->sd_fi_fifo_arq[i] != NULL) {
				kmem_free(un->sd_fi_fifo_arq[i],
				    sizeof (struct sd_fi_arq));
			}
			un->sd_fi_fifo_pkt[i] = NULL;
			un->sd_fi_fifo_un[i] = NULL;
			un->sd_fi_fifo_xb[i] = NULL;
			un->sd_fi_fifo_arq[i] = NULL;
		}
		un->sd_fi_fifo_start = 0;
		un->sd_fi_fifo_end = 0;

		SD_INFO(SD_LOG_IOERR, un,
		    "sd_faultinjection_ioctl: stop finished\n");
		break;

	case SDIOCINSERTPKT:
		/* Store a packet struct to be pushed onto the fifo */
		SD_INFO(SD_LOG_SDTEST, un,
		    "sd_faultinjection_ioctl: Injecting Fault Insert Pkt\n");

		i = un->sd_fi_fifo_end % SD_FI_MAX_ERROR;

		sd_fault_injection_on = 0;

		/* No more than SD_FI_MAX_ERROR entries allowed in the queue */
		if (un->sd_fi_fifo_pkt[i] != NULL) {
			kmem_free(un->sd_fi_fifo_pkt[i],
			    sizeof (struct sd_fi_pkt));
		}
		if (arg != NULL) {
			un->sd_fi_fifo_pkt[i] =
			    kmem_alloc(sizeof (struct sd_fi_pkt), KM_NOSLEEP);
			if (un->sd_fi_fifo_pkt[i] == NULL) {
				/* Alloc failed; don't store anything */
				break;
			}
			rval = ddi_copyin((void *)arg, un->sd_fi_fifo_pkt[i],
			    sizeof (struct sd_fi_pkt), 0);
			if (rval == -1) {
				kmem_free(un->sd_fi_fifo_pkt[i],
				    sizeof (struct sd_fi_pkt));
				un->sd_fi_fifo_pkt[i] = NULL;
			}
		} else {
			SD_INFO(SD_LOG_IOERR, un,
			    "sd_faultinjection_ioctl: pkt null\n");
		}
		break;

	case SDIOCINSERTXB:
		/* Store an xb struct to be pushed onto the fifo */
		SD_INFO(SD_LOG_SDTEST, un,
		    "sd_faultinjection_ioctl: Injecting Fault Insert XB\n");

		i = un->sd_fi_fifo_end % SD_FI_MAX_ERROR;

		sd_fault_injection_on = 0;

		if (un->sd_fi_fifo_xb[i] != NULL) {
			kmem_free(un->sd_fi_fifo_xb[i],
			    sizeof (struct sd_fi_xb));
			un->sd_fi_fifo_xb[i] = NULL;
		}
		if (arg != NULL) {
			un->sd_fi_fifo_xb[i] =
			    kmem_alloc(sizeof (struct sd_fi_xb), KM_NOSLEEP);
			if (un->sd_fi_fifo_xb[i] == NULL) {
				/* Alloc failed; don't store anything */
				break;
			}
			rval = ddi_copyin((void *)arg, un->sd_fi_fifo_xb[i],
			    sizeof (struct sd_fi_xb), 0);

			if (rval == -1) {
				kmem_free(un->sd_fi_fifo_xb[i],
				    sizeof (struct sd_fi_xb));
				un->sd_fi_fifo_xb[i] = NULL;
			}
		} else {
			SD_INFO(SD_LOG_IOERR, un,
			    "sd_faultinjection_ioctl: xb null\n");
		}
		break;

	case SDIOCINSERTUN:
		/* Store an un struct to be pushed onto the fifo */
		SD_INFO(SD_LOG_SDTEST, un,
		    "sd_faultinjection_ioctl: Injecting Fault Insert UN\n");

		i = un->sd_fi_fifo_end % SD_FI_MAX_ERROR;

		sd_fault_injection_on = 0;

		if (un->sd_fi_fifo_un[i] != NULL) {
			kmem_free(un->sd_fi_fifo_un[i],
			    sizeof (struct sd_fi_un));
			un->sd_fi_fifo_un[i] = NULL;
		}
		if (arg != NULL) {
			un->sd_fi_fifo_un[i] =
			    kmem_alloc(sizeof (struct sd_fi_un), KM_NOSLEEP);
			if (un->sd_fi_fifo_un[i] == NULL) {
				/* Alloc failed; don't store anything */
				break;
			}
			rval = ddi_copyin((void *)arg, un->sd_fi_fifo_un[i],
			    sizeof (struct sd_fi_un), 0);
			if (rval == -1) {
				kmem_free(un->sd_fi_fifo_un[i],
				    sizeof (struct sd_fi_un));
				un->sd_fi_fifo_un[i] = NULL;
			}

		} else {
			SD_INFO(SD_LOG_IOERR, un,
			    "sd_faultinjection_ioctl: un null\n");
		}

		break;

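	/*
	 * Note (descriptive only): the SDIOCINSERT* cases above and below
	 * only stage structs in the current fifo slot; a typical session
	 * is expected to be SDIOCSTART, one or more SDIOCINSERT* calls,
	 * SDIOCPUSH to advance the fifo end, SDIOCRUN to arm injection,
	 * and finally SDIOCRETRIEVE and SDIOCSTOP.
	 */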
	case SDIOCINSERTARQ:
		/* Store an arq struct to be pushed onto the fifo */
		SD_INFO(SD_LOG_SDTEST, un,
		    "sd_faultinjection_ioctl: Injecting Fault Insert ARQ\n");
		i = un->sd_fi_fifo_end % SD_FI_MAX_ERROR;

		sd_fault_injection_on = 0;

		if (un->sd_fi_fifo_arq[i] != NULL) {
			kmem_free(un->sd_fi_fifo_arq[i],
			    sizeof (struct sd_fi_arq));
			un->sd_fi_fifo_arq[i] = NULL;
		}
		if (arg != NULL) {
			un->sd_fi_fifo_arq[i] =
			    kmem_alloc(sizeof (struct sd_fi_arq), KM_NOSLEEP);
			if (un->sd_fi_fifo_arq[i] == NULL) {
				/* Alloc failed; don't store anything */
				break;
			}
			rval = ddi_copyin((void *)arg, un->sd_fi_fifo_arq[i],
			    sizeof (struct sd_fi_arq), 0);
			if (rval == -1) {
				kmem_free(un->sd_fi_fifo_arq[i],
				    sizeof (struct sd_fi_arq));
				un->sd_fi_fifo_arq[i] = NULL;
			}

		} else {
			SD_INFO(SD_LOG_IOERR, un,
			    "sd_faultinjection_ioctl: arq null\n");
		}

		break;

	case SDIOCPUSH:
		/* Push stored xb, pkt, un, and arq onto the fifo */
		sd_fault_injection_on = 0;

		if (arg != NULL) {
			rval = ddi_copyin((void *)arg, &i, sizeof (uint_t), 0);
			if (rval != -1 &&
			    un->sd_fi_fifo_end + i < SD_FI_MAX_ERROR) {
				un->sd_fi_fifo_end += i;
			}
		} else {
			SD_INFO(SD_LOG_IOERR, un,
			    "sd_faultinjection_ioctl: push arg null\n");
			if (un->sd_fi_fifo_end + i < SD_FI_MAX_ERROR) {
				un->sd_fi_fifo_end++;
			}
		}
		SD_INFO(SD_LOG_IOERR, un,
		    "sd_faultinjection_ioctl: push to end=%d\n",
		    un->sd_fi_fifo_end);
		break;

	case SDIOCRETRIEVE:
		/* Return the log buffer from the injection session */
		SD_INFO(SD_LOG_SDTEST, un,
		    "sd_faultinjection_ioctl: Injecting Fault Retrieve");

		sd_fault_injection_on = 0;

		mutex_enter(&(un->un_fi_mutex));
		rval = ddi_copyout(un->sd_fi_log, (void *)arg,
		    un->sd_fi_buf_len+1, 0);
		mutex_exit(&(un->un_fi_mutex));

		if (rval == -1) {
			/*
			 * arg is possibly invalid; set it to NULL
			 * for the return.
			 */
			arg = NULL;
		}
		break;
	}

	mutex_exit(SD_MUTEX(un));
	SD_TRACE(SD_LOG_IOERR, un, "sd_faultinjection_ioctl: exit\n");
}


/*
 * Function: sd_injection_log()
 *
 * Description: This routine adds buf to the already existing injection log
 *		for retrieval via faultinjection_ioctl for use in fault
 *		detection and recovery.
 *
 * Arguments: buf	- the string to add to the log
 *	      un	- driver soft state (unit) structure
 */

static void
sd_injection_log(char *buf, struct sd_lun *un)
{
	uint_t len;

	ASSERT(un != NULL);
	ASSERT(buf != NULL);

	mutex_enter(&(un->un_fi_mutex));

	len = min(strlen(buf), 255);
	/* Add logged value to Injection log to be returned later */
	if (len + un->sd_fi_buf_len < SD_FI_MAX_BUF) {
		uint_t	offset = strlen((char *)un->sd_fi_log);
		char *destp = (char *)un->sd_fi_log + offset;
		int i;
		for (i = 0; i < len; i++) {
			*destp++ = *buf++;
		}
		un->sd_fi_buf_len += len;
		un->sd_fi_log[un->sd_fi_buf_len] = '\0';
	}

	mutex_exit(&(un->un_fi_mutex));
}


/*
 * Function: sd_faultinjection()
 *
 * Description: This routine takes the pkt and changes its
 *		content based on the error injection scenario.
 *
 * Arguments: pktp	- packet to be changed
 */

static void
sd_faultinjection(struct scsi_pkt *pktp)
{
	uint_t i;
	struct sd_fi_pkt *fi_pkt;
	struct sd_fi_xb *fi_xb;
	struct sd_fi_un *fi_un;
	struct sd_fi_arq *fi_arq;
	struct buf *bp;
	struct sd_xbuf *xb;
	struct sd_lun *un;

	ASSERT(pktp != NULL);

	/* pull bp, xb and un from pktp */
	bp = (struct buf *)pktp->pkt_private;
	xb = SD_GET_XBUF(bp);
	un = SD_GET_UN(bp);

	ASSERT(un != NULL);

	mutex_enter(SD_MUTEX(un));

	SD_TRACE(SD_LOG_SDTEST, un,
	    "sd_faultinjection: entry Injection from sdintr\n");

	/* if injection is off, return */
	if (sd_fault_injection_on == 0 ||
	    un->sd_fi_fifo_start == un->sd_fi_fifo_end) {
		mutex_exit(SD_MUTEX(un));
		return;
	}

	SD_INFO(SD_LOG_SDTEST, un,
	    "sd_faultinjection: is working for copying\n");

	/* take the next set off the fifo */
	i = un->sd_fi_fifo_start % SD_FI_MAX_ERROR;

	fi_pkt = un->sd_fi_fifo_pkt[i];
	fi_xb = un->sd_fi_fifo_xb[i];
	fi_un = un->sd_fi_fifo_un[i];
	fi_arq = un->sd_fi_fifo_arq[i];


	/* set variables accordingly */
	/* set pkt if it was on fifo */
	if (fi_pkt != NULL) {
		SD_CONDSET(pktp, pkt, pkt_flags, "pkt_flags");
		SD_CONDSET(*pktp, pkt, pkt_scbp, "pkt_scbp");
		if (fi_pkt->pkt_cdbp != 0xff)
			SD_CONDSET(*pktp, pkt, pkt_cdbp, "pkt_cdbp");
		SD_CONDSET(pktp, pkt, pkt_state, "pkt_state");
		SD_CONDSET(pktp, pkt, pkt_statistics, "pkt_statistics");
		SD_CONDSET(pktp, pkt, pkt_reason, "pkt_reason");

	}
	/* set xb if it was on fifo */
	if (fi_xb != NULL) {
		SD_CONDSET(xb, xb, xb_blkno, "xb_blkno");
		SD_CONDSET(xb, xb, xb_dma_resid, "xb_dma_resid");
		if (fi_xb->xb_retry_count != 0)
			SD_CONDSET(xb, xb, xb_retry_count, "xb_retry_count");
		SD_CONDSET(xb, xb, xb_victim_retry_count,
		    "xb_victim_retry_count");
		SD_CONDSET(xb, xb, xb_sense_status, "xb_sense_status");
		SD_CONDSET(xb, xb, xb_sense_state, "xb_sense_state");
		SD_CONDSET(xb, xb, xb_sense_resid, "xb_sense_resid");

		/* copy in block data from sense */
		/*
		 * if (fi_xb->xb_sense_data[0] != -1) {
		 *	bcopy(fi_xb->xb_sense_data, xb->xb_sense_data,
		 *	    SENSE_LENGTH);
		 * }
		 */
		bcopy(fi_xb->xb_sense_data, xb->xb_sense_data, SENSE_LENGTH);

		/* copy in extended sense codes */
		SD_CONDSET(((struct scsi_extended_sense *)xb->xb_sense_data),
		    xb, es_code, "es_code");
		SD_CONDSET(((struct scsi_extended_sense *)xb->xb_sense_data),
		    xb, es_key, "es_key");
		SD_CONDSET(((struct scsi_extended_sense *)xb->xb_sense_data),
		    xb, es_add_code, "es_add_code");
		SD_CONDSET(((struct scsi_extended_sense *)xb->xb_sense_data),
		    xb, es_qual_code, "es_qual_code");
		struct scsi_extended_sense *esp;
		esp = (struct scsi_extended_sense *)xb->xb_sense_data;
		esp->es_class = CLASS_EXTENDED_SENSE;
	}

	/* set un if it was on fifo */
	if (fi_un != NULL) {
		SD_CONDSET(un->un_sd->sd_inq, un, inq_rmb, "inq_rmb");
		SD_CONDSET(un, un, un_ctype, "un_ctype");
		SD_CONDSET(un, un, un_reset_retry_count,
		    "un_reset_retry_count");
		SD_CONDSET(un, un, un_reservation_type, "un_reservation_type");
		SD_CONDSET(un, un, un_resvd_status, "un_resvd_status");
		SD_CONDSET(un, un, un_f_arq_enabled, "un_f_arq_enabled");
		SD_CONDSET(un, un, un_f_allow_bus_device_reset,
		    "un_f_allow_bus_device_reset");
		SD_CONDSET(un, un, un_f_opt_queueing, "un_f_opt_queueing");

	}

	/* copy in auto request sense if it was on fifo */
	if (fi_arq != NULL) {
		bcopy(fi_arq, pktp->pkt_scbp, sizeof (struct sd_fi_arq));
	}

	/* free structs */
	if (un->sd_fi_fifo_pkt[i] != NULL) {
		kmem_free(un->sd_fi_fifo_pkt[i], sizeof (struct sd_fi_pkt));
	}
	if (un->sd_fi_fifo_xb[i] != NULL) {
		kmem_free(un->sd_fi_fifo_xb[i], sizeof (struct sd_fi_xb));
	}
	if (un->sd_fi_fifo_un[i] != NULL) {
		kmem_free(un->sd_fi_fifo_un[i], sizeof (struct sd_fi_un));
	}
	if (un->sd_fi_fifo_arq[i] != NULL) {
		kmem_free(un->sd_fi_fifo_arq[i], sizeof (struct sd_fi_arq));
	}

	/*
	 * kmem_free() does not guarantee that the pointers are set to
	 * NULL. Since we use these pointers to determine whether values
	 * were set, make sure they are always NULL after the free.
	 */
	un->sd_fi_fifo_pkt[i] = NULL;
	un->sd_fi_fifo_un[i] = NULL;
	un->sd_fi_fifo_xb[i] = NULL;
	un->sd_fi_fifo_arq[i] = NULL;

	un->sd_fi_fifo_start++;

	mutex_exit(SD_MUTEX(un));

	SD_INFO(SD_LOG_SDTEST, un, "sd_faultinjection: exit\n");
}

#endif /* SD_FAULT_INJECTION */

/*
 * This routine is invoked in sd_unit_attach(). Before it is called, the
 * properties in the conf file, including the "hotpluggable" property,
 * should already have been processed.
 *
 * The sd driver distinguishes 3 different types of devices: removable media,
 * non-removable media, and hotpluggable. The differences are defined below:
 *
 * 1. Device ID
 *
 *     The device ID of a device is used to identify this device. Refer to
 *     ddi_devid_register(9F).
 *
 *     For a non-removable media disk device which can provide 0x80 or 0x83
 *     VPD page (refer to INQUIRY command of SCSI SPC specification), a unique
 *     device ID is created to identify this device. For other non-removable
 *     media devices, a default device ID is created only if this device has
 *     at least 2 alternate cylinders. Otherwise, this device has no devid.
 *
 *     -------------------------------------------------------
 *     removable media   hotpluggable  | Can Have Device ID
 *     -------------------------------------------------------
 *     false             false         |     Yes
 *     false             true          |     Yes
 *     true              x             |     No
 *     -------------------------------------------------------
 *
 *
 * 2. SCSI group 4 commands
 *
 *     In the SCSI specs, only some commands in the group 4 command set can
 *     use 8-byte addresses, which are needed to access storage spaces beyond
 *     2TB. Other commands have no such capability. Without group 4 support,
 *     it is impossible to make full use of the storage space of a disk with
 *     a capacity larger than 2TB.
 *
 *     -----------------------------------------------
 *     removable media   hotpluggable   LP64  |  Group
 *     -----------------------------------------------
 *     false             false          false |   1
 *     false             false          true  |   4
 *     false             true           false |   1
 *     false             true           true  |   4
 *     true              x              x     |   5
 *     -----------------------------------------------
 *
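 *     For example (illustrative): READ(16) (SCMD_READ_G4, opcode 0x88)
 *     carries a 64-bit logical block address, while READ(10)
 *     (SCMD_READ_G1, opcode 0x28) carries only a 32-bit one, so with
 *     512-byte blocks any block at or beyond 2TB is reachable only
 *     through a group 4 command.
 *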
/*
 * This routine is invoked in sd_unit_attach(). Before calling it, the
 * properties in the conf file, including the "hotpluggable" property,
 * must already have been processed.
 *
 * The sd driver distinguishes three different types of devices: removable
 * media, non-removable media, and hotpluggable. The differences are
 * defined below:
 *
 * 1. Device ID
 *
 *    The device ID is used to identify a device. Refer to
 *    ddi_devid_register(9F).
 *
 *    For a non-removable media disk device that can provide a 0x80 or
 *    0x83 VPD page (refer to the INQUIRY command in the SCSI SPC
 *    specification), a unique device ID is created to identify the
 *    device. For other non-removable media devices, a default device ID
 *    is created only if the device has at least 2 alternate cylinders.
 *    Otherwise, the device has no devid.
 *
 *    -------------------------------------------------------
 *    removable media   hotpluggable  | Can Have Device ID
 *    -------------------------------------------------------
 *        false             false     |        Yes
 *        false             true      |        Yes
 *        true                x       |        No
 *    -------------------------------------------------------
 *
 * 2. SCSI group 4 commands
 *
 *    In the SCSI specifications, only some commands in the group 4
 *    command set can use 8-byte addresses, which are required to access
 *    storage beyond 2TB. Other commands have no such capability. Without
 *    group 4 support, it is impossible to make full use of a disk with a
 *    capacity larger than 2TB.
 *
 *    -----------------------------------------------
 *    removable media   hotpluggable   LP64  | Group
 *    -----------------------------------------------
 *        false             false      false |   1
 *        false             false      true  |   4
 *        false             true       false |   1
 *        false             true       true  |   4
 *        true                x          x   |   5
 *    -----------------------------------------------
 *
 * 3. Check for VTOC label
 *
 *    If a direct-access disk has no EFI label, sd will check whether it
 *    has a valid VTOC label. Now, sd also does that check for removable
 *    media and hotpluggable devices.
 *
 *    --------------------------------------------------------------
 *    Direct-Access   removable media   hotpluggable | Check Label
 *    --------------------------------------------------------------
 *        false            false            false    |    No
 *        false            false            true     |    No
 *        false            true             false    |    Yes
 *        false            true             true     |    Yes
 *        true               x                x      |    Yes
 *    --------------------------------------------------------------
 *
 * 4. Building a default VTOC label
 *
 *    As section 3 says, sd checks whether some kinds of devices have a
 *    VTOC label. If those devices have no valid VTOC label, sd(7d) will
 *    attempt to create a default VTOC label for them. Currently sd
 *    creates a default VTOC label for all devices on the x86 platform
 *    (VTOC_16), but only for removable media devices on SPARC (VTOC_8).
 *
 *    -----------------------------------------------------------
 *    removable media   hotpluggable   platform | Default Label
 *    -----------------------------------------------------------
 *        false             false       sparc   |     No
 *        false             false       x86     |     Yes
 *        false             true        x86     |     Yes
 *        false             true        sparc   |     Yes
 *        true                x          x      |     Yes
 *    -----------------------------------------------------------
 *
 * 5. Supported blocksizes of target devices
 *
 *    Sd supports non-512-byte blocksizes for removable media devices
 *    only. For other devices, only a 512-byte blocksize is supported.
 *    This may change in the near future because some RAID devices
 *    require non-512-byte blocksizes.
 *
 *    -----------------------------------------------------------
 *    removable media   hotpluggable | non-512-byte blocksize
 *    -----------------------------------------------------------
 *        false             false    |   No
 *        false             true     |   No
 *        true                x      |   Yes
 *    -----------------------------------------------------------
 *
 * 6. Automatic mount & unmount
 *
 *    The sd(7d) driver provides the DKIOCREMOVABLE ioctl, which is used
 *    to query whether a device is a removable media device. It returns 1
 *    for removable media devices, and 0 for others. (A usage sketch
 *    follows this comment block.)
 *
 *    The automatic mounting subsystem should distinguish between the
 *    types of devices and apply automounting policies to each.
 *
 * 7. fdisk partition management
 *
 *    Fdisk is the traditional partitioning method on the x86 platform.
 *    The sd(7d) driver supports fdisk partitions only on the x86
 *    platform; on SPARC, sd doesn't support fdisk partitions at all.
 *    Note: pcfs(7fs) can recognize fdisk partitions on both x86 and
 *    SPARC platforms.
 *
 *    -----------------------------------------------------------
 *    platform   removable media   USB/1394 | fdisk supported
 *    -----------------------------------------------------------
 *    x86              X               X    |   true
 *    -----------------------------------------------------------
 *    sparc            X               X    |   false
 *    -----------------------------------------------------------
 *
 * 8. MBOOT/MBR
 *
 *    Although sd(7d) doesn't support fdisk on the SPARC platform, it
 *    does support reading/writing the mboot for removable media devices
 *    on SPARC.
 *
 *    -----------------------------------------------------------
 *    platform   removable media   USB/1394 | mboot supported
 *    -----------------------------------------------------------
 *    x86              X               X    |   true
 *    -----------------------------------------------------------
 *    sparc          false           false  |   false
 *    sparc          false           true   |   true
 *    sparc          true            false  |   true
 *    sparc          true            true   |   true
 *    -----------------------------------------------------------
 *
 * 9. Error handling during device open
 *
 *    If opening a disk device fails, an errno is returned. For some
 *    kinds of errors, a different errno is returned depending on whether
 *    the device is a removable media device. This brings USB/1394 hard
 *    disks in line with expected hard disk behavior. It is not expected
 *    that this breaks any application.
 *
 *    ------------------------------------------------------
 *    removable media   hotpluggable | errno
 *    ------------------------------------------------------
 *        false             false    |   EIO
 *        false             true     |   EIO
 *        true                x      |   ENXIO
 *    ------------------------------------------------------
 *
 * 11. ioctls: DKIOCEJECT, CDROMEJECT
 *
 *    These ioctls are applicable only to removable media devices.
 *
 *    -----------------------------------------------------------
 *    removable media   hotpluggable | DKIOCEJECT, CDROMEJECT
 *    -----------------------------------------------------------
 *        false             false    |   No
 *        false             true     |   No
 *        true                x      |   Yes
 *    -----------------------------------------------------------
 *
 * 12. Kstats for partitions
 *
 *    sd creates partition kstats for non-removable media devices. USB
 *    and Firewire hard disks now have partition kstats as well.
 *
 *    ------------------------------------------------------
 *    removable media   hotpluggable | kstat
 *    ------------------------------------------------------
 *        false             false    |   Yes
 *        false             true     |   Yes
 *        true                x      |   No
 *    ------------------------------------------------------
 *
 * 13. Removable media & hotpluggable properties
 *
 *    The sd driver creates a "removable-media" property for removable
 *    media devices. A parent nexus driver creates a "hotpluggable"
 *    property if it supports hotplugging.
 *
 *    ---------------------------------------------------------------------
 *    removable media   hotpluggable | "removable-media"   "hotpluggable"
 *    ---------------------------------------------------------------------
 *        false             false    |      No                  No
 *        false             true     |      No                  Yes
 *        true              false    |      Yes                 No
 *        true              true     |      Yes                 Yes
 *    ---------------------------------------------------------------------
 *
 * 14. Power Management
 *
 *    sd only power manages removable media devices or devices that
 *    support LOG_SENSE or have a "pm-capable" property (PSARC/2002/250).
 *
 *    A parent nexus that supports hotplugging can also set "pm-capable"
 *    if the disk can be power managed.
 *
 *    ------------------------------------------------------------
 *    removable media   hotpluggable   pm-capable | power manage
 *    ------------------------------------------------------------
 *        false             false        false    |   No
 *        false             false        true     |   Yes
 *        false             true         false    |   No
 *        false             true         true     |   Yes
 *        true                x            x      |   Yes
 *    ------------------------------------------------------------
 *
 *    USB and Firewire hard disks can now be power managed independently
 *    of the framebuffer.
 *
 * 15. Support for USB disks with capacity larger than 1TB
 *
 *    Currently, sd doesn't permit a fixed disk device with a capacity
 *    larger than 1TB to be used in a 32-bit operating system
 *    environment. However, sd doesn't apply this restriction to
 *    removable media devices; instead, it assumes that removable media
 *    devices cannot have a capacity larger than 1TB. Therefore, using
 *    those devices on a 32-bit system is only partially supported, which
 *    can cause unexpected results.
 *
 *    ---------------------------------------------------------------------
 *    removable media   USB/1394 | Capacity > 1TB | Used in 32-bit env
 *    ---------------------------------------------------------------------
 *        false           false  |     true       |        No
 *        false           true   |     true       |        No
 *        true            false  |     true       |        Yes
 *        true            true   |     true       |        Yes
 *    ---------------------------------------------------------------------
 *
 * 16. Check write-protection at open time
 *
 *    When a removable media device is opened for writing without the
 *    NDELAY flag, sd will check whether the device is writable. If a
 *    write-protected device is opened for writing without the NDELAY
 *    flag, the open will fail.
 *
 *    ------------------------------------------------------------
 *    removable media   USB/1394 | WP Check
 *    ------------------------------------------------------------
 *        false           false  |   No
 *        false           true   |   No
 *        true            false  |   Yes
 *        true            true   |   Yes
 *    ------------------------------------------------------------
 *
 * 17. syslog when a corrupted VTOC is encountered
 *
 *    Currently, if an invalid VTOC is encountered, sd prints a syslog
 *    message only for fixed SCSI disks.
 *
 *    ------------------------------------------------------------
 *    removable media   USB/1394 | print syslog
 *    ------------------------------------------------------------
 *        false           false  |   Yes
 *        false           true   |   No
 *        true            false  |   No
 *        true            true   |   No
 *    ------------------------------------------------------------
 */
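/*
 * Illustrative user-level sketch (standalone; not part of this driver) of
 * the DKIOCREMOVABLE ioctl described in section 6 of the comment above.
 * The device path is a placeholder chosen for the example.
 */
#include <sys/types.h>
#include <sys/dkio.h>
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int
main(void)
{
	int removable = 0;
	/* O_NDELAY lets the open succeed even with no media present. */
	int fd = open("/dev/rdsk/c0t0d0s2", O_RDONLY | O_NDELAY);

	if (fd < 0) {
		perror("open");
		return (1);
	}
	/* The driver sets removable to 1 for removable media, else 0. */
	if (ioctl(fd, DKIOCREMOVABLE, &removable) < 0) {
		perror("DKIOCREMOVABLE");
		(void) close(fd);
		return (1);
	}
	(void) printf("removable-media: %d\n", removable);
	(void) close(fd);
	return (0);
}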
static void
sd_set_unit_attributes(struct sd_lun *un, dev_info_t *devi)
{
	int	pm_capable_prop;

	ASSERT(un->un_sd);
	ASSERT(un->un_sd->sd_inq);

	/*
	 * Enable SYNC CACHE support for all devices.
	 */
	un->un_f_sync_cache_supported = TRUE;

	/*
	 * Set the sync cache required flag to false.
	 * This ensures that no SYNC CACHE command is sent
	 * when there have been no writes.
	 */
	un->un_f_sync_cache_required = FALSE;

	if (un->un_sd->sd_inq->inq_rmb) {
		/*
		 * The media of this device is removable, and for this kind
		 * of device it is possible to change the medium after the
		 * device has been opened. Thus we should support this
		 * operation.
		 */
		un->un_f_has_removable_media = TRUE;

		/*
		 * Support non-512-byte blocksizes for removable media
		 * devices.
		 */
		un->un_f_non_devbsize_supported = TRUE;

		/*
		 * Assume that all removable media devices support DOOR_LOCK.
		 */
		un->un_f_doorlock_supported = TRUE;

		/*
		 * A removable media device may be opened with the NDELAY
		 * flag when there is no media in the drive; in that case we
		 * don't care whether the device is writable. But without
		 * the NDELAY flag, we need to check whether the media is
		 * write-protected.
		 */
		un->un_f_chk_wp_open = TRUE;

		/*
		 * Need to start a SCSI watch thread to monitor the media
		 * state; when media is inserted or ejected, notify
		 * syseventd.
		 */
		un->un_f_monitor_media_state = TRUE;

		/*
		 * Some devices don't support the START_STOP_UNIT command.
		 * Therefore, we'd better check whether a device supports it
		 * before sending it.
		 */
		un->un_f_check_start_stop = TRUE;

		/*
		 * Support the eject media ioctls:
		 * FDEJECT, DKIOCEJECT, CDROMEJECT
		 */
		un->un_f_eject_media_supported = TRUE;

		/*
		 * Because many removable-media devices don't support
		 * LOG_SENSE, we cannot use this command to check whether a
		 * removable media device supports power management.
		 * We assume that they support power management via the
		 * START_STOP_UNIT command and can be spun up and down
		 * without limitation.
		 */
		un->un_f_pm_supported = TRUE;

		/*
		 * Need to create a zero-length (Boolean) property
		 * "removable-media" for removable media devices.
		 * Note that the return value of the property is not being
		 * checked: if the property cannot be created, we do not
		 * want the attach to fail altogether. This is consistent
		 * with other property creation in attach.
		 */
		(void) ddi_prop_create(DDI_DEV_T_NONE, devi,
		    DDI_PROP_CANSLEEP, "removable-media", NULL, 0);

	} else {
		/*
		 * Create a device ID for the device.
		 */
		un->un_f_devid_supported = TRUE;

		/*
		 * Spin up non-removable-media devices once they are
		 * attached.
		 */
		un->un_f_attach_spinup = TRUE;

		/*
		 * According to the SCSI specification, sense data has two
		 * kinds of format: fixed format and descriptor format. At
		 * present, we don't support descriptor format sense data
		 * for removable media.
		 */
		if (SD_INQUIRY(un)->inq_dtype == DTYPE_DIRECT) {
			un->un_f_descr_format_supported = TRUE;
		}

		/*
		 * kstats are created only for non-removable media devices.
		 *
		 * Set this in sd.conf to 0 in order to disable kstats. The
		 * default is 1, so they are enabled by default.
		 */
		un->un_f_pkstats_enabled = (ddi_prop_get_int(DDI_DEV_T_ANY,
		    SD_DEVINFO(un), DDI_PROP_DONTPASS,
		    "enable-partition-kstats", 1));

		/*
		 * Check if the HBA has set the "pm-capable" property.
		 * If "pm-capable" exists and is non-zero then we can
		 * power manage the device without checking the start/stop
		 * cycle count log sense page.
		 *
		 * If "pm-capable" exists and is SD_PM_CAPABLE_FALSE (0)
		 * then we should not power manage the device.
		 *
		 * If "pm-capable" doesn't exist then pm_capable_prop will
		 * be set to SD_PM_CAPABLE_UNDEFINED (-1). In this case,
		 * sd will check the start/stop cycle count log sense page
		 * and power manage the device if the cycle count limit has
		 * not been exceeded.
		 */
		pm_capable_prop = ddi_prop_get_int(DDI_DEV_T_ANY, devi,
		    DDI_PROP_DONTPASS, "pm-capable", SD_PM_CAPABLE_UNDEFINED);
		if (pm_capable_prop == SD_PM_CAPABLE_UNDEFINED) {
			un->un_f_log_sense_supported = TRUE;
		} else {
			/*
			 * The pm-capable property exists.
			 *
			 * Convert "TRUE" values for pm_capable_prop to
			 * SD_PM_CAPABLE_TRUE (1) to make it easier to check
			 * later. "TRUE" values are any values except
			 * SD_PM_CAPABLE_FALSE (0) and
			 * SD_PM_CAPABLE_UNDEFINED (-1).
			 */
			if (pm_capable_prop == SD_PM_CAPABLE_FALSE) {
				un->un_f_log_sense_supported = FALSE;
			} else {
				un->un_f_pm_supported = TRUE;
			}

			SD_INFO(SD_LOG_ATTACH_DETACH, un,
			    "sd_unit_attach: un:0x%p pm-capable "
			    "property set to %d.\n", un, un->un_f_pm_supported);
		}
	}

	if (un->un_f_is_hotpluggable) {

		/*
		 * Have to watch hotpluggable devices as well, since
		 * that's the only way for userland applications to
		 * detect hot removal while the device is busy/mounted.
		 */
		un->un_f_monitor_media_state = TRUE;

		un->un_f_check_start_stop = TRUE;
	}
}
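/*
 * Standalone sketch (not part of the driver) of the "pm-capable" decision
 * implemented in sd_set_unit_attributes() above: an undefined property
 * means fall back to the LOG_SENSE start/stop cycle counter check, zero
 * disables power management, and any other defined value enables it
 * directly. The names and values below are local to the sketch.
 */
#define	SKETCH_PM_UNDEFINED	(-1)
#define	SKETCH_PM_FALSE		0

typedef struct {
	int log_sense_check;	/* consult start/stop cycle log page */
	int pm_ok;		/* device may be power managed */
} sketch_pm_policy_t;

static sketch_pm_policy_t
sketch_pm_policy_from_prop(int pm_capable_prop)
{
	sketch_pm_policy_t p = { 0, 0 };

	if (pm_capable_prop == SKETCH_PM_UNDEFINED) {
		/* No property: decide later from the LOG_SENSE page. */
		p.log_sense_check = 1;
	} else if (pm_capable_prop != SKETCH_PM_FALSE) {
		/* Any non-zero, defined value counts as "true". */
		p.pm_ok = 1;
	}
	return (p);
}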
/*
 * sd_tg_rdwr:
 *	Provides read/write access for cmlb via sd_tgops. start_block is
 *	in system block size units, reqlength is in bytes.
 */
static int
sd_tg_rdwr(dev_info_t *devi, uchar_t cmd, void *bufaddr,
    diskaddr_t start_block, size_t reqlength, void *tg_cookie)
{
	struct sd_lun	*un;
	int		path_flag = (int)(uintptr_t)tg_cookie;
	char		*dkl = NULL;
	diskaddr_t	real_addr = start_block;
	diskaddr_t	first_byte, end_block;

	size_t		buffer_size = reqlength;
	int		rval = 0;
	diskaddr_t	cap;
	uint32_t	lbasize;
	sd_ssc_t	*ssc;

	un = ddi_get_soft_state(sd_state, ddi_get_instance(devi));
	if (un == NULL)
		return (ENXIO);

	if (cmd != TG_READ && cmd != TG_WRITE)
		return (EINVAL);

	ssc = sd_ssc_init(un);
	mutex_enter(SD_MUTEX(un));
	if (un->un_f_tgt_blocksize_is_valid == FALSE) {
		mutex_exit(SD_MUTEX(un));
		rval = sd_send_scsi_READ_CAPACITY(ssc, (uint64_t *)&cap,
		    &lbasize, path_flag);
		if (rval != 0)
			goto done1;
		mutex_enter(SD_MUTEX(un));
		sd_update_block_info(un, lbasize, cap);
		if (un->un_f_tgt_blocksize_is_valid == FALSE) {
			mutex_exit(SD_MUTEX(un));
			rval = EIO;
			goto done;
		}
	}

	if (NOT_DEVBSIZE(un)) {
		/*
		 * sys_blocksize != tgt_blocksize: need to re-adjust the
		 * block number and save the offset to the beginning of
		 * the dk_label.
		 */
		first_byte = SD_SYSBLOCKS2BYTES(un, start_block);
		real_addr = first_byte / un->un_tgt_blocksize;

		end_block = (first_byte + reqlength +
		    un->un_tgt_blocksize - 1) / un->un_tgt_blocksize;

		/* round up buffer size to a multiple of target block size */
		buffer_size = (end_block - real_addr) * un->un_tgt_blocksize;

		SD_TRACE(SD_LOG_IO_PARTITION, un, "sd_tg_rdwr",
		    "label_addr: 0x%x allocation size: 0x%x\n",
		    real_addr, buffer_size);

		if (((first_byte % un->un_tgt_blocksize) != 0) ||
		    (reqlength % un->un_tgt_blocksize) != 0)
			/* the request is not aligned */
			dkl = kmem_zalloc(buffer_size, KM_SLEEP);
	}

	/*
	 * The MMC standard allows READ CAPACITY to be
	 * inaccurate by a bounded amount (in the interest of
	 * response latency). As a result, failed READs are
	 * commonplace (due to the reading of metadata and not
	 * data). Depending on the per-Vendor/drive Sense data,
	 * the failed READ can cause many (unnecessary) retries.
	 */
	if (ISCD(un) && (cmd == TG_READ) &&
	    (un->un_f_blockcount_is_valid == TRUE) &&
	    ((start_block == (un->un_blockcount - 1)) ||
	    (start_block == (un->un_blockcount - 2)))) {
		path_flag = SD_PATH_DIRECT_PRIORITY;
	}

	mutex_exit(SD_MUTEX(un));
	if (cmd == TG_READ) {
		rval = sd_send_scsi_READ(ssc, (dkl != NULL) ? dkl : bufaddr,
		    buffer_size, real_addr, path_flag);
		if (dkl != NULL)
			bcopy(dkl + SD_TGTBYTEOFFSET(un, start_block,
			    real_addr), bufaddr, reqlength);
	} else {
		if (dkl) {
			rval = sd_send_scsi_READ(ssc, dkl, buffer_size,
			    real_addr, path_flag);
			if (rval) {
				goto done1;
			}
			bcopy(bufaddr, dkl + SD_TGTBYTEOFFSET(un, start_block,
			    real_addr), reqlength);
		}
		rval = sd_send_scsi_WRITE(ssc, (dkl != NULL) ? dkl : bufaddr,
		    buffer_size, real_addr, path_flag);
	}

done1:
	if (dkl != NULL)
		kmem_free(dkl, buffer_size);

	if (rval != 0) {
		if (rval == EIO)
			sd_ssc_assessment(ssc, SD_FMT_STATUS_CHECK);
		else
			sd_ssc_assessment(ssc, SD_FMT_IGNORE);
	}
done:
	sd_ssc_fini(ssc);
	return (rval);
}
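/*
 * Standalone sketch of the block-size conversion performed by sd_tg_rdwr()
 * above when sys_blocksize != tgt_blocksize; all names here are local to
 * the sketch. With a 512-byte system block size and a 2048-byte target
 * block size, a 1024-byte request at system block 3 maps to target block
 * 0, a 4096-byte (two target blocks) bounce buffer, and an offset of 1536
 * bytes within that buffer.
 */
#include <stdint.h>
#include <stdio.h>

int
main(void)
{
	const uint64_t sys_bs = 512, tgt_bs = 2048;
	const uint64_t start_block = 3;		/* in system blocks */
	const uint64_t reqlength = 1024;	/* in bytes */

	uint64_t first_byte = start_block * sys_bs;
	uint64_t real_addr = first_byte / tgt_bs;	/* first tgt block */
	/* round the end of the request up to a target block boundary */
	uint64_t end_block = (first_byte + reqlength + tgt_bs - 1) / tgt_bs;
	uint64_t buffer_size = (end_block - real_addr) * tgt_bs;
	uint64_t offset = first_byte - real_addr * tgt_bs;

	(void) printf("tgt block %llu, buf %llu bytes, offset %llu\n",
	    (unsigned long long)real_addr,
	    (unsigned long long)buffer_size,
	    (unsigned long long)offset);
	return (0);
}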
static int
sd_tg_getinfo(dev_info_t *devi, int cmd, void *arg, void *tg_cookie)
{
	struct sd_lun	*un;
	diskaddr_t	cap;
	uint32_t	lbasize;
	int		path_flag = (int)(uintptr_t)tg_cookie;
	int		ret = 0;

	un = ddi_get_soft_state(sd_state, ddi_get_instance(devi));
	if (un == NULL)
		return (ENXIO);

	switch (cmd) {
	case TG_GETPHYGEOM:
	case TG_GETVIRTGEOM:
	case TG_GETCAPACITY:
	case TG_GETBLOCKSIZE:
		mutex_enter(SD_MUTEX(un));

		if ((un->un_f_blockcount_is_valid == TRUE) &&
		    (un->un_f_tgt_blocksize_is_valid == TRUE)) {
			cap = un->un_blockcount;
			lbasize = un->un_tgt_blocksize;
			mutex_exit(SD_MUTEX(un));
		} else {
			sd_ssc_t	*ssc;
			mutex_exit(SD_MUTEX(un));
			ssc = sd_ssc_init(un);
			ret = sd_send_scsi_READ_CAPACITY(ssc, (uint64_t *)&cap,
			    &lbasize, path_flag);
			if (ret != 0) {
				if (ret == EIO)
					sd_ssc_assessment(ssc,
					    SD_FMT_STATUS_CHECK);
				else
					sd_ssc_assessment(ssc,
					    SD_FMT_IGNORE);
				sd_ssc_fini(ssc);
				return (ret);
			}
			sd_ssc_fini(ssc);
			mutex_enter(SD_MUTEX(un));
			sd_update_block_info(un, lbasize, cap);
			if ((un->un_f_blockcount_is_valid == FALSE) ||
			    (un->un_f_tgt_blocksize_is_valid == FALSE)) {
				mutex_exit(SD_MUTEX(un));
				return (EIO);
			}
			mutex_exit(SD_MUTEX(un));
		}

		if (cmd == TG_GETCAPACITY) {
			*(diskaddr_t *)arg = cap;
			return (0);
		}

		if (cmd == TG_GETBLOCKSIZE) {
			*(uint32_t *)arg = lbasize;
			return (0);
		}

		if (cmd == TG_GETPHYGEOM)
			ret = sd_get_physical_geometry(un, (cmlb_geom_t *)arg,
			    cap, lbasize, path_flag);
		else
			/* TG_GETVIRTGEOM */
			ret = sd_get_virtual_geometry(un,
			    (cmlb_geom_t *)arg, cap, lbasize);

		return (ret);

	case TG_GETATTR:
		mutex_enter(SD_MUTEX(un));
		((tg_attribute_t *)arg)->media_is_writable =
		    un->un_f_mmc_writable_media;
		mutex_exit(SD_MUTEX(un));
		return (0);
	default:
		return (ENOTTY);
	}
}
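/*
 * Standalone sketch relating TG_GETCAPACITY/TG_GETBLOCKSIZE style results
 * to the 1TB limit discussed in section 15 of the comment block earlier.
 * With 512-byte blocks, a signed 32-bit block number tops out at 2^31
 * blocks, i.e. exactly 1TB; this is our reading of the arithmetic behind
 * restricting larger fixed disks in a 32-bit environment, not a statement
 * of the implementation. Widen to 64 bits before multiplying.
 */
#include <stdint.h>
#include <stdio.h>

int
main(void)
{
	const uint32_t lbasize = 512;
	const uint64_t max_blkno = INT32_MAX;	/* signed 32-bit block no. */
	uint64_t bytes = (max_blkno + 1) * (uint64_t)lbasize;

	/* Prints 1099511627776, i.e. exactly 1TB. */
	(void) printf("%llu\n", (unsigned long long)bytes);
	return (0);
}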
/*
 * Function: sd_ssc_ereport_post
 *
 * Description: Called when the sd driver needs to post an ereport.
 *
 * Context: Kernel thread or interrupt context.
 */
static void
sd_ssc_ereport_post(sd_ssc_t *ssc, enum sd_driver_assessment drv_assess)
{
	int		uscsi_path_instance = 0;
	uchar_t		uscsi_pkt_reason;
	uint32_t	uscsi_pkt_state;
	uint32_t	uscsi_pkt_statistics;
	uint64_t	uscsi_ena;
	uchar_t		op_code;
	uint8_t		*sensep;
	union scsi_cdb	*cdbp;
	uint_t		cdblen = 0;
	uint_t		senlen = 0;
	struct sd_lun	*un;
	dev_info_t	*dip;
	char		*devid;
	int		ssc_invalid_flags = SSC_FLAGS_INVALID_PKT_REASON |
	    SSC_FLAGS_INVALID_STATUS |
	    SSC_FLAGS_INVALID_SENSE |
	    SSC_FLAGS_INVALID_DATA;
	char		assessment[16];

	ASSERT(ssc != NULL);
	ASSERT(ssc->ssc_uscsi_cmd != NULL);
	ASSERT(ssc->ssc_uscsi_info != NULL);

	un = ssc->ssc_un;
	ASSERT(un != NULL);

	dip = un->un_sd->sd_dev;

	/*
	 * Get the devid; it will only be passed to non-transport
	 * error reports.
	 */
	devid = DEVI(dip)->devi_devid_str;

	/*
	 * If we are syncing or dumping, the command will not be executed,
	 * so we bypass this situation.
	 */
	if (ddi_in_panic() || (un->un_state == SD_STATE_SUSPENDED) ||
	    (un->un_state == SD_STATE_DUMPING))
		return;

	uscsi_pkt_reason = ssc->ssc_uscsi_info->ui_pkt_reason;
	uscsi_path_instance = ssc->ssc_uscsi_cmd->uscsi_path_instance;
	uscsi_pkt_state = ssc->ssc_uscsi_info->ui_pkt_state;
	uscsi_pkt_statistics = ssc->ssc_uscsi_info->ui_pkt_statistics;
	uscsi_ena = ssc->ssc_uscsi_info->ui_ena;

	sensep = (uint8_t *)ssc->ssc_uscsi_cmd->uscsi_rqbuf;
	cdbp = (union scsi_cdb *)ssc->ssc_uscsi_cmd->uscsi_cdb;

	/* In rare cases, e.g. DOORLOCK, the cdb could be NULL. */
	if (cdbp == NULL) {
		scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
		    "sd_ssc_ereport_post meet empty cdb\n");
		return;
	}

	op_code = cdbp->scc_cmd;

	cdblen = (int)ssc->ssc_uscsi_cmd->uscsi_cdblen;
	senlen = (int)(ssc->ssc_uscsi_cmd->uscsi_rqlen -
	    ssc->ssc_uscsi_cmd->uscsi_rqresid);

	if (senlen > 0)
		ASSERT(sensep != NULL);

	/*
	 * Initialize the assessment string from drv_assess.
	 * SD_FM_DRV_FATAL will be mapped to "fail" or "fatal" depending
	 * on the sense-key returned.
	 */
	switch (drv_assess) {
	case SD_FM_DRV_RECOVERY:
		(void) sprintf(assessment, "%s", "recovered");
		break;
	case SD_FM_DRV_RETRY:
		(void) sprintf(assessment, "%s", "retry");
		break;
	case SD_FM_DRV_NOTICE:
		(void) sprintf(assessment, "%s", "info");
		break;
	case SD_FM_DRV_FATAL:
	default:
		(void) sprintf(assessment, "%s", "unknown");
	}

	/*
	 * If drv_assess == SD_FM_DRV_RECOVERY, this should be a recovered
	 * command and we post ereport.io.scsi.cmd.disk.recovered.
	 * driver-assessment will always be "recovered" here.
	 */
	if (drv_assess == SD_FM_DRV_RECOVERY) {
		scsi_fm_ereport_post(un->un_sd, uscsi_path_instance,
		    "cmd.disk.recovered", uscsi_ena, devid, DDI_NOSLEEP,
		    FM_VERSION, DATA_TYPE_UINT8, FM_EREPORT_VERS0,
		    "driver-assessment", DATA_TYPE_STRING, assessment,
		    "op-code", DATA_TYPE_UINT8, op_code,
		    "cdb", DATA_TYPE_UINT8_ARRAY,
		    cdblen, ssc->ssc_uscsi_cmd->uscsi_cdb,
		    "pkt-reason", DATA_TYPE_UINT8, uscsi_pkt_reason,
		    "pkt-state", DATA_TYPE_UINT32, uscsi_pkt_state,
		    "pkt-stats", DATA_TYPE_UINT32, uscsi_pkt_statistics,
		    NULL);
		return;
	}

	/*
	 * If there is unexpected/undecodable data, we should post
	 * ereport.io.scsi.cmd.disk.dev.uderr.
	 * driver-assessment will be set based on the parameter drv_assess.
	 * SSC_FLAGS_INVALID_SENSE - invalid sense data sent back.
	 * SSC_FLAGS_INVALID_PKT_REASON - invalid pkt-reason encountered.
	 * SSC_FLAGS_INVALID_STATUS - invalid stat-code encountered.
	 * SSC_FLAGS_INVALID_DATA - invalid data sent back.
	 */
	if (ssc->ssc_flags & ssc_invalid_flags) {
		if (ssc->ssc_flags & SSC_FLAGS_INVALID_SENSE) {
			scsi_fm_ereport_post(un->un_sd, uscsi_path_instance,
			    "cmd.disk.dev.uderr", uscsi_ena, devid, DDI_NOSLEEP,
			    FM_VERSION, DATA_TYPE_UINT8, FM_EREPORT_VERS0,
			    "driver-assessment", DATA_TYPE_STRING,
			    drv_assess == SD_FM_DRV_FATAL ?
			    "fail" : assessment,
			    "op-code", DATA_TYPE_UINT8, op_code,
			    "cdb", DATA_TYPE_UINT8_ARRAY,
			    cdblen, ssc->ssc_uscsi_cmd->uscsi_cdb,
			    "pkt-reason", DATA_TYPE_UINT8, uscsi_pkt_reason,
			    "pkt-state", DATA_TYPE_UINT32, uscsi_pkt_state,
			    "pkt-stats", DATA_TYPE_UINT32,
			    uscsi_pkt_statistics,
			    "stat-code", DATA_TYPE_UINT8,
			    ssc->ssc_uscsi_cmd->uscsi_status,
			    "un-decode-info", DATA_TYPE_STRING,
			    ssc->ssc_info,
			    "un-decode-value", DATA_TYPE_UINT8_ARRAY,
			    senlen, sensep,
			    NULL);
		} else {
			/*
			 * For other types of invalid data, the
			 * un-decode-value field is empty because the
			 * undecodable content can be seen from the upper
			 * level payload or inside un-decode-info.
			 */
			scsi_fm_ereport_post(un->un_sd, uscsi_path_instance,
			    "cmd.disk.dev.uderr", uscsi_ena, devid, DDI_NOSLEEP,
			    FM_VERSION, DATA_TYPE_UINT8, FM_EREPORT_VERS0,
			    "driver-assessment", DATA_TYPE_STRING,
			    drv_assess == SD_FM_DRV_FATAL ?
			    "fail" : assessment,
			    "op-code", DATA_TYPE_UINT8, op_code,
			    "cdb", DATA_TYPE_UINT8_ARRAY,
			    cdblen, ssc->ssc_uscsi_cmd->uscsi_cdb,
			    "pkt-reason", DATA_TYPE_UINT8, uscsi_pkt_reason,
			    "pkt-state", DATA_TYPE_UINT32, uscsi_pkt_state,
			    "pkt-stats", DATA_TYPE_UINT32,
			    uscsi_pkt_statistics,
			    "stat-code", DATA_TYPE_UINT8,
			    ssc->ssc_uscsi_cmd->uscsi_status,
			    "un-decode-info", DATA_TYPE_STRING,
			    ssc->ssc_info,
			    "un-decode-value", DATA_TYPE_UINT8_ARRAY,
			    0, NULL,
			    NULL);
		}
		ssc->ssc_flags &= ~ssc_invalid_flags;
		return;
	}

	if (uscsi_pkt_reason != CMD_CMPLT ||
	    (ssc->ssc_flags & SSC_FLAGS_TRAN_ABORT)) {
		/*
		 * pkt-reason != CMD_CMPLT, or SSC_FLAGS_TRAN_ABORT was
		 * set inside sd_start_cmds due to errors (bad packet or
		 * fatal transport error). Treat it as a transport error
		 * and post ereport.io.scsi.cmd.disk.tran.
		 * driver-assessment will be set based on drv_assess.
		 * We set devid to NULL because it is a transport error.
		 */
		if (ssc->ssc_flags & SSC_FLAGS_TRAN_ABORT)
			ssc->ssc_flags &= ~SSC_FLAGS_TRAN_ABORT;

		scsi_fm_ereport_post(un->un_sd, uscsi_path_instance,
		    "cmd.disk.tran", uscsi_ena, NULL, DDI_NOSLEEP, FM_VERSION,
		    DATA_TYPE_UINT8, FM_EREPORT_VERS0,
		    "driver-assessment", DATA_TYPE_STRING,
		    drv_assess == SD_FM_DRV_FATAL ? "fail" : assessment,
		    "op-code", DATA_TYPE_UINT8, op_code,
		    "cdb", DATA_TYPE_UINT8_ARRAY,
		    cdblen, ssc->ssc_uscsi_cmd->uscsi_cdb,
		    "pkt-reason", DATA_TYPE_UINT8, uscsi_pkt_reason,
		    "pkt-state", DATA_TYPE_UINT8, uscsi_pkt_state,
		    "pkt-stats", DATA_TYPE_UINT32, uscsi_pkt_statistics,
		    NULL);
	} else {
		/*
		 * If we got here, we have a completed command, and we need
		 * to further investigate the sense data to see what kind
		 * of ereport we should post.
		 * Post ereport.io.scsi.cmd.disk.dev.rqs.merr
		 * if sense-key == 0x3.
		 * Post ereport.io.scsi.cmd.disk.dev.rqs.derr otherwise.
		 * driver-assessment will be set based on the parameter
		 * drv_assess.
		 */
		if (senlen > 0) {
			/*
			 * Here we have sense data available.
			 */
			uint8_t sense_key;
			sense_key = scsi_sense_key(sensep);
			if (sense_key == 0x3) {
				/*
				 * sense-key == 0x3 (medium error);
				 * driver-assessment should be "fatal" if
				 * drv_assess is SD_FM_DRV_FATAL.
				 */
				scsi_fm_ereport_post(un->un_sd,
				    uscsi_path_instance,
				    "cmd.disk.dev.rqs.merr",
				    uscsi_ena, devid, DDI_NOSLEEP, FM_VERSION,
				    DATA_TYPE_UINT8, FM_EREPORT_VERS0,
				    "driver-assessment",
				    DATA_TYPE_STRING,
				    drv_assess == SD_FM_DRV_FATAL ?
				    "fatal" : assessment,
				    "op-code",
				    DATA_TYPE_UINT8, op_code,
				    "cdb",
				    DATA_TYPE_UINT8_ARRAY, cdblen,
				    ssc->ssc_uscsi_cmd->uscsi_cdb,
				    "pkt-reason",
				    DATA_TYPE_UINT8, uscsi_pkt_reason,
				    "pkt-state",
				    DATA_TYPE_UINT8, uscsi_pkt_state,
				    "pkt-stats",
				    DATA_TYPE_UINT32,
				    uscsi_pkt_statistics,
				    "stat-code",
				    DATA_TYPE_UINT8,
				    ssc->ssc_uscsi_cmd->uscsi_status,
				    "key",
				    DATA_TYPE_UINT8,
				    scsi_sense_key(sensep),
				    "asc",
				    DATA_TYPE_UINT8,
				    scsi_sense_asc(sensep),
				    "ascq",
				    DATA_TYPE_UINT8,
				    scsi_sense_ascq(sensep),
				    "sense-data",
				    DATA_TYPE_UINT8_ARRAY,
				    senlen, sensep,
				    "lba",
				    DATA_TYPE_UINT64,
				    ssc->ssc_uscsi_info->ui_lba,
				    NULL);
			} else {
				/*
				 * If sense-key == 0x4 (hardware error),
				 * driver-assessment should be "fatal" if
				 * drv_assess is SD_FM_DRV_FATAL.
				 */
				scsi_fm_ereport_post(un->un_sd,
				    uscsi_path_instance,
				    "cmd.disk.dev.rqs.derr",
				    uscsi_ena, devid, DDI_NOSLEEP,
				    FM_VERSION,
				    DATA_TYPE_UINT8, FM_EREPORT_VERS0,
				    "driver-assessment",
				    DATA_TYPE_STRING,
				    drv_assess == SD_FM_DRV_FATAL ?
				    (sense_key == 0x4 ?
				    "fatal" : "fail") : assessment,
				    "op-code",
				    DATA_TYPE_UINT8, op_code,
				    "cdb",
				    DATA_TYPE_UINT8_ARRAY, cdblen,
				    ssc->ssc_uscsi_cmd->uscsi_cdb,
				    "pkt-reason",
				    DATA_TYPE_UINT8, uscsi_pkt_reason,
				    "pkt-state",
				    DATA_TYPE_UINT8, uscsi_pkt_state,
				    "pkt-stats",
				    DATA_TYPE_UINT32,
				    uscsi_pkt_statistics,
				    "stat-code",
				    DATA_TYPE_UINT8,
				    ssc->ssc_uscsi_cmd->uscsi_status,
				    "key",
				    DATA_TYPE_UINT8,
				    scsi_sense_key(sensep),
				    "asc",
				    DATA_TYPE_UINT8,
				    scsi_sense_asc(sensep),
				    "ascq",
				    DATA_TYPE_UINT8,
				    scsi_sense_ascq(sensep),
				    "sense-data",
				    DATA_TYPE_UINT8_ARRAY,
				    senlen, sensep,
				    NULL);
			}
		} else {
			/*
			 * For stat_code == STATUS_GOOD, this is not a
			 * hardware error.
			 */
			if (ssc->ssc_uscsi_cmd->uscsi_status == STATUS_GOOD)
				return;

			/*
			 * Post ereport.io.scsi.cmd.disk.dev.serr if we got
			 * a stat-code but sense data is unavailable.
			 * driver-assessment will be set based on the
			 * parameter drv_assess.
			 */
			scsi_fm_ereport_post(un->un_sd,
			    uscsi_path_instance, "cmd.disk.dev.serr",
			    uscsi_ena, devid, DDI_NOSLEEP, FM_VERSION,
			    DATA_TYPE_UINT8, FM_EREPORT_VERS0,
			    "driver-assessment", DATA_TYPE_STRING,
			    drv_assess == SD_FM_DRV_FATAL ?
			    "fail" : assessment,
			    "op-code", DATA_TYPE_UINT8, op_code,
			    "cdb",
			    DATA_TYPE_UINT8_ARRAY,
			    cdblen, ssc->ssc_uscsi_cmd->uscsi_cdb,
			    "pkt-reason",
			    DATA_TYPE_UINT8, uscsi_pkt_reason,
			    "pkt-state",
			    DATA_TYPE_UINT8, uscsi_pkt_state,
			    "pkt-stats",
			    DATA_TYPE_UINT32, uscsi_pkt_statistics,
			    "stat-code",
			    DATA_TYPE_UINT8,
			    ssc->ssc_uscsi_cmd->uscsi_status,
			    NULL);
		}
	}
}
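/*
 * Standalone sketch of the ereport class selection implemented by
 * sd_ssc_ereport_post() above; names are local to the sketch. It mirrors
 * the dispatch order: recovered commands first, then invalid/undecodable
 * data, then transport errors, then sense-data classification, and
 * finally status-only errors.
 */
#include <stddef.h>

static const char *
sketch_ereport_class(int recovered, int invalid_data, int tran_err,
    int senlen, unsigned char sense_key, int status_good)
{
	if (recovered)
		return ("cmd.disk.recovered");
	if (invalid_data)
		return ("cmd.disk.dev.uderr");
	if (tran_err)
		return ("cmd.disk.tran");
	if (senlen > 0)
		return (sense_key == 0x3 ?
		    "cmd.disk.dev.rqs.merr" : "cmd.disk.dev.rqs.derr");
	if (status_good)
		return (NULL);		/* nothing to post */
	return ("cmd.disk.dev.serr");
}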
/*
 * Function: sd_ssc_extract_info
 *
 * Description: Extract information available to help generate an ereport.
 *
 * Context: Kernel thread or interrupt context.
 */
static void
sd_ssc_extract_info(sd_ssc_t *ssc, struct sd_lun *un, struct scsi_pkt *pktp,
    struct buf *bp, struct sd_xbuf *xp)
{
	size_t senlen = 0;
	union scsi_cdb *cdbp;
	int path_instance;
	/*
	 * Need the scsi_cdb_size array to determine the cdb length.
	 */
	extern uchar_t scsi_cdb_size[];

	ASSERT(un != NULL);
	ASSERT(pktp != NULL);
	ASSERT(bp != NULL);
	ASSERT(xp != NULL);
	ASSERT(ssc != NULL);
	ASSERT(mutex_owned(SD_MUTEX(un)));

	/*
	 * Transfer the cdb buffer pointer here.
	 */
	cdbp = (union scsi_cdb *)pktp->pkt_cdbp;

	ssc->ssc_uscsi_cmd->uscsi_cdblen = scsi_cdb_size[GETGROUP(cdbp)];
	ssc->ssc_uscsi_cmd->uscsi_cdb = (caddr_t)cdbp;

	/*
	 * Transfer the sense data buffer pointer if sense data is
	 * available; calculate the sense data length first.
	 */
	if ((xp->xb_sense_state & STATE_XARQ_DONE) ||
	    (xp->xb_sense_state & STATE_ARQ_DONE)) {
		/*
		 * We enter here for the ARQ case.
		 */
		if (xp->xb_sense_state & STATE_XARQ_DONE) {
			senlen = MAX_SENSE_LENGTH - xp->xb_sense_resid;
		} else {
			senlen = SENSE_LENGTH;
		}
	} else {
		/*
		 * We enter this branch for the non-ARQ case.
		 */
		if (SD_GET_PKT_STATUS(pktp) == STATUS_CHECK &&
		    (xp->xb_sense_state & STATE_XFERRED_DATA)) {
			senlen = SENSE_LENGTH - xp->xb_sense_resid;
		}
	}

	ssc->ssc_uscsi_cmd->uscsi_rqlen = (senlen & 0xff);
	ssc->ssc_uscsi_cmd->uscsi_rqresid = 0;
	ssc->ssc_uscsi_cmd->uscsi_rqbuf = (caddr_t)xp->xb_sense_data;

	ssc->ssc_uscsi_cmd->uscsi_status = ((*(pktp)->pkt_scbp) & STATUS_MASK);

	/*
	 * Only transfer path_instance when the scsi_pkt was properly
	 * allocated.
	 */
	path_instance = pktp->pkt_path_instance;
	if (scsi_pkt_allocated_correctly(pktp) && path_instance)
		ssc->ssc_uscsi_cmd->uscsi_path_instance = path_instance;
	else
		ssc->ssc_uscsi_cmd->uscsi_path_instance = 0;

	/*
	 * Copy in the other fields we may need when posting an ereport.
	 */
	ssc->ssc_uscsi_info->ui_pkt_reason = pktp->pkt_reason;
	ssc->ssc_uscsi_info->ui_pkt_state = pktp->pkt_state;
	ssc->ssc_uscsi_info->ui_pkt_statistics = pktp->pkt_statistics;
	ssc->ssc_uscsi_info->ui_lba = (uint64_t)SD_GET_BLKNO(bp);

	/*
	 * For a command that completed successfully (CMD_CMPLT,
	 * STATUS_GOOD, no sense data), we do not create an ena, in case
	 * a successful command would be recognized as recovered.
	 */
	if ((pktp->pkt_reason == CMD_CMPLT) &&
	    (ssc->ssc_uscsi_cmd->uscsi_status == STATUS_GOOD) &&
	    (senlen == 0)) {
		return;
	}

	/*
	 * To associate the ereports of a single command execution flow,
	 * we need a shared ena for that specific command.
	 */
	if (xp->xb_ena == 0)
		xp->xb_ena = fm_ena_generate(0, FM_ENA_FMT1);
	ssc->ssc_uscsi_info->ui_ena = xp->xb_ena;
}
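/*
 * Standalone sketch of the sense-length selection in
 * sd_ssc_extract_info() above. The flag values and length constants here
 * are stand-ins local to the sketch, not the kernel's definitions: an
 * extended ARQ transfer yields up to the maximum sense length minus the
 * residual, a normal ARQ yields the fixed sense length, and the non-ARQ
 * path only counts sense bytes when a CHECK CONDITION actually
 * transferred data.
 */
#include <stddef.h>

#define	SK_ARQ_DONE	0x1
#define	SK_XARQ_DONE	0x2
#define	SK_XFERRED	0x4
#define	SK_SENSE_LEN	20	/* fixed sense length (stand-in) */
#define	SK_MAX_SENSE	252	/* extended sense length (stand-in) */

static size_t
sketch_sense_length(int state, size_t resid, int status_check)
{
	if (state & SK_XARQ_DONE)
		return (SK_MAX_SENSE - resid);	/* extended ARQ */
	if (state & SK_ARQ_DONE)
		return (SK_SENSE_LEN);		/* normal ARQ */
	if (status_check && (state & SK_XFERRED))
		return (SK_SENSE_LEN - resid);	/* manual request sense */
	return (0);
}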