/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

/*
 * SCSI disk target driver.
 */
#include <sys/scsi/scsi.h>
#include <sys/dkbad.h>
#include <sys/dklabel.h>
#include <sys/dkio.h>
#include <sys/fdio.h>
#include <sys/cdio.h>
#include <sys/mhd.h>
#include <sys/vtoc.h>
#include <sys/dktp/fdisk.h>
#include <sys/kstat.h>
#include <sys/vtrace.h>
#include <sys/note.h>
#include <sys/thread.h>
#include <sys/proc.h>
#include <sys/efi_partition.h>
#include <sys/var.h>
#include <sys/aio_req.h>

#ifdef __lock_lint
#define _LP64
#define __amd64
#endif

#if (defined(__fibre))
/* Note: is there a leadville version of the following? */
#include <sys/fc4/fcal_linkapp.h>
#endif
#include <sys/taskq.h>
#include <sys/uuid.h>
#include <sys/byteorder.h>
#include <sys/sdt.h>

#include "sd_xbuf.h"

#include <sys/scsi/targets/sddef.h>
#include <sys/cmlb.h>
#include <sys/sysevent/eventdefs.h>
#include <sys/sysevent/dev.h>

#include <sys/fm/protocol.h>

/*
 * Loadable module info.
 */
#if (defined(__fibre))
#define SD_MODULE_NAME  "SCSI SSA/FCAL Disk Driver"
char _depends_on[] = "misc/scsi misc/cmlb drv/fcp";
#else /* !__fibre */
#define SD_MODULE_NAME  "SCSI Disk Driver"
char _depends_on[] = "misc/scsi misc/cmlb";
#endif /* !__fibre */

/*
 * Define the interconnect type, to allow the driver to distinguish
 * between parallel SCSI (sd) and fibre channel (ssd) behaviors.
 *
 * This is really for backward compatibility. In the future, the driver
 * should actually check the "interconnect-type" property as reported by
 * the HBA; however at present this property is not defined by all HBAs,
 * so we will use this #define (1) to permit the driver to run in
 * backward-compatibility mode; and (2) to print a notification message
 * if an FC HBA does not support the "interconnect-type" property.  The
 * behavior of the driver will be to assume parallel SCSI behaviors unless
 * the "interconnect-type" property is defined by the HBA **AND** has a
 * value of either INTERCONNECT_FIBRE, INTERCONNECT_SSA, or
 * INTERCONNECT_FABRIC, in which case the driver will assume Fibre
 * Channel behaviors (as per the old ssd).  (Note that the
 * INTERCONNECT_1394 and INTERCONNECT_USB types are not supported and
 * will result in the driver assuming parallel SCSI behaviors.)
 *
 * (see common/sys/scsi/impl/services.h)
 *
 * Note: For ssd semantics, don't use INTERCONNECT_FABRIC as the default
 * since some FC HBAs may already support that, and there is some code in
 * the driver that already looks for it.  Using INTERCONNECT_FABRIC as the
 * default would confuse that code, and besides things should work fine
 * anyways if the FC HBA already reports INTERCONNECT_FABRIC for the
 * "interconnect-type" property.
 */
#if (defined(__fibre))
#define SD_DEFAULT_INTERCONNECT_TYPE    SD_INTERCONNECT_FIBRE
#else
#define SD_DEFAULT_INTERCONNECT_TYPE    SD_INTERCONNECT_PARALLEL
#endif
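/*
 * Illustrative sketch (not part of the driver build): one way the policy
 * described above could be rendered in code.  The SD_INTERCONNECT_* values
 * are defined later in this file; the INTERCONNECT_* values come from
 * common/sys/scsi/impl/services.h.  The real check lives in the driver's
 * attach path; this is only a simplified reading of the comment above.
 */
#if 0
static int
sd_example_get_interconnect_type(struct sd_lun *un)
{
	switch (scsi_ifgetcap(SD_ADDRESS(un), "interconnect-type", 1)) {
	case INTERCONNECT_SSA:
		return (SD_INTERCONNECT_SSA);		/* ssd semantics */
	case INTERCONNECT_FIBRE:
		return (SD_INTERCONNECT_FIBRE);		/* ssd semantics */
	case INTERCONNECT_FABRIC:
		return (SD_INTERCONNECT_FABRIC);	/* ssd semantics */
	default:
		/*
		 * Property not reported (e.g. older HBAs) or an
		 * unsupported type such as 1394/USB: fall back to the
		 * compile-time default (parallel SCSI for sd, fibre
		 * for ssd backward-compatibility mode).
		 */
		return (SD_DEFAULT_INTERCONNECT_TYPE);
	}
}
#endif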
/*
 * The name of the driver, established from the module name in _init.
 */
static char *sd_label = NULL;

/*
 * Driver name is unfortunately prefixed on some driver.conf properties.
 */
#if (defined(__fibre))
#define sd_max_xfer_size        ssd_max_xfer_size
#define sd_config_list          ssd_config_list
static char *sd_max_xfer_size = "ssd_max_xfer_size";
static char *sd_config_list = "ssd-config-list";
#else
static char *sd_max_xfer_size = "sd_max_xfer_size";
static char *sd_config_list = "sd-config-list";
#endif

/*
 * Driver global variables
 */

#if (defined(__fibre))
/*
 * These #defines are to avoid namespace collisions that occur because this
 * code is currently used to compile two separate driver modules: sd and ssd.
 * All global variables need to be treated this way (even if declared static)
 * in order to allow the debugger to resolve the names properly.
 * It is anticipated that in the near future the ssd module will be obsoleted,
 * at which time this namespace issue should go away.
 */
#define sd_state                        ssd_state
#define sd_io_time                      ssd_io_time
#define sd_failfast_enable              ssd_failfast_enable
#define sd_ua_retry_count               ssd_ua_retry_count
#define sd_report_pfa                   ssd_report_pfa
#define sd_max_throttle                 ssd_max_throttle
#define sd_min_throttle                 ssd_min_throttle
#define sd_rot_delay                    ssd_rot_delay

#define sd_retry_on_reservation_conflict        \
					ssd_retry_on_reservation_conflict
#define sd_reinstate_resv_delay         ssd_reinstate_resv_delay
#define sd_resv_conflict_name           ssd_resv_conflict_name

#define sd_component_mask               ssd_component_mask
#define sd_level_mask                   ssd_level_mask
#define sd_debug_un                     ssd_debug_un
#define sd_error_level                  ssd_error_level

#define sd_xbuf_active_limit            ssd_xbuf_active_limit
#define sd_xbuf_reserve_limit           ssd_xbuf_reserve_limit

#define sd_tr                           ssd_tr
#define sd_reset_throttle_timeout       ssd_reset_throttle_timeout
#define sd_qfull_throttle_timeout       ssd_qfull_throttle_timeout
#define sd_qfull_throttle_enable        ssd_qfull_throttle_enable
#define sd_check_media_time             ssd_check_media_time
#define sd_wait_cmds_complete           ssd_wait_cmds_complete
#define sd_label_mutex                  ssd_label_mutex
#define sd_detach_mutex                 ssd_detach_mutex
#define sd_log_buf                      ssd_log_buf
#define sd_log_mutex                    ssd_log_mutex

#define sd_disk_table                   ssd_disk_table
#define sd_disk_table_size              ssd_disk_table_size
#define sd_sense_mutex                  ssd_sense_mutex
#define sd_cdbtab                       ssd_cdbtab

#define sd_cb_ops                       ssd_cb_ops
#define sd_ops                          ssd_ops
#define sd_additional_codes             ssd_additional_codes
#define sd_tgops                        ssd_tgops

#define sd_minor_data                   ssd_minor_data
#define sd_minor_data_efi               ssd_minor_data_efi

#define sd_tq                           ssd_tq
#define sd_wmr_tq                       ssd_wmr_tq
#define sd_taskq_name                   ssd_taskq_name
#define sd_wmr_taskq_name               ssd_wmr_taskq_name
#define sd_taskq_minalloc               ssd_taskq_minalloc
#define sd_taskq_maxalloc               ssd_taskq_maxalloc

#define sd_dump_format_string           ssd_dump_format_string

#define sd_iostart_chain                ssd_iostart_chain
#define sd_iodone_chain                 ssd_iodone_chain

#define sd_pm_idletime                  ssd_pm_idletime

#define sd_force_pm_supported           ssd_force_pm_supported

#define sd_dtype_optical_bind           ssd_dtype_optical_bind

#define sd_ssc_init                     ssd_ssc_init
#define sd_ssc_send                     ssd_ssc_send
#define sd_ssc_fini                     ssd_ssc_fini
#define sd_ssc_assessment               ssd_ssc_assessment
#define sd_ssc_post                     ssd_ssc_post
#define sd_ssc_print                    ssd_ssc_print
#define sd_ssc_ereport_post             ssd_ssc_ereport_post
#define sd_ssc_set_info                 ssd_ssc_set_info
#define sd_ssc_extract_info             ssd_ssc_extract_info

#endif

#ifdef SDDEBUG
int sd_force_pm_supported = 0;
#endif /* SDDEBUG */

void *sd_state = NULL;
int sd_io_time = SD_IO_TIME;
int sd_failfast_enable = 1;
int sd_ua_retry_count = SD_UA_RETRY_COUNT;
int sd_report_pfa = 1;
int sd_max_throttle = SD_MAX_THROTTLE;
int sd_min_throttle = SD_MIN_THROTTLE;
int sd_rot_delay = 4; /* Default 4ms Rotation delay */
int sd_qfull_throttle_enable = TRUE;

int sd_retry_on_reservation_conflict = 1;
int sd_reinstate_resv_delay = SD_REINSTATE_RESV_DELAY;
_NOTE(SCHEME_PROTECTS_DATA("safe sharing", sd_reinstate_resv_delay))

static int sd_dtype_optical_bind = -1;

/* Note: the following is not a bug, it really is "sd_" and not "ssd_" */
static char *sd_resv_conflict_name = "sd_retry_on_reservation_conflict";

/*
 * Global data for debug logging. To enable debug printing, sd_component_mask
 * and sd_level_mask should be set to the desired bit patterns as outlined in
 * sddef.h.
 */
uint_t sd_component_mask = 0x0;
uint_t sd_level_mask = 0x0;
struct sd_lun *sd_debug_un = NULL;
uint_t sd_error_level = SCSI_ERR_RETRYABLE;
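/*
 * For example (illustrative, not from the original source): the masks can
 * be set persistently from /etc/system, taking effect at the next boot:
 *
 *	set sd:sd_component_mask = 0xffffffff
 *	set sd:sd_level_mask = 0xffffffff
 *
 * (use the ssd: module prefix for the fibre channel variant of the
 * driver), or patched at runtime with a kernel debugger.  0xffffffff
 * simply enables every component and level bit; see sddef.h for the
 * individual bit definitions.
 */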
/* Note: these may go away in the future... */
static uint32_t sd_xbuf_active_limit = 512;
static uint32_t sd_xbuf_reserve_limit = 16;

static struct sd_resv_reclaim_request sd_tr = { NULL, NULL, NULL, 0, 0, 0 };

/*
 * Timer value used to reset the throttle after it has been reduced
 * (typically in response to TRAN_BUSY or STATUS_QFULL)
 */
static int sd_reset_throttle_timeout = SD_RESET_THROTTLE_TIMEOUT;
static int sd_qfull_throttle_timeout = SD_QFULL_THROTTLE_TIMEOUT;

/*
 * Interval value associated with the media change scsi watch.
 */
static int sd_check_media_time = 3000000;

/*
 * Wait value used for in progress operations during a DDI_SUSPEND
 */
static int sd_wait_cmds_complete = SD_WAIT_CMDS_COMPLETE;

/*
 * sd_label_mutex protects a static buffer used in the disk label
 * component of the driver
 */
static kmutex_t sd_label_mutex;

/*
 * sd_detach_mutex protects un_layer_count, un_detach_count, and
 * un_opens_in_progress in the sd_lun structure.
 */
static kmutex_t sd_detach_mutex;

_NOTE(MUTEX_PROTECTS_DATA(sd_detach_mutex,
    sd_lun::{un_layer_count un_detach_count un_opens_in_progress}))

/*
 * Global buffer and mutex for debug logging
 */
static char sd_log_buf[1024];
static kmutex_t sd_log_mutex;

/*
 * Structs and globals for recording attached lun information.
 * This maintains a chain. Each node in the chain represents a SCSI
 * controller.  The structure records the number of luns attached to each
 * target connected with the controller.
 * For parallel SCSI devices only.
 */
struct sd_scsi_hba_tgt_lun {
	struct sd_scsi_hba_tgt_lun *next;
	dev_info_t *pdip;
	int nlun[NTARGETS_WIDE];
};

/*
 * Flags to indicate whether the lun is attached or detached
 */
#define SD_SCSI_LUN_ATTACH      0
#define SD_SCSI_LUN_DETACH      1

static kmutex_t sd_scsi_target_lun_mutex;
static struct sd_scsi_hba_tgt_lun *sd_scsi_target_lun_head = NULL;

_NOTE(MUTEX_PROTECTS_DATA(sd_scsi_target_lun_mutex,
    sd_scsi_hba_tgt_lun::next sd_scsi_hba_tgt_lun::pdip))

_NOTE(MUTEX_PROTECTS_DATA(sd_scsi_target_lun_mutex,
    sd_scsi_target_lun_head))

/*
 * "Smart" Probe Caching structs, globals, #defines, etc.
 * For parallel SCSI and non-self-identifying devices only.
 */

/*
 * The following resources and routines are implemented to support
 * "smart" probing, which caches the scsi_probe() results in an array,
 * in order to help avoid long probe times.
 */
struct sd_scsi_probe_cache {
	struct sd_scsi_probe_cache *next;
	dev_info_t *pdip;
	int cache[NTARGETS_WIDE];
};

static kmutex_t sd_scsi_probe_cache_mutex;
static struct sd_scsi_probe_cache *sd_scsi_probe_cache_head = NULL;

/*
 * Really we only need protection on the head of the linked list, but
 * better safe than sorry.
 */
_NOTE(MUTEX_PROTECTS_DATA(sd_scsi_probe_cache_mutex,
    sd_scsi_probe_cache::next sd_scsi_probe_cache::pdip))

_NOTE(MUTEX_PROTECTS_DATA(sd_scsi_probe_cache_mutex,
    sd_scsi_probe_cache_head))
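/*
 * Illustrative sketch (not part of the driver build) of how the cache
 * above is intended to be used: look up (or create) the node for the
 * HBA's dev_info node, and reuse a prior "no response" scsi_probe()
 * result for the target, since probes of absent targets are the slow
 * case.  See sd_scsi_probe_with_cache() for the real implementation;
 * this simplified version also holds the list mutex across the probe,
 * which the production code is more careful about.
 */
#if 0
static int
sd_example_probe_with_cache(dev_info_t *pdip, int tgt,
    struct scsi_device *devp, int (*waitfn)())
{
	struct sd_scsi_probe_cache *cp;
	int rval;

	mutex_enter(&sd_scsi_probe_cache_mutex);
	for (cp = sd_scsi_probe_cache_head; cp != NULL; cp = cp->next) {
		if (cp->pdip == pdip)
			break;
	}
	if (cp == NULL) {
		cp = kmem_zalloc(sizeof (*cp), KM_SLEEP);
		cp->pdip = pdip;
		cp->next = sd_scsi_probe_cache_head;
		sd_scsi_probe_cache_head = cp;
	}
	if (cp->cache[tgt] == SCSIPROBE_NORESP) {
		/* This target did not answer before; skip the slow probe. */
		mutex_exit(&sd_scsi_probe_cache_mutex);
		return (SCSIPROBE_NORESP);
	}
	rval = scsi_probe(devp, waitfn);
	cp->cache[tgt] = rval;
	mutex_exit(&sd_scsi_probe_cache_mutex);
	return (rval);
}
#endif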
/*
 * Vendor specific data name property declarations
 */

#if defined(__fibre) || defined(__i386) || defined(__amd64)

static sd_tunables seagate_properties = {
	SEAGATE_THROTTLE_VALUE,
	0,
	0,
	0,
	0,
	0,
	0,
	0,
	0
};

static sd_tunables fujitsu_properties = {
	FUJITSU_THROTTLE_VALUE,
	0,
	0,
	0,
	0,
	0,
	0,
	0,
	0
};

static sd_tunables ibm_properties = {
	IBM_THROTTLE_VALUE,
	0,
	0,
	0,
	0,
	0,
	0,
	0,
	0
};

static sd_tunables purple_properties = {
	PURPLE_THROTTLE_VALUE,
	0,
	0,
	PURPLE_BUSY_RETRIES,
	PURPLE_RESET_RETRY_COUNT,
	PURPLE_RESERVE_RELEASE_TIME,
	0,
	0,
	0
};

static sd_tunables sve_properties = {
	SVE_THROTTLE_VALUE,
	0,
	0,
	SVE_BUSY_RETRIES,
	SVE_RESET_RETRY_COUNT,
	SVE_RESERVE_RELEASE_TIME,
	SVE_MIN_THROTTLE_VALUE,
	SVE_DISKSORT_DISABLED_FLAG,
	0
};

static sd_tunables maserati_properties = {
	0,
	0,
	0,
	0,
	0,
	0,
	0,
	MASERATI_DISKSORT_DISABLED_FLAG,
	MASERATI_LUN_RESET_ENABLED_FLAG
};

static sd_tunables pirus_properties = {
	PIRUS_THROTTLE_VALUE,
	0,
	PIRUS_NRR_COUNT,
	PIRUS_BUSY_RETRIES,
	PIRUS_RESET_RETRY_COUNT,
	0,
	PIRUS_MIN_THROTTLE_VALUE,
	PIRUS_DISKSORT_DISABLED_FLAG,
	PIRUS_LUN_RESET_ENABLED_FLAG
};

#endif

#if (defined(__sparc) && !defined(__fibre)) || \
	(defined(__i386) || defined(__amd64))

static sd_tunables elite_properties = {
	ELITE_THROTTLE_VALUE,
	0,
	0,
	0,
	0,
	0,
	0,
	0,
	0
};

static sd_tunables st31200n_properties = {
	ST31200N_THROTTLE_VALUE,
	0,
	0,
	0,
	0,
	0,
	0,
	0,
	0
};

#endif /* Fibre or not */

static sd_tunables lsi_properties_scsi = {
	LSI_THROTTLE_VALUE,
	0,
	LSI_NOTREADY_RETRIES,
	0,
	0,
	0,
	0,
	0,
	0
};

static sd_tunables symbios_properties = {
	SYMBIOS_THROTTLE_VALUE,
	0,
	SYMBIOS_NOTREADY_RETRIES,
	0,
	0,
	0,
	0,
	0,
	0
};

static sd_tunables lsi_properties = {
	0,
	0,
	LSI_NOTREADY_RETRIES,
	0,
	0,
	0,
	0,
	0,
	0
};

static sd_tunables lsi_oem_properties = {
	0,
	0,
	LSI_OEM_NOTREADY_RETRIES,
	0,
	0,
	0,
	0,
	0,
	0,
	1
};

#if (defined(SD_PROP_TST))

#define SD_TST_CTYPE_VAL        CTYPE_CDROM
#define SD_TST_THROTTLE_VAL     16
#define SD_TST_NOTREADY_VAL     12
#define SD_TST_BUSY_VAL         60
#define SD_TST_RST_RETRY_VAL    36
#define SD_TST_RSV_REL_TIME     60

static sd_tunables tst_properties = {
	SD_TST_THROTTLE_VAL,
	SD_TST_CTYPE_VAL,
	SD_TST_NOTREADY_VAL,
	SD_TST_BUSY_VAL,
	SD_TST_RST_RETRY_VAL,
	SD_TST_RSV_REL_TIME,
	0,
	0,
	0
};
#endif

/* This is similar to the ANSI toupper implementation */
#define SD_TOUPPER(C)   (((C) >= 'a' && (C) <= 'z') ? (C) - 'a' + 'A' : (C))

/*
 * Static Driver Configuration Table
 *
 * This is the table of disks which need throttle adjustment (or, perhaps
 * something else as defined by the flags at a future time.)  device_id
 * is a string consisting of concatenated vid (vendor), pid (product/model)
 * and revision strings as defined in the scsi_inquiry structure.  Offsets of
 * the parts of the string are as defined by the sizes in the scsi_inquiry
 * structure.  A device is matched against an entry only as far as the
 * entry's device_id string is defined.  Flags defines which values are to
 * be set in the driver from the properties list.
 *
 * Entries below which begin and end with a "*" are a special case.
 * These do not have a specific vendor, and the string which follows
 * can appear anywhere in the 16 byte PID portion of the inquiry data.
 *
 * Entries below which begin and end with a " " (blank) are a special
 * case. The comparison function will treat multiple consecutive blanks
 * as equivalent to a single blank. For example, this causes a
 * sd_disk_table entry of " NEC CDROM " to match a device's id string
 * of "NEC       CDROM".
 *
 * Note: The MD21 controller type has been obsoleted.
 *	 ST318202F is a legacy device.
 *	 MAM3182FC, MAM3364FC, MAM3738FC do not appear to have ever been
 *	 made with an FC connection. The entries here are a legacy.
 */
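/*
 * Illustrative sketch (not part of the driver build) of the blank-
 * compression rule described above, applied to the table that follows:
 * two id strings match if they are identical after collapsing each run
 * of consecutive blanks (including leading and trailing ones) to a
 * single separator.  The driver's real helpers (sd_blank_cmp() and
 * sd_sdconf_id_match()) additionally handle the "*"-delimited wildcard
 * entries, inquiry field offsets, and length limits; this sketch shows
 * only the blank handling.
 */
#if 0
static int
sd_example_blank_cmp(const char *a, const char *b)
{
	for (;;) {
		while (*a == ' ')
			a++;
		while (*b == ' ')
			b++;
		if (*a == '\0' || *b == '\0')
			return (*a == *b);	/* match iff both exhausted */
		while (*a != '\0' && *a != ' ' &&
		    *b != '\0' && *b != ' ') {
			if (*a != *b)
				return (0);
			a++;
			b++;
		}
		/* both strings must reach a token boundary together */
		if ((*a == ' ' || *a == '\0') != (*b == ' ' || *b == '\0'))
			return (0);
	}
}
#endif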
static sd_disk_config_t sd_disk_table[] = {
#if defined(__fibre) || defined(__i386) || defined(__amd64)
	{ "SEAGATE ST34371FC", SD_CONF_BSET_THROTTLE, &seagate_properties },
	{ "SEAGATE ST19171FC", SD_CONF_BSET_THROTTLE, &seagate_properties },
	{ "SEAGATE ST39102FC", SD_CONF_BSET_THROTTLE, &seagate_properties },
	{ "SEAGATE ST39103FC", SD_CONF_BSET_THROTTLE, &seagate_properties },
	{ "SEAGATE ST118273F", SD_CONF_BSET_THROTTLE, &seagate_properties },
	{ "SEAGATE ST318202F", SD_CONF_BSET_THROTTLE, &seagate_properties },
	{ "SEAGATE ST318203F", SD_CONF_BSET_THROTTLE, &seagate_properties },
	{ "SEAGATE ST136403F", SD_CONF_BSET_THROTTLE, &seagate_properties },
	{ "SEAGATE ST318304F", SD_CONF_BSET_THROTTLE, &seagate_properties },
	{ "SEAGATE ST336704F", SD_CONF_BSET_THROTTLE, &seagate_properties },
	{ "SEAGATE ST373405F", SD_CONF_BSET_THROTTLE, &seagate_properties },
	{ "SEAGATE ST336605F", SD_CONF_BSET_THROTTLE, &seagate_properties },
	{ "SEAGATE ST336752F", SD_CONF_BSET_THROTTLE, &seagate_properties },
	{ "SEAGATE ST318452F", SD_CONF_BSET_THROTTLE, &seagate_properties },
	{ "FUJITSU MAG3091F", SD_CONF_BSET_THROTTLE, &fujitsu_properties },
	{ "FUJITSU MAG3182F", SD_CONF_BSET_THROTTLE, &fujitsu_properties },
	{ "FUJITSU MAA3182F", SD_CONF_BSET_THROTTLE, &fujitsu_properties },
	{ "FUJITSU MAF3364F", SD_CONF_BSET_THROTTLE, &fujitsu_properties },
	{ "FUJITSU MAL3364F", SD_CONF_BSET_THROTTLE, &fujitsu_properties },
	{ "FUJITSU MAL3738F", SD_CONF_BSET_THROTTLE, &fujitsu_properties },
	{ "FUJITSU MAM3182FC", SD_CONF_BSET_THROTTLE, &fujitsu_properties },
	{ "FUJITSU MAM3364FC", SD_CONF_BSET_THROTTLE, &fujitsu_properties },
	{ "FUJITSU MAM3738FC", SD_CONF_BSET_THROTTLE, &fujitsu_properties },
	{ "IBM     DDYFT1835", SD_CONF_BSET_THROTTLE, &ibm_properties },
	{ "IBM     DDYFT3695", SD_CONF_BSET_THROTTLE, &ibm_properties },
	{ "IBM     IC35LF2D2", SD_CONF_BSET_THROTTLE, &ibm_properties },
	{ "IBM     IC35LF2PR", SD_CONF_BSET_THROTTLE, &ibm_properties },
	{ "IBM     1724-100", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "IBM     1726-2xx", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "IBM     1726-22x", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "IBM     1726-4xx", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "IBM     1726-42x", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "IBM     1726-3xx", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "IBM     3526", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "IBM     3542", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "IBM     3552", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "IBM     1722", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "IBM     1742", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "IBM     1815", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "IBM     FAStT", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "IBM     1814", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "IBM     1814-200", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "IBM     1818", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "DELL    MD3000", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "DELL    MD3000i", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "LSI     INF", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "ENGENIO INF", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "SGI     TP", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "SGI     IS", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "*CSM100_*", SD_CONF_BSET_NRR_COUNT |
	    SD_CONF_BSET_CACHE_IS_NV, &lsi_oem_properties },
	{ "*CSM200_*", SD_CONF_BSET_NRR_COUNT |
	    SD_CONF_BSET_CACHE_IS_NV, &lsi_oem_properties },
	{ "Fujitsu SX300", SD_CONF_BSET_THROTTLE, &lsi_oem_properties },
	{ "LSI", SD_CONF_BSET_NRR_COUNT, &lsi_properties },
	{ "SUN     T3", SD_CONF_BSET_THROTTLE |
	    SD_CONF_BSET_BSY_RETRY_COUNT |
	    SD_CONF_BSET_RST_RETRIES |
	    SD_CONF_BSET_RSV_REL_TIME,
	    &purple_properties },
	{ "SUN     SESS01", SD_CONF_BSET_THROTTLE |
	    SD_CONF_BSET_BSY_RETRY_COUNT |
	    SD_CONF_BSET_RST_RETRIES |
	    SD_CONF_BSET_RSV_REL_TIME |
	    SD_CONF_BSET_MIN_THROTTLE |
	    SD_CONF_BSET_DISKSORT_DISABLED,
	    &sve_properties },
	{ "SUN     T4", SD_CONF_BSET_THROTTLE |
	    SD_CONF_BSET_BSY_RETRY_COUNT |
	    SD_CONF_BSET_RST_RETRIES |
	    SD_CONF_BSET_RSV_REL_TIME,
	    &purple_properties },
	{ "SUN     SVE01", SD_CONF_BSET_DISKSORT_DISABLED |
	    SD_CONF_BSET_LUN_RESET_ENABLED,
	    &maserati_properties },
	{ "SUN     SE6920", SD_CONF_BSET_THROTTLE |
	    SD_CONF_BSET_NRR_COUNT |
	    SD_CONF_BSET_BSY_RETRY_COUNT |
	    SD_CONF_BSET_RST_RETRIES |
	    SD_CONF_BSET_MIN_THROTTLE |
	    SD_CONF_BSET_DISKSORT_DISABLED |
	    SD_CONF_BSET_LUN_RESET_ENABLED,
	    &pirus_properties },
	{ "SUN     SE6940", SD_CONF_BSET_THROTTLE |
	    SD_CONF_BSET_NRR_COUNT |
	    SD_CONF_BSET_BSY_RETRY_COUNT |
	    SD_CONF_BSET_RST_RETRIES |
	    SD_CONF_BSET_MIN_THROTTLE |
	    SD_CONF_BSET_DISKSORT_DISABLED |
	    SD_CONF_BSET_LUN_RESET_ENABLED,
	    &pirus_properties },
	{ "SUN     StorageTek 6920", SD_CONF_BSET_THROTTLE |
	    SD_CONF_BSET_NRR_COUNT |
	    SD_CONF_BSET_BSY_RETRY_COUNT |
	    SD_CONF_BSET_RST_RETRIES |
	    SD_CONF_BSET_MIN_THROTTLE |
	    SD_CONF_BSET_DISKSORT_DISABLED |
	    SD_CONF_BSET_LUN_RESET_ENABLED,
	    &pirus_properties },
	{ "SUN     StorageTek 6940", SD_CONF_BSET_THROTTLE |
	    SD_CONF_BSET_NRR_COUNT |
	    SD_CONF_BSET_BSY_RETRY_COUNT |
	    SD_CONF_BSET_RST_RETRIES |
	    SD_CONF_BSET_MIN_THROTTLE |
	    SD_CONF_BSET_DISKSORT_DISABLED |
	    SD_CONF_BSET_LUN_RESET_ENABLED,
	    &pirus_properties },
	{ "SUN     PSX1000", SD_CONF_BSET_THROTTLE |
	    SD_CONF_BSET_NRR_COUNT |
	    SD_CONF_BSET_BSY_RETRY_COUNT |
	    SD_CONF_BSET_RST_RETRIES |
	    SD_CONF_BSET_MIN_THROTTLE |
	    SD_CONF_BSET_DISKSORT_DISABLED |
	    SD_CONF_BSET_LUN_RESET_ENABLED,
	    &pirus_properties },
	{ "SUN     SE6330", SD_CONF_BSET_THROTTLE |
	    SD_CONF_BSET_NRR_COUNT |
	    SD_CONF_BSET_BSY_RETRY_COUNT |
	    SD_CONF_BSET_RST_RETRIES |
	    SD_CONF_BSET_MIN_THROTTLE |
	    SD_CONF_BSET_DISKSORT_DISABLED |
	    SD_CONF_BSET_LUN_RESET_ENABLED,
	    &pirus_properties },
	{ "SUN     STK6580_6780", SD_CONF_BSET_NRR_COUNT,
	    &lsi_oem_properties },
	{ "STK     OPENstorage", SD_CONF_BSET_NRR_COUNT,
	    &lsi_oem_properties },
	{ "STK     OpenStorage", SD_CONF_BSET_NRR_COUNT,
	    &lsi_oem_properties },
	{ "STK     BladeCtlr", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "STK     FLEXLINE", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "SYMBIOS", SD_CONF_BSET_NRR_COUNT, &symbios_properties },
#endif /* fibre or NON-sparc platforms */
#if ((defined(__sparc) && !defined(__fibre)) || \
	(defined(__i386) || defined(__amd64)))
	{ "SEAGATE ST42400N", SD_CONF_BSET_THROTTLE, &elite_properties },
	{ "SEAGATE ST31200N", SD_CONF_BSET_THROTTLE, &st31200n_properties },
	{ "SEAGATE ST41600N", SD_CONF_BSET_TUR_CHECK, NULL },
	{ "CONNER  CP30540", SD_CONF_BSET_NOCACHE, NULL },
	{ "*SUN0104*", SD_CONF_BSET_FAB_DEVID, NULL },
	{ "*SUN0207*", SD_CONF_BSET_FAB_DEVID, NULL },
	{ "*SUN0327*", SD_CONF_BSET_FAB_DEVID, NULL },
	{ "*SUN0340*", SD_CONF_BSET_FAB_DEVID, NULL },
	{ "*SUN0424*", SD_CONF_BSET_FAB_DEVID, NULL },
	{ "*SUN0669*", SD_CONF_BSET_FAB_DEVID, NULL },
	{ "*SUN1.0G*", SD_CONF_BSET_FAB_DEVID, NULL },
	{ "SYMBIOS INF-01-00 ", SD_CONF_BSET_FAB_DEVID, NULL },
	{ "SYMBIOS", SD_CONF_BSET_THROTTLE | SD_CONF_BSET_NRR_COUNT,
	    &symbios_properties },
	{ "LSI", SD_CONF_BSET_THROTTLE | SD_CONF_BSET_NRR_COUNT,
	    &lsi_properties_scsi },
#if defined(__i386) || defined(__amd64)
	{ " NEC CD-ROM DRIVE:260 ", (SD_CONF_BSET_PLAYMSF_BCD
	    | SD_CONF_BSET_READSUB_BCD
	    | SD_CONF_BSET_READ_TOC_ADDR_BCD
	    | SD_CONF_BSET_NO_READ_HEADER
	    | SD_CONF_BSET_READ_CD_XD4), NULL },

	{ " NEC CD-ROM DRIVE:270 ", (SD_CONF_BSET_PLAYMSF_BCD
	    | SD_CONF_BSET_READSUB_BCD
	    | SD_CONF_BSET_READ_TOC_ADDR_BCD
	    | SD_CONF_BSET_NO_READ_HEADER
	    | SD_CONF_BSET_READ_CD_XD4), NULL },
#endif /* __i386 || __amd64 */
#endif /* sparc NON-fibre or NON-sparc platforms */

#if (defined(SD_PROP_TST))
	{ "VENDOR  PRODUCT ", (SD_CONF_BSET_THROTTLE
	    | SD_CONF_BSET_CTYPE
	    | SD_CONF_BSET_NRR_COUNT
	    | SD_CONF_BSET_FAB_DEVID
	    | SD_CONF_BSET_NOCACHE
	    | SD_CONF_BSET_BSY_RETRY_COUNT
	    | SD_CONF_BSET_PLAYMSF_BCD
	    | SD_CONF_BSET_READSUB_BCD
	    | SD_CONF_BSET_READ_TOC_TRK_BCD
	    | SD_CONF_BSET_READ_TOC_ADDR_BCD
	    | SD_CONF_BSET_NO_READ_HEADER
	    | SD_CONF_BSET_READ_CD_XD4
	    | SD_CONF_BSET_RST_RETRIES
	    | SD_CONF_BSET_RSV_REL_TIME
	    | SD_CONF_BSET_TUR_CHECK), &tst_properties },
#endif
};

static const int sd_disk_table_size =
	sizeof (sd_disk_table) / sizeof (sd_disk_config_t);

#define SD_INTERCONNECT_PARALLEL        0
#define SD_INTERCONNECT_FABRIC          1
#define SD_INTERCONNECT_FIBRE           2
#define SD_INTERCONNECT_SSA             3
#define SD_INTERCONNECT_SATA            4
#define SD_INTERCONNECT_SAS             5

#define SD_IS_PARALLEL_SCSI(un)         \
	((un)->un_interconnect_type == SD_INTERCONNECT_PARALLEL)
#define SD_IS_SERIAL(un)                \
	(((un)->un_interconnect_type == SD_INTERCONNECT_SATA) || \
	((un)->un_interconnect_type == SD_INTERCONNECT_SAS))

/*
 * Definitions used by device id registration routines
 */
#define VPD_HEAD_OFFSET         3       /* size of head for vpd page */
#define VPD_PAGE_LENGTH         3       /* offset for page length data */
#define VPD_MODE_PAGE           1       /* offset into vpd page for "page code" */
static kmutex_t sd_sense_mutex = {0};

/*
 * Macros for updates of the driver state
 */
#define New_state(un, s)        \
	(un)->un_last_state = (un)->un_state, (un)->un_state = (s)
#define Restore_state(un)       \
	{ uchar_t tmp = (un)->un_last_state; New_state((un), tmp); }
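/*
 * sd_cdbtab below records, for each SCSI CDB group the driver uses, the
 * limits that drive CDB selection for a transfer (columns per struct
 * sd_cdbinfo: CDB size, group code for the opcode, highest addressable
 * LBA, and maximum transfer length in blocks).  For example, per the
 * standard SCSI command formats, a 6-byte Group 0 READ/WRITE carries a
 * 21-bit LBA and an 8-bit count, while a 16-byte Group 4 CDB carries a
 * 64-bit LBA and a 32-bit count.
 */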
static struct sd_cdbinfo sd_cdbtab[] = {
	{ CDB_GROUP0, 0x00,        0x1FFFFF,           0xFF,       },
	{ CDB_GROUP1, SCMD_GROUP1, 0xFFFFFFFF,         0xFFFF,     },
	{ CDB_GROUP5, SCMD_GROUP5, 0xFFFFFFFF,         0xFFFFFFFF, },
	{ CDB_GROUP4, SCMD_GROUP4, 0xFFFFFFFFFFFFFFFF, 0xFFFFFFFF, },
};

/*
 * Specifies the number of seconds that must have elapsed since the last
 * command completed for a device to be declared idle to the PM framework.
 */
static int sd_pm_idletime = 1;

/*
 * Internal function prototypes
 */

#if (defined(__fibre))
/*
 * These #defines are to avoid namespace collisions that occur because this
 * code is currently used to compile two separate driver modules: sd and ssd.
 * All function names need to be treated this way (even if declared static)
 * in order to allow the debugger to resolve the names properly.
 * It is anticipated that in the near future the ssd module will be obsoleted,
 * at which time this ugliness should go away.
 */
#define sd_log_trace                    ssd_log_trace
#define sd_log_info                     ssd_log_info
#define sd_log_err                      ssd_log_err
#define sdprobe                         ssdprobe
#define sdinfo                          ssdinfo
#define sd_prop_op                      ssd_prop_op
#define sd_scsi_probe_cache_init        ssd_scsi_probe_cache_init
#define sd_scsi_probe_cache_fini        ssd_scsi_probe_cache_fini
#define sd_scsi_clear_probe_cache       ssd_scsi_clear_probe_cache
#define sd_scsi_probe_with_cache        ssd_scsi_probe_with_cache
#define sd_scsi_target_lun_init         ssd_scsi_target_lun_init
#define sd_scsi_target_lun_fini         ssd_scsi_target_lun_fini
#define sd_scsi_get_target_lun_count    ssd_scsi_get_target_lun_count
#define sd_scsi_update_lun_on_target    ssd_scsi_update_lun_on_target
#define sd_spin_up_unit                 ssd_spin_up_unit
#define sd_enable_descr_sense           ssd_enable_descr_sense
#define sd_reenable_dsense_task         ssd_reenable_dsense_task
#define sd_set_mmc_caps                 ssd_set_mmc_caps
#define sd_read_unit_properties         ssd_read_unit_properties
#define sd_process_sdconf_file          ssd_process_sdconf_file
#define sd_process_sdconf_table         ssd_process_sdconf_table
#define sd_sdconf_id_match              ssd_sdconf_id_match
#define sd_blank_cmp                    ssd_blank_cmp
#define sd_chk_vers1_data               ssd_chk_vers1_data
#define sd_set_vers1_properties         ssd_set_vers1_properties

#define sd_get_physical_geometry        ssd_get_physical_geometry
#define sd_get_virtual_geometry         ssd_get_virtual_geometry
#define sd_update_block_info            ssd_update_block_info
#define sd_register_devid               ssd_register_devid
#define sd_get_devid                    ssd_get_devid
#define sd_create_devid                 ssd_create_devid
#define sd_write_deviceid               ssd_write_deviceid
#define sd_check_vpd_page_support       ssd_check_vpd_page_support
#define sd_setup_pm                     ssd_setup_pm
#define sd_create_pm_components         ssd_create_pm_components
#define sd_ddi_suspend                  ssd_ddi_suspend
#define sd_ddi_pm_suspend               ssd_ddi_pm_suspend
#define sd_ddi_resume                   ssd_ddi_resume
#define sd_ddi_pm_resume                ssd_ddi_pm_resume
#define sdpower                         ssdpower
#define sdattach                        ssdattach
#define sddetach                        ssddetach
#define sd_unit_attach                  ssd_unit_attach
#define sd_unit_detach                  ssd_unit_detach
#define sd_set_unit_attributes          ssd_set_unit_attributes
#define sd_create_errstats              ssd_create_errstats
#define sd_set_errstats                 ssd_set_errstats
#define sd_set_pstats                   ssd_set_pstats
#define sddump                          ssddump
#define sd_scsi_poll                    ssd_scsi_poll
#define sd_send_polled_RQS              ssd_send_polled_RQS
#define sd_ddi_scsi_poll                ssd_ddi_scsi_poll
#define sd_init_event_callbacks         ssd_init_event_callbacks
#define sd_event_callback               ssd_event_callback
#define sd_cache_control                ssd_cache_control
#define sd_get_write_cache_enabled      ssd_get_write_cache_enabled
#define sd_get_nv_sup                   ssd_get_nv_sup
#define sd_make_device                  ssd_make_device
#define sdopen                          ssdopen
#define sdclose                         ssdclose
#define sd_ready_and_valid              ssd_ready_and_valid
#define sdmin                           ssdmin
#define sdread                          ssdread
#define sdwrite                         ssdwrite
#define sdaread                         ssdaread
#define sdawrite                        ssdawrite
#define sdstrategy                      ssdstrategy
#define sdioctl                         ssdioctl
#define sd_mapblockaddr_iostart         ssd_mapblockaddr_iostart
#define sd_mapblocksize_iostart         ssd_mapblocksize_iostart
#define sd_checksum_iostart             ssd_checksum_iostart
#define sd_checksum_uscsi_iostart       ssd_checksum_uscsi_iostart
#define sd_pm_iostart                   ssd_pm_iostart
#define sd_core_iostart                 ssd_core_iostart
#define sd_mapblockaddr_iodone          ssd_mapblockaddr_iodone
#define sd_mapblocksize_iodone          ssd_mapblocksize_iodone
#define sd_checksum_iodone              ssd_checksum_iodone
#define sd_checksum_uscsi_iodone        ssd_checksum_uscsi_iodone
#define sd_pm_iodone                    ssd_pm_iodone
#define sd_initpkt_for_buf              ssd_initpkt_for_buf
#define sd_destroypkt_for_buf           ssd_destroypkt_for_buf
#define sd_setup_rw_pkt                 ssd_setup_rw_pkt
#define sd_setup_next_rw_pkt            ssd_setup_next_rw_pkt
#define sd_buf_iodone                   ssd_buf_iodone
#define sd_uscsi_strategy               ssd_uscsi_strategy
#define sd_initpkt_for_uscsi            ssd_initpkt_for_uscsi
#define sd_destroypkt_for_uscsi         ssd_destroypkt_for_uscsi
#define sd_uscsi_iodone                 ssd_uscsi_iodone
#define sd_xbuf_strategy                ssd_xbuf_strategy
#define sd_xbuf_init                    ssd_xbuf_init
#define sd_pm_entry                     ssd_pm_entry
#define sd_pm_exit                      ssd_pm_exit

#define sd_pm_idletimeout_handler       ssd_pm_idletimeout_handler
#define sd_pm_timeout_handler           ssd_pm_timeout_handler

#define sd_add_buf_to_waitq             ssd_add_buf_to_waitq
#define sdintr                          ssdintr
#define sd_start_cmds                   ssd_start_cmds
#define sd_send_scsi_cmd                ssd_send_scsi_cmd
#define sd_bioclone_alloc               ssd_bioclone_alloc
#define sd_bioclone_free                ssd_bioclone_free
#define sd_shadow_buf_alloc             ssd_shadow_buf_alloc
#define sd_shadow_buf_free              ssd_shadow_buf_free
#define sd_print_transport_rejected_message     \
					ssd_print_transport_rejected_message
#define sd_retry_command                ssd_retry_command
#define sd_set_retry_bp                 ssd_set_retry_bp
#define sd_send_request_sense_command   ssd_send_request_sense_command
#define sd_start_retry_command          ssd_start_retry_command
#define sd_start_direct_priority_command        \
					ssd_start_direct_priority_command
#define sd_return_failed_command        ssd_return_failed_command
#define sd_return_failed_command_no_restart     \
					ssd_return_failed_command_no_restart
#define sd_return_command               ssd_return_command
#define sd_sync_with_callback           ssd_sync_with_callback
#define sdrunout                        ssdrunout
#define sd_mark_rqs_busy                ssd_mark_rqs_busy
#define sd_mark_rqs_idle                ssd_mark_rqs_idle
#define sd_reduce_throttle              ssd_reduce_throttle
#define sd_restore_throttle             ssd_restore_throttle
#define sd_print_incomplete_msg         ssd_print_incomplete_msg
#define sd_init_cdb_limits              ssd_init_cdb_limits
#define sd_pkt_status_good              ssd_pkt_status_good
#define sd_pkt_status_check_condition   ssd_pkt_status_check_condition
#define sd_pkt_status_busy              ssd_pkt_status_busy
#define sd_pkt_status_reservation_conflict      \
					ssd_pkt_status_reservation_conflict
#define sd_pkt_status_qfull             ssd_pkt_status_qfull
#define sd_handle_request_sense         ssd_handle_request_sense
#define sd_handle_auto_request_sense    ssd_handle_auto_request_sense
#define sd_print_sense_failed_msg       ssd_print_sense_failed_msg
#define sd_validate_sense_data          ssd_validate_sense_data
#define sd_decode_sense                 ssd_decode_sense
#define sd_print_sense_msg              ssd_print_sense_msg
#define sd_sense_key_no_sense           ssd_sense_key_no_sense
#define sd_sense_key_recoverable_error  ssd_sense_key_recoverable_error
#define sd_sense_key_not_ready          ssd_sense_key_not_ready
#define sd_sense_key_medium_or_hardware_error   \
					ssd_sense_key_medium_or_hardware_error
#define sd_sense_key_illegal_request    ssd_sense_key_illegal_request
#define sd_sense_key_unit_attention     ssd_sense_key_unit_attention
#define sd_sense_key_fail_command       ssd_sense_key_fail_command
#define sd_sense_key_blank_check        ssd_sense_key_blank_check
#define sd_sense_key_aborted_command    ssd_sense_key_aborted_command
#define sd_sense_key_default            ssd_sense_key_default
#define sd_print_retry_msg              ssd_print_retry_msg
#define sd_print_cmd_incomplete_msg     ssd_print_cmd_incomplete_msg
#define sd_pkt_reason_cmd_incomplete    ssd_pkt_reason_cmd_incomplete
#define sd_pkt_reason_cmd_tran_err      ssd_pkt_reason_cmd_tran_err
#define sd_pkt_reason_cmd_reset         ssd_pkt_reason_cmd_reset
#define sd_pkt_reason_cmd_aborted       ssd_pkt_reason_cmd_aborted
#define sd_pkt_reason_cmd_timeout       ssd_pkt_reason_cmd_timeout
#define sd_pkt_reason_cmd_unx_bus_free  ssd_pkt_reason_cmd_unx_bus_free
#define sd_pkt_reason_cmd_tag_reject    ssd_pkt_reason_cmd_tag_reject
#define sd_pkt_reason_default           ssd_pkt_reason_default
#define sd_reset_target                 ssd_reset_target
#define sd_start_stop_unit_callback     ssd_start_stop_unit_callback
#define sd_start_stop_unit_task         ssd_start_stop_unit_task
#define sd_taskq_create                 ssd_taskq_create
#define sd_taskq_delete                 ssd_taskq_delete
#define sd_target_change_task           ssd_target_change_task
#define sd_log_lun_expansion_event      ssd_log_lun_expansion_event
#define sd_media_change_task            ssd_media_change_task
#define sd_handle_mchange               ssd_handle_mchange
#define sd_send_scsi_DOORLOCK           ssd_send_scsi_DOORLOCK
#define sd_send_scsi_READ_CAPACITY      ssd_send_scsi_READ_CAPACITY
#define sd_send_scsi_READ_CAPACITY_16   ssd_send_scsi_READ_CAPACITY_16
#define sd_send_scsi_GET_CONFIGURATION  ssd_send_scsi_GET_CONFIGURATION
#define sd_send_scsi_feature_GET_CONFIGURATION  \
					ssd_send_scsi_feature_GET_CONFIGURATION
#define sd_send_scsi_START_STOP_UNIT    ssd_send_scsi_START_STOP_UNIT
#define sd_send_scsi_INQUIRY            ssd_send_scsi_INQUIRY
#define sd_send_scsi_TEST_UNIT_READY    ssd_send_scsi_TEST_UNIT_READY
#define sd_send_scsi_PERSISTENT_RESERVE_IN      \
					ssd_send_scsi_PERSISTENT_RESERVE_IN
#define sd_send_scsi_PERSISTENT_RESERVE_OUT     \
					ssd_send_scsi_PERSISTENT_RESERVE_OUT
#define sd_send_scsi_SYNCHRONIZE_CACHE  ssd_send_scsi_SYNCHRONIZE_CACHE
#define sd_send_scsi_SYNCHRONIZE_CACHE_biodone  \
					ssd_send_scsi_SYNCHRONIZE_CACHE_biodone
#define sd_send_scsi_MODE_SENSE         ssd_send_scsi_MODE_SENSE
#define sd_send_scsi_MODE_SELECT        ssd_send_scsi_MODE_SELECT
#define sd_send_scsi_RDWR               ssd_send_scsi_RDWR
#define sd_send_scsi_LOG_SENSE          ssd_send_scsi_LOG_SENSE
#define sd_alloc_rqs                    ssd_alloc_rqs
#define sd_free_rqs                     ssd_free_rqs
#define sd_dump_memory                  ssd_dump_memory
#define sd_get_media_info               ssd_get_media_info
#define sd_dkio_ctrl_info               ssd_dkio_ctrl_info
#define sd_nvpair_str_decode            ssd_nvpair_str_decode
#define sd_strtok_r                     ssd_strtok_r
#define sd_set_properties               ssd_set_properties
#define sd_get_tunables_from_conf       ssd_get_tunables_from_conf
#define sd_setup_next_xfer              ssd_setup_next_xfer
#define sd_dkio_get_temp                ssd_dkio_get_temp
#define sd_check_mhd                    ssd_check_mhd
#define sd_mhd_watch_cb                 ssd_mhd_watch_cb
#define sd_mhd_watch_incomplete         ssd_mhd_watch_incomplete
#define sd_sname                        ssd_sname
#define sd_mhd_resvd_recover            ssd_mhd_resvd_recover
#define sd_resv_reclaim_thread          ssd_resv_reclaim_thread
#define sd_take_ownership               ssd_take_ownership
#define sd_reserve_release              ssd_reserve_release
#define sd_rmv_resv_reclaim_req         ssd_rmv_resv_reclaim_req
#define sd_mhd_reset_notify_cb          ssd_mhd_reset_notify_cb
#define sd_persistent_reservation_in_read_keys  \
					ssd_persistent_reservation_in_read_keys
#define sd_persistent_reservation_in_read_resv  \
					ssd_persistent_reservation_in_read_resv
#define sd_mhdioc_takeown               ssd_mhdioc_takeown
#define sd_mhdioc_failfast              ssd_mhdioc_failfast
#define sd_mhdioc_release               ssd_mhdioc_release
#define sd_mhdioc_register_devid        ssd_mhdioc_register_devid
#define sd_mhdioc_inkeys                ssd_mhdioc_inkeys
#define sd_mhdioc_inresv                ssd_mhdioc_inresv
#define sr_change_blkmode               ssr_change_blkmode
#define sr_change_speed                 ssr_change_speed
#define sr_atapi_change_speed           ssr_atapi_change_speed
#define sr_pause_resume                 ssr_pause_resume
#define sr_play_msf                     ssr_play_msf
#define sr_play_trkind                  ssr_play_trkind
#define sr_read_all_subcodes            ssr_read_all_subcodes
#define sr_read_subchannel              ssr_read_subchannel
#define sr_read_tocentry                ssr_read_tocentry
#define sr_read_tochdr                  ssr_read_tochdr
#define sr_read_cdda                    ssr_read_cdda
#define sr_read_cdxa                    ssr_read_cdxa
#define sr_read_mode1                   ssr_read_mode1
#define sr_read_mode2                   ssr_read_mode2
#define sr_read_cd_mode2                ssr_read_cd_mode2
#define sr_sector_mode                  ssr_sector_mode
#define sr_eject                        ssr_eject
#define sr_ejected                      ssr_ejected
#define sr_check_wp                     ssr_check_wp
#define sd_check_media                  ssd_check_media
#define sd_media_watch_cb               ssd_media_watch_cb
#define sd_delayed_cv_broadcast         ssd_delayed_cv_broadcast
#define sr_volume_ctrl                  ssr_volume_ctrl
#define sr_read_sony_session_offset     ssr_read_sony_session_offset
#define sd_log_page_supported           ssd_log_page_supported
#define sd_check_for_writable_cd        ssd_check_for_writable_cd
#define sd_wm_cache_constructor         ssd_wm_cache_constructor
#define sd_wm_cache_destructor          ssd_wm_cache_destructor
#define sd_range_lock                   ssd_range_lock
#define sd_get_range                    ssd_get_range
#define sd_free_inlist_wmap             ssd_free_inlist_wmap
#define sd_range_unlock                 ssd_range_unlock
#define sd_read_modify_write_task       ssd_read_modify_write_task
#define sddump_do_read_of_rmw           ssddump_do_read_of_rmw

#define sd_iostart_chain                ssd_iostart_chain
#define sd_iodone_chain                 ssd_iodone_chain
#define sd_initpkt_map                  ssd_initpkt_map
#define sd_destroypkt_map               ssd_destroypkt_map
#define sd_chain_type_map               ssd_chain_type_map
#define sd_chain_index_map              ssd_chain_index_map

#define sd_failfast_flushctl            ssd_failfast_flushctl
#define sd_failfast_flushq              ssd_failfast_flushq
#define sd_failfast_flushq_callback     ssd_failfast_flushq_callback

#define sd_is_lsi                       ssd_is_lsi
#define sd_tg_rdwr                      ssd_tg_rdwr
#define sd_tg_getinfo                   ssd_tg_getinfo

#endif /* #if (defined(__fibre)) */


int _init(void);
int _fini(void);
int _info(struct modinfo *modinfop);

/*PRINTFLIKE3*/
static void sd_log_trace(uint_t comp, struct sd_lun *un, const char *fmt, ...);
/*PRINTFLIKE3*/
static void sd_log_info(uint_t comp, struct sd_lun *un, const char *fmt, ...);
/*PRINTFLIKE3*/
static void sd_log_err(uint_t comp, struct sd_lun *un, const char *fmt, ...);

static int sdprobe(dev_info_t *devi);
static int sdinfo(dev_info_t *dip, ddi_info_cmd_t infocmd, void *arg,
    void **result);
static int sd_prop_op(dev_t dev, dev_info_t *dip, ddi_prop_op_t prop_op,
    int mod_flags, char *name, caddr_t valuep, int *lengthp);

/*
 * Smart probe for parallel scsi
 */
static void sd_scsi_probe_cache_init(void);
static void sd_scsi_probe_cache_fini(void);
static void sd_scsi_clear_probe_cache(void);
static int sd_scsi_probe_with_cache(struct scsi_device *devp, int (*fn)());

/*
 * Attached luns on target for parallel scsi
 */
static void sd_scsi_target_lun_init(void);
static void sd_scsi_target_lun_fini(void);
static int sd_scsi_get_target_lun_count(dev_info_t *dip, int target);
static void sd_scsi_update_lun_on_target(dev_info_t *dip, int target,
    int flag);

static int sd_spin_up_unit(sd_ssc_t *ssc);

/*
 * Use sd_ssc_init() to establish an sd_ssc_t struct, sd_ssc_send() to
 * send an internal uscsi command, and sd_ssc_fini() to free the struct.
 */
static sd_ssc_t *sd_ssc_init(struct sd_lun *un);
static int sd_ssc_send(sd_ssc_t *ssc, struct uscsi_cmd *incmd,
    int flag, enum uio_seg dataspace, int path_flag);
static void sd_ssc_fini(sd_ssc_t *ssc);

/*
 * Use sd_ssc_assessment() to set the correct type of assessment, and
 * sd_ssc_post() to post an ereport & system log.  sd_ssc_post() calls
 * sd_ssc_print() to print the system log and sd_ssc_ereport_post() to
 * post the ereport.
 */
static void sd_ssc_assessment(sd_ssc_t *ssc,
    enum sd_type_assessment tp_assess);

static void sd_ssc_post(sd_ssc_t *ssc, enum sd_driver_assessment sd_assess);
static void sd_ssc_print(sd_ssc_t *ssc, int sd_severity);
static void sd_ssc_ereport_post(sd_ssc_t *ssc,
    enum sd_driver_assessment drv_assess);

/*
 * Use sd_ssc_set_info() to mark an un-decodable-data error and
 * sd_ssc_extract_info() to transfer information from internal
 * data structures to the sd_ssc_t.
 */
static void sd_ssc_set_info(sd_ssc_t *ssc, int ssc_flags, uint_t comp,
    const char *fmt, ...);
static void sd_ssc_extract_info(sd_ssc_t *ssc, struct sd_lun *un,
    struct scsi_pkt *pktp, struct buf *bp, struct sd_xbuf *xp);
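/*
 * Illustrative sketch (not part of the driver build) of the sd_ssc_t
 * life cycle described above: allocate with sd_ssc_init(), issue the
 * internal command(s), record an assessment, and free with
 * sd_ssc_fini().  The SD_FMT_* assessment values are assumed here from
 * sddef.h.
 */
#if 0
static int
sd_example_unit_ready(struct sd_lun *un)
{
	sd_ssc_t	*ssc;
	int		rval;

	ssc = sd_ssc_init(un);
	rval = sd_send_scsi_TEST_UNIT_READY(ssc, 0);
	if (rval == 0)
		sd_ssc_assessment(ssc, SD_FMT_STANDARD);
	else
		sd_ssc_assessment(ssc, SD_FMT_IGNORE);
	sd_ssc_fini(ssc);
	return (rval);
}
#endif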
static int sd_send_scsi_cmd(dev_t dev, struct uscsi_cmd *incmd, int flag,
    enum uio_seg dataspace, int path_flag);

#ifdef _LP64
static void sd_enable_descr_sense(sd_ssc_t *ssc);
static void sd_reenable_dsense_task(void *arg);
#endif /* _LP64 */

static void sd_set_mmc_caps(sd_ssc_t *ssc);

static void sd_read_unit_properties(struct sd_lun *un);
static int sd_process_sdconf_file(struct sd_lun *un);
static void sd_nvpair_str_decode(struct sd_lun *un, char *nvpair_str);
static char *sd_strtok_r(char *string, const char *sepset, char **lasts);
static void sd_set_properties(struct sd_lun *un, char *name, char *value);
static void sd_get_tunables_from_conf(struct sd_lun *un, int flags,
    int *data_list, sd_tunables *values);
static void sd_process_sdconf_table(struct sd_lun *un);
static int sd_sdconf_id_match(struct sd_lun *un, char *id, int idlen);
static int sd_blank_cmp(struct sd_lun *un, char *id, int idlen);
static int sd_chk_vers1_data(struct sd_lun *un, int flags, int *prop_list,
    int list_len, char *dataname_ptr);
static void sd_set_vers1_properties(struct sd_lun *un, int flags,
    sd_tunables *prop_list);

static void sd_register_devid(sd_ssc_t *ssc, dev_info_t *devi,
    int reservation_flag);
static int sd_get_devid(sd_ssc_t *ssc);
static ddi_devid_t sd_create_devid(sd_ssc_t *ssc);
static int sd_write_deviceid(sd_ssc_t *ssc);
static int sd_get_devid_page(struct sd_lun *un, uchar_t *wwn, int *len);
static int sd_check_vpd_page_support(sd_ssc_t *ssc);

static void sd_setup_pm(sd_ssc_t *ssc, dev_info_t *devi);
static void sd_create_pm_components(dev_info_t *devi, struct sd_lun *un);

static int sd_ddi_suspend(dev_info_t *devi);
static int sd_ddi_pm_suspend(struct sd_lun *un);
static int sd_ddi_resume(dev_info_t *devi);
static int sd_ddi_pm_resume(struct sd_lun *un);
static int sdpower(dev_info_t *devi, int component, int level);

static int sdattach(dev_info_t *devi, ddi_attach_cmd_t cmd);
static int sddetach(dev_info_t *devi, ddi_detach_cmd_t cmd);
static int sd_unit_attach(dev_info_t *devi);
static int sd_unit_detach(dev_info_t *devi);

static void sd_set_unit_attributes(struct sd_lun *un, dev_info_t *devi);
static void sd_create_errstats(struct sd_lun *un, int instance);
static void sd_set_errstats(struct sd_lun *un);
static void sd_set_pstats(struct sd_lun *un);

static int sddump(dev_t dev, caddr_t addr, daddr_t blkno, int nblk);
static int sd_scsi_poll(struct sd_lun *un, struct scsi_pkt *pkt);
static int sd_send_polled_RQS(struct sd_lun *un);
static int sd_ddi_scsi_poll(struct scsi_pkt *pkt);

#if (defined(__fibre))
/*
 * Event callbacks (photon)
 */
static void sd_init_event_callbacks(struct sd_lun *un);
static void sd_event_callback(dev_info_t *, ddi_eventcookie_t, void *, void *);
#endif

/*
 * Defines for sd_cache_control
 */

#define SD_CACHE_ENABLE         1
#define SD_CACHE_DISABLE        0
#define SD_CACHE_NOCHANGE       -1

static int sd_cache_control(sd_ssc_t *ssc, int rcd_flag, int wce_flag);
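/*
 * As the parameter names suggest, rcd_flag and wce_flag correspond to
 * the RCD (read cache disable) and WCE (write cache enable) bits of the
 * caching mode page, and each takes one of the SD_CACHE_* values above,
 * so one bit can be changed while the other is left alone.  For example
 * (illustrative only):
 *
 *	(void) sd_cache_control(ssc, SD_CACHE_NOCHANGE, SD_CACHE_ENABLE);
 *
 * enables the write cache without touching the read cache setting.
 */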
static int sd_get_write_cache_enabled(sd_ssc_t *ssc, int *is_enabled);
static void sd_get_nv_sup(sd_ssc_t *ssc);
static dev_t sd_make_device(dev_info_t *devi);

static void sd_update_block_info(struct sd_lun *un, uint32_t lbasize,
    uint64_t capacity);

/*
 * Driver entry point functions.
 */
static int sdopen(dev_t *dev_p, int flag, int otyp, cred_t *cred_p);
static int sdclose(dev_t dev, int flag, int otyp, cred_t *cred_p);
static int sd_ready_and_valid(sd_ssc_t *ssc, int part);

static void sdmin(struct buf *bp);
static int sdread(dev_t dev, struct uio *uio, cred_t *cred_p);
static int sdwrite(dev_t dev, struct uio *uio, cred_t *cred_p);
static int sdaread(dev_t dev, struct aio_req *aio, cred_t *cred_p);
static int sdawrite(dev_t dev, struct aio_req *aio, cred_t *cred_p);

static int sdstrategy(struct buf *bp);
static int sdioctl(dev_t, int, intptr_t, int, cred_t *, int *);

/*
 * Function prototypes for layering functions in the iostart chain.
 */
static void sd_mapblockaddr_iostart(int index, struct sd_lun *un,
    struct buf *bp);
static void sd_mapblocksize_iostart(int index, struct sd_lun *un,
    struct buf *bp);
static void sd_checksum_iostart(int index, struct sd_lun *un, struct buf *bp);
static void sd_checksum_uscsi_iostart(int index, struct sd_lun *un,
    struct buf *bp);
static void sd_pm_iostart(int index, struct sd_lun *un, struct buf *bp);
static void sd_core_iostart(int index, struct sd_lun *un, struct buf *bp);

/*
 * Function prototypes for layering functions in the iodone chain.
 */
static void sd_buf_iodone(int index, struct sd_lun *un, struct buf *bp);
static void sd_uscsi_iodone(int index, struct sd_lun *un, struct buf *bp);
static void sd_mapblockaddr_iodone(int index, struct sd_lun *un,
    struct buf *bp);
static void sd_mapblocksize_iodone(int index, struct sd_lun *un,
    struct buf *bp);
static void sd_checksum_iodone(int index, struct sd_lun *un, struct buf *bp);
static void sd_checksum_uscsi_iodone(int index, struct sd_lun *un,
    struct buf *bp);
static void sd_pm_iodone(int index, struct sd_lun *un, struct buf *bp);
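/*
 * Note on the layering scheme: each iostart function above performs one
 * transformation on the buf (partition/block remapping, block-size
 * shadowing, checksumming, power management) and then passes the request
 * to the next layer through its chain index; the corresponding iodone
 * functions unwind the same chain in reverse order at command completion.
 * The chain arrays and index maps (sd_iostart_chain, sd_iodone_chain,
 * sd_chain_index_map; see the ssd #define list above) are defined later
 * in this file, and the chain type for an I/O is selected when its xbuf
 * is initialized via sd_xbuf_init().
 */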
/*
 * Prototypes for functions to support buf(9S) based IO.
 */
static void sd_xbuf_strategy(struct buf *bp, ddi_xbuf_t xp, void *arg);
static int sd_initpkt_for_buf(struct buf *, struct scsi_pkt **);
static void sd_destroypkt_for_buf(struct buf *);
static int sd_setup_rw_pkt(struct sd_lun *un, struct scsi_pkt **pktpp,
    struct buf *bp, int flags,
    int (*callback)(caddr_t), caddr_t callback_arg,
    diskaddr_t lba, uint32_t blockcount);
static int sd_setup_next_rw_pkt(struct sd_lun *un, struct scsi_pkt *pktp,
    struct buf *bp, diskaddr_t lba, uint32_t blockcount);

/*
 * Prototypes for functions to support USCSI IO.
 */
static int sd_uscsi_strategy(struct buf *bp);
static int sd_initpkt_for_uscsi(struct buf *, struct scsi_pkt **);
static void sd_destroypkt_for_uscsi(struct buf *);

static void sd_xbuf_init(struct sd_lun *un, struct buf *bp, struct sd_xbuf *xp,
    uchar_t chain_type, void *pktinfop);

static int sd_pm_entry(struct sd_lun *un);
static void sd_pm_exit(struct sd_lun *un);

static void sd_pm_idletimeout_handler(void *arg);

/*
 * sd_core internal functions (used at the sd_core_io layer).
 */
static void sd_add_buf_to_waitq(struct sd_lun *un, struct buf *bp);
static void sdintr(struct scsi_pkt *pktp);
static void sd_start_cmds(struct sd_lun *un, struct buf *immed_bp);

static int sd_send_scsi_cmd(dev_t dev, struct uscsi_cmd *incmd, int flag,
    enum uio_seg dataspace, int path_flag);

static struct buf *sd_bioclone_alloc(struct buf *bp, size_t datalen,
    daddr_t blkno, int (*func)(struct buf *));
static struct buf *sd_shadow_buf_alloc(struct buf *bp, size_t datalen,
    uint_t bflags, daddr_t blkno, int (*func)(struct buf *));
static void sd_bioclone_free(struct buf *bp);
static void sd_shadow_buf_free(struct buf *bp);

static void sd_print_transport_rejected_message(struct sd_lun *un,
    struct sd_xbuf *xp, int code);
static void sd_print_incomplete_msg(struct sd_lun *un, struct buf *bp,
    void *arg, int code);
static void sd_print_sense_failed_msg(struct sd_lun *un, struct buf *bp,
    void *arg, int code);
static void sd_print_cmd_incomplete_msg(struct sd_lun *un, struct buf *bp,
    void *arg, int code);

static void sd_retry_command(struct sd_lun *un, struct buf *bp,
    int retry_check_flag,
    void (*user_funcp)(struct sd_lun *un, struct buf *bp, void *argp,
	int c),
    void *user_arg, int failure_code, clock_t retry_delay,
    void (*statp)(kstat_io_t *));

static void sd_set_retry_bp(struct sd_lun *un, struct buf *bp,
    clock_t retry_delay, void (*statp)(kstat_io_t *));

static void sd_send_request_sense_command(struct sd_lun *un, struct buf *bp,
    struct scsi_pkt *pktp);
static void sd_start_retry_command(void *arg);
static void sd_start_direct_priority_command(void *arg);
static void sd_return_failed_command(struct sd_lun *un, struct buf *bp,
    int errcode);
static void sd_return_failed_command_no_restart(struct sd_lun *un,
    struct buf *bp, int errcode);
static void sd_return_command(struct sd_lun *un, struct buf *bp);
static void sd_sync_with_callback(struct sd_lun *un);
static int sdrunout(caddr_t arg);

static void sd_mark_rqs_busy(struct sd_lun *un, struct buf *bp);
static struct buf *sd_mark_rqs_idle(struct sd_lun *un, struct sd_xbuf *xp);

static void sd_reduce_throttle(struct sd_lun *un, int throttle_type);
static void sd_restore_throttle(void *arg);

static void sd_init_cdb_limits(struct sd_lun *un);

static void sd_pkt_status_good(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);

/*
 * Error handling functions
 */
static void sd_pkt_status_check_condition(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_pkt_status_busy(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_pkt_status_reservation_conflict(struct sd_lun *un,
    struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_pkt_status_qfull(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);

static void sd_handle_request_sense(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_handle_auto_request_sense(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);
static int sd_validate_sense_data(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, size_t actual_len);
static void sd_decode_sense(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);

static void sd_print_sense_msg(struct sd_lun *un, struct buf *bp,
    void *arg, int code);

static void sd_sense_key_no_sense(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_sense_key_recoverable_error(struct sd_lun *un,
    uint8_t *sense_datap,
    struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_sense_key_not_ready(struct sd_lun *un,
    uint8_t *sense_datap,
    struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_sense_key_medium_or_hardware_error(struct sd_lun *un,
    uint8_t *sense_datap,
    struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_sense_key_illegal_request(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_sense_key_unit_attention(struct sd_lun *un,
    uint8_t *sense_datap,
    struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_sense_key_fail_command(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_sense_key_blank_check(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_sense_key_aborted_command(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_sense_key_default(struct sd_lun *un,
    uint8_t *sense_datap,
    struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp);

static void sd_print_retry_msg(struct sd_lun *un, struct buf *bp,
    void *arg, int flag);

static void sd_pkt_reason_cmd_incomplete(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_pkt_reason_cmd_tran_err(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_pkt_reason_cmd_reset(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_pkt_reason_cmd_aborted(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_pkt_reason_cmd_timeout(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_pkt_reason_cmd_unx_bus_free(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_pkt_reason_cmd_tag_reject(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_pkt_reason_default(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);

static void sd_reset_target(struct sd_lun *un, struct scsi_pkt *pktp);

static void sd_start_stop_unit_callback(void *arg);
static void sd_start_stop_unit_task(void *arg);

static void sd_taskq_create(void);
static void sd_taskq_delete(void);
static void sd_target_change_task(void *arg);
static void sd_log_lun_expansion_event(struct sd_lun *un, int km_flag);
static void sd_media_change_task(void *arg);

static int sd_handle_mchange(struct sd_lun *un);
static int sd_send_scsi_DOORLOCK(sd_ssc_t *ssc, int flag, int path_flag);
static int sd_send_scsi_READ_CAPACITY(sd_ssc_t *ssc, uint64_t *capp,
    uint32_t *lbap, int path_flag);
static int sd_send_scsi_READ_CAPACITY_16(sd_ssc_t *ssc, uint64_t *capp,
    uint32_t *lbap, int path_flag);
*capp, 1466 uint32_t *lbap, int path_flag); 1467 static int sd_send_scsi_START_STOP_UNIT(sd_ssc_t *ssc, int flag, 1468 int path_flag); 1469 static int sd_send_scsi_INQUIRY(sd_ssc_t *ssc, uchar_t *bufaddr, 1470 size_t buflen, uchar_t evpd, uchar_t page_code, size_t *residp); 1471 static int sd_send_scsi_TEST_UNIT_READY(sd_ssc_t *ssc, int flag); 1472 static int sd_send_scsi_PERSISTENT_RESERVE_IN(sd_ssc_t *ssc, 1473 uchar_t usr_cmd, uint16_t data_len, uchar_t *data_bufp); 1474 static int sd_send_scsi_PERSISTENT_RESERVE_OUT(sd_ssc_t *ssc, 1475 uchar_t usr_cmd, uchar_t *usr_bufp); 1476 static int sd_send_scsi_SYNCHRONIZE_CACHE(struct sd_lun *un, 1477 struct dk_callback *dkc); 1478 static int sd_send_scsi_SYNCHRONIZE_CACHE_biodone(struct buf *bp); 1479 static int sd_send_scsi_GET_CONFIGURATION(sd_ssc_t *ssc, 1480 struct uscsi_cmd *ucmdbuf, uchar_t *rqbuf, uint_t rqbuflen, 1481 uchar_t *bufaddr, uint_t buflen, int path_flag); 1482 static int sd_send_scsi_feature_GET_CONFIGURATION(sd_ssc_t *ssc, 1483 struct uscsi_cmd *ucmdbuf, uchar_t *rqbuf, uint_t rqbuflen, 1484 uchar_t *bufaddr, uint_t buflen, char feature, int path_flag); 1485 static int sd_send_scsi_MODE_SENSE(sd_ssc_t *ssc, int cdbsize, 1486 uchar_t *bufaddr, size_t buflen, uchar_t page_code, int path_flag); 1487 static int sd_send_scsi_MODE_SELECT(sd_ssc_t *ssc, int cdbsize, 1488 uchar_t *bufaddr, size_t buflen, uchar_t save_page, int path_flag); 1489 static int sd_send_scsi_RDWR(sd_ssc_t *ssc, uchar_t cmd, void *bufaddr, 1490 size_t buflen, daddr_t start_block, int path_flag); 1491 #define sd_send_scsi_READ(ssc, bufaddr, buflen, start_block, path_flag) \ 1492 sd_send_scsi_RDWR(ssc, SCMD_READ, bufaddr, buflen, start_block, \ 1493 path_flag) 1494 #define sd_send_scsi_WRITE(ssc, bufaddr, buflen, start_block, path_flag)\ 1495 sd_send_scsi_RDWR(ssc, SCMD_WRITE, bufaddr, buflen, start_block,\ 1496 path_flag) 1497 1498 static int sd_send_scsi_LOG_SENSE(sd_ssc_t *ssc, uchar_t *bufaddr, 1499 uint16_t buflen, uchar_t page_code, uchar_t page_control, 1500 uint16_t param_ptr, int path_flag); 1501 1502 static int sd_alloc_rqs(struct scsi_device *devp, struct sd_lun *un); 1503 static void sd_free_rqs(struct sd_lun *un); 1504 1505 static void sd_dump_memory(struct sd_lun *un, uint_t comp, char *title, 1506 uchar_t *data, int len, int fmt); 1507 static void sd_panic_for_res_conflict(struct sd_lun *un); 1508 1509 /* 1510 * Disk Ioctl Function Prototypes 1511 */ 1512 static int sd_get_media_info(dev_t dev, caddr_t arg, int flag); 1513 static int sd_dkio_ctrl_info(dev_t dev, caddr_t arg, int flag); 1514 static int sd_dkio_get_temp(dev_t dev, caddr_t arg, int flag); 1515 1516 /* 1517 * Multi-host Ioctl Prototypes 1518 */ 1519 static int sd_check_mhd(dev_t dev, int interval); 1520 static int sd_mhd_watch_cb(caddr_t arg, struct scsi_watch_result *resultp); 1521 static void sd_mhd_watch_incomplete(struct sd_lun *un, struct scsi_pkt *pkt); 1522 static char *sd_sname(uchar_t status); 1523 static void sd_mhd_resvd_recover(void *arg); 1524 static void sd_resv_reclaim_thread(); 1525 static int sd_take_ownership(dev_t dev, struct mhioctkown *p); 1526 static int sd_reserve_release(dev_t dev, int cmd); 1527 static void sd_rmv_resv_reclaim_req(dev_t dev); 1528 static void sd_mhd_reset_notify_cb(caddr_t arg); 1529 static int sd_persistent_reservation_in_read_keys(struct sd_lun *un, 1530 mhioc_inkeys_t *usrp, int flag); 1531 static int sd_persistent_reservation_in_read_resv(struct sd_lun *un, 1532 mhioc_inresvs_t *usrp, int flag); 1533 static int sd_mhdioc_takeown(dev_t 
dev, caddr_t arg, int flag); 1534 static int sd_mhdioc_failfast(dev_t dev, caddr_t arg, int flag); 1535 static int sd_mhdioc_release(dev_t dev); 1536 static int sd_mhdioc_register_devid(dev_t dev); 1537 static int sd_mhdioc_inkeys(dev_t dev, caddr_t arg, int flag); 1538 static int sd_mhdioc_inresv(dev_t dev, caddr_t arg, int flag); 1539 1540 /* 1541 * SCSI removable prototypes 1542 */ 1543 static int sr_change_blkmode(dev_t dev, int cmd, intptr_t data, int flag); 1544 static int sr_change_speed(dev_t dev, int cmd, intptr_t data, int flag); 1545 static int sr_atapi_change_speed(dev_t dev, int cmd, intptr_t data, int flag); 1546 static int sr_pause_resume(dev_t dev, int mode); 1547 static int sr_play_msf(dev_t dev, caddr_t data, int flag); 1548 static int sr_play_trkind(dev_t dev, caddr_t data, int flag); 1549 static int sr_read_all_subcodes(dev_t dev, caddr_t data, int flag); 1550 static int sr_read_subchannel(dev_t dev, caddr_t data, int flag); 1551 static int sr_read_tocentry(dev_t dev, caddr_t data, int flag); 1552 static int sr_read_tochdr(dev_t dev, caddr_t data, int flag); 1553 static int sr_read_cdda(dev_t dev, caddr_t data, int flag); 1554 static int sr_read_cdxa(dev_t dev, caddr_t data, int flag); 1555 static int sr_read_mode1(dev_t dev, caddr_t data, int flag); 1556 static int sr_read_mode2(dev_t dev, caddr_t data, int flag); 1557 static int sr_read_cd_mode2(dev_t dev, caddr_t data, int flag); 1558 static int sr_sector_mode(dev_t dev, uint32_t blksize); 1559 static int sr_eject(dev_t dev); 1560 static void sr_ejected(register struct sd_lun *un); 1561 static int sr_check_wp(dev_t dev); 1562 static int sd_check_media(dev_t dev, enum dkio_state state); 1563 static int sd_media_watch_cb(caddr_t arg, struct scsi_watch_result *resultp); 1564 static void sd_delayed_cv_broadcast(void *arg); 1565 static int sr_volume_ctrl(dev_t dev, caddr_t data, int flag); 1566 static int sr_read_sony_session_offset(dev_t dev, caddr_t data, int flag); 1567 1568 static int sd_log_page_supported(sd_ssc_t *ssc, int log_page); 1569 1570 /* 1571 * Function Prototype for the non-512 support (DVDRAM, MO etc.) functions. 1572 */ 1573 static void sd_check_for_writable_cd(sd_ssc_t *ssc, int path_flag); 1574 static int sd_wm_cache_constructor(void *wm, void *un, int flags); 1575 static void sd_wm_cache_destructor(void *wm, void *un); 1576 static struct sd_w_map *sd_range_lock(struct sd_lun *un, daddr_t startb, 1577 daddr_t endb, ushort_t typ); 1578 static struct sd_w_map *sd_get_range(struct sd_lun *un, daddr_t startb, 1579 daddr_t endb); 1580 static void sd_free_inlist_wmap(struct sd_lun *un, struct sd_w_map *wmp); 1581 static void sd_range_unlock(struct sd_lun *un, struct sd_w_map *wm); 1582 static void sd_read_modify_write_task(void * arg); 1583 static int 1584 sddump_do_read_of_rmw(struct sd_lun *un, uint64_t blkno, uint64_t nblk, 1585 struct buf **bpp); 1586 1587 1588 /* 1589 * Function prototypes for failfast support. 
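 *
 * ("Failfast" here refers to bufs submitted with the B_FAILFAST flag of
 * buf(9S) set; see the SD_FAILFAST_* constants and sd_failfast_flushctl
 * below for how such bufs are flushed.)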
1590 */ 1591 static void sd_failfast_flushq(struct sd_lun *un); 1592 static int sd_failfast_flushq_callback(struct buf *bp); 1593 1594 /* 1595 * Function prototypes to check for lsi devices 1596 */ 1597 static void sd_is_lsi(struct sd_lun *un); 1598 1599 /* 1600 * Function prototypes for partial DMA support 1601 */ 1602 static int sd_setup_next_xfer(struct sd_lun *un, struct buf *bp, 1603 struct scsi_pkt *pkt, struct sd_xbuf *xp); 1604 1605 1606 /* Function prototypes for cmlb */ 1607 static int sd_tg_rdwr(dev_info_t *devi, uchar_t cmd, void *bufaddr, 1608 diskaddr_t start_block, size_t reqlength, void *tg_cookie); 1609 1610 static int sd_tg_getinfo(dev_info_t *devi, int cmd, void *arg, void *tg_cookie); 1611 1612 /* 1613 * Constants for failfast support: 1614 * 1615 * SD_FAILFAST_INACTIVE: Instance is currently in a normal state, with NO 1616 * failfast processing being performed. 1617 * 1618 * SD_FAILFAST_ACTIVE: Instance is in the failfast state and is performing 1619 * failfast processing on all bufs with B_FAILFAST set. 1620 */ 1621 1622 #define SD_FAILFAST_INACTIVE 0 1623 #define SD_FAILFAST_ACTIVE 1 1624 1625 /* 1626 * Bitmask to control behavior of buf(9S) flushes when a transition to 1627 * the failfast state occurs. Optional bits include: 1628 * 1629 * SD_FAILFAST_FLUSH_ALL_BUFS: When set, flush ALL bufs including those that 1630 * do NOT have B_FAILFAST set. When clear, only bufs with B_FAILFAST will 1631 * be flushed. 1632 * 1633 * SD_FAILFAST_FLUSH_ALL_QUEUES: When set, flush any/all other queues in the 1634 * driver, in addition to the regular wait queue. This includes the xbuf 1635 * queues. When clear, only the driver's wait queue will be flushed. 1636 */ 1637 #define SD_FAILFAST_FLUSH_ALL_BUFS 0x01 1638 #define SD_FAILFAST_FLUSH_ALL_QUEUES 0x02 1639 1640 /* 1641 * The default behavior is to only flush bufs that have B_FAILFAST set, but 1642 * to flush all queues within the driver. 1643 */ 1644 static int sd_failfast_flushctl = SD_FAILFAST_FLUSH_ALL_QUEUES; 1645 1646 1647 /* 1648 * SD Testing Fault Injection 1649 */ 1650 #ifdef SD_FAULT_INJECTION 1651 static void sd_faultinjection_ioctl(int cmd, intptr_t arg, struct sd_lun *un); 1652 static void sd_faultinjection(struct scsi_pkt *pktp); 1653 static void sd_injection_log(char *buf, struct sd_lun *un); 1654 #endif 1655 1656 /* 1657 * Device driver ops vector 1658 */ 1659 static struct cb_ops sd_cb_ops = { 1660 sdopen, /* open */ 1661 sdclose, /* close */ 1662 sdstrategy, /* strategy */ 1663 nodev, /* print */ 1664 sddump, /* dump */ 1665 sdread, /* read */ 1666 sdwrite, /* write */ 1667 sdioctl, /* ioctl */ 1668 nodev, /* devmap */ 1669 nodev, /* mmap */ 1670 nodev, /* segmap */ 1671 nochpoll, /* poll */ 1672 sd_prop_op, /* cb_prop_op */ 1673 0, /* streamtab */ 1674 D_64BIT | D_MP | D_NEW | D_HOTPLUG, /* Driver compatibility flags */ 1675 CB_REV, /* cb_rev */ 1676 sdaread, /* async I/O read entry point */ 1677 sdawrite /* async I/O write entry point */ 1678 }; 1679 1680 struct dev_ops sd_ops = { 1681 DEVO_REV, /* devo_rev, */ 1682 0, /* refcnt */ 1683 sdinfo, /* info */ 1684 nulldev, /* identify */ 1685 sdprobe, /* probe */ 1686 sdattach, /* attach */ 1687 sddetach, /* detach */ 1688 nodev, /* reset */ 1689 &sd_cb_ops, /* driver operations */ 1690 NULL, /* bus operations */ 1691 sdpower, /* power */ 1692 ddi_quiesce_not_needed, /* quiesce */ 1693 }; 1694 1695 /* 1696 * This is the loadable module wrapper. 
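 *
 * (The wrapper below registers the sd_ops vector above with the system
 * through mod_install(9F) in _init(); when built as XPV_HVM_DRIVER the
 * module is instead declared as a misc module, as the #else branch shows.)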
1697  */
1698 #include <sys/modctl.h>
1699
1700 #ifndef XPV_HVM_DRIVER
1701 static struct modldrv modldrv = {
1702 	&mod_driverops,		/* Type of module. This one is a driver */
1703 	SD_MODULE_NAME,		/* Module name. */
1704 	&sd_ops			/* driver ops */
1705 };
1706
1707 static struct modlinkage modlinkage = {
1708 	MODREV_1, &modldrv, NULL
1709 };
1710
1711 #else /* XPV_HVM_DRIVER */
1712 static struct modlmisc modlmisc = {
1713 	&mod_miscops,		/* Type of module. This one is a misc */
1714 	"HVM " SD_MODULE_NAME,	/* Module name. */
1715 };
1716
1717 static struct modlinkage modlinkage = {
1718 	MODREV_1, &modlmisc, NULL
1719 };
1720
1721 #endif /* XPV_HVM_DRIVER */
1722
1723 static cmlb_tg_ops_t sd_tgops = {
1724 	TG_DK_OPS_VERSION_1,
1725 	sd_tg_rdwr,
1726 	sd_tg_getinfo
1727 };
1728
1729 static struct scsi_asq_key_strings sd_additional_codes[] = {
1730 	0x81, 0, "Logical Unit is Reserved",
1731 	0x85, 0, "Audio Address Not Valid",
1732 	0xb6, 0, "Media Load Mechanism Failed",
1733 	0xb9, 0, "Audio Play Operation Aborted",
1734 	0xbf, 0, "Buffer Overflow for Read All Subcodes Command",
1735 	0x53, 2, "Medium removal prevented",
1736 	0x6f, 0, "Authentication failed during key exchange",
1737 	0x6f, 1, "Key not present",
1738 	0x6f, 2, "Key not established",
1739 	0x6f, 3, "Read without proper authentication",
1740 	0x6f, 4, "Mismatched region to this logical unit",
1741 	0x6f, 5, "Region reset count error",
1742 	0xffff, 0x0, NULL
1743 };
1744
1745
1746 /*
1747  * Struct for passing printing information for sense data messages
1748  */
1749 struct sd_sense_info {
1750 	int	ssi_severity;
1751 	int	ssi_pfa_flag;
1752 };
1753
1754 /*
1755  * Table of function pointers for iostart-side routines. Separate "chains"
1756  * of layered function calls are formed by placing the function pointers
1757  * sequentially in the desired order. Functions are called according to an
1758  * incrementing table index ordering. The last function in each chain must
1759  * be sd_core_iostart(). The corresponding iodone-side routines are expected
1760  * in the sd_iodone_chain[] array.
1761  *
1762  * Note: It may seem more natural to organize both the iostart and iodone
1763  * functions together, into an array of structures (or some similar
1764  * organization) with a common index, rather than two separate arrays which
1765  * must be maintained in synchronization. The purpose of this division is
1766  * to achieve improved performance: individual arrays allow for more
1767  * effective cache line utilization on certain platforms.
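 *
 * As an illustration (a hedged sketch, not actual driver code), each
 * non-terminal iostart routine does its layer-specific work and then
 * passes the buf to the next layer through the SD_NEXT_IOSTART() macro
 * defined further below; sd_core_iostart() terminates every chain:
 *
 *	static void
 *	sd_xxx_iostart(int index, struct sd_lun *un, struct buf *bp)
 *	{
 *		... layer-specific processing of bp ...
 *		SD_NEXT_IOSTART(index, un, bp);
 *	}
 *
 * (sd_xxx_iostart is a placeholder name.)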
1768 */ 1769 1770 typedef void (*sd_chain_t)(int index, struct sd_lun *un, struct buf *bp); 1771 1772 1773 static sd_chain_t sd_iostart_chain[] = { 1774 1775 /* Chain for buf IO for disk drive targets (PM enabled) */ 1776 sd_mapblockaddr_iostart, /* Index: 0 */ 1777 sd_pm_iostart, /* Index: 1 */ 1778 sd_core_iostart, /* Index: 2 */ 1779 1780 /* Chain for buf IO for disk drive targets (PM disabled) */ 1781 sd_mapblockaddr_iostart, /* Index: 3 */ 1782 sd_core_iostart, /* Index: 4 */ 1783 1784 /* Chain for buf IO for removable-media targets (PM enabled) */ 1785 sd_mapblockaddr_iostart, /* Index: 5 */ 1786 sd_mapblocksize_iostart, /* Index: 6 */ 1787 sd_pm_iostart, /* Index: 7 */ 1788 sd_core_iostart, /* Index: 8 */ 1789 1790 /* Chain for buf IO for removable-media targets (PM disabled) */ 1791 sd_mapblockaddr_iostart, /* Index: 9 */ 1792 sd_mapblocksize_iostart, /* Index: 10 */ 1793 sd_core_iostart, /* Index: 11 */ 1794 1795 /* Chain for buf IO for disk drives with checksumming (PM enabled) */ 1796 sd_mapblockaddr_iostart, /* Index: 12 */ 1797 sd_checksum_iostart, /* Index: 13 */ 1798 sd_pm_iostart, /* Index: 14 */ 1799 sd_core_iostart, /* Index: 15 */ 1800 1801 /* Chain for buf IO for disk drives with checksumming (PM disabled) */ 1802 sd_mapblockaddr_iostart, /* Index: 16 */ 1803 sd_checksum_iostart, /* Index: 17 */ 1804 sd_core_iostart, /* Index: 18 */ 1805 1806 /* Chain for USCSI commands (all targets) */ 1807 sd_pm_iostart, /* Index: 19 */ 1808 sd_core_iostart, /* Index: 20 */ 1809 1810 /* Chain for checksumming USCSI commands (all targets) */ 1811 sd_checksum_uscsi_iostart, /* Index: 21 */ 1812 sd_pm_iostart, /* Index: 22 */ 1813 sd_core_iostart, /* Index: 23 */ 1814 1815 /* Chain for "direct" USCSI commands (all targets) */ 1816 sd_core_iostart, /* Index: 24 */ 1817 1818 /* Chain for "direct priority" USCSI commands (all targets) */ 1819 sd_core_iostart, /* Index: 25 */ 1820 }; 1821 1822 /* 1823 * Macros to locate the first function of each iostart chain in the 1824 * sd_iostart_chain[] array. These are located by the index in the array. 1825 */ 1826 #define SD_CHAIN_DISK_IOSTART 0 1827 #define SD_CHAIN_DISK_IOSTART_NO_PM 3 1828 #define SD_CHAIN_RMMEDIA_IOSTART 5 1829 #define SD_CHAIN_RMMEDIA_IOSTART_NO_PM 9 1830 #define SD_CHAIN_CHKSUM_IOSTART 12 1831 #define SD_CHAIN_CHKSUM_IOSTART_NO_PM 16 1832 #define SD_CHAIN_USCSI_CMD_IOSTART 19 1833 #define SD_CHAIN_USCSI_CHKSUM_IOSTART 21 1834 #define SD_CHAIN_DIRECT_CMD_IOSTART 24 1835 #define SD_CHAIN_PRIORITY_CMD_IOSTART 25 1836 1837 1838 /* 1839 * Table of function pointers for the iodone-side routines for the driver- 1840 * internal layering mechanism. The calling sequence for iodone routines 1841 * uses a decrementing table index, so the last routine called in a chain 1842 * must be at the lowest array index location for that chain. The last 1843 * routine for each chain must be either sd_buf_iodone() (for buf(9S) IOs) 1844 * or sd_uscsi_iodone() (for uscsi IOs). Other than this, the ordering 1845 * of the functions in an iodone side chain must correspond to the ordering 1846 * of the iostart routines for that chain. Note that there is no iodone 1847 * side routine that corresponds to sd_core_iostart(), so there is no 1848 * entry in the table for this. 
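 *
 * For illustration, using the tables as given: for the checksum disk
 * chain (PM enabled) completion starts at that chain's highest index and
 * the indices then decrement toward sd_buf_iodone(), e.g.
 *
 *	SD_BEGIN_IODONE(15, un, bp)	calls sd_pm_iodone		(15)
 *	SD_NEXT_IODONE(15, un, bp)	calls sd_checksum_iodone	(14)
 *	SD_NEXT_IODONE(14, un, bp)	calls sd_mapblockaddr_iodone	(13)
 *	SD_NEXT_IODONE(13, un, bp)	calls sd_buf_iodone		(12)
 *
 * (The SD_BEGIN_IODONE()/SD_NEXT_IODONE() macros are defined further
 * below.)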
1849  */
1850
1851 static sd_chain_t sd_iodone_chain[] = {
1852
1853 	/* Chain for buf IO for disk drive targets (PM enabled) */
1854 	sd_buf_iodone,			/* Index: 0 */
1855 	sd_mapblockaddr_iodone,		/* Index: 1 */
1856 	sd_pm_iodone,			/* Index: 2 */
1857
1858 	/* Chain for buf IO for disk drive targets (PM disabled) */
1859 	sd_buf_iodone,			/* Index: 3 */
1860 	sd_mapblockaddr_iodone,		/* Index: 4 */
1861
1862 	/* Chain for buf IO for removable-media targets (PM enabled) */
1863 	sd_buf_iodone,			/* Index: 5 */
1864 	sd_mapblockaddr_iodone,		/* Index: 6 */
1865 	sd_mapblocksize_iodone,		/* Index: 7 */
1866 	sd_pm_iodone,			/* Index: 8 */
1867
1868 	/* Chain for buf IO for removable-media targets (PM disabled) */
1869 	sd_buf_iodone,			/* Index: 9 */
1870 	sd_mapblockaddr_iodone,		/* Index: 10 */
1871 	sd_mapblocksize_iodone,		/* Index: 11 */
1872
1873 	/* Chain for buf IO for disk drives with checksumming (PM enabled) */
1874 	sd_buf_iodone,			/* Index: 12 */
1875 	sd_mapblockaddr_iodone,		/* Index: 13 */
1876 	sd_checksum_iodone,		/* Index: 14 */
1877 	sd_pm_iodone,			/* Index: 15 */
1878
1879 	/* Chain for buf IO for disk drives with checksumming (PM disabled) */
1880 	sd_buf_iodone,			/* Index: 16 */
1881 	sd_mapblockaddr_iodone,		/* Index: 17 */
1882 	sd_checksum_iodone,		/* Index: 18 */
1883
1884 	/* Chain for USCSI commands (non-checksum targets) */
1885 	sd_uscsi_iodone,		/* Index: 19 */
1886 	sd_pm_iodone,			/* Index: 20 */
1887
1888 	/* Chain for USCSI commands (checksum targets) */
1889 	sd_uscsi_iodone,		/* Index: 21 */
1890 	sd_checksum_uscsi_iodone,	/* Index: 22 */
1891 	sd_pm_iodone,			/* Index: 23 */
1892
1893 	/* Chain for "direct" USCSI commands (all targets) */
1894 	sd_uscsi_iodone,		/* Index: 24 */
1895
1896 	/* Chain for "direct priority" USCSI commands (all targets) */
1897 	sd_uscsi_iodone,		/* Index: 25 */
1898 };
1899
1900
1901 /*
1902  * Macros to locate the "first" function in the sd_iodone_chain[] array for
1903  * each iodone-side chain. These are located by the array index, but as the
1904  * iodone side functions are called in a decrementing-index order, the
1905  * highest index number in each chain must be specified (as these correspond
1906  * to the first function in the iodone chain that will be called by the core
1907  * at IO completion time).
1908  */
1909
1910 #define	SD_CHAIN_DISK_IODONE		2
1911 #define	SD_CHAIN_DISK_IODONE_NO_PM	4
1912 #define	SD_CHAIN_RMMEDIA_IODONE		8
1913 #define	SD_CHAIN_RMMEDIA_IODONE_NO_PM	11
1914 #define	SD_CHAIN_CHKSUM_IODONE		15
1915 #define	SD_CHAIN_CHKSUM_IODONE_NO_PM	18
1916 #define	SD_CHAIN_USCSI_CMD_IODONE	20
1917 #define	SD_CHAIN_USCSI_CHKSUM_IODONE	23
1918 #define	SD_CHAIN_DIRECT_CMD_IODONE	24
1919 #define	SD_CHAIN_PRIORITY_CMD_IODONE	25
1920
1921
1922
1923
1924 /*
1925  * Array to map a layering chain index to the appropriate initpkt routine.
1926  * The redundant entries are present so that the index used for accessing
1927  * the above sd_iostart_chain and sd_iodone_chain tables can be used directly
1928  * with this table as well.
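 *
 * (Illustrative sketch, not a quote of the driver: transport setup code
 * would dispatch through this map with the xbuf's saved iostart index,
 * roughly
 *
 *	rval = (*sd_initpkt_map[xp->xb_chain_iostart])(bp, &pktp);
 *
 * so buf and uscsi requests each reach the proper packet constructor.)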
1929  */
1930 typedef int (*sd_initpkt_t)(struct buf *, struct scsi_pkt **);
1931
1932 static sd_initpkt_t	sd_initpkt_map[] = {
1933
1934 	/* Chain for buf IO for disk drive targets (PM enabled) */
1935 	sd_initpkt_for_buf,		/* Index: 0 */
1936 	sd_initpkt_for_buf,		/* Index: 1 */
1937 	sd_initpkt_for_buf,		/* Index: 2 */
1938
1939 	/* Chain for buf IO for disk drive targets (PM disabled) */
1940 	sd_initpkt_for_buf,		/* Index: 3 */
1941 	sd_initpkt_for_buf,		/* Index: 4 */
1942
1943 	/* Chain for buf IO for removable-media targets (PM enabled) */
1944 	sd_initpkt_for_buf,		/* Index: 5 */
1945 	sd_initpkt_for_buf,		/* Index: 6 */
1946 	sd_initpkt_for_buf,		/* Index: 7 */
1947 	sd_initpkt_for_buf,		/* Index: 8 */
1948
1949 	/* Chain for buf IO for removable-media targets (PM disabled) */
1950 	sd_initpkt_for_buf,		/* Index: 9 */
1951 	sd_initpkt_for_buf,		/* Index: 10 */
1952 	sd_initpkt_for_buf,		/* Index: 11 */
1953
1954 	/* Chain for buf IO for disk drives with checksumming (PM enabled) */
1955 	sd_initpkt_for_buf,		/* Index: 12 */
1956 	sd_initpkt_for_buf,		/* Index: 13 */
1957 	sd_initpkt_for_buf,		/* Index: 14 */
1958 	sd_initpkt_for_buf,		/* Index: 15 */
1959
1960 	/* Chain for buf IO for disk drives with checksumming (PM disabled) */
1961 	sd_initpkt_for_buf,		/* Index: 16 */
1962 	sd_initpkt_for_buf,		/* Index: 17 */
1963 	sd_initpkt_for_buf,		/* Index: 18 */
1964
1965 	/* Chain for USCSI commands (non-checksum targets) */
1966 	sd_initpkt_for_uscsi,		/* Index: 19 */
1967 	sd_initpkt_for_uscsi,		/* Index: 20 */
1968
1969 	/* Chain for USCSI commands (checksum targets) */
1970 	sd_initpkt_for_uscsi,		/* Index: 21 */
1971 	sd_initpkt_for_uscsi,		/* Index: 22 */
1972 	sd_initpkt_for_uscsi,		/* Index: 23 */
1973
1974 	/* Chain for "direct" USCSI commands (all targets) */
1975 	sd_initpkt_for_uscsi,		/* Index: 24 */
1976
1977 	/* Chain for "direct priority" USCSI commands (all targets) */
1978 	sd_initpkt_for_uscsi,		/* Index: 25 */
1979
1980 };
1981
1982
1983 /*
1984  * Array to map a layering chain index to the appropriate destroypkt routine.
1985  * The redundant entries are present so that the index used for accessing
1986  * the above sd_iostart_chain and sd_iodone_chain tables can be used directly
1987  * with this table as well.
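 *
 * (Similarly, as a hedged sketch, packet teardown would dispatch through
 *
 *	(*sd_destroypkt_map[xp->xb_chain_iostart])(bp);
 *
 * so the constructor and destructor for a given request always pair up.)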
1988  */
1989 typedef void (*sd_destroypkt_t)(struct buf *);
1990
1991 static sd_destroypkt_t	sd_destroypkt_map[] = {
1992
1993 	/* Chain for buf IO for disk drive targets (PM enabled) */
1994 	sd_destroypkt_for_buf,		/* Index: 0 */
1995 	sd_destroypkt_for_buf,		/* Index: 1 */
1996 	sd_destroypkt_for_buf,		/* Index: 2 */
1997
1998 	/* Chain for buf IO for disk drive targets (PM disabled) */
1999 	sd_destroypkt_for_buf,		/* Index: 3 */
2000 	sd_destroypkt_for_buf,		/* Index: 4 */
2001
2002 	/* Chain for buf IO for removable-media targets (PM enabled) */
2003 	sd_destroypkt_for_buf,		/* Index: 5 */
2004 	sd_destroypkt_for_buf,		/* Index: 6 */
2005 	sd_destroypkt_for_buf,		/* Index: 7 */
2006 	sd_destroypkt_for_buf,		/* Index: 8 */
2007
2008 	/* Chain for buf IO for removable-media targets (PM disabled) */
2009 	sd_destroypkt_for_buf,		/* Index: 9 */
2010 	sd_destroypkt_for_buf,		/* Index: 10 */
2011 	sd_destroypkt_for_buf,		/* Index: 11 */
2012
2013 	/* Chain for buf IO for disk drives with checksumming (PM enabled) */
2014 	sd_destroypkt_for_buf,		/* Index: 12 */
2015 	sd_destroypkt_for_buf,		/* Index: 13 */
2016 	sd_destroypkt_for_buf,		/* Index: 14 */
2017 	sd_destroypkt_for_buf,		/* Index: 15 */
2018
2019 	/* Chain for buf IO for disk drives with checksumming (PM disabled) */
2020 	sd_destroypkt_for_buf,		/* Index: 16 */
2021 	sd_destroypkt_for_buf,		/* Index: 17 */
2022 	sd_destroypkt_for_buf,		/* Index: 18 */
2023
2024 	/* Chain for USCSI commands (non-checksum targets) */
2025 	sd_destroypkt_for_uscsi,	/* Index: 19 */
2026 	sd_destroypkt_for_uscsi,	/* Index: 20 */
2027
2028 	/* Chain for USCSI commands (checksum targets) */
2029 	sd_destroypkt_for_uscsi,	/* Index: 21 */
2030 	sd_destroypkt_for_uscsi,	/* Index: 22 */
2031 	sd_destroypkt_for_uscsi,	/* Index: 23 */
2032
2033 	/* Chain for "direct" USCSI commands (all targets) */
2034 	sd_destroypkt_for_uscsi,	/* Index: 24 */
2035
2036 	/* Chain for "direct priority" USCSI commands (all targets) */
2037 	sd_destroypkt_for_uscsi,	/* Index: 25 */
2038
2039 };
2040
2041
2042
2043 /*
2044  * Array to map a layering chain index to the appropriate chain "type".
2045  * The chain type indicates a specific property/usage of the chain.
2046  * The redundant entries are present so that the index used for accessing
2047  * the above sd_iostart_chain and sd_iodone_chain tables can be used directly
2048  * with this table as well.
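 *
 * (Hedged sketch: code that must treat buf and uscsi requests differently
 * can test the originating chain with, e.g.,
 *
 *	if (sd_chain_type_map[xp->xb_chain_iostart] == SD_CHAIN_USCSI)
 *		... uscsi-specific handling ...
 *
 * which is what the SD_IS_BUFIO()/SD_IS_DIRECT_PRIORITY() macros below
 * package up.)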
2049  */
2050
2051 #define	SD_CHAIN_NULL			0	/* for the special RQS cmd */
2052 #define	SD_CHAIN_BUFIO			1	/* regular buf IO */
2053 #define	SD_CHAIN_USCSI			2	/* regular USCSI commands */
2054 #define	SD_CHAIN_DIRECT			3	/* uscsi, w/ bypass power mgt */
2055 #define	SD_CHAIN_DIRECT_PRIORITY	4	/* uscsi, w/ bypass power mgt */
2056 						/* (for error recovery) */
2057
2058 static int sd_chain_type_map[] = {
2059
2060 	/* Chain for buf IO for disk drive targets (PM enabled) */
2061 	SD_CHAIN_BUFIO,			/* Index: 0 */
2062 	SD_CHAIN_BUFIO,			/* Index: 1 */
2063 	SD_CHAIN_BUFIO,			/* Index: 2 */
2064
2065 	/* Chain for buf IO for disk drive targets (PM disabled) */
2066 	SD_CHAIN_BUFIO,			/* Index: 3 */
2067 	SD_CHAIN_BUFIO,			/* Index: 4 */
2068
2069 	/* Chain for buf IO for removable-media targets (PM enabled) */
2070 	SD_CHAIN_BUFIO,			/* Index: 5 */
2071 	SD_CHAIN_BUFIO,			/* Index: 6 */
2072 	SD_CHAIN_BUFIO,			/* Index: 7 */
2073 	SD_CHAIN_BUFIO,			/* Index: 8 */
2074
2075 	/* Chain for buf IO for removable-media targets (PM disabled) */
2076 	SD_CHAIN_BUFIO,			/* Index: 9 */
2077 	SD_CHAIN_BUFIO,			/* Index: 10 */
2078 	SD_CHAIN_BUFIO,			/* Index: 11 */
2079
2080 	/* Chain for buf IO for disk drives with checksumming (PM enabled) */
2081 	SD_CHAIN_BUFIO,			/* Index: 12 */
2082 	SD_CHAIN_BUFIO,			/* Index: 13 */
2083 	SD_CHAIN_BUFIO,			/* Index: 14 */
2084 	SD_CHAIN_BUFIO,			/* Index: 15 */
2085
2086 	/* Chain for buf IO for disk drives with checksumming (PM disabled) */
2087 	SD_CHAIN_BUFIO,			/* Index: 16 */
2088 	SD_CHAIN_BUFIO,			/* Index: 17 */
2089 	SD_CHAIN_BUFIO,			/* Index: 18 */
2090
2091 	/* Chain for USCSI commands (non-checksum targets) */
2092 	SD_CHAIN_USCSI,			/* Index: 19 */
2093 	SD_CHAIN_USCSI,			/* Index: 20 */
2094
2095 	/* Chain for USCSI commands (checksum targets) */
2096 	SD_CHAIN_USCSI,			/* Index: 21 */
2097 	SD_CHAIN_USCSI,			/* Index: 22 */
2098 	SD_CHAIN_USCSI,			/* Index: 23 */
2099
2100 	/* Chain for "direct" USCSI commands (all targets) */
2101 	SD_CHAIN_DIRECT,		/* Index: 24 */
2102
2103 	/* Chain for "direct priority" USCSI commands (all targets) */
2104 	SD_CHAIN_DIRECT_PRIORITY,	/* Index: 25 */
2105 };
2106
2107
2108 /* Macro to return TRUE if the IO has come from the sd_buf_iostart() chain. */
2109 #define	SD_IS_BUFIO(xp)			\
2110 	(sd_chain_type_map[(xp)->xb_chain_iostart] == SD_CHAIN_BUFIO)
2111
2112 /* Macro to return TRUE if the IO has come from the "direct priority" chain. */
2113 #define	SD_IS_DIRECT_PRIORITY(xp)	\
2114 	(sd_chain_type_map[(xp)->xb_chain_iostart] == SD_CHAIN_DIRECT_PRIORITY)
2115
2116
2117
2118 /*
2119  * Struct, array, and macros to map a specific chain to the appropriate
2120  * layering indexes in the sd_iostart_chain[] and sd_iodone_chain[] arrays.
2121  *
2122  * The sd_chain_index_map[] array is used at attach time to set the various
2123  * un_xxx_chain type members of the sd_lun softstate to the specific layering
2124  * chain to be used with the instance. This allows different instances to use
2125  * different chains for buf IO, uscsi IO, etc. Also, since the xb_chain_iostart
2126  * and xb_chain_iodone index values in the sd_xbuf are initialized to these
2127  * values at sd_xbuf init time, this allows (1) layering chains to be changed
2128  * dynamically and without the use of locking; and (2) a layer to update the
2129  * xb_chain_io[start|done] member in a given xbuf with its current index value,
2130  * to allow for deferred processing of an IO within the same chain from a
2131  * different execution context.
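 *
 * To make the flow concrete (a hedged sketch; pm_enabled is only a
 * placeholder condition), attach-time chain selection and per-xbuf
 * initialization amount to roughly:
 *
 *	un->un_buf_chain_type = pm_enabled ?
 *	    SD_CHAIN_INFO_DISK : SD_CHAIN_INFO_DISK_NO_PM;
 *
 *	xp->xb_chain_iostart =
 *	    sd_chain_index_map[un->un_buf_chain_type].sci_iostart_index;
 *	xp->xb_chain_iodone =
 *	    sd_chain_index_map[un->un_buf_chain_type].sci_iodone_index;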
2132 */ 2133 2134 struct sd_chain_index { 2135 int sci_iostart_index; 2136 int sci_iodone_index; 2137 }; 2138 2139 static struct sd_chain_index sd_chain_index_map[] = { 2140 { SD_CHAIN_DISK_IOSTART, SD_CHAIN_DISK_IODONE }, 2141 { SD_CHAIN_DISK_IOSTART_NO_PM, SD_CHAIN_DISK_IODONE_NO_PM }, 2142 { SD_CHAIN_RMMEDIA_IOSTART, SD_CHAIN_RMMEDIA_IODONE }, 2143 { SD_CHAIN_RMMEDIA_IOSTART_NO_PM, SD_CHAIN_RMMEDIA_IODONE_NO_PM }, 2144 { SD_CHAIN_CHKSUM_IOSTART, SD_CHAIN_CHKSUM_IODONE }, 2145 { SD_CHAIN_CHKSUM_IOSTART_NO_PM, SD_CHAIN_CHKSUM_IODONE_NO_PM }, 2146 { SD_CHAIN_USCSI_CMD_IOSTART, SD_CHAIN_USCSI_CMD_IODONE }, 2147 { SD_CHAIN_USCSI_CHKSUM_IOSTART, SD_CHAIN_USCSI_CHKSUM_IODONE }, 2148 { SD_CHAIN_DIRECT_CMD_IOSTART, SD_CHAIN_DIRECT_CMD_IODONE }, 2149 { SD_CHAIN_PRIORITY_CMD_IOSTART, SD_CHAIN_PRIORITY_CMD_IODONE }, 2150 }; 2151 2152 2153 /* 2154 * The following are indexes into the sd_chain_index_map[] array. 2155 */ 2156 2157 /* un->un_buf_chain_type must be set to one of these */ 2158 #define SD_CHAIN_INFO_DISK 0 2159 #define SD_CHAIN_INFO_DISK_NO_PM 1 2160 #define SD_CHAIN_INFO_RMMEDIA 2 2161 #define SD_CHAIN_INFO_RMMEDIA_NO_PM 3 2162 #define SD_CHAIN_INFO_CHKSUM 4 2163 #define SD_CHAIN_INFO_CHKSUM_NO_PM 5 2164 2165 /* un->un_uscsi_chain_type must be set to one of these */ 2166 #define SD_CHAIN_INFO_USCSI_CMD 6 2167 /* USCSI with PM disabled is the same as DIRECT */ 2168 #define SD_CHAIN_INFO_USCSI_CMD_NO_PM 8 2169 #define SD_CHAIN_INFO_USCSI_CHKSUM 7 2170 2171 /* un->un_direct_chain_type must be set to one of these */ 2172 #define SD_CHAIN_INFO_DIRECT_CMD 8 2173 2174 /* un->un_priority_chain_type must be set to one of these */ 2175 #define SD_CHAIN_INFO_PRIORITY_CMD 9 2176 2177 /* size for devid inquiries */ 2178 #define MAX_INQUIRY_SIZE 0xF0 2179 2180 /* 2181 * Macros used by functions to pass a given buf(9S) struct along to the 2182 * next function in the layering chain for further processing. 2183 * 2184 * In the following macros, passing more than three arguments to the called 2185 * routines causes the optimizer for the SPARC compiler to stop doing tail 2186 * call elimination which results in significant performance degradation. 2187 */ 2188 #define SD_BEGIN_IOSTART(index, un, bp) \ 2189 ((*(sd_iostart_chain[index]))(index, un, bp)) 2190 2191 #define SD_BEGIN_IODONE(index, un, bp) \ 2192 ((*(sd_iodone_chain[index]))(index, un, bp)) 2193 2194 #define SD_NEXT_IOSTART(index, un, bp) \ 2195 ((*(sd_iostart_chain[(index) + 1]))((index) + 1, un, bp)) 2196 2197 #define SD_NEXT_IODONE(index, un, bp) \ 2198 ((*(sd_iodone_chain[(index) - 1]))((index) - 1, un, bp)) 2199 2200 /* 2201 * Function: _init 2202 * 2203 * Description: This is the driver _init(9E) entry point. 2204 * 2205 * Return Code: Returns the value from mod_install(9F) or 2206 * ddi_soft_state_init(9F) as appropriate. 2207 * 2208 * Context: Called when driver module loaded. 
2209 */ 2210 2211 int 2212 _init(void) 2213 { 2214 int err; 2215 2216 /* establish driver name from module name */ 2217 sd_label = (char *)mod_modname(&modlinkage); 2218 2219 #ifndef XPV_HVM_DRIVER 2220 err = ddi_soft_state_init(&sd_state, sizeof (struct sd_lun), 2221 SD_MAXUNIT); 2222 if (err != 0) { 2223 return (err); 2224 } 2225 2226 #else /* XPV_HVM_DRIVER */ 2227 /* Remove the leading "hvm_" from the module name */ 2228 ASSERT(strncmp(sd_label, "hvm_", strlen("hvm_")) == 0); 2229 sd_label += strlen("hvm_"); 2230 2231 #endif /* XPV_HVM_DRIVER */ 2232 2233 mutex_init(&sd_detach_mutex, NULL, MUTEX_DRIVER, NULL); 2234 mutex_init(&sd_log_mutex, NULL, MUTEX_DRIVER, NULL); 2235 mutex_init(&sd_label_mutex, NULL, MUTEX_DRIVER, NULL); 2236 2237 mutex_init(&sd_tr.srq_resv_reclaim_mutex, NULL, MUTEX_DRIVER, NULL); 2238 cv_init(&sd_tr.srq_resv_reclaim_cv, NULL, CV_DRIVER, NULL); 2239 cv_init(&sd_tr.srq_inprocess_cv, NULL, CV_DRIVER, NULL); 2240 2241 /* 2242 * it's ok to init here even for fibre device 2243 */ 2244 sd_scsi_probe_cache_init(); 2245 2246 sd_scsi_target_lun_init(); 2247 2248 /* 2249 * Creating taskq before mod_install ensures that all callers (threads) 2250 * that enter the module after a successful mod_install encounter 2251 * a valid taskq. 2252 */ 2253 sd_taskq_create(); 2254 2255 err = mod_install(&modlinkage); 2256 if (err != 0) { 2257 /* delete taskq if install fails */ 2258 sd_taskq_delete(); 2259 2260 mutex_destroy(&sd_detach_mutex); 2261 mutex_destroy(&sd_log_mutex); 2262 mutex_destroy(&sd_label_mutex); 2263 2264 mutex_destroy(&sd_tr.srq_resv_reclaim_mutex); 2265 cv_destroy(&sd_tr.srq_resv_reclaim_cv); 2266 cv_destroy(&sd_tr.srq_inprocess_cv); 2267 2268 sd_scsi_probe_cache_fini(); 2269 2270 sd_scsi_target_lun_fini(); 2271 2272 #ifndef XPV_HVM_DRIVER 2273 ddi_soft_state_fini(&sd_state); 2274 #endif /* !XPV_HVM_DRIVER */ 2275 return (err); 2276 } 2277 2278 return (err); 2279 } 2280 2281 2282 /* 2283 * Function: _fini 2284 * 2285 * Description: This is the driver _fini(9E) entry point. 2286 * 2287 * Return Code: Returns the value from mod_remove(9F) 2288 * 2289 * Context: Called when driver module is unloaded. 2290 */ 2291 2292 int 2293 _fini(void) 2294 { 2295 int err; 2296 2297 if ((err = mod_remove(&modlinkage)) != 0) { 2298 return (err); 2299 } 2300 2301 sd_taskq_delete(); 2302 2303 mutex_destroy(&sd_detach_mutex); 2304 mutex_destroy(&sd_log_mutex); 2305 mutex_destroy(&sd_label_mutex); 2306 mutex_destroy(&sd_tr.srq_resv_reclaim_mutex); 2307 2308 sd_scsi_probe_cache_fini(); 2309 2310 sd_scsi_target_lun_fini(); 2311 2312 cv_destroy(&sd_tr.srq_resv_reclaim_cv); 2313 cv_destroy(&sd_tr.srq_inprocess_cv); 2314 2315 #ifndef XPV_HVM_DRIVER 2316 ddi_soft_state_fini(&sd_state); 2317 #endif /* !XPV_HVM_DRIVER */ 2318 2319 return (err); 2320 } 2321 2322 2323 /* 2324 * Function: _info 2325 * 2326 * Description: This is the driver _info(9E) entry point. 2327 * 2328 * Arguments: modinfop - pointer to the driver modinfo structure 2329 * 2330 * Return Code: Returns the value from mod_info(9F). 2331 * 2332 * Context: Kernel thread context 2333 */ 2334 2335 int 2336 _info(struct modinfo *modinfop) 2337 { 2338 return (mod_info(&modlinkage, modinfop)); 2339 } 2340 2341 2342 /* 2343 * The following routines implement the driver message logging facility. 2344 * They provide component- and level- based debug output filtering. 2345 * Output may also be restricted to messages for a single instance by 2346 * specifying a soft state pointer in sd_debug_un. 
If sd_debug_un is set 2347 * to NULL, then messages for all instances are printed. 2348 * 2349 * These routines have been cloned from each other due to the language 2350 * constraints of macros and variable argument list processing. 2351 */ 2352 2353 2354 /* 2355 * Function: sd_log_err 2356 * 2357 * Description: This routine is called by the SD_ERROR macro for debug 2358 * logging of error conditions. 2359 * 2360 * Arguments: comp - driver component being logged 2361 * dev - pointer to driver info structure 2362 * fmt - error string and format to be logged 2363 */ 2364 2365 static void 2366 sd_log_err(uint_t comp, struct sd_lun *un, const char *fmt, ...) 2367 { 2368 va_list ap; 2369 dev_info_t *dev; 2370 2371 ASSERT(un != NULL); 2372 dev = SD_DEVINFO(un); 2373 ASSERT(dev != NULL); 2374 2375 /* 2376 * Filter messages based on the global component and level masks. 2377 * Also print if un matches the value of sd_debug_un, or if 2378 * sd_debug_un is set to NULL. 2379 */ 2380 if ((sd_component_mask & comp) && (sd_level_mask & SD_LOGMASK_ERROR) && 2381 ((sd_debug_un == NULL) || (sd_debug_un == un))) { 2382 mutex_enter(&sd_log_mutex); 2383 va_start(ap, fmt); 2384 (void) vsprintf(sd_log_buf, fmt, ap); 2385 va_end(ap); 2386 scsi_log(dev, sd_label, CE_CONT, "%s", sd_log_buf); 2387 mutex_exit(&sd_log_mutex); 2388 } 2389 #ifdef SD_FAULT_INJECTION 2390 _NOTE(DATA_READABLE_WITHOUT_LOCK(sd_lun::sd_injection_mask)); 2391 if (un->sd_injection_mask & comp) { 2392 mutex_enter(&sd_log_mutex); 2393 va_start(ap, fmt); 2394 (void) vsprintf(sd_log_buf, fmt, ap); 2395 va_end(ap); 2396 sd_injection_log(sd_log_buf, un); 2397 mutex_exit(&sd_log_mutex); 2398 } 2399 #endif 2400 } 2401 2402 2403 /* 2404 * Function: sd_log_info 2405 * 2406 * Description: This routine is called by the SD_INFO macro for debug 2407 * logging of general purpose informational conditions. 2408 * 2409 * Arguments: comp - driver component being logged 2410 * dev - pointer to driver info structure 2411 * fmt - info string and format to be logged 2412 */ 2413 2414 static void 2415 sd_log_info(uint_t component, struct sd_lun *un, const char *fmt, ...) 2416 { 2417 va_list ap; 2418 dev_info_t *dev; 2419 2420 ASSERT(un != NULL); 2421 dev = SD_DEVINFO(un); 2422 ASSERT(dev != NULL); 2423 2424 /* 2425 * Filter messages based on the global component and level masks. 2426 * Also print if un matches the value of sd_debug_un, or if 2427 * sd_debug_un is set to NULL. 2428 */ 2429 if ((sd_component_mask & component) && 2430 (sd_level_mask & SD_LOGMASK_INFO) && 2431 ((sd_debug_un == NULL) || (sd_debug_un == un))) { 2432 mutex_enter(&sd_log_mutex); 2433 va_start(ap, fmt); 2434 (void) vsprintf(sd_log_buf, fmt, ap); 2435 va_end(ap); 2436 scsi_log(dev, sd_label, CE_CONT, "%s", sd_log_buf); 2437 mutex_exit(&sd_log_mutex); 2438 } 2439 #ifdef SD_FAULT_INJECTION 2440 _NOTE(DATA_READABLE_WITHOUT_LOCK(sd_lun::sd_injection_mask)); 2441 if (un->sd_injection_mask & component) { 2442 mutex_enter(&sd_log_mutex); 2443 va_start(ap, fmt); 2444 (void) vsprintf(sd_log_buf, fmt, ap); 2445 va_end(ap); 2446 sd_injection_log(sd_log_buf, un); 2447 mutex_exit(&sd_log_mutex); 2448 } 2449 #endif 2450 } 2451 2452 2453 /* 2454 * Function: sd_log_trace 2455 * 2456 * Description: This routine is called by the SD_TRACE macro for debug 2457 * logging of trace conditions (i.e. function entry/exit). 
2458 * 2459 * Arguments: comp - driver component being logged 2460 * dev - pointer to driver info structure 2461 * fmt - trace string and format to be logged 2462 */ 2463 2464 static void 2465 sd_log_trace(uint_t component, struct sd_lun *un, const char *fmt, ...) 2466 { 2467 va_list ap; 2468 dev_info_t *dev; 2469 2470 ASSERT(un != NULL); 2471 dev = SD_DEVINFO(un); 2472 ASSERT(dev != NULL); 2473 2474 /* 2475 * Filter messages based on the global component and level masks. 2476 * Also print if un matches the value of sd_debug_un, or if 2477 * sd_debug_un is set to NULL. 2478 */ 2479 if ((sd_component_mask & component) && 2480 (sd_level_mask & SD_LOGMASK_TRACE) && 2481 ((sd_debug_un == NULL) || (sd_debug_un == un))) { 2482 mutex_enter(&sd_log_mutex); 2483 va_start(ap, fmt); 2484 (void) vsprintf(sd_log_buf, fmt, ap); 2485 va_end(ap); 2486 scsi_log(dev, sd_label, CE_CONT, "%s", sd_log_buf); 2487 mutex_exit(&sd_log_mutex); 2488 } 2489 #ifdef SD_FAULT_INJECTION 2490 _NOTE(DATA_READABLE_WITHOUT_LOCK(sd_lun::sd_injection_mask)); 2491 if (un->sd_injection_mask & component) { 2492 mutex_enter(&sd_log_mutex); 2493 va_start(ap, fmt); 2494 (void) vsprintf(sd_log_buf, fmt, ap); 2495 va_end(ap); 2496 sd_injection_log(sd_log_buf, un); 2497 mutex_exit(&sd_log_mutex); 2498 } 2499 #endif 2500 } 2501 2502 2503 /* 2504 * Function: sdprobe 2505 * 2506 * Description: This is the driver probe(9e) entry point function. 2507 * 2508 * Arguments: devi - opaque device info handle 2509 * 2510 * Return Code: DDI_PROBE_SUCCESS: If the probe was successful. 2511 * DDI_PROBE_FAILURE: If the probe failed. 2512 * DDI_PROBE_PARTIAL: If the instance is not present now, 2513 * but may be present in the future. 2514 */ 2515 2516 static int 2517 sdprobe(dev_info_t *devi) 2518 { 2519 struct scsi_device *devp; 2520 int rval; 2521 #ifndef XPV_HVM_DRIVER 2522 int instance = ddi_get_instance(devi); 2523 #endif /* !XPV_HVM_DRIVER */ 2524 2525 /* 2526 * if it wasn't for pln, sdprobe could actually be nulldev 2527 * in the "__fibre" case. 2528 */ 2529 if (ddi_dev_is_sid(devi) == DDI_SUCCESS) { 2530 return (DDI_PROBE_DONTCARE); 2531 } 2532 2533 devp = ddi_get_driver_private(devi); 2534 2535 if (devp == NULL) { 2536 /* Ooops... nexus driver is mis-configured... */ 2537 return (DDI_PROBE_FAILURE); 2538 } 2539 2540 #ifndef XPV_HVM_DRIVER 2541 if (ddi_get_soft_state(sd_state, instance) != NULL) { 2542 return (DDI_PROBE_PARTIAL); 2543 } 2544 #endif /* !XPV_HVM_DRIVER */ 2545 2546 /* 2547 * Call the SCSA utility probe routine to see if we actually 2548 * have a target at this SCSI nexus. 2549 */ 2550 switch (sd_scsi_probe_with_cache(devp, NULL_FUNC)) { 2551 case SCSIPROBE_EXISTS: 2552 switch (devp->sd_inq->inq_dtype) { 2553 case DTYPE_DIRECT: 2554 rval = DDI_PROBE_SUCCESS; 2555 break; 2556 case DTYPE_RODIRECT: 2557 /* CDs etc. 
Can be removable media */ 2558 rval = DDI_PROBE_SUCCESS; 2559 break; 2560 case DTYPE_OPTICAL: 2561 /* 2562 * Rewritable optical driver HP115AA 2563 * Can also be removable media 2564 */ 2565 2566 /* 2567 * Do not attempt to bind to DTYPE_OPTICAL if 2568 * pre solaris 9 sparc sd behavior is required 2569 * 2570 * If first time through and sd_dtype_optical_bind 2571 * has not been set in /etc/system check properties 2572 */ 2573 2574 if (sd_dtype_optical_bind < 0) { 2575 sd_dtype_optical_bind = ddi_prop_get_int 2576 (DDI_DEV_T_ANY, devi, 0, 2577 "optical-device-bind", 1); 2578 } 2579 2580 if (sd_dtype_optical_bind == 0) { 2581 rval = DDI_PROBE_FAILURE; 2582 } else { 2583 rval = DDI_PROBE_SUCCESS; 2584 } 2585 break; 2586 2587 case DTYPE_NOTPRESENT: 2588 default: 2589 rval = DDI_PROBE_FAILURE; 2590 break; 2591 } 2592 break; 2593 default: 2594 rval = DDI_PROBE_PARTIAL; 2595 break; 2596 } 2597 2598 /* 2599 * This routine checks for resource allocation prior to freeing, 2600 * so it will take care of the "smart probing" case where a 2601 * scsi_probe() may or may not have been issued and will *not* 2602 * free previously-freed resources. 2603 */ 2604 scsi_unprobe(devp); 2605 return (rval); 2606 } 2607 2608 2609 /* 2610 * Function: sdinfo 2611 * 2612 * Description: This is the driver getinfo(9e) entry point function. 2613 * Given the device number, return the devinfo pointer from 2614 * the scsi_device structure or the instance number 2615 * associated with the dev_t. 2616 * 2617 * Arguments: dip - pointer to device info structure 2618 * infocmd - command argument (DDI_INFO_DEVT2DEVINFO, 2619 * DDI_INFO_DEVT2INSTANCE) 2620 * arg - driver dev_t 2621 * resultp - user buffer for request response 2622 * 2623 * Return Code: DDI_SUCCESS 2624 * DDI_FAILURE 2625 */ 2626 /* ARGSUSED */ 2627 static int 2628 sdinfo(dev_info_t *dip, ddi_info_cmd_t infocmd, void *arg, void **result) 2629 { 2630 struct sd_lun *un; 2631 dev_t dev; 2632 int instance; 2633 int error; 2634 2635 switch (infocmd) { 2636 case DDI_INFO_DEVT2DEVINFO: 2637 dev = (dev_t)arg; 2638 instance = SDUNIT(dev); 2639 if ((un = ddi_get_soft_state(sd_state, instance)) == NULL) { 2640 return (DDI_FAILURE); 2641 } 2642 *result = (void *) SD_DEVINFO(un); 2643 error = DDI_SUCCESS; 2644 break; 2645 case DDI_INFO_DEVT2INSTANCE: 2646 dev = (dev_t)arg; 2647 instance = SDUNIT(dev); 2648 *result = (void *)(uintptr_t)instance; 2649 error = DDI_SUCCESS; 2650 break; 2651 default: 2652 error = DDI_FAILURE; 2653 } 2654 return (error); 2655 } 2656 2657 /* 2658 * Function: sd_prop_op 2659 * 2660 * Description: This is the driver prop_op(9e) entry point function. 2661 * Return the number of blocks for the partition in question 2662 * or forward the request to the property facilities. 
2663 * 2664 * Arguments: dev - device number 2665 * dip - pointer to device info structure 2666 * prop_op - property operator 2667 * mod_flags - DDI_PROP_DONTPASS, don't pass to parent 2668 * name - pointer to property name 2669 * valuep - pointer or address of the user buffer 2670 * lengthp - property length 2671 * 2672 * Return Code: DDI_PROP_SUCCESS 2673 * DDI_PROP_NOT_FOUND 2674 * DDI_PROP_UNDEFINED 2675 * DDI_PROP_NO_MEMORY 2676 * DDI_PROP_BUF_TOO_SMALL 2677 */ 2678 2679 static int 2680 sd_prop_op(dev_t dev, dev_info_t *dip, ddi_prop_op_t prop_op, int mod_flags, 2681 char *name, caddr_t valuep, int *lengthp) 2682 { 2683 struct sd_lun *un; 2684 2685 if ((un = ddi_get_soft_state(sd_state, ddi_get_instance(dip))) == NULL) 2686 return (ddi_prop_op(dev, dip, prop_op, mod_flags, 2687 name, valuep, lengthp)); 2688 2689 return (cmlb_prop_op(un->un_cmlbhandle, 2690 dev, dip, prop_op, mod_flags, name, valuep, lengthp, 2691 SDPART(dev), (void *)SD_PATH_DIRECT)); 2692 } 2693 2694 /* 2695 * The following functions are for smart probing: 2696 * sd_scsi_probe_cache_init() 2697 * sd_scsi_probe_cache_fini() 2698 * sd_scsi_clear_probe_cache() 2699 * sd_scsi_probe_with_cache() 2700 */ 2701 2702 /* 2703 * Function: sd_scsi_probe_cache_init 2704 * 2705 * Description: Initializes the probe response cache mutex and head pointer. 2706 * 2707 * Context: Kernel thread context 2708 */ 2709 2710 static void 2711 sd_scsi_probe_cache_init(void) 2712 { 2713 mutex_init(&sd_scsi_probe_cache_mutex, NULL, MUTEX_DRIVER, NULL); 2714 sd_scsi_probe_cache_head = NULL; 2715 } 2716 2717 2718 /* 2719 * Function: sd_scsi_probe_cache_fini 2720 * 2721 * Description: Frees all resources associated with the probe response cache. 2722 * 2723 * Context: Kernel thread context 2724 */ 2725 2726 static void 2727 sd_scsi_probe_cache_fini(void) 2728 { 2729 struct sd_scsi_probe_cache *cp; 2730 struct sd_scsi_probe_cache *ncp; 2731 2732 /* Clean up our smart probing linked list */ 2733 for (cp = sd_scsi_probe_cache_head; cp != NULL; cp = ncp) { 2734 ncp = cp->next; 2735 kmem_free(cp, sizeof (struct sd_scsi_probe_cache)); 2736 } 2737 sd_scsi_probe_cache_head = NULL; 2738 mutex_destroy(&sd_scsi_probe_cache_mutex); 2739 } 2740 2741 2742 /* 2743 * Function: sd_scsi_clear_probe_cache 2744 * 2745 * Description: This routine clears the probe response cache. This is 2746 * done when open() returns ENXIO so that when deferred 2747 * attach is attempted (possibly after a device has been 2748 * turned on) we will retry the probe. Since we don't know 2749 * which target we failed to open, we just clear the 2750 * entire cache. 2751 * 2752 * Context: Kernel thread context 2753 */ 2754 2755 static void 2756 sd_scsi_clear_probe_cache(void) 2757 { 2758 struct sd_scsi_probe_cache *cp; 2759 int i; 2760 2761 mutex_enter(&sd_scsi_probe_cache_mutex); 2762 for (cp = sd_scsi_probe_cache_head; cp != NULL; cp = cp->next) { 2763 /* 2764 * Reset all entries to SCSIPROBE_EXISTS. This will 2765 * force probing to be performed the next time 2766 * sd_scsi_probe_with_cache is called. 2767 */ 2768 for (i = 0; i < NTARGETS_WIDE; i++) { 2769 cp->cache[i] = SCSIPROBE_EXISTS; 2770 } 2771 } 2772 mutex_exit(&sd_scsi_probe_cache_mutex); 2773 } 2774 2775 2776 /* 2777 * Function: sd_scsi_probe_with_cache 2778 * 2779 * Description: This routine implements support for a scsi device probe 2780 * with cache. The driver maintains a cache of the target 2781 * responses to scsi probes. 
If we get no response from a 2782 * target during a probe inquiry, we remember that, and we 2783 * avoid additional calls to scsi_probe on non-zero LUNs 2784 * on the same target until the cache is cleared. By doing 2785 * so we avoid the 1/4 sec selection timeout for nonzero 2786 * LUNs. lun0 of a target is always probed. 2787 * 2788 * Arguments: devp - Pointer to a scsi_device(9S) structure 2789 * waitfunc - indicates what the allocator routines should 2790 * do when resources are not available. This value 2791 * is passed on to scsi_probe() when that routine 2792 * is called. 2793 * 2794 * Return Code: SCSIPROBE_NORESP if a NORESP in probe response cache; 2795 * otherwise the value returned by scsi_probe(9F). 2796 * 2797 * Context: Kernel thread context 2798 */ 2799 2800 static int 2801 sd_scsi_probe_with_cache(struct scsi_device *devp, int (*waitfn)()) 2802 { 2803 struct sd_scsi_probe_cache *cp; 2804 dev_info_t *pdip = ddi_get_parent(devp->sd_dev); 2805 int lun, tgt; 2806 2807 lun = ddi_prop_get_int(DDI_DEV_T_ANY, devp->sd_dev, DDI_PROP_DONTPASS, 2808 SCSI_ADDR_PROP_LUN, 0); 2809 tgt = ddi_prop_get_int(DDI_DEV_T_ANY, devp->sd_dev, DDI_PROP_DONTPASS, 2810 SCSI_ADDR_PROP_TARGET, -1); 2811 2812 /* Make sure caching enabled and target in range */ 2813 if ((tgt < 0) || (tgt >= NTARGETS_WIDE)) { 2814 /* do it the old way (no cache) */ 2815 return (scsi_probe(devp, waitfn)); 2816 } 2817 2818 mutex_enter(&sd_scsi_probe_cache_mutex); 2819 2820 /* Find the cache for this scsi bus instance */ 2821 for (cp = sd_scsi_probe_cache_head; cp != NULL; cp = cp->next) { 2822 if (cp->pdip == pdip) { 2823 break; 2824 } 2825 } 2826 2827 /* If we can't find a cache for this pdip, create one */ 2828 if (cp == NULL) { 2829 int i; 2830 2831 cp = kmem_zalloc(sizeof (struct sd_scsi_probe_cache), 2832 KM_SLEEP); 2833 cp->pdip = pdip; 2834 cp->next = sd_scsi_probe_cache_head; 2835 sd_scsi_probe_cache_head = cp; 2836 for (i = 0; i < NTARGETS_WIDE; i++) { 2837 cp->cache[i] = SCSIPROBE_EXISTS; 2838 } 2839 } 2840 2841 mutex_exit(&sd_scsi_probe_cache_mutex); 2842 2843 /* Recompute the cache for this target if LUN zero */ 2844 if (lun == 0) { 2845 cp->cache[tgt] = SCSIPROBE_EXISTS; 2846 } 2847 2848 /* Don't probe if cache remembers a NORESP from a previous LUN. */ 2849 if (cp->cache[tgt] != SCSIPROBE_EXISTS) { 2850 return (SCSIPROBE_NORESP); 2851 } 2852 2853 /* Do the actual probe; save & return the result */ 2854 return (cp->cache[tgt] = scsi_probe(devp, waitfn)); 2855 } 2856 2857 2858 /* 2859 * Function: sd_scsi_target_lun_init 2860 * 2861 * Description: Initializes the attached lun chain mutex and head pointer. 
2862  *
2863  * Context: Kernel thread context
2864  */
2865
2866 static void
2867 sd_scsi_target_lun_init(void)
2868 {
2869 	mutex_init(&sd_scsi_target_lun_mutex, NULL, MUTEX_DRIVER, NULL);
2870 	sd_scsi_target_lun_head = NULL;
2871 }
2872
2873
2874 /*
2875  * Function: sd_scsi_target_lun_fini
2876  *
2877  * Description: Frees all resources associated with the attached lun
2878  *		chain.
2879  *
2880  * Context: Kernel thread context
2881  */
2882
2883 static void
2884 sd_scsi_target_lun_fini(void)
2885 {
2886 	struct sd_scsi_hba_tgt_lun	*cp;
2887 	struct sd_scsi_hba_tgt_lun	*ncp;
2888
2889 	for (cp = sd_scsi_target_lun_head; cp != NULL; cp = ncp) {
2890 		ncp = cp->next;
2891 		kmem_free(cp, sizeof (struct sd_scsi_hba_tgt_lun));
2892 	}
2893 	sd_scsi_target_lun_head = NULL;
2894 	mutex_destroy(&sd_scsi_target_lun_mutex);
2895 }
2896
2897
2898 /*
2899  * Function: sd_scsi_get_target_lun_count
2900  *
2901  * Description: This routine checks the attached lun chain to see how many
2902  *		luns are attached on the given SCSI controller and target.
2903  *		Currently, some capabilities, such as tagged queueing, are
2904  *		supported by the HBA on a per-target basis, so all luns on
2905  *		a target share the same capabilities. Based on this
2906  *		assumption, sd should only set these capabilities once per
2907  *		target. This function is called when sd needs to decide how
2908  *		many luns are already attached on a target.
2909  *
2910  * Arguments: dip - Pointer to the system's dev_info_t for the SCSI
2911  *		controller device.
2912  *		target - The target ID on the controller's SCSI bus.
2913  *
2914  * Return Code: The number of luns attached on the given target and
2915  *		controller.
2916  *		-1 if target ID is not in parallel SCSI scope or the given
2917  *		dip is not in the chain.
2918  *
2919  * Context: Kernel thread context
2920  */
2921
2922 static int
2923 sd_scsi_get_target_lun_count(dev_info_t *dip, int target)
2924 {
2925 	struct sd_scsi_hba_tgt_lun	*cp;
2926
2927 	if ((target < 0) || (target >= NTARGETS_WIDE)) {
2928 		return (-1);
2929 	}
2930
2931 	mutex_enter(&sd_scsi_target_lun_mutex);
2932
2933 	for (cp = sd_scsi_target_lun_head; cp != NULL; cp = cp->next) {
2934 		if (cp->pdip == dip) {
2935 			break;
2936 		}
2937 	}
2938
2939 	mutex_exit(&sd_scsi_target_lun_mutex);
2940
2941 	if (cp == NULL) {
2942 		return (-1);
2943 	}
2944
2945 	return (cp->nlun[target]);
2946 }
2947
2948
2949 /*
2950  * Function: sd_scsi_update_lun_on_target
2951  *
2952  * Description: This routine is used to update the attached lun chain when a
2953  *		lun is attached or detached on a target.
2954  *
2955  * Arguments: dip - Pointer to the system's dev_info_t for the SCSI
2956  *		controller device.
2957  *		target - The target ID on the controller's SCSI bus.
2958  *		flag - Indicates whether the lun is attached or detached.
2959 * 2960 * Context: Kernel thread context 2961 */ 2962 2963 static void 2964 sd_scsi_update_lun_on_target(dev_info_t *dip, int target, int flag) 2965 { 2966 struct sd_scsi_hba_tgt_lun *cp; 2967 2968 mutex_enter(&sd_scsi_target_lun_mutex); 2969 2970 for (cp = sd_scsi_target_lun_head; cp != NULL; cp = cp->next) { 2971 if (cp->pdip == dip) { 2972 break; 2973 } 2974 } 2975 2976 if ((cp == NULL) && (flag == SD_SCSI_LUN_ATTACH)) { 2977 cp = kmem_zalloc(sizeof (struct sd_scsi_hba_tgt_lun), 2978 KM_SLEEP); 2979 cp->pdip = dip; 2980 cp->next = sd_scsi_target_lun_head; 2981 sd_scsi_target_lun_head = cp; 2982 } 2983 2984 mutex_exit(&sd_scsi_target_lun_mutex); 2985 2986 if (cp != NULL) { 2987 if (flag == SD_SCSI_LUN_ATTACH) { 2988 cp->nlun[target] ++; 2989 } else { 2990 cp->nlun[target] --; 2991 } 2992 } 2993 } 2994 2995 2996 /* 2997 * Function: sd_spin_up_unit 2998 * 2999 * Description: Issues the following commands to spin-up the device: 3000 * START STOP UNIT, and INQUIRY. 3001 * 3002 * Arguments: ssc - ssc contains pointer to driver soft state (unit) 3003 * structure for this target. 3004 * 3005 * Return Code: 0 - success 3006 * EIO - failure 3007 * EACCES - reservation conflict 3008 * 3009 * Context: Kernel thread context 3010 */ 3011 3012 static int 3013 sd_spin_up_unit(sd_ssc_t *ssc) 3014 { 3015 size_t resid = 0; 3016 int has_conflict = FALSE; 3017 uchar_t *bufaddr; 3018 int status; 3019 struct sd_lun *un; 3020 3021 ASSERT(ssc != NULL); 3022 un = ssc->ssc_un; 3023 ASSERT(un != NULL); 3024 3025 /* 3026 * Send a throwaway START UNIT command. 3027 * 3028 * If we fail on this, we don't care presently what precisely 3029 * is wrong. EMC's arrays will also fail this with a check 3030 * condition (0x2/0x4/0x3) if the device is "inactive," but 3031 * we don't want to fail the attach because it may become 3032 * "active" later. 3033 */ 3034 status = sd_send_scsi_START_STOP_UNIT(ssc, SD_TARGET_START, 3035 SD_PATH_DIRECT); 3036 3037 if (status != 0) { 3038 if (status == EACCES) 3039 has_conflict = TRUE; 3040 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 3041 } 3042 3043 /* 3044 * Send another INQUIRY command to the target. This is necessary for 3045 * non-removable media direct access devices because their INQUIRY data 3046 * may not be fully qualified until they are spun up (perhaps via the 3047 * START command above). Note: This seems to be needed for some 3048 * legacy devices only.) The INQUIRY command should succeed even if a 3049 * Reservation Conflict is present. 3050 */ 3051 bufaddr = kmem_zalloc(SUN_INQSIZE, KM_SLEEP); 3052 3053 if (sd_send_scsi_INQUIRY(ssc, bufaddr, SUN_INQSIZE, 0, 0, &resid) 3054 != 0) { 3055 kmem_free(bufaddr, SUN_INQSIZE); 3056 sd_ssc_assessment(ssc, SD_FMT_STATUS_CHECK); 3057 return (EIO); 3058 } 3059 3060 /* 3061 * If we got enough INQUIRY data, copy it over the old INQUIRY data. 3062 * Note that this routine does not return a failure here even if the 3063 * INQUIRY command did not return any data. This is a legacy behavior. 3064 */ 3065 if ((SUN_INQSIZE - resid) >= SUN_MIN_INQLEN) { 3066 bcopy(bufaddr, SD_INQUIRY(un), SUN_INQSIZE); 3067 } 3068 3069 kmem_free(bufaddr, SUN_INQSIZE); 3070 3071 /* If we hit a reservation conflict above, tell the caller. */ 3072 if (has_conflict == TRUE) { 3073 return (EACCES); 3074 } 3075 3076 return (0); 3077 } 3078 3079 #ifdef _LP64 3080 /* 3081 * Function: sd_enable_descr_sense 3082 * 3083 * Description: This routine attempts to select descriptor sense format 3084 * using the Control mode page. 
Devices that support 64 bit 3085 * LBAs (for >2TB luns) should also implement descriptor 3086 * sense data so we will call this function whenever we see 3087 * a lun larger than 2TB. If for some reason the device 3088 * supports 64 bit LBAs but doesn't support descriptor sense 3089 * presumably the mode select will fail. Everything will 3090 * continue to work normally except that we will not get 3091 * complete sense data for commands that fail with an LBA 3092 * larger than 32 bits. 3093 * 3094 * Arguments: ssc - ssc contains pointer to driver soft state (unit) 3095 * structure for this target. 3096 * 3097 * Context: Kernel thread context only 3098 */ 3099 3100 static void 3101 sd_enable_descr_sense(sd_ssc_t *ssc) 3102 { 3103 uchar_t *header; 3104 struct mode_control_scsi3 *ctrl_bufp; 3105 size_t buflen; 3106 size_t bd_len; 3107 int status; 3108 struct sd_lun *un; 3109 3110 ASSERT(ssc != NULL); 3111 un = ssc->ssc_un; 3112 ASSERT(un != NULL); 3113 3114 /* 3115 * Read MODE SENSE page 0xA, Control Mode Page 3116 */ 3117 buflen = MODE_HEADER_LENGTH + MODE_BLK_DESC_LENGTH + 3118 sizeof (struct mode_control_scsi3); 3119 header = kmem_zalloc(buflen, KM_SLEEP); 3120 3121 status = sd_send_scsi_MODE_SENSE(ssc, CDB_GROUP0, header, buflen, 3122 MODEPAGE_CTRL_MODE, SD_PATH_DIRECT); 3123 3124 if (status != 0) { 3125 SD_ERROR(SD_LOG_COMMON, un, 3126 "sd_enable_descr_sense: mode sense ctrl page failed\n"); 3127 goto eds_exit; 3128 } 3129 3130 /* 3131 * Determine size of Block Descriptors in order to locate 3132 * the mode page data. ATAPI devices return 0, SCSI devices 3133 * should return MODE_BLK_DESC_LENGTH. 3134 */ 3135 bd_len = ((struct mode_header *)header)->bdesc_length; 3136 3137 /* Clear the mode data length field for MODE SELECT */ 3138 ((struct mode_header *)header)->length = 0; 3139 3140 ctrl_bufp = (struct mode_control_scsi3 *) 3141 (header + MODE_HEADER_LENGTH + bd_len); 3142 3143 /* 3144 * If the page length is smaller than the expected value, 3145 * the target device doesn't support D_SENSE. Bail out here. 3146 */ 3147 if (ctrl_bufp->mode_page.length < 3148 sizeof (struct mode_control_scsi3) - 2) { 3149 SD_ERROR(SD_LOG_COMMON, un, 3150 "sd_enable_descr_sense: enable D_SENSE failed\n"); 3151 goto eds_exit; 3152 } 3153 3154 /* 3155 * Clear PS bit for MODE SELECT 3156 */ 3157 ctrl_bufp->mode_page.ps = 0; 3158 3159 /* 3160 * Set D_SENSE to enable descriptor sense format. 
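 * (D_SENSE is a bit in the Control mode page, 0x0A; when set, the target
 * returns descriptor-format rather than fixed-format sense data, per
 * SPC-3.)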
3161 */ 3162 ctrl_bufp->d_sense = 1; 3163 3164 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 3165 3166 /* 3167 * Use MODE SELECT to commit the change to the D_SENSE bit 3168 */ 3169 status = sd_send_scsi_MODE_SELECT(ssc, CDB_GROUP0, header, 3170 buflen, SD_DONTSAVE_PAGE, SD_PATH_DIRECT); 3171 3172 if (status != 0) { 3173 SD_INFO(SD_LOG_COMMON, un, 3174 "sd_enable_descr_sense: mode select ctrl page failed\n"); 3175 } else { 3176 kmem_free(header, buflen); 3177 return; 3178 } 3179 3180 eds_exit: 3181 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 3182 kmem_free(header, buflen); 3183 } 3184 3185 /* 3186 * Function: sd_reenable_dsense_task 3187 * 3188 * Description: Re-enable descriptor sense after device or bus reset 3189 * 3190 * Context: Executes in a taskq() thread context 3191 */ 3192 static void 3193 sd_reenable_dsense_task(void *arg) 3194 { 3195 struct sd_lun *un = arg; 3196 sd_ssc_t *ssc; 3197 3198 ASSERT(un != NULL); 3199 3200 ssc = sd_ssc_init(un); 3201 sd_enable_descr_sense(ssc); 3202 sd_ssc_fini(ssc); 3203 } 3204 #endif /* _LP64 */ 3205 3206 /* 3207 * Function: sd_set_mmc_caps 3208 * 3209 * Description: This routine determines if the device is MMC compliant and if 3210 * the device supports CDDA via a mode sense of the CDVD 3211 * capabilities mode page. Also checks if the device is a 3212 * dvdram writable device. 3213 * 3214 * Arguments: ssc - ssc contains pointer to driver soft state (unit) 3215 * structure for this target. 3216 * 3217 * Context: Kernel thread context only 3218 */ 3219 3220 static void 3221 sd_set_mmc_caps(sd_ssc_t *ssc) 3222 { 3223 struct mode_header_grp2 *sense_mhp; 3224 uchar_t *sense_page; 3225 caddr_t buf; 3226 int bd_len; 3227 int status; 3228 struct uscsi_cmd com; 3229 int rtn; 3230 uchar_t *out_data_rw, *out_data_hd; 3231 uchar_t *rqbuf_rw, *rqbuf_hd; 3232 struct sd_lun *un; 3233 3234 ASSERT(ssc != NULL); 3235 un = ssc->ssc_un; 3236 ASSERT(un != NULL); 3237 3238 /* 3239 * The flags which will be set in this function are - mmc compliant, 3240 * dvdram writable device, cdda support. Initialize them to FALSE 3241 * and if a capability is detected - it will be set to TRUE. 3242 */ 3243 un->un_f_mmc_cap = FALSE; 3244 un->un_f_dvdram_writable_device = FALSE; 3245 un->un_f_cfg_cdda = FALSE; 3246 3247 buf = kmem_zalloc(BUFLEN_MODE_CDROM_CAP, KM_SLEEP); 3248 status = sd_send_scsi_MODE_SENSE(ssc, CDB_GROUP1, (uchar_t *)buf, 3249 BUFLEN_MODE_CDROM_CAP, MODEPAGE_CDROM_CAP, SD_PATH_DIRECT); 3250 3251 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 3252 3253 if (status != 0) { 3254 /* command failed; just return */ 3255 kmem_free(buf, BUFLEN_MODE_CDROM_CAP); 3256 return; 3257 } 3258 /* 3259 * If the mode sense request for the CDROM CAPABILITIES 3260 * page (0x2A) succeeds the device is assumed to be MMC. 3261 */ 3262 un->un_f_mmc_cap = TRUE; 3263 3264 /* Get to the page data */ 3265 sense_mhp = (struct mode_header_grp2 *)buf; 3266 bd_len = (sense_mhp->bdesc_length_hi << 8) | 3267 sense_mhp->bdesc_length_lo; 3268 if (bd_len > MODE_BLK_DESC_LENGTH) { 3269 /* 3270 * We did not get back the expected block descriptor 3271 * length so we cannot determine if the device supports 3272 * CDDA. However, we still indicate the device is MMC 3273 * according to the successful response to the page 3274 * 0x2A mode sense request. 
3275 */ 3276 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 3277 "sd_set_mmc_caps: Mode Sense returned " 3278 "invalid block descriptor length\n"); 3279 kmem_free(buf, BUFLEN_MODE_CDROM_CAP); 3280 return; 3281 } 3282 3283 /* See if read CDDA is supported */ 3284 sense_page = (uchar_t *)(buf + MODE_HEADER_LENGTH_GRP2 + 3285 bd_len); 3286 un->un_f_cfg_cdda = (sense_page[5] & 0x01) ? TRUE : FALSE; 3287 3288 /* See if writing DVD RAM is supported. */ 3289 un->un_f_dvdram_writable_device = (sense_page[3] & 0x20) ? TRUE : FALSE; 3290 if (un->un_f_dvdram_writable_device == TRUE) { 3291 kmem_free(buf, BUFLEN_MODE_CDROM_CAP); 3292 return; 3293 } 3294 3295 /* 3296 * If the device presents DVD or CD capabilities in the mode 3297 * page, we can return here since a RRD will not have 3298 * these capabilities. 3299 */ 3300 if ((sense_page[2] & 0x3f) || (sense_page[3] & 0x3f)) { 3301 kmem_free(buf, BUFLEN_MODE_CDROM_CAP); 3302 return; 3303 } 3304 kmem_free(buf, BUFLEN_MODE_CDROM_CAP); 3305 3306 /* 3307 * If un->un_f_dvdram_writable_device is still FALSE, 3308 * check for a Removable Rigid Disk (RRD). A RRD 3309 * device is identified by the features RANDOM_WRITABLE and 3310 * HARDWARE_DEFECT_MANAGEMENT. 3311 */ 3312 out_data_rw = kmem_zalloc(SD_CURRENT_FEATURE_LEN, KM_SLEEP); 3313 rqbuf_rw = kmem_zalloc(SENSE_LENGTH, KM_SLEEP); 3314 3315 rtn = sd_send_scsi_feature_GET_CONFIGURATION(ssc, &com, rqbuf_rw, 3316 SENSE_LENGTH, out_data_rw, SD_CURRENT_FEATURE_LEN, 3317 RANDOM_WRITABLE, SD_PATH_STANDARD); 3318 3319 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 3320 3321 if (rtn != 0) { 3322 kmem_free(out_data_rw, SD_CURRENT_FEATURE_LEN); 3323 kmem_free(rqbuf_rw, SENSE_LENGTH); 3324 return; 3325 } 3326 3327 out_data_hd = kmem_zalloc(SD_CURRENT_FEATURE_LEN, KM_SLEEP); 3328 rqbuf_hd = kmem_zalloc(SENSE_LENGTH, KM_SLEEP); 3329 3330 rtn = sd_send_scsi_feature_GET_CONFIGURATION(ssc, &com, rqbuf_hd, 3331 SENSE_LENGTH, out_data_hd, SD_CURRENT_FEATURE_LEN, 3332 HARDWARE_DEFECT_MANAGEMENT, SD_PATH_STANDARD); 3333 3334 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 3335 3336 if (rtn == 0) { 3337 /* 3338 * We have good information, check for random writable 3339 * and hardware defect features. 3340 */ 3341 if ((out_data_rw[9] & RANDOM_WRITABLE) && 3342 (out_data_hd[9] & HARDWARE_DEFECT_MANAGEMENT)) { 3343 un->un_f_dvdram_writable_device = TRUE; 3344 } 3345 } 3346 3347 kmem_free(out_data_rw, SD_CURRENT_FEATURE_LEN); 3348 kmem_free(rqbuf_rw, SENSE_LENGTH); 3349 kmem_free(out_data_hd, SD_CURRENT_FEATURE_LEN); 3350 kmem_free(rqbuf_hd, SENSE_LENGTH); 3351 } 3352 3353 /* 3354 * Function: sd_check_for_writable_cd 3355 * 3356 * Description: This routine determines if the media in the device is 3357 * writable or not. It uses the get configuration command (0x46) 3358 * to determine if the media is writable 3359 * 3360 * Arguments: un - driver soft state (unit) structure 3361 * path_flag - SD_PATH_DIRECT to use the USCSI "direct" 3362 * chain and the normal command waitq, or 3363 * SD_PATH_DIRECT_PRIORITY to use the USCSI 3364 * "direct" chain and bypass the normal command 3365 * waitq. 3366 * 3367 * Context: Never called at interrupt context. 
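 *
 *		For reference: the GET CONFIGURATION response parsed by
 *		this routine begins with an 8-byte feature header whose
 *		bytes 6-7 hold the current profile (big-endian). The
 *		check of (out_data[6] == 0) && (out_data[7] == 0x12)
 *		below accepts a current profile of 0x0012, which MMC
 *		defines as DVD-RAM, i.e. writable media.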
3368  */
3369 
3370 static void
3371 sd_check_for_writable_cd(sd_ssc_t *ssc, int path_flag)
3372 {
3373 	struct uscsi_cmd		com;
3374 	uchar_t			*out_data;
3375 	uchar_t			*rqbuf;
3376 	int			rtn;
3377 	uchar_t			*out_data_rw, *out_data_hd;
3378 	uchar_t			*rqbuf_rw, *rqbuf_hd;
3379 	struct mode_header_grp2	*sense_mhp;
3380 	uchar_t			*sense_page;
3381 	caddr_t			buf;
3382 	int			bd_len;
3383 	int			status;
3384 	struct sd_lun		*un;
3385 
3386 	ASSERT(ssc != NULL);
3387 	un = ssc->ssc_un;
3388 	ASSERT(un != NULL);
3389 	ASSERT(mutex_owned(SD_MUTEX(un)));
3390 
3391 	/*
3392 	 * Initialize writable media to FALSE; it is set to TRUE only if
3393 	 * the configuration info tells us otherwise.
3394 	 */
3395 	un->un_f_mmc_writable_media = FALSE;
3396 	mutex_exit(SD_MUTEX(un));
3397 
3398 	out_data = kmem_zalloc(SD_PROFILE_HEADER_LEN, KM_SLEEP);
3399 	rqbuf = kmem_zalloc(SENSE_LENGTH, KM_SLEEP);
3400 
3401 	rtn = sd_send_scsi_GET_CONFIGURATION(ssc, &com, rqbuf, SENSE_LENGTH,
3402 	    out_data, SD_PROFILE_HEADER_LEN, path_flag);
3403 
3404 	if (rtn != 0)
3405 		sd_ssc_assessment(ssc, SD_FMT_IGNORE);
3406 
3407 	mutex_enter(SD_MUTEX(un));
3408 	if (rtn == 0) {
3409 		/*
3410 		 * We have good information, check for writable DVD.
3411 		 */
3412 		if ((out_data[6] == 0) && (out_data[7] == 0x12)) {
3413 			un->un_f_mmc_writable_media = TRUE;
3414 			kmem_free(out_data, SD_PROFILE_HEADER_LEN);
3415 			kmem_free(rqbuf, SENSE_LENGTH);
3416 			return;
3417 		}
3418 	}
3419 
3420 	kmem_free(out_data, SD_PROFILE_HEADER_LEN);
3421 	kmem_free(rqbuf, SENSE_LENGTH);
3422 
3423 	/*
3424 	 * Determine if this is an RRD type device.
3425 	 */
3426 	mutex_exit(SD_MUTEX(un));
3427 	buf = kmem_zalloc(BUFLEN_MODE_CDROM_CAP, KM_SLEEP);
3428 	status = sd_send_scsi_MODE_SENSE(ssc, CDB_GROUP1, (uchar_t *)buf,
3429 	    BUFLEN_MODE_CDROM_CAP, MODEPAGE_CDROM_CAP, path_flag);
3430 
3431 	sd_ssc_assessment(ssc, SD_FMT_IGNORE);
3432 
3433 	mutex_enter(SD_MUTEX(un));
3434 	if (status != 0) {
3435 		/* command failed; just return */
3436 		kmem_free(buf, BUFLEN_MODE_CDROM_CAP);
3437 		return;
3438 	}
3439 
3440 	/* Get to the page data */
3441 	sense_mhp = (struct mode_header_grp2 *)buf;
3442 	bd_len = (sense_mhp->bdesc_length_hi << 8) | sense_mhp->bdesc_length_lo;
3443 	if (bd_len > MODE_BLK_DESC_LENGTH) {
3444 		/*
3445 		 * We did not get back the expected block descriptor length so
3446 		 * we cannot check the mode page.
3447 		 */
3448 		scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
3449 		    "sd_check_for_writable_cd: Mode Sense returned "
3450 		    "invalid block descriptor length\n");
3451 		kmem_free(buf, BUFLEN_MODE_CDROM_CAP);
3452 		return;
3453 	}
3454 
3455 	/*
3456 	 * If the device presents DVD or CD capabilities in the mode
3457 	 * page, we can return here since an RRD device will not have
3458 	 * these capabilities.
3459 	 */
3460 	sense_page = (uchar_t *)(buf + MODE_HEADER_LENGTH_GRP2 + bd_len);
3461 	if ((sense_page[2] & 0x3f) || (sense_page[3] & 0x3f)) {
3462 		kmem_free(buf, BUFLEN_MODE_CDROM_CAP);
3463 		return;
3464 	}
3465 	kmem_free(buf, BUFLEN_MODE_CDROM_CAP);
3466 
3467 	/*
3468 	 * If un->un_f_mmc_writable_media is still FALSE,
3469 	 * check for RRD type media. An RRD device is identified
3470 	 * by the features RANDOM_WRITABLE and HARDWARE_DEFECT_MANAGEMENT.
3471 */ 3472 mutex_exit(SD_MUTEX(un)); 3473 out_data_rw = kmem_zalloc(SD_CURRENT_FEATURE_LEN, KM_SLEEP); 3474 rqbuf_rw = kmem_zalloc(SENSE_LENGTH, KM_SLEEP); 3475 3476 rtn = sd_send_scsi_feature_GET_CONFIGURATION(ssc, &com, rqbuf_rw, 3477 SENSE_LENGTH, out_data_rw, SD_CURRENT_FEATURE_LEN, 3478 RANDOM_WRITABLE, path_flag); 3479 3480 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 3481 if (rtn != 0) { 3482 kmem_free(out_data_rw, SD_CURRENT_FEATURE_LEN); 3483 kmem_free(rqbuf_rw, SENSE_LENGTH); 3484 mutex_enter(SD_MUTEX(un)); 3485 return; 3486 } 3487 3488 out_data_hd = kmem_zalloc(SD_CURRENT_FEATURE_LEN, KM_SLEEP); 3489 rqbuf_hd = kmem_zalloc(SENSE_LENGTH, KM_SLEEP); 3490 3491 rtn = sd_send_scsi_feature_GET_CONFIGURATION(ssc, &com, rqbuf_hd, 3492 SENSE_LENGTH, out_data_hd, SD_CURRENT_FEATURE_LEN, 3493 HARDWARE_DEFECT_MANAGEMENT, path_flag); 3494 3495 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 3496 mutex_enter(SD_MUTEX(un)); 3497 if (rtn == 0) { 3498 /* 3499 * We have good information, check for random writable 3500 * and hardware defect features as current. 3501 */ 3502 if ((out_data_rw[9] & RANDOM_WRITABLE) && 3503 (out_data_rw[10] & 0x1) && 3504 (out_data_hd[9] & HARDWARE_DEFECT_MANAGEMENT) && 3505 (out_data_hd[10] & 0x1)) { 3506 un->un_f_mmc_writable_media = TRUE; 3507 } 3508 } 3509 3510 kmem_free(out_data_rw, SD_CURRENT_FEATURE_LEN); 3511 kmem_free(rqbuf_rw, SENSE_LENGTH); 3512 kmem_free(out_data_hd, SD_CURRENT_FEATURE_LEN); 3513 kmem_free(rqbuf_hd, SENSE_LENGTH); 3514 } 3515 3516 /* 3517 * Function: sd_read_unit_properties 3518 * 3519 * Description: The following implements a property lookup mechanism. 3520 * Properties for particular disks (keyed on vendor, model 3521 * and rev numbers) are sought in the sd.conf file via 3522 * sd_process_sdconf_file(), and if not found there, are 3523 * looked for in a list hardcoded in this driver via 3524 * sd_process_sdconf_table() Once located the properties 3525 * are used to update the driver unit structure. 3526 * 3527 * Arguments: un - driver soft state (unit) structure 3528 */ 3529 3530 static void 3531 sd_read_unit_properties(struct sd_lun *un) 3532 { 3533 /* 3534 * sd_process_sdconf_file returns SD_FAILURE if it cannot find 3535 * the "sd-config-list" property (from the sd.conf file) or if 3536 * there was not a match for the inquiry vid/pid. If this event 3537 * occurs the static driver configuration table is searched for 3538 * a match. 3539 */ 3540 ASSERT(un != NULL); 3541 if (sd_process_sdconf_file(un) == SD_FAILURE) { 3542 sd_process_sdconf_table(un); 3543 } 3544 3545 /* check for LSI device */ 3546 sd_is_lsi(un); 3547 3548 3549 } 3550 3551 3552 /* 3553 * Function: sd_process_sdconf_file 3554 * 3555 * Description: Use ddi_prop_lookup(9F) to obtain the properties from the 3556 * driver's config file (ie, sd.conf) and update the driver 3557 * soft state structure accordingly. 3558 * 3559 * Arguments: un - driver soft state (unit) structure 3560 * 3561 * Return Code: SD_SUCCESS - The properties were successfully set according 3562 * to the driver configuration file. 3563 * SD_FAILURE - The driver config list was not obtained or 3564 * there was no vid/pid match. This indicates that 3565 * the static config table should be used. 3566 * 3567 * The config file has a property, "sd-config-list". Currently we support 3568 * two kinds of formats. 
For both formats, the value of this property 3569 * is a list of duplets: 3570 * 3571 * sd-config-list= 3572 * <duplet>, 3573 * [,<duplet>]*; 3574 * 3575 * For the improved format, where 3576 * 3577 * <duplet>:= "<vid+pid>","<tunable-list>" 3578 * 3579 * and 3580 * 3581 * <tunable-list>:= <tunable> [, <tunable> ]*; 3582 * <tunable> = <name> : <value> 3583 * 3584 * The <vid+pid> is the string that is returned by the target device on a 3585 * SCSI inquiry command, the <tunable-list> contains one or more tunables 3586 * to apply to all target devices with the specified <vid+pid>. 3587 * 3588 * Each <tunable> is a "<name> : <value>" pair. 3589 * 3590 * For the old format, the structure of each duplet is as follows: 3591 * 3592 * <duplet>:= "<vid+pid>","<data-property-name_list>" 3593 * 3594 * The first entry of the duplet is the device ID string (the concatenated 3595 * vid & pid; not to be confused with a device_id). This is defined in 3596 * the same way as in the sd_disk_table. 3597 * 3598 * The second part of the duplet is a string that identifies a 3599 * data-property-name-list. The data-property-name-list is defined as 3600 * follows: 3601 * 3602 * <data-property-name-list>:=<data-property-name> [<data-property-name>] 3603 * 3604 * The syntax of <data-property-name> depends on the <version> field. 3605 * 3606 * If version = SD_CONF_VERSION_1 we have the following syntax: 3607 * 3608 * <data-property-name>:=<version>,<flags>,<prop0>,<prop1>,.....<propN> 3609 * 3610 * where the prop0 value will be used to set prop0 if bit0 set in the 3611 * flags, prop1 if bit1 set, etc. and N = SD_CONF_MAX_ITEMS -1 3612 * 3613 */ 3614 3615 static int 3616 sd_process_sdconf_file(struct sd_lun *un) 3617 { 3618 char **config_list = NULL; 3619 uint_t nelements; 3620 char *vidptr; 3621 int vidlen; 3622 char *dnlist_ptr; 3623 char *dataname_ptr; 3624 char *dataname_lasts; 3625 int *data_list = NULL; 3626 uint_t data_list_len; 3627 int rval = SD_FAILURE; 3628 int i; 3629 3630 ASSERT(un != NULL); 3631 3632 /* Obtain the configuration list associated with the .conf file */ 3633 if (ddi_prop_lookup_string_array(DDI_DEV_T_ANY, SD_DEVINFO(un), 3634 DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, sd_config_list, 3635 &config_list, &nelements) != DDI_PROP_SUCCESS) { 3636 return (SD_FAILURE); 3637 } 3638 3639 /* 3640 * Compare vids in each duplet to the inquiry vid - if a match is 3641 * made, get the data value and update the soft state structure 3642 * accordingly. 3643 * 3644 * Each duplet should show as a pair of strings, return SD_FAILURE 3645 * otherwise. 3646 */ 3647 if (nelements & 1) { 3648 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 3649 "sd-config-list should show as pairs of strings.\n"); 3650 if (config_list) 3651 ddi_prop_free(config_list); 3652 return (SD_FAILURE); 3653 } 3654 3655 for (i = 0; i < nelements; i += 2) { 3656 /* 3657 * Note: The assumption here is that each vid entry is on 3658 * a unique line from its associated duplet. 3659 */ 3660 vidptr = config_list[i]; 3661 vidlen = (int)strlen(vidptr); 3662 if ((vidlen == 0) || 3663 (sd_sdconf_id_match(un, vidptr, vidlen) != SD_SUCCESS)) { 3664 continue; 3665 } 3666 3667 /* 3668 * dnlist contains 1 or more blank separated 3669 * data-property-name entries 3670 */ 3671 dnlist_ptr = config_list[i + 1]; 3672 3673 if (strchr(dnlist_ptr, ':') != NULL) { 3674 /* 3675 * Decode the improved format sd-config-list. 
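		 * For illustration, a hypothetical improved-format entry
		 * (the vid/pid "ACME    SuperDisk" is invented; the
		 * tunable names are ones handled by sd_set_properties()):
		 *
		 *	sd-config-list = "ACME    SuperDisk",
		 *	    "throttle-max:64, disksort:false,
		 *	    cache-nonvolatile:true";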
3676 		 */
3677 			sd_nvpair_str_decode(un, dnlist_ptr);
3678 		} else {
3679 			/*
3680 			 * This is the old format sd-config-list: loop
3681 			 * through all data-property-name entries in the
3682 			 * data-property-name-list, setting the properties
3683 			 * for each.
3684 			 */
3685 			for (dataname_ptr = sd_strtok_r(dnlist_ptr, " \t",
3686 			    &dataname_lasts); dataname_ptr != NULL;
3687 			    dataname_ptr = sd_strtok_r(NULL, " \t",
3688 			    &dataname_lasts)) {
3689 				int version;
3690 
3691 				SD_INFO(SD_LOG_ATTACH_DETACH, un,
3692 				    "sd_process_sdconf_file: disk:%s, "
3693 				    "data:%s\n", vidptr, dataname_ptr);
3694 
3695 				/* Get the data list */
3696 				if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY,
3697 				    SD_DEVINFO(un), 0, dataname_ptr, &data_list,
3698 				    &data_list_len) != DDI_PROP_SUCCESS) {
3699 					SD_INFO(SD_LOG_ATTACH_DETACH, un,
3700 					    "sd_process_sdconf_file: data "
3701 					    "property (%s) has no value\n",
3702 					    dataname_ptr);
3703 					continue;
3704 				}
3705 
3706 				version = data_list[0];
3707 
3708 				if (version == SD_CONF_VERSION_1) {
3709 					sd_tunables values;
3710 
3711 					/* Set the properties */
3712 					if (sd_chk_vers1_data(un, data_list[1],
3713 					    &data_list[2], data_list_len,
3714 					    dataname_ptr) == SD_SUCCESS) {
3715 						sd_get_tunables_from_conf(un,
3716 						    data_list[1], &data_list[2],
3717 						    &values);
3718 						sd_set_vers1_properties(un,
3719 						    data_list[1], &values);
3720 						rval = SD_SUCCESS;
3721 					} else {
3722 						rval = SD_FAILURE;
3723 					}
3724 				} else {
3725 					scsi_log(SD_DEVINFO(un), sd_label,
3726 					    CE_WARN, "data property %s version "
3727 					    "0x%x is invalid.",
3728 					    dataname_ptr, version);
3729 					rval = SD_FAILURE;
3730 				}
3731 				if (data_list)
3732 					ddi_prop_free(data_list);
3733 			}
3734 		}
3735 	}
3736 
3737 	/* free up the memory allocated by ddi_prop_lookup_string_array(). */
3738 	if (config_list) {
3739 		ddi_prop_free(config_list);
3740 	}
3741 
3742 	return (rval);
3743 }
3744 
3745 /*
3746  *    Function: sd_nvpair_str_decode()
3747  *
3748  * Description: Parse the improved format sd-config-list to get each
3749  *		tunable entry, which is a name-value pair. Then call
3750  *		sd_set_properties() to set each property.
3751  *
3752  *   Arguments: un - driver soft state (unit) structure
3753  *		nvpair_str - the tunable list
3754  */
3755 static void
3756 sd_nvpair_str_decode(struct sd_lun *un, char *nvpair_str)
3757 {
3758 	char	*nv, *name, *value, *token;
3759 	char	*nv_lasts, *v_lasts, *x_lasts;
3760 
3761 	for (nv = sd_strtok_r(nvpair_str, ",", &nv_lasts); nv != NULL;
3762 	    nv = sd_strtok_r(NULL, ",", &nv_lasts)) {
3763 		token = sd_strtok_r(nv, ":", &v_lasts);
3764 		name = sd_strtok_r(token, " \t", &x_lasts);
3765 		token = sd_strtok_r(NULL, ":", &v_lasts);
3766 		value = sd_strtok_r(token, " \t", &x_lasts);
3767 		if (name == NULL || value == NULL) {
3768 			SD_INFO(SD_LOG_ATTACH_DETACH, un,
3769 			    "sd_nvpair_str_decode: "
3770 			    "name or value is not valid!\n");
3771 		} else {
3772 			sd_set_properties(un, name, value);
3773 		}
3774 	}
3775 }
3776 
3777 /*
3778  *    Function: sd_strtok_r()
3779  *
3780  * Description: This function uses strpbrk and strspn to break a
3781  *		string into tokens on sequential calls. It returns
3782  *		NULL when no non-separator characters remain. The first
3783  *		argument is NULL for subsequent calls.
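 *
 *		A minimal usage sketch (buffer contents invented for the
 *		example; note that, like strtok_r(3C), this routine
 *		writes NULs into the buffer, so it must be writable):
 *
 *			char buf[] = "retries-busy : 6, disksort : false";
 *			char *lasts, *pair;
 *
 *			for (pair = sd_strtok_r(buf, ",", &lasts);
 *			    pair != NULL;
 *			    pair = sd_strtok_r(NULL, ",", &lasts)) {
 *				(each pair is one "name : value" tunable)
 *			}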
3784 */ 3785 static char * 3786 sd_strtok_r(char *string, const char *sepset, char **lasts) 3787 { 3788 char *q, *r; 3789 3790 /* First or subsequent call */ 3791 if (string == NULL) 3792 string = *lasts; 3793 3794 if (string == NULL) 3795 return (NULL); 3796 3797 /* Skip leading separators */ 3798 q = string + strspn(string, sepset); 3799 3800 if (*q == '\0') 3801 return (NULL); 3802 3803 if ((r = strpbrk(q, sepset)) == NULL) 3804 *lasts = NULL; 3805 else { 3806 *r = '\0'; 3807 *lasts = r + 1; 3808 } 3809 return (q); 3810 } 3811 3812 /* 3813 * Function: sd_set_properties() 3814 * 3815 * Description: Set device properties based on the improved 3816 * format sd-config-list. 3817 * 3818 * Arguments: un - driver soft state (unit) structure 3819 * name - supported tunable name 3820 * value - tunable value 3821 */ 3822 static void 3823 sd_set_properties(struct sd_lun *un, char *name, char *value) 3824 { 3825 char *endptr = NULL; 3826 long val = 0; 3827 3828 if (strcasecmp(name, "cache-nonvolatile") == 0) { 3829 if (strcasecmp(value, "true") == 0) { 3830 un->un_f_suppress_cache_flush = TRUE; 3831 } else if (strcasecmp(value, "false") == 0) { 3832 un->un_f_suppress_cache_flush = FALSE; 3833 } else { 3834 goto value_invalid; 3835 } 3836 SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_set_properties: " 3837 "suppress_cache_flush flag set to %d\n", 3838 un->un_f_suppress_cache_flush); 3839 return; 3840 } 3841 3842 if (strcasecmp(name, "controller-type") == 0) { 3843 if (ddi_strtol(value, &endptr, 0, &val) == 0) { 3844 un->un_ctype = val; 3845 } else { 3846 goto value_invalid; 3847 } 3848 SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_set_properties: " 3849 "ctype set to %d\n", un->un_ctype); 3850 return; 3851 } 3852 3853 if (strcasecmp(name, "delay-busy") == 0) { 3854 if (ddi_strtol(value, &endptr, 0, &val) == 0) { 3855 un->un_busy_timeout = drv_usectohz(val / 1000); 3856 } else { 3857 goto value_invalid; 3858 } 3859 SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_set_properties: " 3860 "busy_timeout set to %d\n", un->un_busy_timeout); 3861 return; 3862 } 3863 3864 if (strcasecmp(name, "disksort") == 0) { 3865 if (strcasecmp(value, "true") == 0) { 3866 un->un_f_disksort_disabled = FALSE; 3867 } else if (strcasecmp(value, "false") == 0) { 3868 un->un_f_disksort_disabled = TRUE; 3869 } else { 3870 goto value_invalid; 3871 } 3872 SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_set_properties: " 3873 "disksort disabled flag set to %d\n", 3874 un->un_f_disksort_disabled); 3875 return; 3876 } 3877 3878 if (strcasecmp(name, "timeout-releasereservation") == 0) { 3879 if (ddi_strtol(value, &endptr, 0, &val) == 0) { 3880 un->un_reserve_release_time = val; 3881 } else { 3882 goto value_invalid; 3883 } 3884 SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_set_properties: " 3885 "reservation release timeout set to %d\n", 3886 un->un_reserve_release_time); 3887 return; 3888 } 3889 3890 if (strcasecmp(name, "reset-lun") == 0) { 3891 if (strcasecmp(value, "true") == 0) { 3892 un->un_f_lun_reset_enabled = TRUE; 3893 } else if (strcasecmp(value, "false") == 0) { 3894 un->un_f_lun_reset_enabled = FALSE; 3895 } else { 3896 goto value_invalid; 3897 } 3898 SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_set_properties: " 3899 "lun reset enabled flag set to %d\n", 3900 un->un_f_lun_reset_enabled); 3901 return; 3902 } 3903 3904 if (strcasecmp(name, "retries-busy") == 0) { 3905 if (ddi_strtol(value, &endptr, 0, &val) == 0) { 3906 un->un_busy_retry_count = val; 3907 } else { 3908 goto value_invalid; 3909 } 3910 SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_set_properties: " 3911 "busy retry 
count set to %d\n", un->un_busy_retry_count);
3912 		return;
3913 	}
3914 
3915 	if (strcasecmp(name, "retries-timeout") == 0) {
3916 		if (ddi_strtol(value, &endptr, 0, &val) == 0) {
3917 			un->un_retry_count = val;
3918 		} else {
3919 			goto value_invalid;
3920 		}
3921 		SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_set_properties: "
3922 		    "timeout retry count set to %d\n", un->un_retry_count);
3923 		return;
3924 	}
3925 
3926 	if (strcasecmp(name, "retries-notready") == 0) {
3927 		if (ddi_strtol(value, &endptr, 0, &val) == 0) {
3928 			un->un_notready_retry_count = val;
3929 		} else {
3930 			goto value_invalid;
3931 		}
3932 		SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_set_properties: "
3933 		    "notready retry count set to %d\n",
3934 		    un->un_notready_retry_count);
3935 		return;
3936 	}
3937 
3938 	if (strcasecmp(name, "retries-reset") == 0) {
3939 		if (ddi_strtol(value, &endptr, 0, &val) == 0) {
3940 			un->un_reset_retry_count = val;
3941 		} else {
3942 			goto value_invalid;
3943 		}
3944 		SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_set_properties: "
3945 		    "reset retry count set to %d\n",
3946 		    un->un_reset_retry_count);
3947 		return;
3948 	}
3949 
3950 	if (strcasecmp(name, "throttle-max") == 0) {
3951 		if (ddi_strtol(value, &endptr, 0, &val) == 0) {
3952 			un->un_saved_throttle = un->un_throttle = val;
3953 		} else {
3954 			goto value_invalid;
3955 		}
3956 		SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_set_properties: "
3957 		    "throttle set to %d\n", un->un_throttle);
3958 	}
3959 
3960 	if (strcasecmp(name, "throttle-min") == 0) {
3961 		if (ddi_strtol(value, &endptr, 0, &val) == 0) {
3962 			un->un_min_throttle = val;
3963 		} else {
3964 			goto value_invalid;
3965 		}
3966 		SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_set_properties: "
3967 		    "min throttle set to %d\n", un->un_min_throttle);
3968 	}
3969 
3970 	/*
3971 	 * Validate the throttle values.
3972 	 * If any of the numbers are invalid, set everything to defaults.
3973 	 */
3974 	if ((un->un_throttle < SD_LOWEST_VALID_THROTTLE) ||
3975 	    (un->un_min_throttle < SD_LOWEST_VALID_THROTTLE) ||
3976 	    (un->un_min_throttle > un->un_throttle)) {
3977 		un->un_saved_throttle = un->un_throttle = sd_max_throttle;
3978 		un->un_min_throttle = sd_min_throttle;
3979 	}
3980 	return;
3981 
3982 value_invalid:
3983 	SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_set_properties: "
3984 	    "value of prop %s is invalid\n", name);
3985 }
3986 
3987 /*
3988  * Function: sd_get_tunables_from_conf()
3989  *
3990  *
3991  * This function reads the data list from the sd.conf file, pulls out
3992  * the values that take numeric arguments, and places them in the
3993  * appropriate sd_tunables members.
3994  * Since the order of the data list members varies across platforms,
3995  * this function reads them from the data list in a platform-specific
3996  * order and places them into the correct sd_tunables member that is
3997  * consistent across all platforms.
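 *
 * For example, assuming SD_CONF_BSET_THROTTLE is bit 0 and
 * SD_CONF_BSET_CTYPE is bit 1 of the flags word, a data list with
 * flags = 0x3 supplies sdt_throttle from data_list[0] and sdt_ctype
 * from data_list[1]. The value slots are positional, one per flag
 * bit, whether or not that bit is set.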
3998  */
3999 static void
4000 sd_get_tunables_from_conf(struct sd_lun *un, int flags, int *data_list,
4001     sd_tunables *values)
4002 {
4003 	int i;
4004 	int mask;
4005 
4006 	bzero(values, sizeof (sd_tunables));
4007 
4008 	for (i = 0; i < SD_CONF_MAX_ITEMS; i++) {
4009 
4010 		mask = 1 << i;
4011 		if (mask > flags) {
4012 			break;
4013 		}
4014 
4015 		switch (mask & flags) {
4016 		case 0:	/* This mask bit not set in flags */
4017 			continue;
4018 		case SD_CONF_BSET_THROTTLE:
4019 			values->sdt_throttle = data_list[i];
4020 			SD_INFO(SD_LOG_ATTACH_DETACH, un,
4021 			    "sd_get_tunables_from_conf: throttle = %d\n",
4022 			    values->sdt_throttle);
4023 			break;
4024 		case SD_CONF_BSET_CTYPE:
4025 			values->sdt_ctype = data_list[i];
4026 			SD_INFO(SD_LOG_ATTACH_DETACH, un,
4027 			    "sd_get_tunables_from_conf: ctype = %d\n",
4028 			    values->sdt_ctype);
4029 			break;
4030 		case SD_CONF_BSET_NRR_COUNT:
4031 			values->sdt_not_rdy_retries = data_list[i];
4032 			SD_INFO(SD_LOG_ATTACH_DETACH, un,
4033 			    "sd_get_tunables_from_conf: not_rdy_retries = %d\n",
4034 			    values->sdt_not_rdy_retries);
4035 			break;
4036 		case SD_CONF_BSET_BSY_RETRY_COUNT:
4037 			values->sdt_busy_retries = data_list[i];
4038 			SD_INFO(SD_LOG_ATTACH_DETACH, un,
4039 			    "sd_get_tunables_from_conf: busy_retries = %d\n",
4040 			    values->sdt_busy_retries);
4041 			break;
4042 		case SD_CONF_BSET_RST_RETRIES:
4043 			values->sdt_reset_retries = data_list[i];
4044 			SD_INFO(SD_LOG_ATTACH_DETACH, un,
4045 			    "sd_get_tunables_from_conf: reset_retries = %d\n",
4046 			    values->sdt_reset_retries);
4047 			break;
4048 		case SD_CONF_BSET_RSV_REL_TIME:
4049 			values->sdt_reserv_rel_time = data_list[i];
4050 			SD_INFO(SD_LOG_ATTACH_DETACH, un,
4051 			    "sd_get_tunables_from_conf: reserv_rel_time = %d\n",
4052 			    values->sdt_reserv_rel_time);
4053 			break;
4054 		case SD_CONF_BSET_MIN_THROTTLE:
4055 			values->sdt_min_throttle = data_list[i];
4056 			SD_INFO(SD_LOG_ATTACH_DETACH, un,
4057 			    "sd_get_tunables_from_conf: min_throttle = %d\n",
4058 			    values->sdt_min_throttle);
4059 			break;
4060 		case SD_CONF_BSET_DISKSORT_DISABLED:
4061 			values->sdt_disk_sort_dis = data_list[i];
4062 			SD_INFO(SD_LOG_ATTACH_DETACH, un,
4063 			    "sd_get_tunables_from_conf: disk_sort_dis = %d\n",
4064 			    values->sdt_disk_sort_dis);
4065 			break;
4066 		case SD_CONF_BSET_LUN_RESET_ENABLED:
4067 			values->sdt_lun_reset_enable = data_list[i];
4068 			SD_INFO(SD_LOG_ATTACH_DETACH, un,
4069 			    "sd_get_tunables_from_conf: lun_reset_enable = %d"
4070 			    "\n", values->sdt_lun_reset_enable);
4071 			break;
4072 		case SD_CONF_BSET_CACHE_IS_NV:
4073 			values->sdt_suppress_cache_flush = data_list[i];
4074 			SD_INFO(SD_LOG_ATTACH_DETACH, un,
4075 			    "sd_get_tunables_from_conf: "
4076 			    "suppress_cache_flush = %d"
4077 			    "\n", values->sdt_suppress_cache_flush);
4078 			break;
4079 		}
4080 	}
4081 }
4082 
4083 /*
4084  *    Function: sd_process_sdconf_table
4085  *
4086  * Description: Search the static configuration table for a match on the
4087  *		inquiry vid/pid and update the driver soft state structure
4088  *		according to the table property values for the device.
4089  *
4090  *		The form of a configuration table entry is:
4091  *		  <vid+pid>,<flags>,<property-data>
4092  *		  "SEAGATE ST42400N",1,0x40000,
4093  *		  0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1;
4094  *
4095  *   Arguments: un - driver soft state (unit) structure
4096  */
4097 
4098 static void
4099 sd_process_sdconf_table(struct sd_lun *un)
4100 {
4101 	char	*id = NULL;
4102 	int	table_index;
4103 	int	idlen;
4104 
4105 	ASSERT(un != NULL);
4106 	for (table_index = 0; table_index < sd_disk_table_size;
4107 	    table_index++) {
4108 		id = sd_disk_table[table_index].device_id;
4109 		idlen = strlen(id);
4110 		if (idlen == 0) {
4111 			continue;
4112 		}
4113 
4114 		/*
4115 		 * The static configuration table currently does not
4116 		 * implement version 10 properties. Additionally,
4117 		 * multiple data-property-name entries are not
4118 		 * implemented in the static configuration table.
4119 		 */
4120 		if (sd_sdconf_id_match(un, id, idlen) == SD_SUCCESS) {
4121 			SD_INFO(SD_LOG_ATTACH_DETACH, un,
4122 			    "sd_process_sdconf_table: disk %s\n", id);
4123 			sd_set_vers1_properties(un,
4124 			    sd_disk_table[table_index].flags,
4125 			    sd_disk_table[table_index].properties);
4126 			break;
4127 		}
4128 	}
4129 }
4130 
4131 
4132 /*
4133  *    Function: sd_sdconf_id_match
4134  *
4135  * Description: This local function implements a case insensitive vid/pid
4136  *		comparison as well as the boundary cases of wild card and
4137  *		multiple blanks.
4138  *
4139  *		Note: An implicit assumption made here is that the scsi
4140  *		inquiry structure will always keep the vid, pid and
4141  *		revision strings in consecutive sequence, so they can be
4142  *		read as a single string. If this assumption is not the
4143  *		case, a separate string, to be used for the check, needs
4144  *		to be built with these strings concatenated.
4145  *
4146  *   Arguments: un - driver soft state (unit) structure
4147  *		id - table or config file vid/pid
4148  *		idlen - length of the vid/pid (bytes)
4149  *
4150  * Return Code: SD_SUCCESS - Indicates a match with the inquiry vid/pid
4151  *		SD_FAILURE - Indicates no match with the inquiry vid/pid
4152  */
4153 
4154 static int
4155 sd_sdconf_id_match(struct sd_lun *un, char *id, int idlen)
4156 {
4157 	struct scsi_inquiry	*sd_inq;
4158 	int			rval = SD_SUCCESS;
4159 
4160 	ASSERT(un != NULL);
4161 	sd_inq = un->un_sd->sd_inq;
4162 	ASSERT(id != NULL);
4163 
4164 	/*
4165 	 * We use the inq_vid as a pointer to a buffer containing the
4166 	 * vid and pid and use the entire vid/pid length of the table
4167 	 * entry for the comparison. This works because the inq_pid
4168 	 * data member follows inq_vid in the scsi_inquiry structure.
4169 	 */
4170 	if (strncasecmp(sd_inq->inq_vid, id, idlen) != 0) {
4171 		/*
4172 		 * The user id string is compared to the inquiry vid/pid
4173 		 * using a case insensitive comparison and ignoring
4174 		 * multiple spaces.
4175 		 */
4176 		rval = sd_blank_cmp(un, id, idlen);
4177 		if (rval != SD_SUCCESS) {
4178 			/*
4179 			 * User id strings that start and end with a "*"
4180 			 * are a special case. These do not have a
4181 			 * specific vendor, and the product string can
4182 			 * appear anywhere in the 16 byte PID portion of
4183 			 * the inquiry data. This is a simple strstr()
4184 			 * type search for the user id in the inquiry data.
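			 * For example, a hypothetical entry of
			 * "*SuperDisk*" would match any device whose
			 * 16-byte PID contains the substring "SuperDisk",
			 * regardless of the vendor field.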
4185 */ 4186 if ((id[0] == '*') && (id[idlen - 1] == '*')) { 4187 char *pidptr = &id[1]; 4188 int i; 4189 int j; 4190 int pidstrlen = idlen - 2; 4191 j = sizeof (SD_INQUIRY(un)->inq_pid) - 4192 pidstrlen; 4193 4194 if (j < 0) { 4195 return (SD_FAILURE); 4196 } 4197 for (i = 0; i < j; i++) { 4198 if (bcmp(&SD_INQUIRY(un)->inq_pid[i], 4199 pidptr, pidstrlen) == 0) { 4200 rval = SD_SUCCESS; 4201 break; 4202 } 4203 } 4204 } 4205 } 4206 } 4207 return (rval); 4208 } 4209 4210 4211 /* 4212 * Function: sd_blank_cmp 4213 * 4214 * Description: If the id string starts and ends with a space, treat 4215 * multiple consecutive spaces as equivalent to a single 4216 * space. For example, this causes a sd_disk_table entry 4217 * of " NEC CDROM " to match a device's id string of 4218 * "NEC CDROM". 4219 * 4220 * Note: The success exit condition for this routine is if 4221 * the pointer to the table entry is '\0' and the cnt of 4222 * the inquiry length is zero. This will happen if the inquiry 4223 * string returned by the device is padded with spaces to be 4224 * exactly 24 bytes in length (8 byte vid + 16 byte pid). The 4225 * SCSI spec states that the inquiry string is to be padded with 4226 * spaces. 4227 * 4228 * Arguments: un - driver soft state (unit) structure 4229 * id - table or config file vid/pid 4230 * idlen - length of the vid/pid (bytes) 4231 * 4232 * Return Code: SD_SUCCESS - Indicates a match with the inquiry vid/pid 4233 * SD_FAILURE - Indicates no match with the inquiry vid/pid 4234 */ 4235 4236 static int 4237 sd_blank_cmp(struct sd_lun *un, char *id, int idlen) 4238 { 4239 char *p1; 4240 char *p2; 4241 int cnt; 4242 cnt = sizeof (SD_INQUIRY(un)->inq_vid) + 4243 sizeof (SD_INQUIRY(un)->inq_pid); 4244 4245 ASSERT(un != NULL); 4246 p2 = un->un_sd->sd_inq->inq_vid; 4247 ASSERT(id != NULL); 4248 p1 = id; 4249 4250 if ((id[0] == ' ') && (id[idlen - 1] == ' ')) { 4251 /* 4252 * Note: string p1 is terminated by a NUL but string p2 4253 * isn't. The end of p2 is determined by cnt. 4254 */ 4255 for (;;) { 4256 /* skip over any extra blanks in both strings */ 4257 while ((*p1 != '\0') && (*p1 == ' ')) { 4258 p1++; 4259 } 4260 while ((cnt != 0) && (*p2 == ' ')) { 4261 p2++; 4262 cnt--; 4263 } 4264 4265 /* compare the two strings */ 4266 if ((cnt == 0) || 4267 (SD_TOUPPER(*p1) != SD_TOUPPER(*p2))) { 4268 break; 4269 } 4270 while ((cnt > 0) && 4271 (SD_TOUPPER(*p1) == SD_TOUPPER(*p2))) { 4272 p1++; 4273 p2++; 4274 cnt--; 4275 } 4276 } 4277 } 4278 4279 /* return SD_SUCCESS if both strings match */ 4280 return (((*p1 == '\0') && (cnt == 0)) ? 
SD_SUCCESS : SD_FAILURE);
4281 }
4282 
4283 
4284 /*
4285  *    Function: sd_chk_vers1_data
4286  *
4287  * Description: Verify the version 1 device properties provided by the
4288  *		user via the configuration file
4289  *
4290  *   Arguments: un - driver soft state (unit) structure
4291  *		flags - integer mask indicating properties to be set
4292  *		prop_list - integer list of property values
4293  *		list_len - number of elements
4294  *
4295  * Return Code: SD_SUCCESS - Indicates the user provided data is valid
4296  *		SD_FAILURE - Indicates the user provided data is invalid
4297  */
4298 
4299 static int
4300 sd_chk_vers1_data(struct sd_lun *un, int flags, int *prop_list,
4301     int list_len, char *dataname_ptr)
4302 {
4303 	int i;
4304 	int mask = 1;
4305 	int index = 0;
4306 
4307 	ASSERT(un != NULL);
4308 
4309 	/* Check for a NULL property name and list */
4310 	if (dataname_ptr == NULL) {
4311 		scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
4312 		    "sd_chk_vers1_data: NULL data property name.");
4313 		return (SD_FAILURE);
4314 	}
4315 	if (prop_list == NULL) {
4316 		scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
4317 		    "sd_chk_vers1_data: %s NULL data property list.",
4318 		    dataname_ptr);
4319 		return (SD_FAILURE);
4320 	}
4321 
4322 	/* Display a warning if undefined bits are set in the flags */
4323 	if (flags & ~SD_CONF_BIT_MASK) {
4324 		scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
4325 		    "sd_chk_vers1_data: invalid bits 0x%x in data list %s. "
4326 		    "Properties not set.",
4327 		    (flags & ~SD_CONF_BIT_MASK), dataname_ptr);
4328 		return (SD_FAILURE);
4329 	}
4330 
4331 	/*
4332 	 * Verify the length of the list by identifying the highest bit set
4333 	 * in the flags and validating that the property list has a length
4334 	 * up to the index of this bit.
4335 	 */
4336 	for (i = 0; i < SD_CONF_MAX_ITEMS; i++) {
4337 		mask = 1 << i;
4338 		if (flags & mask) {
4339 			index = i + 1;
4340 		}
4341 	}
4342 	if (list_len < (index + 2)) {
4343 		scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
4344 		    "sd_chk_vers1_data: "
4345 		    "Data property list %s size is incorrect. "
4346 		    "Properties not set.", dataname_ptr);
4347 		scsi_log(SD_DEVINFO(un), sd_label, CE_CONT, "Size expected: "
4348 		    "version + 1 flagword + %d properties", SD_CONF_MAX_ITEMS);
4349 		return (SD_FAILURE);
4350 	}
4351 	return (SD_SUCCESS);
4352 }
4353 
4354 
4355 /*
4356  *    Function: sd_set_vers1_properties
4357  *
4358  * Description: Set version 1 device properties based on a property list
4359  *		retrieved from the driver configuration file or static
4360  *		configuration table. Version 1 properties have the format:
4361  *
4362  *	<data-property-name>:=<version>,<flags>,<prop0>,<prop1>,.....<propN>
4363  *
4364  *		where the prop0 value will be used to set prop0 if bit0
4365  *		is set in the flags
4366  *
4367  *   Arguments: un - driver soft state (unit) structure
4368  *		flags - integer mask indicating properties to be set
4369  *		prop_list - integer list of property values
4370  */
4371 
4372 static void
4373 sd_set_vers1_properties(struct sd_lun *un, int flags, sd_tunables *prop_list)
4374 {
4375 	ASSERT(un != NULL);
4376 
4377 	/*
4378 	 * Set the flag to indicate cache is to be disabled. An attempt
4379 	 * to disable the cache via sd_cache_control() will be made
4380 	 * later during attach once the basic initialization is complete.
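	 *
	 * (An aside on the vers1 format as a whole: a hypothetical
	 * sd.conf fragment, with an invented vid/pid and property name,
	 * and assuming SD_CONF_VERSION_1 is 1 and SD_CONF_BSET_THROTTLE
	 * is bit 0, might read
	 *
	 *	sd-config-list = "ACME    SuperDisk", "acme-props";
	 *	acme-props = 1,0x1,64;
	 *
	 * which would set the throttle to 64 for matching targets.)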
4381 */ 4382 if (flags & SD_CONF_BSET_NOCACHE) { 4383 un->un_f_opt_disable_cache = TRUE; 4384 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4385 "sd_set_vers1_properties: caching disabled flag set\n"); 4386 } 4387 4388 /* CD-specific configuration parameters */ 4389 if (flags & SD_CONF_BSET_PLAYMSF_BCD) { 4390 un->un_f_cfg_playmsf_bcd = TRUE; 4391 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4392 "sd_set_vers1_properties: playmsf_bcd set\n"); 4393 } 4394 if (flags & SD_CONF_BSET_READSUB_BCD) { 4395 un->un_f_cfg_readsub_bcd = TRUE; 4396 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4397 "sd_set_vers1_properties: readsub_bcd set\n"); 4398 } 4399 if (flags & SD_CONF_BSET_READ_TOC_TRK_BCD) { 4400 un->un_f_cfg_read_toc_trk_bcd = TRUE; 4401 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4402 "sd_set_vers1_properties: read_toc_trk_bcd set\n"); 4403 } 4404 if (flags & SD_CONF_BSET_READ_TOC_ADDR_BCD) { 4405 un->un_f_cfg_read_toc_addr_bcd = TRUE; 4406 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4407 "sd_set_vers1_properties: read_toc_addr_bcd set\n"); 4408 } 4409 if (flags & SD_CONF_BSET_NO_READ_HEADER) { 4410 un->un_f_cfg_no_read_header = TRUE; 4411 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4412 "sd_set_vers1_properties: no_read_header set\n"); 4413 } 4414 if (flags & SD_CONF_BSET_READ_CD_XD4) { 4415 un->un_f_cfg_read_cd_xd4 = TRUE; 4416 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4417 "sd_set_vers1_properties: read_cd_xd4 set\n"); 4418 } 4419 4420 /* Support for devices which do not have valid/unique serial numbers */ 4421 if (flags & SD_CONF_BSET_FAB_DEVID) { 4422 un->un_f_opt_fab_devid = TRUE; 4423 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4424 "sd_set_vers1_properties: fab_devid bit set\n"); 4425 } 4426 4427 /* Support for user throttle configuration */ 4428 if (flags & SD_CONF_BSET_THROTTLE) { 4429 ASSERT(prop_list != NULL); 4430 un->un_saved_throttle = un->un_throttle = 4431 prop_list->sdt_throttle; 4432 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4433 "sd_set_vers1_properties: throttle set to %d\n", 4434 prop_list->sdt_throttle); 4435 } 4436 4437 /* Set the per disk retry count according to the conf file or table. 
*/ 4438 if (flags & SD_CONF_BSET_NRR_COUNT) { 4439 ASSERT(prop_list != NULL); 4440 if (prop_list->sdt_not_rdy_retries) { 4441 un->un_notready_retry_count = 4442 prop_list->sdt_not_rdy_retries; 4443 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4444 "sd_set_vers1_properties: not ready retry count" 4445 " set to %d\n", un->un_notready_retry_count); 4446 } 4447 } 4448 4449 /* The controller type is reported for generic disk driver ioctls */ 4450 if (flags & SD_CONF_BSET_CTYPE) { 4451 ASSERT(prop_list != NULL); 4452 switch (prop_list->sdt_ctype) { 4453 case CTYPE_CDROM: 4454 un->un_ctype = prop_list->sdt_ctype; 4455 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4456 "sd_set_vers1_properties: ctype set to " 4457 "CTYPE_CDROM\n"); 4458 break; 4459 case CTYPE_CCS: 4460 un->un_ctype = prop_list->sdt_ctype; 4461 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4462 "sd_set_vers1_properties: ctype set to " 4463 "CTYPE_CCS\n"); 4464 break; 4465 case CTYPE_ROD: /* RW optical */ 4466 un->un_ctype = prop_list->sdt_ctype; 4467 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4468 "sd_set_vers1_properties: ctype set to " 4469 "CTYPE_ROD\n"); 4470 break; 4471 default: 4472 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 4473 "sd_set_vers1_properties: Could not set " 4474 "invalid ctype value (%d)", 4475 prop_list->sdt_ctype); 4476 } 4477 } 4478 4479 /* Purple failover timeout */ 4480 if (flags & SD_CONF_BSET_BSY_RETRY_COUNT) { 4481 ASSERT(prop_list != NULL); 4482 un->un_busy_retry_count = 4483 prop_list->sdt_busy_retries; 4484 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4485 "sd_set_vers1_properties: " 4486 "busy retry count set to %d\n", 4487 un->un_busy_retry_count); 4488 } 4489 4490 /* Purple reset retry count */ 4491 if (flags & SD_CONF_BSET_RST_RETRIES) { 4492 ASSERT(prop_list != NULL); 4493 un->un_reset_retry_count = 4494 prop_list->sdt_reset_retries; 4495 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4496 "sd_set_vers1_properties: " 4497 "reset retry count set to %d\n", 4498 un->un_reset_retry_count); 4499 } 4500 4501 /* Purple reservation release timeout */ 4502 if (flags & SD_CONF_BSET_RSV_REL_TIME) { 4503 ASSERT(prop_list != NULL); 4504 un->un_reserve_release_time = 4505 prop_list->sdt_reserv_rel_time; 4506 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4507 "sd_set_vers1_properties: " 4508 "reservation release timeout set to %d\n", 4509 un->un_reserve_release_time); 4510 } 4511 4512 /* 4513 * Driver flag telling the driver to verify that no commands are pending 4514 * for a device before issuing a Test Unit Ready. This is a workaround 4515 * for a firmware bug in some Seagate eliteI drives. 4516 */ 4517 if (flags & SD_CONF_BSET_TUR_CHECK) { 4518 un->un_f_cfg_tur_check = TRUE; 4519 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4520 "sd_set_vers1_properties: tur queue check set\n"); 4521 } 4522 4523 if (flags & SD_CONF_BSET_MIN_THROTTLE) { 4524 un->un_min_throttle = prop_list->sdt_min_throttle; 4525 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4526 "sd_set_vers1_properties: min throttle set to %d\n", 4527 un->un_min_throttle); 4528 } 4529 4530 if (flags & SD_CONF_BSET_DISKSORT_DISABLED) { 4531 un->un_f_disksort_disabled = 4532 (prop_list->sdt_disk_sort_dis != 0) ? 4533 TRUE : FALSE; 4534 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4535 "sd_set_vers1_properties: disksort disabled " 4536 "flag set to %d\n", 4537 prop_list->sdt_disk_sort_dis); 4538 } 4539 4540 if (flags & SD_CONF_BSET_LUN_RESET_ENABLED) { 4541 un->un_f_lun_reset_enabled = 4542 (prop_list->sdt_lun_reset_enable != 0) ? 
4543 TRUE : FALSE; 4544 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4545 "sd_set_vers1_properties: lun reset enabled " 4546 "flag set to %d\n", 4547 prop_list->sdt_lun_reset_enable); 4548 } 4549 4550 if (flags & SD_CONF_BSET_CACHE_IS_NV) { 4551 un->un_f_suppress_cache_flush = 4552 (prop_list->sdt_suppress_cache_flush != 0) ? 4553 TRUE : FALSE; 4554 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4555 "sd_set_vers1_properties: suppress_cache_flush " 4556 "flag set to %d\n", 4557 prop_list->sdt_suppress_cache_flush); 4558 } 4559 4560 /* 4561 * Validate the throttle values. 4562 * If any of the numbers are invalid, set everything to defaults. 4563 */ 4564 if ((un->un_throttle < SD_LOWEST_VALID_THROTTLE) || 4565 (un->un_min_throttle < SD_LOWEST_VALID_THROTTLE) || 4566 (un->un_min_throttle > un->un_throttle)) { 4567 un->un_saved_throttle = un->un_throttle = sd_max_throttle; 4568 un->un_min_throttle = sd_min_throttle; 4569 } 4570 } 4571 4572 /* 4573 * Function: sd_is_lsi() 4574 * 4575 * Description: Check for lsi devices, step through the static device 4576 * table to match vid/pid. 4577 * 4578 * Args: un - ptr to sd_lun 4579 * 4580 * Notes: When creating new LSI property, need to add the new LSI property 4581 * to this function. 4582 */ 4583 static void 4584 sd_is_lsi(struct sd_lun *un) 4585 { 4586 char *id = NULL; 4587 int table_index; 4588 int idlen; 4589 void *prop; 4590 4591 ASSERT(un != NULL); 4592 for (table_index = 0; table_index < sd_disk_table_size; 4593 table_index++) { 4594 id = sd_disk_table[table_index].device_id; 4595 idlen = strlen(id); 4596 if (idlen == 0) { 4597 continue; 4598 } 4599 4600 if (sd_sdconf_id_match(un, id, idlen) == SD_SUCCESS) { 4601 prop = sd_disk_table[table_index].properties; 4602 if (prop == &lsi_properties || 4603 prop == &lsi_oem_properties || 4604 prop == &lsi_properties_scsi || 4605 prop == &symbios_properties) { 4606 un->un_f_cfg_is_lsi = TRUE; 4607 } 4608 break; 4609 } 4610 } 4611 } 4612 4613 /* 4614 * Function: sd_get_physical_geometry 4615 * 4616 * Description: Retrieve the MODE SENSE page 3 (Format Device Page) and 4617 * MODE SENSE page 4 (Rigid Disk Drive Geometry Page) from the 4618 * target, and use this information to initialize the physical 4619 * geometry cache specified by pgeom_p. 4620 * 4621 * MODE SENSE is an optional command, so failure in this case 4622 * does not necessarily denote an error. We want to use the 4623 * MODE SENSE commands to derive the physical geometry of the 4624 * device, but if either command fails, the logical geometry is 4625 * used as the fallback for disk label geometry in cmlb. 4626 * 4627 * This requires that un->un_blockcount and un->un_tgt_blocksize 4628 * have already been initialized for the current target and 4629 * that the current values be passed as args so that we don't 4630 * end up ever trying to use -1 as a valid value. This could 4631 * happen if either value is reset while we're not holding 4632 * the mutex. 4633 * 4634 * Arguments: un - driver soft state (unit) structure 4635 * path_flag - SD_PATH_DIRECT to use the USCSI "direct" chain and 4636 * the normal command waitq, or SD_PATH_DIRECT_PRIORITY 4637 * to use the USCSI "direct" chain and bypass the normal 4638 * command waitq. 4639 * 4640 * Context: Kernel thread only (can sleep). 
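 *
 *		Worked example with invented numbers: if page 3 reports
 *		nsect = 63 sectors/track and a 512-byte sector size, and
 *		page 4 reports nhead = 16 and ncyl = 4096, then
 *		spc = 16 * 63 = 1008 and modesense_capacity =
 *		1008 * 4096 = 4,128,768 blocks, which is then reconciled
 *		against the READ CAPACITY value.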
4641 */ 4642 4643 static int 4644 sd_get_physical_geometry(struct sd_lun *un, cmlb_geom_t *pgeom_p, 4645 diskaddr_t capacity, int lbasize, int path_flag) 4646 { 4647 struct mode_format *page3p; 4648 struct mode_geometry *page4p; 4649 struct mode_header *headerp; 4650 int sector_size; 4651 int nsect; 4652 int nhead; 4653 int ncyl; 4654 int intrlv; 4655 int spc; 4656 diskaddr_t modesense_capacity; 4657 int rpm; 4658 int bd_len; 4659 int mode_header_length; 4660 uchar_t *p3bufp; 4661 uchar_t *p4bufp; 4662 int cdbsize; 4663 int ret = EIO; 4664 sd_ssc_t *ssc; 4665 int status; 4666 4667 ASSERT(un != NULL); 4668 4669 if (lbasize == 0) { 4670 if (ISCD(un)) { 4671 lbasize = 2048; 4672 } else { 4673 lbasize = un->un_sys_blocksize; 4674 } 4675 } 4676 pgeom_p->g_secsize = (unsigned short)lbasize; 4677 4678 /* 4679 * If the unit is a cd/dvd drive MODE SENSE page three 4680 * and MODE SENSE page four are reserved (see SBC spec 4681 * and MMC spec). To prevent soft errors just return 4682 * using the default LBA size. 4683 */ 4684 if (ISCD(un)) 4685 return (ret); 4686 4687 cdbsize = (un->un_f_cfg_is_atapi == TRUE) ? CDB_GROUP2 : CDB_GROUP0; 4688 4689 /* 4690 * Retrieve MODE SENSE page 3 - Format Device Page 4691 */ 4692 p3bufp = kmem_zalloc(SD_MODE_SENSE_PAGE3_LENGTH, KM_SLEEP); 4693 ssc = sd_ssc_init(un); 4694 status = sd_send_scsi_MODE_SENSE(ssc, cdbsize, p3bufp, 4695 SD_MODE_SENSE_PAGE3_LENGTH, SD_MODE_SENSE_PAGE3_CODE, path_flag); 4696 if (status != 0) { 4697 SD_ERROR(SD_LOG_COMMON, un, 4698 "sd_get_physical_geometry: mode sense page 3 failed\n"); 4699 goto page3_exit; 4700 } 4701 4702 /* 4703 * Determine size of Block Descriptors in order to locate the mode 4704 * page data. ATAPI devices return 0, SCSI devices should return 4705 * MODE_BLK_DESC_LENGTH. 4706 */ 4707 headerp = (struct mode_header *)p3bufp; 4708 if (un->un_f_cfg_is_atapi == TRUE) { 4709 struct mode_header_grp2 *mhp = 4710 (struct mode_header_grp2 *)headerp; 4711 mode_header_length = MODE_HEADER_LENGTH_GRP2; 4712 bd_len = (mhp->bdesc_length_hi << 8) | mhp->bdesc_length_lo; 4713 } else { 4714 mode_header_length = MODE_HEADER_LENGTH; 4715 bd_len = ((struct mode_header *)headerp)->bdesc_length; 4716 } 4717 4718 if (bd_len > MODE_BLK_DESC_LENGTH) { 4719 sd_ssc_set_info(ssc, SSC_FLAGS_INVALID_DATA, SD_LOG_COMMON, 4720 "sd_get_physical_geometry: received unexpected bd_len " 4721 "of %d, page3\n", bd_len); 4722 status = EIO; 4723 goto page3_exit; 4724 } 4725 4726 page3p = (struct mode_format *) 4727 ((caddr_t)headerp + mode_header_length + bd_len); 4728 4729 if (page3p->mode_page.code != SD_MODE_SENSE_PAGE3_CODE) { 4730 sd_ssc_set_info(ssc, SSC_FLAGS_INVALID_DATA, SD_LOG_COMMON, 4731 "sd_get_physical_geometry: mode sense pg3 code mismatch " 4732 "%d\n", page3p->mode_page.code); 4733 status = EIO; 4734 goto page3_exit; 4735 } 4736 4737 /* 4738 * Use this physical geometry data only if BOTH MODE SENSE commands 4739 * complete successfully; otherwise, revert to the logical geometry. 4740 * So, we need to save everything in temporary variables. 
4741 */ 4742 sector_size = BE_16(page3p->data_bytes_sect); 4743 4744 /* 4745 * 1243403: The NEC D38x7 drives do not support MODE SENSE sector size 4746 */ 4747 if (sector_size == 0) { 4748 sector_size = un->un_sys_blocksize; 4749 } else { 4750 sector_size &= ~(un->un_sys_blocksize - 1); 4751 } 4752 4753 nsect = BE_16(page3p->sect_track); 4754 intrlv = BE_16(page3p->interleave); 4755 4756 SD_INFO(SD_LOG_COMMON, un, 4757 "sd_get_physical_geometry: Format Parameters (page 3)\n"); 4758 SD_INFO(SD_LOG_COMMON, un, 4759 " mode page: %d; nsect: %d; sector size: %d;\n", 4760 page3p->mode_page.code, nsect, sector_size); 4761 SD_INFO(SD_LOG_COMMON, un, 4762 " interleave: %d; track skew: %d; cylinder skew: %d;\n", intrlv, 4763 BE_16(page3p->track_skew), 4764 BE_16(page3p->cylinder_skew)); 4765 4766 sd_ssc_assessment(ssc, SD_FMT_STANDARD); 4767 4768 /* 4769 * Retrieve MODE SENSE page 4 - Rigid Disk Drive Geometry Page 4770 */ 4771 p4bufp = kmem_zalloc(SD_MODE_SENSE_PAGE4_LENGTH, KM_SLEEP); 4772 status = sd_send_scsi_MODE_SENSE(ssc, cdbsize, p4bufp, 4773 SD_MODE_SENSE_PAGE4_LENGTH, SD_MODE_SENSE_PAGE4_CODE, path_flag); 4774 if (status != 0) { 4775 SD_ERROR(SD_LOG_COMMON, un, 4776 "sd_get_physical_geometry: mode sense page 4 failed\n"); 4777 goto page4_exit; 4778 } 4779 4780 /* 4781 * Determine size of Block Descriptors in order to locate the mode 4782 * page data. ATAPI devices return 0, SCSI devices should return 4783 * MODE_BLK_DESC_LENGTH. 4784 */ 4785 headerp = (struct mode_header *)p4bufp; 4786 if (un->un_f_cfg_is_atapi == TRUE) { 4787 struct mode_header_grp2 *mhp = 4788 (struct mode_header_grp2 *)headerp; 4789 bd_len = (mhp->bdesc_length_hi << 8) | mhp->bdesc_length_lo; 4790 } else { 4791 bd_len = ((struct mode_header *)headerp)->bdesc_length; 4792 } 4793 4794 if (bd_len > MODE_BLK_DESC_LENGTH) { 4795 sd_ssc_set_info(ssc, SSC_FLAGS_INVALID_DATA, SD_LOG_COMMON, 4796 "sd_get_physical_geometry: received unexpected bd_len of " 4797 "%d, page4\n", bd_len); 4798 status = EIO; 4799 goto page4_exit; 4800 } 4801 4802 page4p = (struct mode_geometry *) 4803 ((caddr_t)headerp + mode_header_length + bd_len); 4804 4805 if (page4p->mode_page.code != SD_MODE_SENSE_PAGE4_CODE) { 4806 sd_ssc_set_info(ssc, SSC_FLAGS_INVALID_DATA, SD_LOG_COMMON, 4807 "sd_get_physical_geometry: mode sense pg4 code mismatch " 4808 "%d\n", page4p->mode_page.code); 4809 status = EIO; 4810 goto page4_exit; 4811 } 4812 4813 /* 4814 * Stash the data now, after we know that both commands completed. 4815 */ 4816 4817 4818 nhead = (int)page4p->heads; /* uchar, so no conversion needed */ 4819 spc = nhead * nsect; 4820 ncyl = (page4p->cyl_ub << 16) + (page4p->cyl_mb << 8) + page4p->cyl_lb; 4821 rpm = BE_16(page4p->rpm); 4822 4823 modesense_capacity = spc * ncyl; 4824 4825 SD_INFO(SD_LOG_COMMON, un, 4826 "sd_get_physical_geometry: Geometry Parameters (page 4)\n"); 4827 SD_INFO(SD_LOG_COMMON, un, 4828 " cylinders: %d; heads: %d; rpm: %d;\n", ncyl, nhead, rpm); 4829 SD_INFO(SD_LOG_COMMON, un, 4830 " computed capacity(h*s*c): %d;\n", modesense_capacity); 4831 SD_INFO(SD_LOG_COMMON, un, " pgeom_p: %p; read cap: %d\n", 4832 (void *)pgeom_p, capacity); 4833 4834 /* 4835 * Compensate if the drive's geometry is not rectangular, i.e., 4836 * the product of C * H * S returned by MODE SENSE >= that returned 4837 * by read capacity. This is an idiosyncrasy of the original x86 4838 * disk subsystem. 
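	 *
	 * Worked example with invented numbers: with nhead = 64 and
	 * nsect = 32, spc = 2048. If the MODE SENSE geometry yields
	 * ncyl = 1000, then modesense_capacity = 2,048,000 blocks; if
	 * READ CAPACITY reported 2,040,000 blocks, the code below
	 * computes acyl = (2048000 - 2040000 + 2047) / 2048 = 4
	 * alternate cylinders and ncyl = 1000 - 4 = 996.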
4839 */ 4840 if (modesense_capacity >= capacity) { 4841 SD_INFO(SD_LOG_COMMON, un, 4842 "sd_get_physical_geometry: adjusting acyl; " 4843 "old: %d; new: %d\n", pgeom_p->g_acyl, 4844 (modesense_capacity - capacity + spc - 1) / spc); 4845 if (sector_size != 0) { 4846 /* 1243403: NEC D38x7 drives don't support sec size */ 4847 pgeom_p->g_secsize = (unsigned short)sector_size; 4848 } 4849 pgeom_p->g_nsect = (unsigned short)nsect; 4850 pgeom_p->g_nhead = (unsigned short)nhead; 4851 pgeom_p->g_capacity = capacity; 4852 pgeom_p->g_acyl = 4853 (modesense_capacity - pgeom_p->g_capacity + spc - 1) / spc; 4854 pgeom_p->g_ncyl = ncyl - pgeom_p->g_acyl; 4855 } 4856 4857 pgeom_p->g_rpm = (unsigned short)rpm; 4858 pgeom_p->g_intrlv = (unsigned short)intrlv; 4859 ret = 0; 4860 4861 SD_INFO(SD_LOG_COMMON, un, 4862 "sd_get_physical_geometry: mode sense geometry:\n"); 4863 SD_INFO(SD_LOG_COMMON, un, 4864 " nsect: %d; sector size: %d; interlv: %d\n", 4865 nsect, sector_size, intrlv); 4866 SD_INFO(SD_LOG_COMMON, un, 4867 " nhead: %d; ncyl: %d; rpm: %d; capacity(ms): %d\n", 4868 nhead, ncyl, rpm, modesense_capacity); 4869 SD_INFO(SD_LOG_COMMON, un, 4870 "sd_get_physical_geometry: (cached)\n"); 4871 SD_INFO(SD_LOG_COMMON, un, 4872 " ncyl: %ld; acyl: %d; nhead: %d; nsect: %d\n", 4873 pgeom_p->g_ncyl, pgeom_p->g_acyl, 4874 pgeom_p->g_nhead, pgeom_p->g_nsect); 4875 SD_INFO(SD_LOG_COMMON, un, 4876 " lbasize: %d; capacity: %ld; intrlv: %d; rpm: %d\n", 4877 pgeom_p->g_secsize, pgeom_p->g_capacity, 4878 pgeom_p->g_intrlv, pgeom_p->g_rpm); 4879 sd_ssc_assessment(ssc, SD_FMT_STANDARD); 4880 4881 page4_exit: 4882 kmem_free(p4bufp, SD_MODE_SENSE_PAGE4_LENGTH); 4883 4884 page3_exit: 4885 kmem_free(p3bufp, SD_MODE_SENSE_PAGE3_LENGTH); 4886 4887 if (status != 0) { 4888 if (status == EIO) { 4889 /* 4890 * Some disks do not support mode sense(6), we 4891 * should ignore this kind of error(sense key is 4892 * 0x5 - illegal request). 4893 */ 4894 uint8_t *sensep; 4895 int senlen; 4896 4897 sensep = (uint8_t *)ssc->ssc_uscsi_cmd->uscsi_rqbuf; 4898 senlen = (int)(ssc->ssc_uscsi_cmd->uscsi_rqlen - 4899 ssc->ssc_uscsi_cmd->uscsi_rqresid); 4900 4901 if (senlen > 0 && 4902 scsi_sense_key(sensep) == KEY_ILLEGAL_REQUEST) { 4903 sd_ssc_assessment(ssc, 4904 SD_FMT_IGNORE_COMPROMISE); 4905 } else { 4906 sd_ssc_assessment(ssc, SD_FMT_STATUS_CHECK); 4907 } 4908 } else { 4909 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 4910 } 4911 } 4912 sd_ssc_fini(ssc); 4913 return (ret); 4914 } 4915 4916 /* 4917 * Function: sd_get_virtual_geometry 4918 * 4919 * Description: Ask the controller to tell us about the target device. 4920 * 4921 * Arguments: un - pointer to softstate 4922 * capacity - disk capacity in #blocks 4923 * lbasize - disk block size in bytes 4924 * 4925 * Context: Kernel thread only 4926 */ 4927 4928 static int 4929 sd_get_virtual_geometry(struct sd_lun *un, cmlb_geom_t *lgeom_p, 4930 diskaddr_t capacity, int lbasize) 4931 { 4932 uint_t geombuf; 4933 int spc; 4934 4935 ASSERT(un != NULL); 4936 4937 /* Set sector size, and total number of sectors */ 4938 (void) scsi_ifsetcap(SD_ADDRESS(un), "sector-size", lbasize, 1); 4939 (void) scsi_ifsetcap(SD_ADDRESS(un), "total-sectors", capacity, 1); 4940 4941 /* Let the HBA tell us its geometry */ 4942 geombuf = (uint_t)scsi_ifgetcap(SD_ADDRESS(un), "geometry", 1); 4943 4944 /* A value of -1 indicates an undefined "geometry" property */ 4945 if (geombuf == (-1)) { 4946 return (EINVAL); 4947 } 4948 4949 /* Initialize the logical geometry cache. 
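	 * For example, a hypothetical geombuf value of 0x00400020
	 * decodes below to g_nhead = 0x0040 (64) and g_nsect = 0x0020
	 * (32), giving spc = 2048 sectors per cylinder.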
*/ 4950 lgeom_p->g_nhead = (geombuf >> 16) & 0xffff; 4951 lgeom_p->g_nsect = geombuf & 0xffff; 4952 lgeom_p->g_secsize = un->un_sys_blocksize; 4953 4954 spc = lgeom_p->g_nhead * lgeom_p->g_nsect; 4955 4956 /* 4957 * Note: The driver originally converted the capacity value from 4958 * target blocks to system blocks. However, the capacity value passed 4959 * to this routine is already in terms of system blocks (this scaling 4960 * is done when the READ CAPACITY command is issued and processed). 4961 * This 'error' may have gone undetected because the usage of g_ncyl 4962 * (which is based upon g_capacity) is very limited within the driver 4963 */ 4964 lgeom_p->g_capacity = capacity; 4965 4966 /* 4967 * Set ncyl to zero if the hba returned a zero nhead or nsect value. The 4968 * hba may return zero values if the device has been removed. 4969 */ 4970 if (spc == 0) { 4971 lgeom_p->g_ncyl = 0; 4972 } else { 4973 lgeom_p->g_ncyl = lgeom_p->g_capacity / spc; 4974 } 4975 lgeom_p->g_acyl = 0; 4976 4977 SD_INFO(SD_LOG_COMMON, un, "sd_get_virtual_geometry: (cached)\n"); 4978 return (0); 4979 4980 } 4981 /* 4982 * Function: sd_update_block_info 4983 * 4984 * Description: Calculate a byte count to sector count bitshift value 4985 * from sector size. 4986 * 4987 * Arguments: un: unit struct. 4988 * lbasize: new target sector size 4989 * capacity: new target capacity, ie. block count 4990 * 4991 * Context: Kernel thread context 4992 */ 4993 4994 static void 4995 sd_update_block_info(struct sd_lun *un, uint32_t lbasize, uint64_t capacity) 4996 { 4997 if (lbasize != 0) { 4998 un->un_tgt_blocksize = lbasize; 4999 un->un_f_tgt_blocksize_is_valid = TRUE; 5000 } 5001 5002 if (capacity != 0) { 5003 un->un_blockcount = capacity; 5004 un->un_f_blockcount_is_valid = TRUE; 5005 } 5006 } 5007 5008 5009 /* 5010 * Function: sd_register_devid 5011 * 5012 * Description: This routine will obtain the device id information from the 5013 * target, obtain the serial number, and register the device 5014 * id with the ddi framework. 5015 * 5016 * Arguments: devi - the system's dev_info_t for the device. 5017 * un - driver soft state (unit) structure 5018 * reservation_flag - indicates if a reservation conflict 5019 * occurred during attach 5020 * 5021 * Context: Kernel Thread 5022 */ 5023 static void 5024 sd_register_devid(sd_ssc_t *ssc, dev_info_t *devi, int reservation_flag) 5025 { 5026 int rval = 0; 5027 uchar_t *inq80 = NULL; 5028 size_t inq80_len = MAX_INQUIRY_SIZE; 5029 size_t inq80_resid = 0; 5030 uchar_t *inq83 = NULL; 5031 size_t inq83_len = MAX_INQUIRY_SIZE; 5032 size_t inq83_resid = 0; 5033 int dlen, len; 5034 char *sn; 5035 struct sd_lun *un; 5036 5037 ASSERT(ssc != NULL); 5038 un = ssc->ssc_un; 5039 ASSERT(un != NULL); 5040 ASSERT(mutex_owned(SD_MUTEX(un))); 5041 ASSERT((SD_DEVINFO(un)) == devi); 5042 5043 5044 /* 5045 * We check the availability of the World Wide Name (0x83) and Unit 5046 * Serial Number (0x80) pages in sd_check_vpd_page_support(), and using 5047 * un_vpd_page_mask from them, we decide which way to get the WWN. If 5048 * 0x83 is available, that is the best choice. Our next choice is 5049 * 0x80. If neither are available, we munge the devid from the device 5050 * vid/pid/serial # for Sun qualified disks, or use the ddi framework 5051 * to fabricate a devid for non-Sun qualified disks. 
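	 *
	 * (In CDB terms, the INQUIRY calls below set the EVPD bit and
	 * request page code 0x80 or 0x83; those are the 0x01 and
	 * 0x80/0x83 arguments passed to sd_send_scsi_INQUIRY().)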
5052 */ 5053 if (sd_check_vpd_page_support(ssc) == 0) { 5054 /* collect page 80 data if available */ 5055 if (un->un_vpd_page_mask & SD_VPD_UNIT_SERIAL_PG) { 5056 5057 mutex_exit(SD_MUTEX(un)); 5058 inq80 = kmem_zalloc(inq80_len, KM_SLEEP); 5059 5060 rval = sd_send_scsi_INQUIRY(ssc, inq80, inq80_len, 5061 0x01, 0x80, &inq80_resid); 5062 5063 if (rval != 0) { 5064 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 5065 kmem_free(inq80, inq80_len); 5066 inq80 = NULL; 5067 inq80_len = 0; 5068 } else if (ddi_prop_exists( 5069 DDI_DEV_T_NONE, SD_DEVINFO(un), 5070 DDI_PROP_NOTPROM | DDI_PROP_DONTPASS, 5071 INQUIRY_SERIAL_NO) == 0) { 5072 /* 5073 * If we don't already have a serial number 5074 * property, do quick verify of data returned 5075 * and define property. 5076 */ 5077 dlen = inq80_len - inq80_resid; 5078 len = (size_t)inq80[3]; 5079 if ((dlen >= 4) && ((len + 4) <= dlen)) { 5080 /* 5081 * Ensure sn termination, skip leading 5082 * blanks, and create property 5083 * 'inquiry-serial-no'. 5084 */ 5085 sn = (char *)&inq80[4]; 5086 sn[len] = 0; 5087 while (*sn && (*sn == ' ')) 5088 sn++; 5089 if (*sn) { 5090 (void) ddi_prop_update_string( 5091 DDI_DEV_T_NONE, 5092 SD_DEVINFO(un), 5093 INQUIRY_SERIAL_NO, sn); 5094 } 5095 } 5096 } 5097 mutex_enter(SD_MUTEX(un)); 5098 } 5099 5100 /* collect page 83 data if available */ 5101 if (un->un_vpd_page_mask & SD_VPD_DEVID_WWN_PG) { 5102 mutex_exit(SD_MUTEX(un)); 5103 inq83 = kmem_zalloc(inq83_len, KM_SLEEP); 5104 5105 rval = sd_send_scsi_INQUIRY(ssc, inq83, inq83_len, 5106 0x01, 0x83, &inq83_resid); 5107 5108 if (rval != 0) { 5109 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 5110 kmem_free(inq83, inq83_len); 5111 inq83 = NULL; 5112 inq83_len = 0; 5113 } 5114 mutex_enter(SD_MUTEX(un)); 5115 } 5116 } 5117 5118 /* 5119 * If transport has already registered a devid for this target 5120 * then that takes precedence over the driver's determination 5121 * of the devid. 5122 * 5123 * NOTE: The reason this check is done here instead of at the beginning 5124 * of the function is to allow the code above to create the 5125 * 'inquiry-serial-no' property. 5126 */ 5127 if (ddi_devid_get(SD_DEVINFO(un), &un->un_devid) == DDI_SUCCESS) { 5128 ASSERT(un->un_devid); 5129 un->un_f_devid_transport_defined = TRUE; 5130 goto cleanup; /* use devid registered by the transport */ 5131 } 5132 5133 /* 5134 * This is the case of antiquated Sun disk drives that have the 5135 * FAB_DEVID property set in the disk_table. These drives 5136 * manage the devid's by storing them in last 2 available sectors 5137 * on the drive and have them fabricated by the ddi layer by calling 5138 * ddi_devid_init and passing the DEVID_FAB flag. 5139 */ 5140 if (un->un_f_opt_fab_devid == TRUE) { 5141 /* 5142 * Depending on EINVAL isn't reliable, since a reserved disk 5143 * may result in invalid geometry, so check to make sure a 5144 * reservation conflict did not occur during attach. 5145 */ 5146 if ((sd_get_devid(ssc) == EINVAL) && 5147 (reservation_flag != SD_TARGET_IS_RESERVED)) { 5148 /* 5149 * The devid is invalid AND there is no reservation 5150 * conflict. Fabricate a new devid. 
5151 */ 5152 (void) sd_create_devid(ssc); 5153 } 5154 5155 /* Register the devid if it exists */ 5156 if (un->un_devid != NULL) { 5157 (void) ddi_devid_register(SD_DEVINFO(un), 5158 un->un_devid); 5159 SD_INFO(SD_LOG_ATTACH_DETACH, un, 5160 "sd_register_devid: Devid Fabricated\n"); 5161 } 5162 goto cleanup; 5163 } 5164 5165 /* encode best devid possible based on data available */ 5166 if (ddi_devid_scsi_encode(DEVID_SCSI_ENCODE_VERSION_LATEST, 5167 (char *)ddi_driver_name(SD_DEVINFO(un)), 5168 (uchar_t *)SD_INQUIRY(un), sizeof (*SD_INQUIRY(un)), 5169 inq80, inq80_len - inq80_resid, inq83, inq83_len - 5170 inq83_resid, &un->un_devid) == DDI_SUCCESS) { 5171 5172 /* devid successfully encoded, register devid */ 5173 (void) ddi_devid_register(SD_DEVINFO(un), un->un_devid); 5174 5175 } else { 5176 /* 5177 * Unable to encode a devid based on data available. 5178 * This is not a Sun qualified disk. Older Sun disk 5179 * drives that have the SD_FAB_DEVID property 5180 * set in the disk_table and non Sun qualified 5181 * disks are treated in the same manner. These 5182 * drives manage the devid's by storing them in 5183 * last 2 available sectors on the drive and 5184 * have them fabricated by the ddi layer by 5185 * calling ddi_devid_init and passing the 5186 * DEVID_FAB flag. 5187 * Create a fabricate devid only if there's no 5188 * fabricate devid existed. 5189 */ 5190 if (sd_get_devid(ssc) == EINVAL) { 5191 (void) sd_create_devid(ssc); 5192 } 5193 un->un_f_opt_fab_devid = TRUE; 5194 5195 /* Register the devid if it exists */ 5196 if (un->un_devid != NULL) { 5197 (void) ddi_devid_register(SD_DEVINFO(un), 5198 un->un_devid); 5199 SD_INFO(SD_LOG_ATTACH_DETACH, un, 5200 "sd_register_devid: devid fabricated using " 5201 "ddi framework\n"); 5202 } 5203 } 5204 5205 cleanup: 5206 /* clean up resources */ 5207 if (inq80 != NULL) { 5208 kmem_free(inq80, inq80_len); 5209 } 5210 if (inq83 != NULL) { 5211 kmem_free(inq83, inq83_len); 5212 } 5213 } 5214 5215 5216 5217 /* 5218 * Function: sd_get_devid 5219 * 5220 * Description: This routine will return 0 if a valid device id has been 5221 * obtained from the target and stored in the soft state. If a 5222 * valid device id has not been previously read and stored, a 5223 * read attempt will be made. 5224 * 5225 * Arguments: un - driver soft state (unit) structure 5226 * 5227 * Return Code: 0 if we successfully get the device id 5228 * 5229 * Context: Kernel Thread 5230 */ 5231 5232 static int 5233 sd_get_devid(sd_ssc_t *ssc) 5234 { 5235 struct dk_devid *dkdevid; 5236 ddi_devid_t tmpid; 5237 uint_t *ip; 5238 size_t sz; 5239 diskaddr_t blk; 5240 int status; 5241 int chksum; 5242 int i; 5243 size_t buffer_size; 5244 struct sd_lun *un; 5245 5246 ASSERT(ssc != NULL); 5247 un = ssc->ssc_un; 5248 ASSERT(un != NULL); 5249 ASSERT(mutex_owned(SD_MUTEX(un))); 5250 5251 SD_TRACE(SD_LOG_ATTACH_DETACH, un, "sd_get_devid: entry: un: 0x%p\n", 5252 un); 5253 5254 if (un->un_devid != NULL) { 5255 return (0); 5256 } 5257 5258 mutex_exit(SD_MUTEX(un)); 5259 if (cmlb_get_devid_block(un->un_cmlbhandle, &blk, 5260 (void *)SD_PATH_DIRECT) != 0) { 5261 mutex_enter(SD_MUTEX(un)); 5262 return (EINVAL); 5263 } 5264 5265 /* 5266 * Read and verify device id, stored in the reserved cylinders at the 5267 * end of the disk. Backup label is on the odd sectors of the last 5268 * track of the last cylinder. Device id will be on track of the next 5269 * to last cylinder. 
5270 */ 5271 mutex_enter(SD_MUTEX(un)); 5272 buffer_size = SD_REQBYTES2TGTBYTES(un, sizeof (struct dk_devid)); 5273 mutex_exit(SD_MUTEX(un)); 5274 dkdevid = kmem_alloc(buffer_size, KM_SLEEP); 5275 status = sd_send_scsi_READ(ssc, dkdevid, buffer_size, blk, 5276 SD_PATH_DIRECT); 5277 5278 if (status != 0) { 5279 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 5280 goto error; 5281 } 5282 5283 /* Validate the revision */ 5284 if ((dkdevid->dkd_rev_hi != DK_DEVID_REV_MSB) || 5285 (dkdevid->dkd_rev_lo != DK_DEVID_REV_LSB)) { 5286 status = EINVAL; 5287 goto error; 5288 } 5289 5290 /* Calculate the checksum */ 5291 chksum = 0; 5292 ip = (uint_t *)dkdevid; 5293 for (i = 0; i < ((un->un_sys_blocksize - sizeof (int))/sizeof (int)); 5294 i++) { 5295 chksum ^= ip[i]; 5296 } 5297 5298 /* Compare the checksums */ 5299 if (DKD_GETCHKSUM(dkdevid) != chksum) { 5300 status = EINVAL; 5301 goto error; 5302 } 5303 5304 /* Validate the device id */ 5305 if (ddi_devid_valid((ddi_devid_t)&dkdevid->dkd_devid) != DDI_SUCCESS) { 5306 status = EINVAL; 5307 goto error; 5308 } 5309 5310 /* 5311 * Store the device id in the driver soft state 5312 */ 5313 sz = ddi_devid_sizeof((ddi_devid_t)&dkdevid->dkd_devid); 5314 tmpid = kmem_alloc(sz, KM_SLEEP); 5315 5316 mutex_enter(SD_MUTEX(un)); 5317 5318 un->un_devid = tmpid; 5319 bcopy(&dkdevid->dkd_devid, un->un_devid, sz); 5320 5321 kmem_free(dkdevid, buffer_size); 5322 5323 SD_TRACE(SD_LOG_ATTACH_DETACH, un, "sd_get_devid: exit: un:0x%p\n", un); 5324 5325 return (status); 5326 error: 5327 mutex_enter(SD_MUTEX(un)); 5328 kmem_free(dkdevid, buffer_size); 5329 return (status); 5330 } 5331 5332 5333 /* 5334 * Function: sd_create_devid 5335 * 5336 * Description: This routine will fabricate the device id and write it 5337 * to the disk. 5338 * 5339 * Arguments: un - driver soft state (unit) structure 5340 * 5341 * Return Code: value of the fabricated device id 5342 * 5343 * Context: Kernel Thread 5344 */ 5345 5346 static ddi_devid_t 5347 sd_create_devid(sd_ssc_t *ssc) 5348 { 5349 struct sd_lun *un; 5350 5351 ASSERT(ssc != NULL); 5352 un = ssc->ssc_un; 5353 ASSERT(un != NULL); 5354 5355 /* Fabricate the devid */ 5356 if (ddi_devid_init(SD_DEVINFO(un), DEVID_FAB, 0, NULL, &un->un_devid) 5357 == DDI_FAILURE) { 5358 return (NULL); 5359 } 5360 5361 /* Write the devid to disk */ 5362 if (sd_write_deviceid(ssc) != 0) { 5363 ddi_devid_free(un->un_devid); 5364 un->un_devid = NULL; 5365 } 5366 5367 return (un->un_devid); 5368 } 5369 5370 5371 /* 5372 * Function: sd_write_deviceid 5373 * 5374 * Description: This routine will write the device id to the disk 5375 * reserved sector. 
5376 * 5377 * Arguments: un - driver soft state (unit) structure 5378 * 5379 * Return Code: EINVAL 5380 * value returned by sd_send_scsi_cmd 5381 * 5382 * Context: Kernel Thread 5383 */ 5384 5385 static int 5386 sd_write_deviceid(sd_ssc_t *ssc) 5387 { 5388 struct dk_devid *dkdevid; 5389 diskaddr_t blk; 5390 uint_t *ip, chksum; 5391 int status; 5392 int i; 5393 struct sd_lun *un; 5394 5395 ASSERT(ssc != NULL); 5396 un = ssc->ssc_un; 5397 ASSERT(un != NULL); 5398 ASSERT(mutex_owned(SD_MUTEX(un))); 5399 5400 mutex_exit(SD_MUTEX(un)); 5401 if (cmlb_get_devid_block(un->un_cmlbhandle, &blk, 5402 (void *)SD_PATH_DIRECT) != 0) { 5403 mutex_enter(SD_MUTEX(un)); 5404 return (-1); 5405 } 5406 5407 5408 /* Allocate the buffer */ 5409 dkdevid = kmem_zalloc(un->un_sys_blocksize, KM_SLEEP); 5410 5411 /* Fill in the revision */ 5412 dkdevid->dkd_rev_hi = DK_DEVID_REV_MSB; 5413 dkdevid->dkd_rev_lo = DK_DEVID_REV_LSB; 5414 5415 /* Copy in the device id */ 5416 mutex_enter(SD_MUTEX(un)); 5417 bcopy(un->un_devid, &dkdevid->dkd_devid, 5418 ddi_devid_sizeof(un->un_devid)); 5419 mutex_exit(SD_MUTEX(un)); 5420 5421 /* Calculate the checksum */ 5422 chksum = 0; 5423 ip = (uint_t *)dkdevid; 5424 for (i = 0; i < ((un->un_sys_blocksize - sizeof (int))/sizeof (int)); 5425 i++) { 5426 chksum ^= ip[i]; 5427 } 5428 5429 /* Fill-in checksum */ 5430 DKD_FORMCHKSUM(chksum, dkdevid); 5431 5432 /* Write the reserved sector */ 5433 status = sd_send_scsi_WRITE(ssc, dkdevid, un->un_sys_blocksize, blk, 5434 SD_PATH_DIRECT); 5435 if (status != 0) 5436 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 5437 5438 kmem_free(dkdevid, un->un_sys_blocksize); 5439 5440 mutex_enter(SD_MUTEX(un)); 5441 return (status); 5442 } 5443 5444 5445 /* 5446 * Function: sd_check_vpd_page_support 5447 * 5448 * Description: This routine sends an inquiry command with the EVPD bit set and 5449 * a page code of 0x00 to the device. It is used to determine which 5450 * vital product pages are available to find the devid. We are 5451 * looking for pages 0x83 or 0x80. If we return a negative 1, the 5452 * device does not support that command. 5453 * 5454 * Arguments: un - driver soft state (unit) structure 5455 * 5456 * Return Code: 0 - success 5457 * 1 - check condition 5458 * 5459 * Context: This routine can sleep. 5460 */ 5461 5462 static int 5463 sd_check_vpd_page_support(sd_ssc_t *ssc) 5464 { 5465 uchar_t *page_list = NULL; 5466 uchar_t page_length = 0xff; /* Use max possible length */ 5467 uchar_t evpd = 0x01; /* Set the EVPD bit */ 5468 uchar_t page_code = 0x00; /* Supported VPD Pages */ 5469 int rval = 0; 5470 int counter; 5471 struct sd_lun *un; 5472 5473 ASSERT(ssc != NULL); 5474 un = ssc->ssc_un; 5475 ASSERT(un != NULL); 5476 ASSERT(mutex_owned(SD_MUTEX(un))); 5477 5478 mutex_exit(SD_MUTEX(un)); 5479 5480 /* 5481 * We'll set the page length to the maximum to save figuring it out 5482 * with an additional call. 5483 */ 5484 page_list = kmem_zalloc(page_length, KM_SLEEP); 5485 5486 rval = sd_send_scsi_INQUIRY(ssc, page_list, page_length, evpd, 5487 page_code, NULL); 5488 5489 if (rval != 0) 5490 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 5491 5492 mutex_enter(SD_MUTEX(un)); 5493 5494 /* 5495 * Now we must validate that the device accepted the command, as some 5496 * drives do not support it. If the drive does support it, we will 5497 * return 0, and the supported pages will be in un_vpd_page_mask. If 5498 * not, we return -1. 
5499 */ 5500 if ((rval == 0) && (page_list[VPD_MODE_PAGE] == 0x00)) { 5501 /* Loop to find one of the 2 pages we need */ 5502 counter = 4; /* Supported pages start at byte 4, with 0x00 */ 5503 5504 /* 5505 * Pages are returned in ascending order, and 0x83 is what we 5506 * are hoping for. 5507 */ 5508 while ((page_list[counter] <= 0x86) && 5509 (counter <= (page_list[VPD_PAGE_LENGTH] + 5510 VPD_HEAD_OFFSET))) { 5511 /* 5512 * Add 3 because page_list[3] is the number of 5513 * pages minus 3 5514 */ 5515 5516 switch (page_list[counter]) { 5517 case 0x00: 5518 un->un_vpd_page_mask |= SD_VPD_SUPPORTED_PG; 5519 break; 5520 case 0x80: 5521 un->un_vpd_page_mask |= SD_VPD_UNIT_SERIAL_PG; 5522 break; 5523 case 0x81: 5524 un->un_vpd_page_mask |= SD_VPD_OPERATING_PG; 5525 break; 5526 case 0x82: 5527 un->un_vpd_page_mask |= SD_VPD_ASCII_OP_PG; 5528 break; 5529 case 0x83: 5530 un->un_vpd_page_mask |= SD_VPD_DEVID_WWN_PG; 5531 break; 5532 case 0x86: 5533 un->un_vpd_page_mask |= SD_VPD_EXTENDED_DATA_PG; 5534 break; 5535 } 5536 counter++; 5537 } 5538 5539 } else { 5540 rval = -1; 5541 5542 SD_INFO(SD_LOG_ATTACH_DETACH, un, 5543 "sd_check_vpd_page_support: This drive does not implement " 5544 "VPD pages.\n"); 5545 } 5546 5547 kmem_free(page_list, page_length); 5548 5549 return (rval); 5550 } 5551 5552 5553 /* 5554 * Function: sd_setup_pm 5555 * 5556 * Description: Initialize Power Management on the device 5557 * 5558 * Context: Kernel Thread 5559 */ 5560 5561 static void 5562 sd_setup_pm(sd_ssc_t *ssc, dev_info_t *devi) 5563 { 5564 uint_t log_page_size; 5565 uchar_t *log_page_data; 5566 int rval = 0; 5567 struct sd_lun *un; 5568 5569 ASSERT(ssc != NULL); 5570 un = ssc->ssc_un; 5571 ASSERT(un != NULL); 5572 5573 /* 5574 * Since we are called from attach, holding a mutex for 5575 * un is unnecessary. Because some of the routines called 5576 * from here require SD_MUTEX to not be held, assert this 5577 * right up front. 5578 */ 5579 ASSERT(!mutex_owned(SD_MUTEX(un))); 5580 /* 5581 * Since the sd device does not have the 'reg' property, 5582 * cpr will not call its DDI_SUSPEND/DDI_RESUME entries. 5583 * The following code is to tell cpr that this device 5584 * DOES need to be suspended and resumed. 5585 */ 5586 (void) ddi_prop_update_string(DDI_DEV_T_NONE, devi, 5587 "pm-hardware-state", "needs-suspend-resume"); 5588 5589 /* 5590 * This complies with the new power management framework 5591 * for certain desktop machines. Create the pm_components 5592 * property as a string array property. 5593 */ 5594 if (un->un_f_pm_supported) { 5595 /* 5596 * not all devices have a motor, try it first. 5597 * some devices may return ILLEGAL REQUEST, some 5598 * will hang 5599 * The following START_STOP_UNIT is used to check if target 5600 * device has a motor. 
5601 */ 5602 un->un_f_start_stop_supported = TRUE; 5603 rval = sd_send_scsi_START_STOP_UNIT(ssc, SD_TARGET_START, 5604 SD_PATH_DIRECT); 5605 5606 if (rval != 0) { 5607 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 5608 un->un_f_start_stop_supported = FALSE; 5609 } 5610 5611 /* 5612 * create pm properties anyways otherwise the parent can't 5613 * go to sleep 5614 */ 5615 (void) sd_create_pm_components(devi, un); 5616 un->un_f_pm_is_enabled = TRUE; 5617 return; 5618 } 5619 5620 if (!un->un_f_log_sense_supported) { 5621 un->un_power_level = SD_SPINDLE_ON; 5622 un->un_f_pm_is_enabled = FALSE; 5623 return; 5624 } 5625 5626 rval = sd_log_page_supported(ssc, START_STOP_CYCLE_PAGE); 5627 5628 #ifdef SDDEBUG 5629 if (sd_force_pm_supported) { 5630 /* Force a successful result */ 5631 rval = 1; 5632 } 5633 #endif 5634 5635 /* 5636 * If the start-stop cycle counter log page is not supported 5637 * or if the pm-capable property is SD_PM_CAPABLE_FALSE (0) 5638 * then we should not create the pm_components property. 5639 */ 5640 if (rval == -1) { 5641 /* 5642 * Error. 5643 * Reading log sense failed, most likely this is 5644 * an older drive that does not support log sense. 5645 * If this fails auto-pm is not supported. 5646 */ 5647 un->un_power_level = SD_SPINDLE_ON; 5648 un->un_f_pm_is_enabled = FALSE; 5649 5650 } else if (rval == 0) { 5651 /* 5652 * Page not found. 5653 * The start stop cycle counter is implemented as page 5654 * START_STOP_CYCLE_PAGE_VU_PAGE (0x31) in older disks. For 5655 * newer disks it is implemented as START_STOP_CYCLE_PAGE (0xE). 5656 */ 5657 if (sd_log_page_supported(ssc, START_STOP_CYCLE_VU_PAGE) == 1) { 5658 /* 5659 * Page found, use this one. 5660 */ 5661 un->un_start_stop_cycle_page = START_STOP_CYCLE_VU_PAGE; 5662 un->un_f_pm_is_enabled = TRUE; 5663 } else { 5664 /* 5665 * Error or page not found. 5666 * auto-pm is not supported for this device. 5667 */ 5668 un->un_power_level = SD_SPINDLE_ON; 5669 un->un_f_pm_is_enabled = FALSE; 5670 } 5671 } else { 5672 /* 5673 * Page found, use it. 5674 */ 5675 un->un_start_stop_cycle_page = START_STOP_CYCLE_PAGE; 5676 un->un_f_pm_is_enabled = TRUE; 5677 } 5678 5679 5680 if (un->un_f_pm_is_enabled == TRUE) { 5681 log_page_size = START_STOP_CYCLE_COUNTER_PAGE_SIZE; 5682 log_page_data = kmem_zalloc(log_page_size, KM_SLEEP); 5683 5684 rval = sd_send_scsi_LOG_SENSE(ssc, log_page_data, 5685 log_page_size, un->un_start_stop_cycle_page, 5686 0x01, 0, SD_PATH_DIRECT); 5687 5688 if (rval != 0) { 5689 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 5690 } 5691 5692 #ifdef SDDEBUG 5693 if (sd_force_pm_supported) { 5694 /* Force a successful result */ 5695 rval = 0; 5696 } 5697 #endif 5698 5699 /* 5700 * If the Log sense for Page( Start/stop cycle counter page) 5701 * succeeds, then power management is supported and we can 5702 * enable auto-pm. 5703 */ 5704 if (rval == 0) { 5705 (void) sd_create_pm_components(devi, un); 5706 } else { 5707 un->un_power_level = SD_SPINDLE_ON; 5708 un->un_f_pm_is_enabled = FALSE; 5709 } 5710 5711 kmem_free(log_page_data, log_page_size); 5712 } 5713 } 5714 5715 5716 /* 5717 * Function: sd_create_pm_components 5718 * 5719 * Description: Initialize PM property. 
5720 * 5721 * Context: Kernel thread context 5722 */ 5723 5724 static void 5725 sd_create_pm_components(dev_info_t *devi, struct sd_lun *un) 5726 { 5727 char *pm_comp[] = { "NAME=spindle-motor", "0=off", "1=on", NULL }; 5728 5729 ASSERT(!mutex_owned(SD_MUTEX(un))); 5730 5731 if (ddi_prop_update_string_array(DDI_DEV_T_NONE, devi, 5732 "pm-components", pm_comp, 3) == DDI_PROP_SUCCESS) { 5733 /* 5734 * When components are initially created they are idle, 5735 * power up any non-removables. 5736 * Note: the return value of pm_raise_power can't be used 5737 * for determining if PM should be enabled for this device. 5738 * Even if you check the return values and remove this 5739 * property created above, the PM framework will not honor the 5740 * change after the first call to pm_raise_power. Hence, 5741 * removal of that property does not help if pm_raise_power 5742 * fails. In the case of removable media, the start/stop 5743 * will fail if the media is not present. 5744 */ 5745 if (un->un_f_attach_spinup && (pm_raise_power(SD_DEVINFO(un), 0, 5746 SD_SPINDLE_ON) == DDI_SUCCESS)) { 5747 mutex_enter(SD_MUTEX(un)); 5748 un->un_power_level = SD_SPINDLE_ON; 5749 mutex_enter(&un->un_pm_mutex); 5750 /* Set to on and not busy. */ 5751 un->un_pm_count = 0; 5752 } else { 5753 mutex_enter(SD_MUTEX(un)); 5754 un->un_power_level = SD_SPINDLE_OFF; 5755 mutex_enter(&un->un_pm_mutex); 5756 /* Set to off. */ 5757 un->un_pm_count = -1; 5758 } 5759 mutex_exit(&un->un_pm_mutex); 5760 mutex_exit(SD_MUTEX(un)); 5761 } else { 5762 un->un_power_level = SD_SPINDLE_ON; 5763 un->un_f_pm_is_enabled = FALSE; 5764 } 5765 } 5766 5767 5768 /* 5769 * Function: sd_ddi_suspend 5770 * 5771 * Description: Performs system power-down operations. This includes 5772 * setting the drive state to indicate its suspended so 5773 * that no new commands will be accepted. Also, wait for 5774 * all commands that are in transport or queued to a timer 5775 * for retry to complete. All timeout threads are cancelled. 5776 * 5777 * Return Code: DDI_FAILURE or DDI_SUCCESS 5778 * 5779 * Context: Kernel thread context 5780 */ 5781 5782 static int 5783 sd_ddi_suspend(dev_info_t *devi) 5784 { 5785 struct sd_lun *un; 5786 clock_t wait_cmds_complete; 5787 5788 un = ddi_get_soft_state(sd_state, ddi_get_instance(devi)); 5789 if (un == NULL) { 5790 return (DDI_FAILURE); 5791 } 5792 5793 SD_TRACE(SD_LOG_IO_PM, un, "sd_ddi_suspend: entry\n"); 5794 5795 mutex_enter(SD_MUTEX(un)); 5796 5797 /* Return success if the device is already suspended. */ 5798 if (un->un_state == SD_STATE_SUSPENDED) { 5799 mutex_exit(SD_MUTEX(un)); 5800 SD_TRACE(SD_LOG_IO_PM, un, "sd_ddi_suspend: " 5801 "device already suspended, exiting\n"); 5802 return (DDI_SUCCESS); 5803 } 5804 5805 /* Return failure if the device is being used by HA */ 5806 if (un->un_resvd_status & 5807 (SD_RESERVE | SD_WANT_RESERVE | SD_LOST_RESERVE)) { 5808 mutex_exit(SD_MUTEX(un)); 5809 SD_TRACE(SD_LOG_IO_PM, un, "sd_ddi_suspend: " 5810 "device in use by HA, exiting\n"); 5811 return (DDI_FAILURE); 5812 } 5813 5814 /* 5815 * Return failure if the device is in a resource wait 5816 * or power changing state. 
5817 */ 5818 if ((un->un_state == SD_STATE_RWAIT) || 5819 (un->un_state == SD_STATE_PM_CHANGING)) { 5820 mutex_exit(SD_MUTEX(un)); 5821 SD_TRACE(SD_LOG_IO_PM, un, "sd_ddi_suspend: " 5822 "device in resource wait state, exiting\n"); 5823 return (DDI_FAILURE); 5824 } 5825 5826 5827 un->un_save_state = un->un_last_state; 5828 New_state(un, SD_STATE_SUSPENDED); 5829 5830 /* 5831 * Wait for all commands that are in transport or queued to a timer 5832 * for retry to complete. 5833 * 5834 * While waiting, no new commands will be accepted or sent because of 5835 * the new state we set above. 5836 * 5837 * Wait till current operation has completed. If we are in the resource 5838 * wait state (with an intr outstanding) then we need to wait till the 5839 * intr completes and starts the next cmd. We want to wait for 5840 * SD_WAIT_CMDS_COMPLETE seconds before failing the DDI_SUSPEND. 5841 */ 5842 wait_cmds_complete = ddi_get_lbolt() + 5843 (sd_wait_cmds_complete * drv_usectohz(1000000)); 5844 5845 while (un->un_ncmds_in_transport != 0) { 5846 /* 5847 * Fail if commands do not finish in the specified time. 5848 */ 5849 if (cv_timedwait(&un->un_disk_busy_cv, SD_MUTEX(un), 5850 wait_cmds_complete) == -1) { 5851 /* 5852 * Undo the state changes made above. Everything 5853 * must go back to it's original value. 5854 */ 5855 Restore_state(un); 5856 un->un_last_state = un->un_save_state; 5857 /* Wake up any threads that might be waiting. */ 5858 cv_broadcast(&un->un_suspend_cv); 5859 mutex_exit(SD_MUTEX(un)); 5860 SD_ERROR(SD_LOG_IO_PM, un, 5861 "sd_ddi_suspend: failed due to outstanding cmds\n"); 5862 SD_TRACE(SD_LOG_IO_PM, un, "sd_ddi_suspend: exiting\n"); 5863 return (DDI_FAILURE); 5864 } 5865 } 5866 5867 /* 5868 * Cancel SCSI watch thread and timeouts, if any are active 5869 */ 5870 5871 if (SD_OK_TO_SUSPEND_SCSI_WATCHER(un)) { 5872 opaque_t temp_token = un->un_swr_token; 5873 mutex_exit(SD_MUTEX(un)); 5874 scsi_watch_suspend(temp_token); 5875 mutex_enter(SD_MUTEX(un)); 5876 } 5877 5878 if (un->un_reset_throttle_timeid != NULL) { 5879 timeout_id_t temp_id = un->un_reset_throttle_timeid; 5880 un->un_reset_throttle_timeid = NULL; 5881 mutex_exit(SD_MUTEX(un)); 5882 (void) untimeout(temp_id); 5883 mutex_enter(SD_MUTEX(un)); 5884 } 5885 5886 if (un->un_dcvb_timeid != NULL) { 5887 timeout_id_t temp_id = un->un_dcvb_timeid; 5888 un->un_dcvb_timeid = NULL; 5889 mutex_exit(SD_MUTEX(un)); 5890 (void) untimeout(temp_id); 5891 mutex_enter(SD_MUTEX(un)); 5892 } 5893 5894 mutex_enter(&un->un_pm_mutex); 5895 if (un->un_pm_timeid != NULL) { 5896 timeout_id_t temp_id = un->un_pm_timeid; 5897 un->un_pm_timeid = NULL; 5898 mutex_exit(&un->un_pm_mutex); 5899 mutex_exit(SD_MUTEX(un)); 5900 (void) untimeout(temp_id); 5901 mutex_enter(SD_MUTEX(un)); 5902 } else { 5903 mutex_exit(&un->un_pm_mutex); 5904 } 5905 5906 if (un->un_retry_timeid != NULL) { 5907 timeout_id_t temp_id = un->un_retry_timeid; 5908 un->un_retry_timeid = NULL; 5909 mutex_exit(SD_MUTEX(un)); 5910 (void) untimeout(temp_id); 5911 mutex_enter(SD_MUTEX(un)); 5912 5913 if (un->un_retry_bp != NULL) { 5914 un->un_retry_bp->av_forw = un->un_waitq_headp; 5915 un->un_waitq_headp = un->un_retry_bp; 5916 if (un->un_waitq_tailp == NULL) { 5917 un->un_waitq_tailp = un->un_retry_bp; 5918 } 5919 un->un_retry_bp = NULL; 5920 un->un_retry_statp = NULL; 5921 } 5922 } 5923 5924 if (un->un_direct_priority_timeid != NULL) { 5925 timeout_id_t temp_id = un->un_direct_priority_timeid; 5926 un->un_direct_priority_timeid = NULL; 5927 mutex_exit(SD_MUTEX(un)); 5928 (void) 
    if (un->un_f_is_fibre == TRUE) {
        /*
         * Remove callbacks for insert and remove events
         */
        if (un->un_insert_event != NULL) {
            mutex_exit(SD_MUTEX(un));
            (void) ddi_remove_event_handler(un->un_insert_cb_id);
            mutex_enter(SD_MUTEX(un));
            un->un_insert_event = NULL;
        }

        if (un->un_remove_event != NULL) {
            mutex_exit(SD_MUTEX(un));
            (void) ddi_remove_event_handler(un->un_remove_cb_id);
            mutex_enter(SD_MUTEX(un));
            un->un_remove_event = NULL;
        }
    }

    mutex_exit(SD_MUTEX(un));

    SD_TRACE(SD_LOG_IO_PM, un, "sd_ddi_suspend: exit\n");

    return (DDI_SUCCESS);
}


/*
 * Function: sd_ddi_pm_suspend
 *
 * Description: Set the drive state to low power.
 *		Someone else is required to actually change the drive
 *		power level.
 *
 * Arguments: un - driver soft state (unit) structure
 *
 * Return Code: DDI_FAILURE or DDI_SUCCESS
 *
 * Context: Kernel thread context
 */

static int
sd_ddi_pm_suspend(struct sd_lun *un)
{
    ASSERT(un != NULL);
    SD_TRACE(SD_LOG_POWER, un, "sd_ddi_pm_suspend: entry\n");

    ASSERT(!mutex_owned(SD_MUTEX(un)));
    mutex_enter(SD_MUTEX(un));

    /*
     * Exit if power management is not enabled for this device, or if
     * the device is being used by HA.
     */
    if ((un->un_f_pm_is_enabled == FALSE) || (un->un_resvd_status &
        (SD_RESERVE | SD_WANT_RESERVE | SD_LOST_RESERVE))) {
        mutex_exit(SD_MUTEX(un));
        SD_TRACE(SD_LOG_POWER, un, "sd_ddi_pm_suspend: exiting\n");
        return (DDI_SUCCESS);
    }

    SD_INFO(SD_LOG_POWER, un, "sd_ddi_pm_suspend: un_ncmds_in_driver=%ld\n",
        un->un_ncmds_in_driver);

    /*
     * See if the device is not busy, i.e.:
     *    - we have no commands in the driver for this device
     *    - not waiting for resources
     */
    if ((un->un_ncmds_in_driver == 0) &&
        (un->un_state != SD_STATE_RWAIT)) {
        /*
         * The device is not busy, so it is OK to go to low power state.
         * Indicate low power, but rely on someone else to actually
         * change it.
         */
        mutex_enter(&un->un_pm_mutex);
        un->un_pm_count = -1;
        mutex_exit(&un->un_pm_mutex);
        un->un_power_level = SD_SPINDLE_OFF;
    }

    mutex_exit(SD_MUTEX(un));

    SD_TRACE(SD_LOG_POWER, un, "sd_ddi_pm_suspend: exit\n");

    return (DDI_SUCCESS);
}


/*
 * Function: sd_ddi_resume
 *
 * Description: Performs system power-up operations.
 *
 * Return Code: DDI_SUCCESS
 *		DDI_FAILURE
 *
 * Context: Kernel thread context
 */

static int
sd_ddi_resume(dev_info_t *devi)
{
    struct sd_lun	*un;

    un = ddi_get_soft_state(sd_state, ddi_get_instance(devi));
    if (un == NULL) {
        return (DDI_FAILURE);
    }

    SD_TRACE(SD_LOG_IO_PM, un, "sd_ddi_resume: entry\n");

    mutex_enter(SD_MUTEX(un));
    Restore_state(un);

    /*
     * Restore the state that was saved, to give
     * the right state in un_last_state
     */
    un->un_last_state = un->un_save_state;
    /*
     * Note: throttle comes back at full.
     * Also note: this MUST be done before calling pm_raise_power
     * otherwise the system can get hung in biowait. The scenario where
     * this'll happen is under cpr suspend.
     * Writing of the system state goes through sddump, which writes 0 to
     * un_throttle. If writing the system state then fails (for example,
     * if the partition is too small), cpr attempts a resume. If the
     * throttle isn't restored from the saved value until after calling
     * pm_raise_power, then cmds sent in sdpower are not transported and
     * sd_send_scsi_cmd hangs in biowait.
     */
    un->un_throttle = un->un_saved_throttle;

    /*
     * The chance of failure is very rare, as the only command issued in
     * the power entry point is the START command on a 0->1 or unknown->1
     * transition. Put the device into the SPINDLE ON state irrespective of
     * the state in which it was suspended. Ignore the return value, as the
     * resume should not be failed. In the case of removable media the
     * media need not be inserted, so there is a chance that raise power
     * will fail with media not present.
     */
    if (un->un_f_attach_spinup) {
        mutex_exit(SD_MUTEX(un));
        (void) pm_raise_power(SD_DEVINFO(un), 0, SD_SPINDLE_ON);
        mutex_enter(SD_MUTEX(un));
    }

    /*
     * Don't broadcast to the suspend cv and therefore possibly
     * start I/O until after power has been restored.
     */
    cv_broadcast(&un->un_suspend_cv);
    cv_broadcast(&un->un_state_cv);

    /* restart thread */
    if (SD_OK_TO_RESUME_SCSI_WATCHER(un)) {
        scsi_watch_resume(un->un_swr_token);
    }

#if (defined(__fibre))
    if (un->un_f_is_fibre == TRUE) {
        /*
         * Add callbacks for insert and remove events
         */
        if (strcmp(un->un_node_type, DDI_NT_BLOCK_CHAN)) {
            sd_init_event_callbacks(un);
        }
    }
#endif

    /*
     * Transport any pending commands to the target.
     *
     * If this is a low-activity device, commands in the queue will have to
     * wait until new commands come in, which may take a while. Also, we
     * specifically don't check un_ncmds_in_transport because we know that
     * there really are no commands in progress after the unit was
     * suspended, and we could have reached the throttle level, been
     * suspended, and have no new commands coming in for a while. Highly
     * unlikely, but so is the low-activity disk scenario.
     */
    ddi_xbuf_dispatch(un->un_xbuf_attr);

    sd_start_cmds(un, NULL);
    mutex_exit(SD_MUTEX(un));

    SD_TRACE(SD_LOG_IO_PM, un, "sd_ddi_resume: exit\n");

    return (DDI_SUCCESS);
}

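
/*
 * For reference, the un_pm_count convention, as inferred from its use in
 * sd_create_pm_components(), sd_ddi_pm_suspend() and sd_ddi_pm_resume()
 * (a summary, not a normative definition):
 *
 *	-1	device is (or may be) in low power, idle
 *	 0	device powered up and idle
 *	>0	device powered up and busy
 *
 * sd_ddi_pm_resume() below bumps the count from -1 to 0 and asserts
 * exactly that transition.
 */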
/*
 * Function: sd_ddi_pm_resume
 *
 * Description: Set the drive state to powered on.
 *		Someone else is required to actually change the drive
 *		power level.
 *
 * Arguments: un - driver soft state (unit) structure
 *
 * Return Code: DDI_SUCCESS
 *
 * Context: Kernel thread context
 */

static int
sd_ddi_pm_resume(struct sd_lun *un)
{
    ASSERT(un != NULL);

    ASSERT(!mutex_owned(SD_MUTEX(un)));
    mutex_enter(SD_MUTEX(un));
    un->un_power_level = SD_SPINDLE_ON;

    ASSERT(!mutex_owned(&un->un_pm_mutex));
    mutex_enter(&un->un_pm_mutex);
    if (SD_DEVICE_IS_IN_LOW_POWER(un)) {
        un->un_pm_count++;
        ASSERT(un->un_pm_count == 0);
        /*
         * Note: no longer do the cv_broadcast on un_suspend_cv. The
         * un_suspend_cv is for a system resume, not a power management
         * device resume. (4297749)
         * cv_broadcast(&un->un_suspend_cv);
         */
    }
    mutex_exit(&un->un_pm_mutex);
    mutex_exit(SD_MUTEX(un));

    return (DDI_SUCCESS);
}


/*
 * Function: sd_pm_idletimeout_handler
 *
 * Description: A timer routine that's active only while a device is busy.
 *		The purpose is to extend slightly the pm framework's busy
 *		view of the device to prevent busy/idle thrashing for
 *		back-to-back commands. Do this by comparing the current time
 *		to the time at which the last command completed and, when the
 *		difference is greater than sd_pm_idletime, calling
 *		pm_idle_component. In addition to indicating idle to the pm
 *		framework, update the chain type to again use the internal pm
 *		layers of the driver.
 *
 * Arguments: arg - driver soft state (unit) structure
 *
 * Context: Executes in a timeout(9F) thread context
 */

static void
sd_pm_idletimeout_handler(void *arg)
{
    struct sd_lun *un = arg;

    time_t	now;

    mutex_enter(&sd_detach_mutex);
    if (un->un_detach_count != 0) {
        /* Abort if the instance is detaching */
        mutex_exit(&sd_detach_mutex);
        return;
    }
    mutex_exit(&sd_detach_mutex);

    now = ddi_get_time();
    /*
     * Grab both mutexes, in the proper order, since we're accessing
     * both PM and softstate variables.
     */
    mutex_enter(SD_MUTEX(un));
    mutex_enter(&un->un_pm_mutex);
    if (((now - un->un_pm_idle_time) > sd_pm_idletime) &&
        (un->un_ncmds_in_driver == 0) && (un->un_pm_count == 0)) {
        /*
         * Update the chain types.
         * This takes effect on the next command received.
         */
        if (un->un_f_non_devbsize_supported) {
            un->un_buf_chain_type = SD_CHAIN_INFO_RMMEDIA;
        } else {
            un->un_buf_chain_type = SD_CHAIN_INFO_DISK;
        }
        un->un_uscsi_chain_type = SD_CHAIN_INFO_USCSI_CMD;

        SD_TRACE(SD_LOG_IO_PM, un,
            "sd_pm_idletimeout_handler: idling device\n");
        (void) pm_idle_component(SD_DEVINFO(un), 0);
        un->un_pm_idle_timeid = NULL;
    } else {
        un->un_pm_idle_timeid =
            timeout(sd_pm_idletimeout_handler, un,
            (drv_usectohz((clock_t)300000))); /* 300 ms. */
    }
    mutex_exit(&un->un_pm_mutex);
    mutex_exit(SD_MUTEX(un));
}


/*
 * Function: sd_pm_timeout_handler
 *
 * Description: Callback to tell the framework we are idle.
 *
 * Context: timeout(9F) thread context.
 */

static void
sd_pm_timeout_handler(void *arg)
{
    struct sd_lun *un = arg;

    (void) pm_idle_component(SD_DEVINFO(un), 0);
    mutex_enter(&un->un_pm_mutex);
    un->un_pm_timeid = NULL;
    mutex_exit(&un->un_pm_mutex);
}

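
/*
 * sd_pm_idletimeout_handler() above is a self-rearming timer. A minimal
 * sketch of the pattern, with the locking and chain-type update elided:
 *
 *	static void
 *	idle_handler(void *arg)
 *	{
 *		if (idle long enough && nothing queued) {
 *			(void) pm_idle_component(dip, 0);
 *			timeid = NULL;			stop re-arming
 *		} else {
 *			timeid = timeout(idle_handler, arg,
 *			    drv_usectohz(300000));	re-check in 300 ms
 *		}
 *	}
 */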
/*
 * Function: sdpower
 *
 * Description: PM entry point.
 *
 * Return Code: DDI_SUCCESS
 *		DDI_FAILURE
 *
 * Context: Kernel thread context
 */

static int
sdpower(dev_info_t *devi, int component, int level)
{
    struct sd_lun	*un;
    int			instance;
    int			rval = DDI_SUCCESS;
    uint_t		i, log_page_size, maxcycles, ncycles;
    uchar_t		*log_page_data;
    int			log_sense_page;
    int			medium_present;
    time_t		intvlp;
    dev_t		dev;
    struct pm_trans_data	sd_pm_tran_data;
    uchar_t		save_state;
    int			sval;
    uchar_t		state_before_pm;
    int			got_semaphore_here;
    sd_ssc_t		*ssc;

    instance = ddi_get_instance(devi);

    if (((un = ddi_get_soft_state(sd_state, instance)) == NULL) ||
        (SD_SPINDLE_OFF > level) || (level > SD_SPINDLE_ON) ||
        component != 0) {
        return (DDI_FAILURE);
    }

    dev = sd_make_device(SD_DEVINFO(un));
    ssc = sd_ssc_init(un);

    SD_TRACE(SD_LOG_IO_PM, un, "sdpower: entry, level = %d\n", level);

    /*
     * Must synchronize power down with close.
     * Attempt to decrement/acquire the open/close semaphore,
     * but do NOT wait on it. If it's not greater than zero,
     * i.e. it can't be decremented without waiting, then
     * someone else, either open or close, already has it
     * and the try returns 0. Use that knowledge here to determine
     * if it's OK to change the device power level.
     * Also, only increment it on exit if it was decremented, i.e. gotten,
     * here.
     */
    got_semaphore_here = sema_tryp(&un->un_semoclose);

    mutex_enter(SD_MUTEX(un));

    SD_INFO(SD_LOG_POWER, un, "sdpower: un_ncmds_in_driver = %ld\n",
        un->un_ncmds_in_driver);

    /*
     * If un_ncmds_in_driver is non-zero it indicates commands are already
     * being processed in the driver; if the semaphore was not gotten here
     * it indicates an open or close is being processed. Going to low power
     * at the same time cannot be allowed, so return failure.
     */
    if ((level == SD_SPINDLE_OFF) &&
        ((un->un_ncmds_in_driver != 0) || (got_semaphore_here == 0))) {
        mutex_exit(SD_MUTEX(un));

        if (got_semaphore_here != 0) {
            sema_v(&un->un_semoclose);
        }
        SD_TRACE(SD_LOG_IO_PM, un,
            "sdpower: exit, device has queued cmds.\n");

        goto sdpower_failed;
    }

    /*
     * If the device is OFFLINE, it is effectively dead: power states are
     * changed by sending commands to the device, and any such command
     * would fail anyway, so just return failure here.
     *
     * Power changes to a device that's OFFLINE or SUSPENDED
     * are not allowed.
     */
    if ((un->un_state == SD_STATE_OFFLINE) ||
        (un->un_state == SD_STATE_SUSPENDED)) {
        mutex_exit(SD_MUTEX(un));

        if (got_semaphore_here != 0) {
            sema_v(&un->un_semoclose);
        }
        SD_TRACE(SD_LOG_IO_PM, un,
            "sdpower: exit, device is off-line.\n");

        goto sdpower_failed;
    }

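/*
 * A minimal sketch of the non-blocking semaphore handshake used by
 * sdpower() (see semaphore(9F); simplified here):
 *
 *	int got = sema_tryp(&un->un_semoclose);	nonzero = acquired
 *	...
 *	if (got != 0)
 *		sema_v(&un->un_semoclose);	release only if acquired
 *
 * sema_tryp() never blocks, so sdpower() can refuse a power-down while an
 * open or close holds the semaphore instead of waiting for it.
 */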
6363 */ 6364 state_before_pm = un->un_state; 6365 un->un_state = SD_STATE_PM_CHANGING; 6366 6367 mutex_exit(SD_MUTEX(un)); 6368 6369 /* 6370 * If "pm-capable" property is set to TRUE by HBA drivers, 6371 * bypass the following checking, otherwise, check the log 6372 * sense information for this device 6373 */ 6374 if ((level == SD_SPINDLE_OFF) && un->un_f_log_sense_supported) { 6375 /* 6376 * Get the log sense information to understand whether the 6377 * the powercycle counts have gone beyond the threshhold. 6378 */ 6379 log_page_size = START_STOP_CYCLE_COUNTER_PAGE_SIZE; 6380 log_page_data = kmem_zalloc(log_page_size, KM_SLEEP); 6381 6382 mutex_enter(SD_MUTEX(un)); 6383 log_sense_page = un->un_start_stop_cycle_page; 6384 mutex_exit(SD_MUTEX(un)); 6385 6386 rval = sd_send_scsi_LOG_SENSE(ssc, log_page_data, 6387 log_page_size, log_sense_page, 0x01, 0, SD_PATH_DIRECT); 6388 6389 if (rval != 0) { 6390 if (rval == EIO) 6391 sd_ssc_assessment(ssc, SD_FMT_STATUS_CHECK); 6392 else 6393 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 6394 } 6395 6396 #ifdef SDDEBUG 6397 if (sd_force_pm_supported) { 6398 /* Force a successful result */ 6399 rval = 0; 6400 } 6401 #endif 6402 if (rval != 0) { 6403 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 6404 "Log Sense Failed\n"); 6405 6406 kmem_free(log_page_data, log_page_size); 6407 /* Cannot support power management on those drives */ 6408 6409 if (got_semaphore_here != 0) { 6410 sema_v(&un->un_semoclose); 6411 } 6412 /* 6413 * On exit put the state back to it's original value 6414 * and broadcast to anyone waiting for the power 6415 * change completion. 6416 */ 6417 mutex_enter(SD_MUTEX(un)); 6418 un->un_state = state_before_pm; 6419 cv_broadcast(&un->un_suspend_cv); 6420 mutex_exit(SD_MUTEX(un)); 6421 SD_TRACE(SD_LOG_IO_PM, un, 6422 "sdpower: exit, Log Sense Failed.\n"); 6423 6424 goto sdpower_failed; 6425 } 6426 6427 /* 6428 * From the page data - Convert the essential information to 6429 * pm_trans_data 6430 */ 6431 maxcycles = 6432 (log_page_data[0x1c] << 24) | (log_page_data[0x1d] << 16) | 6433 (log_page_data[0x1E] << 8) | log_page_data[0x1F]; 6434 6435 sd_pm_tran_data.un.scsi_cycles.lifemax = maxcycles; 6436 6437 ncycles = 6438 (log_page_data[0x24] << 24) | (log_page_data[0x25] << 16) | 6439 (log_page_data[0x26] << 8) | log_page_data[0x27]; 6440 6441 sd_pm_tran_data.un.scsi_cycles.ncycles = ncycles; 6442 6443 for (i = 0; i < DC_SCSI_MFR_LEN; i++) { 6444 sd_pm_tran_data.un.scsi_cycles.svc_date[i] = 6445 log_page_data[8+i]; 6446 } 6447 6448 kmem_free(log_page_data, log_page_size); 6449 6450 /* 6451 * Call pm_trans_check routine to get the Ok from 6452 * the global policy 6453 */ 6454 6455 sd_pm_tran_data.format = DC_SCSI_FORMAT; 6456 sd_pm_tran_data.un.scsi_cycles.flag = 0; 6457 6458 rval = pm_trans_check(&sd_pm_tran_data, &intvlp); 6459 #ifdef SDDEBUG 6460 if (sd_force_pm_supported) { 6461 /* Force a successful result */ 6462 rval = 1; 6463 } 6464 #endif 6465 switch (rval) { 6466 case 0: 6467 /* 6468 * Not Ok to Power cycle or error in parameters passed 6469 * Would have given the advised time to consider power 6470 * cycle. Based on the new intvlp parameter we are 6471 * supposed to pretend we are busy so that pm framework 6472 * will never call our power entry point. Because of 6473 * that install a timeout handler and wait for the 6474 * recommended time to elapse so that power management 6475 * can be effective again. 6476 * 6477 * To effect this behavior, call pm_busy_component to 6478 * indicate to the framework this device is busy. 
6479 * By not adjusting un_pm_count the rest of PM in 6480 * the driver will function normally, and independent 6481 * of this but because the framework is told the device 6482 * is busy it won't attempt powering down until it gets 6483 * a matching idle. The timeout handler sends this. 6484 * Note: sd_pm_entry can't be called here to do this 6485 * because sdpower may have been called as a result 6486 * of a call to pm_raise_power from within sd_pm_entry. 6487 * 6488 * If a timeout handler is already active then 6489 * don't install another. 6490 */ 6491 mutex_enter(&un->un_pm_mutex); 6492 if (un->un_pm_timeid == NULL) { 6493 un->un_pm_timeid = 6494 timeout(sd_pm_timeout_handler, 6495 un, intvlp * drv_usectohz(1000000)); 6496 mutex_exit(&un->un_pm_mutex); 6497 (void) pm_busy_component(SD_DEVINFO(un), 0); 6498 } else { 6499 mutex_exit(&un->un_pm_mutex); 6500 } 6501 if (got_semaphore_here != 0) { 6502 sema_v(&un->un_semoclose); 6503 } 6504 /* 6505 * On exit put the state back to it's original value 6506 * and broadcast to anyone waiting for the power 6507 * change completion. 6508 */ 6509 mutex_enter(SD_MUTEX(un)); 6510 un->un_state = state_before_pm; 6511 cv_broadcast(&un->un_suspend_cv); 6512 mutex_exit(SD_MUTEX(un)); 6513 6514 SD_TRACE(SD_LOG_IO_PM, un, "sdpower: exit, " 6515 "trans check Failed, not ok to power cycle.\n"); 6516 6517 goto sdpower_failed; 6518 case -1: 6519 if (got_semaphore_here != 0) { 6520 sema_v(&un->un_semoclose); 6521 } 6522 /* 6523 * On exit put the state back to it's original value 6524 * and broadcast to anyone waiting for the power 6525 * change completion. 6526 */ 6527 mutex_enter(SD_MUTEX(un)); 6528 un->un_state = state_before_pm; 6529 cv_broadcast(&un->un_suspend_cv); 6530 mutex_exit(SD_MUTEX(un)); 6531 SD_TRACE(SD_LOG_IO_PM, un, 6532 "sdpower: exit, trans check command Failed.\n"); 6533 6534 goto sdpower_failed; 6535 } 6536 } 6537 6538 if (level == SD_SPINDLE_OFF) { 6539 /* 6540 * Save the last state... if the STOP FAILS we need it 6541 * for restoring 6542 */ 6543 mutex_enter(SD_MUTEX(un)); 6544 save_state = un->un_last_state; 6545 /* 6546 * There must not be any cmds. getting processed 6547 * in the driver when we get here. Power to the 6548 * device is potentially going off. 6549 */ 6550 ASSERT(un->un_ncmds_in_driver == 0); 6551 mutex_exit(SD_MUTEX(un)); 6552 6553 /* 6554 * For now suspend the device completely before spindle is 6555 * turned off 6556 */ 6557 if ((rval = sd_ddi_pm_suspend(un)) == DDI_FAILURE) { 6558 if (got_semaphore_here != 0) { 6559 sema_v(&un->un_semoclose); 6560 } 6561 /* 6562 * On exit put the state back to it's original value 6563 * and broadcast to anyone waiting for the power 6564 * change completion. 6565 */ 6566 mutex_enter(SD_MUTEX(un)); 6567 un->un_state = state_before_pm; 6568 cv_broadcast(&un->un_suspend_cv); 6569 mutex_exit(SD_MUTEX(un)); 6570 SD_TRACE(SD_LOG_IO_PM, un, 6571 "sdpower: exit, PM suspend Failed.\n"); 6572 6573 goto sdpower_failed; 6574 } 6575 } 6576 6577 /* 6578 * The transition from SPINDLE_OFF to SPINDLE_ON can happen in open, 6579 * close, or strategy. Dump no long uses this routine, it uses it's 6580 * own code so it can be done in polled mode. 6581 */ 6582 6583 medium_present = TRUE; 6584 6585 /* 6586 * When powering up, issue a TUR in case the device is at unit 6587 * attention. Don't do retries. Bypass the PM layer, otherwise 6588 * a deadlock on un_pm_busy_cv will occur. 
6589 */ 6590 if (level == SD_SPINDLE_ON) { 6591 sval = sd_send_scsi_TEST_UNIT_READY(ssc, 6592 SD_DONT_RETRY_TUR | SD_BYPASS_PM); 6593 if (sval != 0) 6594 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 6595 } 6596 6597 SD_TRACE(SD_LOG_IO_PM, un, "sdpower: sending \'%s\' unit\n", 6598 ((level == SD_SPINDLE_ON) ? "START" : "STOP")); 6599 6600 sval = sd_send_scsi_START_STOP_UNIT(ssc, 6601 ((level == SD_SPINDLE_ON) ? SD_TARGET_START : SD_TARGET_STOP), 6602 SD_PATH_DIRECT); 6603 if (sval != 0) { 6604 if (sval == EIO) 6605 sd_ssc_assessment(ssc, SD_FMT_STATUS_CHECK); 6606 else 6607 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 6608 } 6609 6610 /* Command failed, check for media present. */ 6611 if ((sval == ENXIO) && un->un_f_has_removable_media) { 6612 medium_present = FALSE; 6613 } 6614 6615 /* 6616 * The conditions of interest here are: 6617 * if a spindle off with media present fails, 6618 * then restore the state and return an error. 6619 * else if a spindle on fails, 6620 * then return an error (there's no state to restore). 6621 * In all other cases we setup for the new state 6622 * and return success. 6623 */ 6624 switch (level) { 6625 case SD_SPINDLE_OFF: 6626 if ((medium_present == TRUE) && (sval != 0)) { 6627 /* The stop command from above failed */ 6628 rval = DDI_FAILURE; 6629 /* 6630 * The stop command failed, and we have media 6631 * present. Put the level back by calling the 6632 * sd_pm_resume() and set the state back to 6633 * it's previous value. 6634 */ 6635 (void) sd_ddi_pm_resume(un); 6636 mutex_enter(SD_MUTEX(un)); 6637 un->un_last_state = save_state; 6638 mutex_exit(SD_MUTEX(un)); 6639 break; 6640 } 6641 /* 6642 * The stop command from above succeeded. 6643 */ 6644 if (un->un_f_monitor_media_state) { 6645 /* 6646 * Terminate watch thread in case of removable media 6647 * devices going into low power state. This is as per 6648 * the requirements of pm framework, otherwise commands 6649 * will be generated for the device (through watch 6650 * thread), even when the device is in low power state. 6651 */ 6652 mutex_enter(SD_MUTEX(un)); 6653 un->un_f_watcht_stopped = FALSE; 6654 if (un->un_swr_token != NULL) { 6655 opaque_t temp_token = un->un_swr_token; 6656 un->un_f_watcht_stopped = TRUE; 6657 un->un_swr_token = NULL; 6658 mutex_exit(SD_MUTEX(un)); 6659 (void) scsi_watch_request_terminate(temp_token, 6660 SCSI_WATCH_TERMINATE_ALL_WAIT); 6661 } else { 6662 mutex_exit(SD_MUTEX(un)); 6663 } 6664 } 6665 break; 6666 6667 default: /* The level requested is spindle on... */ 6668 /* 6669 * Legacy behavior: return success on a failed spinup 6670 * if there is no media in the drive. 6671 * Do this by looking at medium_present here. 6672 */ 6673 if ((sval != 0) && medium_present) { 6674 /* The start command from above failed */ 6675 rval = DDI_FAILURE; 6676 break; 6677 } 6678 /* 6679 * The start command from above succeeded 6680 * Resume the devices now that we have 6681 * started the disks 6682 */ 6683 (void) sd_ddi_pm_resume(un); 6684 6685 /* 6686 * Resume the watch thread since it was suspended 6687 * when the device went into low power mode. 
6688 */ 6689 if (un->un_f_monitor_media_state) { 6690 mutex_enter(SD_MUTEX(un)); 6691 if (un->un_f_watcht_stopped == TRUE) { 6692 opaque_t temp_token; 6693 6694 un->un_f_watcht_stopped = FALSE; 6695 mutex_exit(SD_MUTEX(un)); 6696 temp_token = scsi_watch_request_submit( 6697 SD_SCSI_DEVP(un), 6698 sd_check_media_time, 6699 SENSE_LENGTH, sd_media_watch_cb, 6700 (caddr_t)dev); 6701 mutex_enter(SD_MUTEX(un)); 6702 un->un_swr_token = temp_token; 6703 } 6704 mutex_exit(SD_MUTEX(un)); 6705 } 6706 } 6707 if (got_semaphore_here != 0) { 6708 sema_v(&un->un_semoclose); 6709 } 6710 /* 6711 * On exit put the state back to it's original value 6712 * and broadcast to anyone waiting for the power 6713 * change completion. 6714 */ 6715 mutex_enter(SD_MUTEX(un)); 6716 un->un_state = state_before_pm; 6717 cv_broadcast(&un->un_suspend_cv); 6718 mutex_exit(SD_MUTEX(un)); 6719 6720 SD_TRACE(SD_LOG_IO_PM, un, "sdpower: exit, status = 0x%x\n", rval); 6721 6722 sd_ssc_fini(ssc); 6723 return (rval); 6724 6725 sdpower_failed: 6726 6727 sd_ssc_fini(ssc); 6728 return (DDI_FAILURE); 6729 } 6730 6731 6732 6733 /* 6734 * Function: sdattach 6735 * 6736 * Description: Driver's attach(9e) entry point function. 6737 * 6738 * Arguments: devi - opaque device info handle 6739 * cmd - attach type 6740 * 6741 * Return Code: DDI_SUCCESS 6742 * DDI_FAILURE 6743 * 6744 * Context: Kernel thread context 6745 */ 6746 6747 static int 6748 sdattach(dev_info_t *devi, ddi_attach_cmd_t cmd) 6749 { 6750 switch (cmd) { 6751 case DDI_ATTACH: 6752 return (sd_unit_attach(devi)); 6753 case DDI_RESUME: 6754 return (sd_ddi_resume(devi)); 6755 default: 6756 break; 6757 } 6758 return (DDI_FAILURE); 6759 } 6760 6761 6762 /* 6763 * Function: sddetach 6764 * 6765 * Description: Driver's detach(9E) entry point function. 6766 * 6767 * Arguments: devi - opaque device info handle 6768 * cmd - detach type 6769 * 6770 * Return Code: DDI_SUCCESS 6771 * DDI_FAILURE 6772 * 6773 * Context: Kernel thread context 6774 */ 6775 6776 static int 6777 sddetach(dev_info_t *devi, ddi_detach_cmd_t cmd) 6778 { 6779 switch (cmd) { 6780 case DDI_DETACH: 6781 return (sd_unit_detach(devi)); 6782 case DDI_SUSPEND: 6783 return (sd_ddi_suspend(devi)); 6784 default: 6785 break; 6786 } 6787 return (DDI_FAILURE); 6788 } 6789 6790 6791 /* 6792 * Function: sd_sync_with_callback 6793 * 6794 * Description: Prevents sd_unit_attach or sd_unit_detach from freeing the soft 6795 * state while the callback routine is active. 6796 * 6797 * Arguments: un: softstate structure for the instance 6798 * 6799 * Context: Kernel thread context 6800 */ 6801 6802 static void 6803 sd_sync_with_callback(struct sd_lun *un) 6804 { 6805 ASSERT(un != NULL); 6806 6807 mutex_enter(SD_MUTEX(un)); 6808 6809 ASSERT(un->un_in_callback >= 0); 6810 6811 while (un->un_in_callback > 0) { 6812 mutex_exit(SD_MUTEX(un)); 6813 delay(2); 6814 mutex_enter(SD_MUTEX(un)); 6815 } 6816 6817 mutex_exit(SD_MUTEX(un)); 6818 } 6819 6820 /* 6821 * Function: sd_unit_attach 6822 * 6823 * Description: Performs DDI_ATTACH processing for sdattach(). Allocates 6824 * the soft state structure for the device and performs 6825 * all necessary structure and device initializations. 6826 * 6827 * Arguments: devi: the system's dev_info_t for the device. 6828 * 6829 * Return Code: DDI_SUCCESS if attach is successful. 6830 * DDI_FAILURE if any part of the attach fails. 6831 * 6832 * Context: Called at attach(9e) time for the DDI_ATTACH flag. 6833 * Kernel thread context only. Can sleep. 
6834 */ 6835 6836 static int 6837 sd_unit_attach(dev_info_t *devi) 6838 { 6839 struct scsi_device *devp; 6840 struct sd_lun *un; 6841 char *variantp; 6842 int reservation_flag = SD_TARGET_IS_UNRESERVED; 6843 int instance; 6844 int rval; 6845 int wc_enabled; 6846 int tgt; 6847 uint64_t capacity; 6848 uint_t lbasize = 0; 6849 dev_info_t *pdip = ddi_get_parent(devi); 6850 int offbyone = 0; 6851 int geom_label_valid = 0; 6852 sd_ssc_t *ssc; 6853 int status; 6854 struct sd_fm_internal *sfip = NULL; 6855 int max_xfer_size; 6856 6857 /* 6858 * Retrieve the target driver's private data area. This was set 6859 * up by the HBA. 6860 */ 6861 devp = ddi_get_driver_private(devi); 6862 6863 /* 6864 * Retrieve the target ID of the device. 6865 */ 6866 tgt = ddi_prop_get_int(DDI_DEV_T_ANY, devi, DDI_PROP_DONTPASS, 6867 SCSI_ADDR_PROP_TARGET, -1); 6868 6869 /* 6870 * Since we have no idea what state things were left in by the last 6871 * user of the device, set up some 'default' settings, ie. turn 'em 6872 * off. The scsi_ifsetcap calls force re-negotiations with the drive. 6873 * Do this before the scsi_probe, which sends an inquiry. 6874 * This is a fix for bug (4430280). 6875 * Of special importance is wide-xfer. The drive could have been left 6876 * in wide transfer mode by the last driver to communicate with it, 6877 * this includes us. If that's the case, and if the following is not 6878 * setup properly or we don't re-negotiate with the drive prior to 6879 * transferring data to/from the drive, it causes bus parity errors, 6880 * data overruns, and unexpected interrupts. This first occurred when 6881 * the fix for bug (4378686) was made. 6882 */ 6883 (void) scsi_ifsetcap(&devp->sd_address, "lun-reset", 0, 1); 6884 (void) scsi_ifsetcap(&devp->sd_address, "wide-xfer", 0, 1); 6885 (void) scsi_ifsetcap(&devp->sd_address, "auto-rqsense", 0, 1); 6886 6887 /* 6888 * Currently, scsi_ifsetcap sets tagged-qing capability for all LUNs 6889 * on a target. Setting it per lun instance actually sets the 6890 * capability of this target, which affects those luns already 6891 * attached on the same target. So during attach, we can only disable 6892 * this capability only when no other lun has been attached on this 6893 * target. By doing this, we assume a target has the same tagged-qing 6894 * capability for every lun. The condition can be removed when HBA 6895 * is changed to support per lun based tagged-qing capability. 6896 */ 6897 if (sd_scsi_get_target_lun_count(pdip, tgt) < 1) { 6898 (void) scsi_ifsetcap(&devp->sd_address, "tagged-qing", 0, 1); 6899 } 6900 6901 /* 6902 * Use scsi_probe() to issue an INQUIRY command to the device. 6903 * This call will allocate and fill in the scsi_inquiry structure 6904 * and point the sd_inq member of the scsi_device structure to it. 6905 * If the attach succeeds, then this memory will not be de-allocated 6906 * (via scsi_unprobe()) until the instance is detached. 6907 */ 6908 if (scsi_probe(devp, SLEEP_FUNC) != SCSIPROBE_EXISTS) { 6909 goto probe_failed; 6910 } 6911 6912 /* 6913 * Check the device type as specified in the inquiry data and 6914 * claim it if it is of a type that we support. 6915 */ 6916 switch (devp->sd_inq->inq_dtype) { 6917 case DTYPE_DIRECT: 6918 break; 6919 case DTYPE_RODIRECT: 6920 break; 6921 case DTYPE_OPTICAL: 6922 break; 6923 case DTYPE_NOTPRESENT: 6924 default: 6925 /* Unsupported device type; fail the attach. */ 6926 goto probe_failed; 6927 } 6928 6929 /* 6930 * Allocate the soft state structure for this unit. 
6931 * 6932 * We rely upon this memory being set to all zeroes by 6933 * ddi_soft_state_zalloc(). We assume that any member of the 6934 * soft state structure that is not explicitly initialized by 6935 * this routine will have a value of zero. 6936 */ 6937 instance = ddi_get_instance(devp->sd_dev); 6938 #ifndef XPV_HVM_DRIVER 6939 if (ddi_soft_state_zalloc(sd_state, instance) != DDI_SUCCESS) { 6940 goto probe_failed; 6941 } 6942 #endif /* !XPV_HVM_DRIVER */ 6943 6944 /* 6945 * Retrieve a pointer to the newly-allocated soft state. 6946 * 6947 * This should NEVER fail if the ddi_soft_state_zalloc() call above 6948 * was successful, unless something has gone horribly wrong and the 6949 * ddi's soft state internals are corrupt (in which case it is 6950 * probably better to halt here than just fail the attach....) 6951 */ 6952 if ((un = ddi_get_soft_state(sd_state, instance)) == NULL) { 6953 panic("sd_unit_attach: NULL soft state on instance:0x%x", 6954 instance); 6955 /*NOTREACHED*/ 6956 } 6957 6958 /* 6959 * Link the back ptr of the driver soft state to the scsi_device 6960 * struct for this lun. 6961 * Save a pointer to the softstate in the driver-private area of 6962 * the scsi_device struct. 6963 * Note: We cannot call SD_INFO, SD_TRACE, SD_ERROR, or SD_DIAG until 6964 * we first set un->un_sd below. 6965 */ 6966 un->un_sd = devp; 6967 devp->sd_private = (opaque_t)un; 6968 6969 /* 6970 * The following must be after devp is stored in the soft state struct. 6971 */ 6972 #ifdef SDDEBUG 6973 SD_TRACE(SD_LOG_ATTACH_DETACH, un, 6974 "%s_unit_attach: un:0x%p instance:%d\n", 6975 ddi_driver_name(devi), un, instance); 6976 #endif 6977 6978 /* 6979 * Set up the device type and node type (for the minor nodes). 6980 * By default we assume that the device can at least support the 6981 * Common Command Set. Call it a CD-ROM if it reports itself 6982 * as a RODIRECT device. 6983 */ 6984 switch (devp->sd_inq->inq_dtype) { 6985 case DTYPE_RODIRECT: 6986 un->un_node_type = DDI_NT_CD_CHAN; 6987 un->un_ctype = CTYPE_CDROM; 6988 break; 6989 case DTYPE_OPTICAL: 6990 un->un_node_type = DDI_NT_BLOCK_CHAN; 6991 un->un_ctype = CTYPE_ROD; 6992 break; 6993 default: 6994 un->un_node_type = DDI_NT_BLOCK_CHAN; 6995 un->un_ctype = CTYPE_CCS; 6996 break; 6997 } 6998 6999 /* 7000 * Try to read the interconnect type from the HBA. 7001 * 7002 * Note: This driver is currently compiled as two binaries, a parallel 7003 * scsi version (sd) and a fibre channel version (ssd). All functional 7004 * differences are determined at compile time. In the future a single 7005 * binary will be provided and the interconnect type will be used to 7006 * differentiate between fibre and parallel scsi behaviors. At that time 7007 * it will be necessary for all fibre channel HBAs to support this 7008 * property. 
     *
     * Set un_f_is_fibre to TRUE (default fibre).
     */
    un->un_f_is_fibre = TRUE;
    switch (scsi_ifgetcap(SD_ADDRESS(un), "interconnect-type", -1)) {
    case INTERCONNECT_SSA:
        un->un_interconnect_type = SD_INTERCONNECT_SSA;
        SD_INFO(SD_LOG_ATTACH_DETACH, un,
            "sd_unit_attach: un:0x%p SD_INTERCONNECT_SSA\n", un);
        break;
    case INTERCONNECT_PARALLEL:
        un->un_f_is_fibre = FALSE;
        un->un_interconnect_type = SD_INTERCONNECT_PARALLEL;
        SD_INFO(SD_LOG_ATTACH_DETACH, un,
            "sd_unit_attach: un:0x%p SD_INTERCONNECT_PARALLEL\n", un);
        break;
    case INTERCONNECT_SAS:
        un->un_f_is_fibre = FALSE;
        un->un_interconnect_type = SD_INTERCONNECT_SAS;
        un->un_node_type = DDI_NT_BLOCK_SAS;
        SD_INFO(SD_LOG_ATTACH_DETACH, un,
            "sd_unit_attach: un:0x%p SD_INTERCONNECT_SAS\n", un);
        break;
    case INTERCONNECT_SATA:
        un->un_f_is_fibre = FALSE;
        un->un_interconnect_type = SD_INTERCONNECT_SATA;
        SD_INFO(SD_LOG_ATTACH_DETACH, un,
            "sd_unit_attach: un:0x%p SD_INTERCONNECT_SATA\n", un);
        break;
    case INTERCONNECT_FIBRE:
        un->un_interconnect_type = SD_INTERCONNECT_FIBRE;
        SD_INFO(SD_LOG_ATTACH_DETACH, un,
            "sd_unit_attach: un:0x%p SD_INTERCONNECT_FIBRE\n", un);
        break;
    case INTERCONNECT_FABRIC:
        un->un_interconnect_type = SD_INTERCONNECT_FABRIC;
        un->un_node_type = DDI_NT_BLOCK_FABRIC;
        SD_INFO(SD_LOG_ATTACH_DETACH, un,
            "sd_unit_attach: un:0x%p SD_INTERCONNECT_FABRIC\n", un);
        break;
    default:
#ifdef SD_DEFAULT_INTERCONNECT_TYPE
        /*
         * The HBA does not support the "interconnect-type" property
         * (or did not provide a recognized type).
         *
         * Note: This will be obsoleted when a single fibre channel
         * and parallel scsi driver is delivered. In the meantime the
         * interconnect type will be set to the platform default. If
         * that type is not parallel SCSI, it means that we should be
         * assuming "ssd" semantics. However, here this also means that
         * the FC HBA is not supporting the "interconnect-type" property
         * like we expect it to, so log this occurrence.
         */
        un->un_interconnect_type = SD_DEFAULT_INTERCONNECT_TYPE;
        if (!SD_IS_PARALLEL_SCSI(un)) {
            SD_INFO(SD_LOG_ATTACH_DETACH, un,
                "sd_unit_attach: un:0x%p Assuming "
                "INTERCONNECT_FIBRE\n", un);
        } else {
            SD_INFO(SD_LOG_ATTACH_DETACH, un,
                "sd_unit_attach: un:0x%p Assuming "
                "INTERCONNECT_PARALLEL\n", un);
            un->un_f_is_fibre = FALSE;
        }
#else
        /*
         * Note: This code path will be used when a single fibre
         * channel and parallel scsi driver is delivered. The default
         * will be to assume that if a device does not support the
         * "interconnect-type" property it is a parallel SCSI HBA, and
         * we will set the interconnect type for parallel scsi.
7081 */ 7082 un->un_interconnect_type = SD_INTERCONNECT_PARALLEL; 7083 un->un_f_is_fibre = FALSE; 7084 #endif 7085 break; 7086 } 7087 7088 if (un->un_f_is_fibre == TRUE) { 7089 if (scsi_ifgetcap(SD_ADDRESS(un), "scsi-version", 1) == 7090 SCSI_VERSION_3) { 7091 switch (un->un_interconnect_type) { 7092 case SD_INTERCONNECT_FIBRE: 7093 case SD_INTERCONNECT_SSA: 7094 un->un_node_type = DDI_NT_BLOCK_WWN; 7095 break; 7096 default: 7097 break; 7098 } 7099 } 7100 } 7101 7102 /* 7103 * Initialize the Request Sense command for the target 7104 */ 7105 if (sd_alloc_rqs(devp, un) != DDI_SUCCESS) { 7106 goto alloc_rqs_failed; 7107 } 7108 7109 /* 7110 * Set un_retry_count with SD_RETRY_COUNT; this is fine for SPARC 7111 * with separate binaries for sd and ssd. 7112 * 7113 * x86 has one binary, and un_retry_count is set based on connection type. 7114 * The hardcoded values will go away when SPARC uses one binary 7115 * for sd and ssd. These hardcoded values need to match 7116 * SD_RETRY_COUNT in sddef.h. 7117 * The value used is based on interconnect type: 7118 * fibre = 3, parallel = 5. 7119 */ 7120 #if defined(__i386) || defined(__amd64) 7121 un->un_retry_count = un->un_f_is_fibre ? 3 : 5; 7122 #else 7123 un->un_retry_count = SD_RETRY_COUNT; 7124 #endif 7125 7126 /* 7127 * Set the per disk retry count to the default number of retries 7128 * for disks and CDROMs. This value can be overridden by the 7129 * disk property list or an entry in sd.conf. 7130 */ 7131 un->un_notready_retry_count = 7132 ISCD(un) ? CD_NOT_READY_RETRY_COUNT(un) 7133 : DISK_NOT_READY_RETRY_COUNT(un); 7134 7135 /* 7136 * Set the busy retry count to the default value of un_retry_count. 7137 * This can be overridden by entries in sd.conf or the device 7138 * config table. 7139 */ 7140 un->un_busy_retry_count = un->un_retry_count; 7141 7142 /* 7143 * Init the reset threshold for retries. This number determines 7144 * how many retries must be performed before a reset can be issued 7145 * (for certain error conditions). This can be overridden by entries 7146 * in sd.conf or the device config table. 7147 */ 7148 un->un_reset_retry_count = (un->un_retry_count / 2); 7149 7150 /* 7151 * Set the victim_retry_count to the default un_retry_count 7152 */ 7153 un->un_victim_retry_count = (2 * un->un_retry_count); 7154 7155 /* 7156 * Set the reservation release timeout to the default value of 7157 * 5 seconds. This can be overridden by entries in ssd.conf or the 7158 * device config table. 7159 */ 7160 un->un_reserve_release_time = 5; 7161 7162 /* 7163 * Set up the default maximum transfer size. Note that this may 7164 * get updated later in the attach, when setting up default wide 7165 * operations for disks. 7166 */ 7167 #if defined(__i386) || defined(__amd64) 7168 un->un_max_xfer_size = (uint_t)SD_DEFAULT_MAX_XFER_SIZE; 7169 un->un_partial_dma_supported = 1; 7170 #else 7171 un->un_max_xfer_size = (uint_t)maxphys; 7172 #endif 7173 7174 /* 7175 * Get "allow bus device reset" property (defaults to "enabled" if 7176 * the property was not defined). This is to disable bus resets for 7177 * certain kinds of error recovery. Note: In the future when a run-time 7178 * fibre check is available the soft state flag should default to 7179 * enabled.
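 * Concretely: fibre instances always enable it, while for others the
 * "allow-bus-device-reset" property is read with a default of 1, so only
 * an explicit driver.conf setting of 0 disables bus device resets.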
7180 */ 7181 if (un->un_f_is_fibre == TRUE) { 7182 un->un_f_allow_bus_device_reset = TRUE; 7183 } else { 7184 if (ddi_getprop(DDI_DEV_T_ANY, devi, DDI_PROP_DONTPASS, 7185 "allow-bus-device-reset", 1) != 0) { 7186 un->un_f_allow_bus_device_reset = TRUE; 7187 SD_INFO(SD_LOG_ATTACH_DETACH, un, 7188 "sd_unit_attach: un:0x%p Bus device reset " 7189 "enabled\n", un); 7190 } else { 7191 un->un_f_allow_bus_device_reset = FALSE; 7192 SD_INFO(SD_LOG_ATTACH_DETACH, un, 7193 "sd_unit_attach: un:0x%p Bus device reset " 7194 "disabled\n", un); 7195 } 7196 } 7197 7198 /* 7199 * Check if this is an ATAPI device. ATAPI devices use Group 1 7200 * Read/Write commands and Group 2 Mode Sense/Select commands. 7201 * 7202 * Note: The "obsolete" way of doing this is to check for the "atapi" 7203 * property. The new "variant" property with a value of "atapi" has been 7204 * introduced so that future 'variants' of standard SCSI behavior (like 7205 * atapi) could be specified by the underlying HBA drivers by supplying 7206 * a new value for the "variant" property, instead of having to define a 7207 * new property. 7208 */ 7209 if (ddi_prop_get_int(DDI_DEV_T_ANY, devi, 0, "atapi", -1) != -1) { 7210 un->un_f_cfg_is_atapi = TRUE; 7211 SD_INFO(SD_LOG_ATTACH_DETACH, un, 7212 "sd_unit_attach: un:0x%p Atapi device\n", un); 7213 } 7214 if (ddi_prop_lookup_string(DDI_DEV_T_ANY, devi, 0, "variant", 7215 &variantp) == DDI_PROP_SUCCESS) { 7216 if (strcmp(variantp, "atapi") == 0) { 7217 un->un_f_cfg_is_atapi = TRUE; 7218 SD_INFO(SD_LOG_ATTACH_DETACH, un, 7219 "sd_unit_attach: un:0x%p Atapi device\n", un); 7220 } 7221 ddi_prop_free(variantp); 7222 } 7223 7224 un->un_cmd_timeout = SD_IO_TIME; 7225 7226 un->un_busy_timeout = SD_BSY_TIMEOUT; 7227 7228 /* Info on current states, statuses, etc. (Updated frequently) */ 7229 un->un_state = SD_STATE_NORMAL; 7230 un->un_last_state = SD_STATE_NORMAL; 7231 7232 /* Control & status info for command throttling */ 7233 un->un_throttle = sd_max_throttle; 7234 un->un_saved_throttle = sd_max_throttle; 7235 un->un_min_throttle = sd_min_throttle; 7236 7237 if (un->un_f_is_fibre == TRUE) { 7238 un->un_f_use_adaptive_throttle = TRUE; 7239 } else { 7240 un->un_f_use_adaptive_throttle = FALSE; 7241 } 7242 7243 /* Removable media support. */ 7244 cv_init(&un->un_state_cv, NULL, CV_DRIVER, NULL); 7245 un->un_mediastate = DKIO_NONE; 7246 un->un_specified_mediastate = DKIO_NONE; 7247 7248 /* CVs for suspend/resume (PM or DR) */ 7249 cv_init(&un->un_suspend_cv, NULL, CV_DRIVER, NULL); 7250 cv_init(&un->un_disk_busy_cv, NULL, CV_DRIVER, NULL); 7251 7252 /* Power management support. */ 7253 un->un_power_level = SD_SPINDLE_UNINIT; 7254 7255 cv_init(&un->un_wcc_cv, NULL, CV_DRIVER, NULL); 7256 un->un_f_wcc_inprog = 0; 7257 7258 /* 7259 * The open/close semaphore is used to serialize threads executing 7260 * in the driver's open & close entry point routines for a given 7261 * instance. 7262 */ 7263 (void) sema_init(&un->un_semoclose, 1, NULL, SEMA_DRIVER, NULL); 7264 7265 /* 7266 * The conf file entry and softstate variable is a forceful override, 7267 * meaning a non-zero value must be entered to change the default. 7268 */ 7269 un->un_f_disksort_disabled = FALSE; 7270 7271 /* 7272 * Retrieve the properties from the static driver table or the driver 7273 * configuration file (.conf) for this unit and update the soft state 7274 * for the device as needed for the indicated properties. 
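 * (As a purely illustrative, hypothetical example, an sd.conf entry of the
 * form sd-config-list = "VENDOR  PRODUCT", "property-list"; pairs an INQUIRY
 * vid/pid string with a set of overrides; the exact syntax accepted is
 * defined by sd_read_unit_properties().)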
7275 * Note: the property configuration needs to occur here as some of the 7276 * following routines may have dependencies on soft state flags set 7277 * as part of the driver property configuration. 7278 */ 7279 sd_read_unit_properties(un); 7280 SD_TRACE(SD_LOG_ATTACH_DETACH, un, 7281 "sd_unit_attach: un:0x%p property configuration complete.\n", un); 7282 7283 /* 7284 * A device is treated as hotpluggable only if it has the 7285 * "hotpluggable" property. Otherwise, it is regarded as 7286 * non-hotpluggable. 7287 */ 7288 if (ddi_prop_get_int(DDI_DEV_T_ANY, devi, 0, "hotpluggable", 7289 -1) != -1) { 7290 un->un_f_is_hotpluggable = TRUE; 7291 } 7292 7293 /* 7294 * Set the unit's attributes (flags) according to "hotpluggable" and 7295 * the RMB bit in the INQUIRY data. 7296 */ 7297 sd_set_unit_attributes(un, devi); 7298 7299 /* 7300 * By default, we mark the capacity, lbasize, and geometry 7301 * as invalid. Only if we successfully read a valid capacity 7302 * will we update the un_blockcount and un_tgt_blocksize with the 7303 * valid values (the geometry will be validated later). 7304 */ 7305 un->un_f_blockcount_is_valid = FALSE; 7306 un->un_f_tgt_blocksize_is_valid = FALSE; 7307 7308 /* 7309 * Use DEV_BSIZE and DEV_BSHIFT as defaults, until we can determine 7310 * otherwise. 7311 */ 7312 un->un_tgt_blocksize = un->un_sys_blocksize = DEV_BSIZE; 7313 un->un_blockcount = 0; 7314 7315 /* 7316 * Set up the per-instance info needed to determine the correct 7317 * CDBs and other info for issuing commands to the target. 7318 */ 7319 sd_init_cdb_limits(un); 7320 7321 /* 7322 * Set up the IO chains to use, based upon the target type. 7323 */ 7324 if (un->un_f_non_devbsize_supported) { 7325 un->un_buf_chain_type = SD_CHAIN_INFO_RMMEDIA; 7326 } else { 7327 un->un_buf_chain_type = SD_CHAIN_INFO_DISK; 7328 } 7329 un->un_uscsi_chain_type = SD_CHAIN_INFO_USCSI_CMD; 7330 un->un_direct_chain_type = SD_CHAIN_INFO_DIRECT_CMD; 7331 un->un_priority_chain_type = SD_CHAIN_INFO_PRIORITY_CMD; 7332 7333 un->un_xbuf_attr = ddi_xbuf_attr_create(sizeof (struct sd_xbuf), 7334 sd_xbuf_strategy, un, sd_xbuf_active_limit, sd_xbuf_reserve_limit, 7335 ddi_driver_major(devi), DDI_XBUF_QTHREAD_DRIVER); 7336 ddi_xbuf_attr_register_devinfo(un->un_xbuf_attr, devi); 7337 7338 7339 if (ISCD(un)) { 7340 un->un_additional_codes = sd_additional_codes; 7341 } else { 7342 un->un_additional_codes = NULL; 7343 } 7344 7345 /* 7346 * Create the kstats here so they can be available for attach-time 7347 * routines that send commands to the unit (either polled or via 7348 * sd_send_scsi_cmd). 7349 * 7350 * Note: This is a critical sequence that needs to be maintained: 7351 * 1) Instantiate the kstats here, before any routines using the 7352 * iopath (i.e. sd_send_scsi_cmd). 7353 * 2) Instantiate and initialize the partition stats 7354 * (sd_set_pstats). 7355 * 3) Initialize the error stats (sd_set_errstats), following 7356 * sd_validate_geometry(), sd_register_devid(), 7357 * and sd_cache_control().
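 * The "disk" kstat created below is the KSTAT_TYPE_IO instance consumed by
 * utilities such as iostat(1M); its ks_lock is pointed at SD_MUTEX(un) so
 * that kstat updates serialize with the rest of the driver state.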
7358 */ 7359 7360 un->un_stats = kstat_create(sd_label, instance, 7361 NULL, "disk", KSTAT_TYPE_IO, 1, KSTAT_FLAG_PERSISTENT); 7362 if (un->un_stats != NULL) { 7363 un->un_stats->ks_lock = SD_MUTEX(un); 7364 kstat_install(un->un_stats); 7365 } 7366 SD_TRACE(SD_LOG_ATTACH_DETACH, un, 7367 "sd_unit_attach: un:0x%p un_stats created\n", un); 7368 7369 sd_create_errstats(un, instance); 7370 if (un->un_errstats == NULL) { 7371 goto create_errstats_failed; 7372 } 7373 SD_TRACE(SD_LOG_ATTACH_DETACH, un, 7374 "sd_unit_attach: un:0x%p errstats created\n", un); 7375 7376 /* 7377 * The following if/else code was relocated here from below as part 7378 * of the fix for bug (4430280). However with the default setup added 7379 * on entry to this routine, it's no longer absolutely necessary for 7380 * this to be before the call to sd_spin_up_unit. 7381 */ 7382 if (SD_IS_PARALLEL_SCSI(un) || SD_IS_SERIAL(un)) { 7383 int tq_trigger_flag = (((devp->sd_inq->inq_ansi == 4) || 7384 (devp->sd_inq->inq_ansi == 5)) && 7385 devp->sd_inq->inq_bque) || devp->sd_inq->inq_cmdque; 7386 7387 /* 7388 * If tagged queueing is supported by the target 7389 * and by the host adapter then we will enable it 7390 */ 7391 un->un_tagflags = 0; 7392 if ((devp->sd_inq->inq_rdf == RDF_SCSI2) && tq_trigger_flag && 7393 (un->un_f_arq_enabled == TRUE)) { 7394 if (scsi_ifsetcap(SD_ADDRESS(un), "tagged-qing", 7395 1, 1) == 1) { 7396 un->un_tagflags = FLAG_STAG; 7397 SD_INFO(SD_LOG_ATTACH_DETACH, un, 7398 "sd_unit_attach: un:0x%p tag queueing " 7399 "enabled\n", un); 7400 } else if (scsi_ifgetcap(SD_ADDRESS(un), 7401 "untagged-qing", 0) == 1) { 7402 un->un_f_opt_queueing = TRUE; 7403 un->un_saved_throttle = un->un_throttle = 7404 min(un->un_throttle, 3); 7405 } else { 7406 un->un_f_opt_queueing = FALSE; 7407 un->un_saved_throttle = un->un_throttle = 1; 7408 } 7409 } else if ((scsi_ifgetcap(SD_ADDRESS(un), "untagged-qing", 0) 7410 == 1) && (un->un_f_arq_enabled == TRUE)) { 7411 /* The Host Adapter supports internal queueing. */ 7412 un->un_f_opt_queueing = TRUE; 7413 un->un_saved_throttle = un->un_throttle = 7414 min(un->un_throttle, 3); 7415 } else { 7416 un->un_f_opt_queueing = FALSE; 7417 un->un_saved_throttle = un->un_throttle = 1; 7418 SD_INFO(SD_LOG_ATTACH_DETACH, un, 7419 "sd_unit_attach: un:0x%p no tag queueing\n", un); 7420 } 7421 7422 /* 7423 * Enable large transfers for SATA/SAS drives 7424 */ 7425 if (SD_IS_SERIAL(un)) { 7426 un->un_max_xfer_size = 7427 ddi_getprop(DDI_DEV_T_ANY, devi, 0, 7428 sd_max_xfer_size, SD_MAX_XFER_SIZE); 7429 SD_INFO(SD_LOG_ATTACH_DETACH, un, 7430 "sd_unit_attach: un:0x%p max transfer " 7431 "size=0x%x\n", un, un->un_max_xfer_size); 7432 7433 } 7434 7435 /* Setup or tear down default wide operations for disks */ 7436 7437 /* 7438 * Note: Legacy: it may be possible for both "sd_max_xfer_size" 7439 * and "ssd_max_xfer_size" to exist simultaneously on the same 7440 * system and be set to different values. In the future this 7441 * code may need to be updated when the ssd module is 7442 * obsoleted and removed from the system. 
(4299588) 7443 */ 7444 if (SD_IS_PARALLEL_SCSI(un) && 7445 (devp->sd_inq->inq_rdf == RDF_SCSI2) && 7446 (devp->sd_inq->inq_wbus16 || devp->sd_inq->inq_wbus32)) { 7447 if (scsi_ifsetcap(SD_ADDRESS(un), "wide-xfer", 7448 1, 1) == 1) { 7449 SD_INFO(SD_LOG_ATTACH_DETACH, un, 7450 "sd_unit_attach: un:0x%p Wide Transfer " 7451 "enabled\n", un); 7452 } 7453 7454 /* 7455 * If tagged queuing has also been enabled, then 7456 * enable large xfers 7457 */ 7458 if (un->un_saved_throttle == sd_max_throttle) { 7459 un->un_max_xfer_size = 7460 ddi_getprop(DDI_DEV_T_ANY, devi, 0, 7461 sd_max_xfer_size, SD_MAX_XFER_SIZE); 7462 SD_INFO(SD_LOG_ATTACH_DETACH, un, 7463 "sd_unit_attach: un:0x%p max transfer " 7464 "size=0x%x\n", un, un->un_max_xfer_size); 7465 } 7466 } else { 7467 if (scsi_ifsetcap(SD_ADDRESS(un), "wide-xfer", 7468 0, 1) == 1) { 7469 SD_INFO(SD_LOG_ATTACH_DETACH, un, 7470 "sd_unit_attach: un:0x%p " 7471 "Wide Transfer disabled\n", un); 7472 } 7473 } 7474 } else { 7475 un->un_tagflags = FLAG_STAG; 7476 un->un_max_xfer_size = ddi_getprop(DDI_DEV_T_ANY, 7477 devi, 0, sd_max_xfer_size, SD_MAX_XFER_SIZE); 7478 } 7479 7480 /* 7481 * If this target supports LUN reset, try to enable it. 7482 */ 7483 if (un->un_f_lun_reset_enabled) { 7484 if (scsi_ifsetcap(SD_ADDRESS(un), "lun-reset", 1, 1) == 1) { 7485 SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_unit_attach: " 7486 "un:0x%p lun_reset capability set\n", un); 7487 } else { 7488 SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_unit_attach: " 7489 "un:0x%p lun-reset capability not set\n", un); 7490 } 7491 } 7492 7493 /* 7494 * Adjust the maximum transfer size. This is to fix 7495 * the problem of partial DMA support on SPARC. Some 7496 * HBA drivers, such as aac, have a very small dma_attr_maxxfer 7497 * size, which requires partial DMA support on SPARC. 7498 * In the future the SPARC pci nexus driver may solve 7499 * the problem instead of this fix. 7500 */ 7501 max_xfer_size = scsi_ifgetcap(SD_ADDRESS(un), "dma-max", 1); 7502 if ((max_xfer_size > 0) && (max_xfer_size < un->un_max_xfer_size)) { 7503 /* We need DMA partial even on sparc to ensure sddump() works */ 7504 un->un_max_xfer_size = max_xfer_size; 7505 if (un->un_partial_dma_supported == 0) 7506 un->un_partial_dma_supported = 1; 7507 } 7508 if (ddi_prop_get_int(DDI_DEV_T_ANY, SD_DEVINFO(un), 7509 DDI_PROP_DONTPASS, "buf_break", 0) == 1) { 7510 if (ddi_xbuf_attr_setup_brk(un->un_xbuf_attr, 7511 un->un_max_xfer_size) == 1) { 7512 un->un_buf_breakup_supported = 1; 7513 SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_unit_attach: " 7514 "un:0x%p Buf breakup enabled\n", un); 7515 } 7516 } 7517 7518 /* 7519 * Set PKT_DMA_PARTIAL flag. 7520 */ 7521 if (un->un_partial_dma_supported == 1) { 7522 un->un_pkt_flags = PKT_DMA_PARTIAL; 7523 } else { 7524 un->un_pkt_flags = 0; 7525 } 7526 7527 /* Initialize sd_ssc_t for internal uscsi commands */ 7528 ssc = sd_ssc_init(un); 7529 scsi_fm_init(devp); 7530 7531 /* 7532 * Allocate memory for SCSI FMA structures. 7533 */ 7534 un->un_fm_private = 7535 kmem_zalloc(sizeof (struct sd_fm_internal), KM_SLEEP); 7536 sfip = (struct sd_fm_internal *)un->un_fm_private; 7537 sfip->fm_ssc.ssc_uscsi_cmd = &sfip->fm_ucmd; 7538 sfip->fm_ssc.ssc_uscsi_info = &sfip->fm_uinfo; 7539 sfip->fm_ssc.ssc_un = un; 7540 7541 if (ISCD(un) || 7542 un->un_f_has_removable_media || 7543 devp->sd_fm_capable == DDI_FM_NOT_CAPABLE) { 7544 /* 7545 * We don't touch CDROM or the DDI_FM_NOT_CAPABLE device. 7546 * Their logging is unchanged.
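 * (To summarize the assignments in this if/else: SD_FM_LOG_NSUP keeps the
 * legacy scsi_log behavior, SD_FM_LOG_EREPORT is used when "fm-scsi-log"
 * asks for the FM telemetry to appear in /var/adm/messages, and
 * SD_FM_LOG_SILENT is used otherwise.)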
7547 */ 7548 sfip->fm_log_level = SD_FM_LOG_NSUP; 7549 } else { 7550 /* 7551 * If we get here, this is a non-CDROM, FM-capable 7552 * device, and it will not keep the old scsi_log output 7553 * in /var/adm/messages as before. However, the property 7554 * "fm-scsi-log" will control whether the FM telemetry will 7555 * be logged in /var/adm/messages. 7556 */ 7557 int fm_scsi_log; 7558 fm_scsi_log = ddi_prop_get_int(DDI_DEV_T_ANY, SD_DEVINFO(un), 7559 DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, "fm-scsi-log", 0); 7560 7561 if (fm_scsi_log) 7562 sfip->fm_log_level = SD_FM_LOG_EREPORT; 7563 else 7564 sfip->fm_log_level = SD_FM_LOG_SILENT; 7565 } 7566 7567 /* 7568 * At this point in the attach, we have enough info in the 7569 * soft state to be able to issue commands to the target. 7570 * 7571 * All command paths used below MUST issue their commands as 7572 * SD_PATH_DIRECT. This is important as intermediate layers 7573 * are not all initialized yet (such as PM). 7574 */ 7575 7576 /* 7577 * Send a TEST UNIT READY command to the device. This should clear 7578 * any outstanding UNIT ATTENTION that may be present. 7579 * 7580 * Note: Don't check for success; just track whether there is a 7581 * reservation. This is a throw-away command to clear any unit attentions. 7582 * 7583 * Note: This MUST be the first command issued to the target during 7584 * attach to ensure power on UNIT ATTENTIONS are cleared. 7585 * Pass in flag SD_DONT_RETRY_TUR to prevent the long delays associated 7586 * with attempts at spinning up a device with no media. 7587 */ 7588 status = sd_send_scsi_TEST_UNIT_READY(ssc, SD_DONT_RETRY_TUR); 7589 if (status != 0) { 7590 if (status == EACCES) 7591 reservation_flag = SD_TARGET_IS_RESERVED; 7592 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 7593 } 7594 7595 /* 7596 * If the device is NOT a removable media device, attempt to spin 7597 * it up (using the START_STOP_UNIT command) and read its capacity 7598 * (using the READ CAPACITY command). Note, however, that either 7599 * of these could fail and in some cases we would continue with 7600 * the attach despite the failure (see below). 7601 */ 7602 if (un->un_f_descr_format_supported) { 7603 7604 switch (sd_spin_up_unit(ssc)) { 7605 case 0: 7606 /* 7607 * Spin-up was successful; now try to read the 7608 * capacity. If successful then save the results 7609 * and mark the capacity & lbasize as valid. 7610 */ 7611 SD_TRACE(SD_LOG_ATTACH_DETACH, un, 7612 "sd_unit_attach: un:0x%p spin-up successful\n", un); 7613 7614 status = sd_send_scsi_READ_CAPACITY(ssc, &capacity, 7615 &lbasize, SD_PATH_DIRECT); 7616 7617 switch (status) { 7618 case 0: { 7619 if (capacity > DK_MAX_BLOCKS) { 7620 #ifdef _LP64 7621 if ((capacity + 1) > 7622 SD_GROUP1_MAX_ADDRESS) { 7623 /* 7624 * Enable descriptor format 7625 * sense data so that we can 7626 * get 64 bit sense data 7627 * fields. 7628 */ 7629 sd_enable_descr_sense(ssc); 7630 } 7631 #else 7632 /* 32-bit kernels can't handle this */ 7633 scsi_log(SD_DEVINFO(un), 7634 sd_label, CE_WARN, 7635 "disk has %llu blocks, which " 7636 "is too large for a 32-bit " 7637 "kernel", capacity); 7638 7639 #if defined(__i386) || defined(__amd64) 7640 /* 7641 * A 1TB disk was treated as (1TB - 512B) 7642 * in the past, so that it might have 7643 * a valid VTOC and Solaris partitions; 7644 * we have to allow it to continue to 7645 * work.
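 * (With the traditional 512-byte block size, DK_MAX_BLOCKS corresponds to
 * the 1TB boundary described above, so the "capacity - 1" test below admits
 * a disk that is exactly one block over the limit.)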
7646 */ 7647 if (capacity - 1 > DK_MAX_BLOCKS) 7648 #endif 7649 goto spinup_failed; 7650 #endif 7651 } 7652 7653 /* 7654 * It is not necessary to check here whether the 7655 * capacity of the device is bigger than what the 7656 * max HBA CDB can support, because 7657 * sd_send_scsi_READ_CAPACITY retrieves the 7658 * capacity by sending a USCSI command, which is 7659 * itself constrained by the max HBA CDB. 7660 * sd_send_scsi_READ_CAPACITY returns EINVAL when 7661 * a bigger CDB than the supported CDB length is 7662 * required; that case is handled in 7663 * "case EINVAL" below. 7664 */ 7665 7666 /* 7667 * The following relies on 7668 * sd_send_scsi_READ_CAPACITY never 7669 * returning 0 for capacity and/or lbasize. 7670 */ 7671 sd_update_block_info(un, lbasize, capacity); 7672 7673 SD_INFO(SD_LOG_ATTACH_DETACH, un, 7674 "sd_unit_attach: un:0x%p capacity = %ld " 7675 "blocks; lbasize= %ld.\n", un, 7676 un->un_blockcount, un->un_tgt_blocksize); 7677 7678 break; 7679 } 7680 case EINVAL: 7681 /* 7682 * In the case where the max-cdb-length property 7683 * is smaller than the required CDB length for 7684 * a SCSI device, a target driver can fail to 7685 * attach to that device. 7686 */ 7687 scsi_log(SD_DEVINFO(un), 7688 sd_label, CE_WARN, 7689 "disk capacity is too large " 7690 "for current cdb length"); 7691 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 7692 7693 goto spinup_failed; 7694 case EACCES: 7695 /* 7696 * Should never get here if the spin-up 7697 * succeeded, but code it in anyway. 7698 * From here, just continue with the attach... 7699 */ 7700 SD_INFO(SD_LOG_ATTACH_DETACH, un, 7701 "sd_unit_attach: un:0x%p " 7702 "sd_send_scsi_READ_CAPACITY " 7703 "returned reservation conflict\n", un); 7704 reservation_flag = SD_TARGET_IS_RESERVED; 7705 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 7706 break; 7707 default: 7708 /* 7709 * Likewise, should never get here if the 7710 * spin-up succeeded. Just continue with 7711 * the attach... 7712 */ 7713 if (status == EIO) 7714 sd_ssc_assessment(ssc, 7715 SD_FMT_STATUS_CHECK); 7716 else 7717 sd_ssc_assessment(ssc, 7718 SD_FMT_IGNORE); 7719 break; 7720 } 7721 break; 7722 case EACCES: 7723 /* 7724 * Device is reserved by another host. In this case 7725 * we could not spin it up or read the capacity, but 7726 * we continue with the attach anyway. 7727 */ 7728 SD_INFO(SD_LOG_ATTACH_DETACH, un, 7729 "sd_unit_attach: un:0x%p spin-up reservation " 7730 "conflict.\n", un); 7731 reservation_flag = SD_TARGET_IS_RESERVED; 7732 break; 7733 default: 7734 /* Fail the attach if the spin-up failed.
*/ 7735 SD_INFO(SD_LOG_ATTACH_DETACH, un, 7736 "sd_unit_attach: un:0x%p spin-up failed.", un); 7737 goto spinup_failed; 7738 } 7739 7740 } 7741 7742 /* 7743 * Check to see if this is an MMC drive 7744 */ 7745 if (ISCD(un)) { 7746 sd_set_mmc_caps(ssc); 7747 } 7748 7749 7750 /* 7751 * Add a zero-length attribute to tell the world we support 7752 * kernel ioctls (for layered drivers) 7753 */ 7754 (void) ddi_prop_create(DDI_DEV_T_NONE, devi, DDI_PROP_CANSLEEP, 7755 DDI_KERNEL_IOCTL, NULL, 0); 7756 7757 /* 7758 * Add a boolean property to tell the world we support 7759 * the B_FAILFAST flag (for layered drivers) 7760 */ 7761 (void) ddi_prop_create(DDI_DEV_T_NONE, devi, DDI_PROP_CANSLEEP, 7762 "ddi-failfast-supported", NULL, 0); 7763 7764 /* 7765 * Initialize power management 7766 */ 7767 mutex_init(&un->un_pm_mutex, NULL, MUTEX_DRIVER, NULL); 7768 cv_init(&un->un_pm_busy_cv, NULL, CV_DRIVER, NULL); 7769 sd_setup_pm(ssc, devi); 7770 if (un->un_f_pm_is_enabled == FALSE) { 7771 /* 7772 * For performance, point to a jump table that does 7773 * not include pm. 7774 * The direct and priority chains don't change with PM. 7775 * 7776 * Note: this is currently done based on individual device 7777 * capabilities. When an interface for determining system 7778 * power enabled state becomes available, or when additional 7779 * layers are added to the command chain, these values will 7780 * have to be re-evaluated for correctness. 7781 */ 7782 if (un->un_f_non_devbsize_supported) { 7783 un->un_buf_chain_type = SD_CHAIN_INFO_RMMEDIA_NO_PM; 7784 } else { 7785 un->un_buf_chain_type = SD_CHAIN_INFO_DISK_NO_PM; 7786 } 7787 un->un_uscsi_chain_type = SD_CHAIN_INFO_USCSI_CMD_NO_PM; 7788 } 7789 7790 /* 7791 * This property is set to 0 by HA software to avoid retries 7792 * on a reserved disk. (The preferred property name is 7793 * "retry-on-reservation-conflict") (1189689) 7794 * 7795 * Note: The use of a global here can have unintended consequences. A 7796 * per-instance variable is preferable to match the capabilities of 7797 * different underlying HBAs. (4402600) 7798 */ 7799 sd_retry_on_reservation_conflict = ddi_getprop(DDI_DEV_T_ANY, devi, 7800 DDI_PROP_DONTPASS, "retry-on-reservation-conflict", 7801 sd_retry_on_reservation_conflict); 7802 if (sd_retry_on_reservation_conflict != 0) { 7803 sd_retry_on_reservation_conflict = ddi_getprop(DDI_DEV_T_ANY, 7804 devi, DDI_PROP_DONTPASS, sd_resv_conflict_name, 7805 sd_retry_on_reservation_conflict); 7806 } 7807 7808 /* Set up options for QFULL handling. */ 7809 if ((rval = ddi_getprop(DDI_DEV_T_ANY, devi, 0, 7810 "qfull-retries", -1)) != -1) { 7811 (void) scsi_ifsetcap(SD_ADDRESS(un), "qfull-retries", 7812 rval, 1); 7813 } 7814 if ((rval = ddi_getprop(DDI_DEV_T_ANY, devi, 0, 7815 "qfull-retry-interval", -1)) != -1) { 7816 (void) scsi_ifsetcap(SD_ADDRESS(un), "qfull-retry-interval", 7817 rval, 1); 7818 } 7819 7820 /* 7821 * This just prints a message that announces the existence of the 7822 * device. The message is always printed in the system logfile, but 7823 * only appears on the console if the system is booted with the 7824 * -v (verbose) argument.
7825 */ 7826 ddi_report_dev(devi); 7827 7828 un->un_mediastate = DKIO_NONE; 7829 7830 cmlb_alloc_handle(&un->un_cmlbhandle); 7831 7832 #if defined(__i386) || defined(__amd64) 7833 /* 7834 * On x86, compensate for off-by-1 legacy error 7835 */ 7836 if (!un->un_f_has_removable_media && !un->un_f_is_hotpluggable && 7837 (lbasize == un->un_sys_blocksize)) 7838 offbyone = CMLB_OFF_BY_ONE; 7839 #endif 7840 7841 if (cmlb_attach(devi, &sd_tgops, (int)devp->sd_inq->inq_dtype, 7842 VOID2BOOLEAN(un->un_f_has_removable_media != 0), 7843 VOID2BOOLEAN(un->un_f_is_hotpluggable != 0), 7844 un->un_node_type, offbyone, un->un_cmlbhandle, 7845 (void *)SD_PATH_DIRECT) != 0) { 7846 goto cmlb_attach_failed; 7847 } 7848 7849 7850 /* 7851 * Read and validate the device's geometry (ie, disk label) 7852 * A new unformatted drive will not have a valid geometry, but 7853 * the driver needs to successfully attach to this device so 7854 * the drive can be formatted via ioctls. 7855 */ 7856 geom_label_valid = (cmlb_validate(un->un_cmlbhandle, 0, 7857 (void *)SD_PATH_DIRECT) == 0) ? 1: 0; 7858 7859 mutex_enter(SD_MUTEX(un)); 7860 7861 /* 7862 * Read and initialize the devid for the unit. 7863 */ 7864 if (un->un_f_devid_supported) { 7865 sd_register_devid(ssc, devi, reservation_flag); 7866 } 7867 mutex_exit(SD_MUTEX(un)); 7868 7869 #if (defined(__fibre)) 7870 /* 7871 * Register callbacks for fibre only. You can't do this solely 7872 * on the basis of the devid_type because this is hba specific. 7873 * We need to query our hba capabilities to find out whether to 7874 * register or not. 7875 */ 7876 if (un->un_f_is_fibre) { 7877 if (strcmp(un->un_node_type, DDI_NT_BLOCK_CHAN)) { 7878 sd_init_event_callbacks(un); 7879 SD_TRACE(SD_LOG_ATTACH_DETACH, un, 7880 "sd_unit_attach: un:0x%p event callbacks inserted", 7881 un); 7882 } 7883 } 7884 #endif 7885 7886 if (un->un_f_opt_disable_cache == TRUE) { 7887 /* 7888 * Disable both read cache and write cache. This is 7889 * the historic behavior of the keywords in the config file. 7890 */ 7891 if (sd_cache_control(ssc, SD_CACHE_DISABLE, SD_CACHE_DISABLE) != 7892 0) { 7893 SD_ERROR(SD_LOG_ATTACH_DETACH, un, 7894 "sd_unit_attach: un:0x%p Could not disable " 7895 "caching", un); 7896 goto devid_failed; 7897 } 7898 } 7899 7900 /* 7901 * Check the value of the WCE bit now and 7902 * set un_f_write_cache_enabled accordingly. 7903 */ 7904 (void) sd_get_write_cache_enabled(ssc, &wc_enabled); 7905 mutex_enter(SD_MUTEX(un)); 7906 un->un_f_write_cache_enabled = (wc_enabled != 0); 7907 mutex_exit(SD_MUTEX(un)); 7908 7909 /* 7910 * Check the value of the NV_SUP bit and set 7911 * un_f_suppress_cache_flush accordingly. 7912 */ 7913 sd_get_nv_sup(ssc); 7914 7915 /* 7916 * Find out what type of reservation this disk supports. 7917 */ 7918 status = sd_send_scsi_PERSISTENT_RESERVE_IN(ssc, SD_READ_KEYS, 0, NULL); 7919 7920 switch (status) { 7921 case 0: 7922 /* 7923 * SCSI-3 reservations are supported. 7924 */ 7925 un->un_reservation_type = SD_SCSI3_RESERVATION; 7926 SD_INFO(SD_LOG_ATTACH_DETACH, un, 7927 "sd_unit_attach: un:0x%p SCSI-3 reservations\n", un); 7928 break; 7929 case ENOTSUP: 7930 /* 7931 * The PERSISTENT RESERVE IN command would not be recognized by 7932 * a SCSI-2 device, so assume the reservation type is SCSI-2. 
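 * (The probe technique here keys off the completion status: success of the
 * PERSISTENT RESERVE IN (READ KEYS) above selects SD_SCSI3_RESERVATION,
 * while ENOTSUP selects the SCSI-2 RESERVE/RELEASE style.)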
7933 */ 7934 SD_INFO(SD_LOG_ATTACH_DETACH, un, 7935 "sd_unit_attach: un:0x%p SCSI-2 reservations\n", un); 7936 un->un_reservation_type = SD_SCSI2_RESERVATION; 7937 7938 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 7939 break; 7940 default: 7941 /* 7942 * default to SCSI-3 reservations 7943 */ 7944 SD_INFO(SD_LOG_ATTACH_DETACH, un, 7945 "sd_unit_attach: un:0x%p default SCSI3 reservations\n", un); 7946 un->un_reservation_type = SD_SCSI3_RESERVATION; 7947 7948 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 7949 break; 7950 } 7951 7952 /* 7953 * Set the pstat and error stat values here, so data obtained during the 7954 * previous attach-time routines is available. 7955 * 7956 * Note: This is a critical sequence that needs to be maintained: 7957 * 1) Instantiate the kstats before any routines using the iopath 7958 * (i.e. sd_send_scsi_cmd). 7959 * 2) Initialize the error stats (sd_set_errstats) and partition 7960 * stats (sd_set_pstats)here, following 7961 * cmlb_validate_geometry(), sd_register_devid(), and 7962 * sd_cache_control(). 7963 */ 7964 7965 if (un->un_f_pkstats_enabled && geom_label_valid) { 7966 sd_set_pstats(un); 7967 SD_TRACE(SD_LOG_IO_PARTITION, un, 7968 "sd_unit_attach: un:0x%p pstats created and set\n", un); 7969 } 7970 7971 sd_set_errstats(un); 7972 SD_TRACE(SD_LOG_ATTACH_DETACH, un, 7973 "sd_unit_attach: un:0x%p errstats set\n", un); 7974 7975 7976 /* 7977 * After successfully attaching an instance, we record the information 7978 * of how many luns have been attached on the relative target and 7979 * controller for parallel SCSI. This information is used when sd tries 7980 * to set the tagged queuing capability in HBA. 7981 */ 7982 if (SD_IS_PARALLEL_SCSI(un) && (tgt >= 0) && (tgt < NTARGETS_WIDE)) { 7983 sd_scsi_update_lun_on_target(pdip, tgt, SD_SCSI_LUN_ATTACH); 7984 } 7985 7986 SD_TRACE(SD_LOG_ATTACH_DETACH, un, 7987 "sd_unit_attach: un:0x%p exit success\n", un); 7988 7989 /* Uninitialize sd_ssc_t pointer */ 7990 sd_ssc_fini(ssc); 7991 7992 return (DDI_SUCCESS); 7993 7994 /* 7995 * An error occurred during the attach; clean up & return failure. 7996 */ 7997 7998 devid_failed: 7999 8000 setup_pm_failed: 8001 ddi_remove_minor_node(devi, NULL); 8002 8003 cmlb_attach_failed: 8004 /* 8005 * Cleanup from the scsi_ifsetcap() calls (437868) 8006 */ 8007 (void) scsi_ifsetcap(SD_ADDRESS(un), "lun-reset", 0, 1); 8008 (void) scsi_ifsetcap(SD_ADDRESS(un), "wide-xfer", 0, 1); 8009 8010 /* 8011 * Refer to the comments of setting tagged-qing in the beginning of 8012 * sd_unit_attach. We can only disable tagged queuing when there is 8013 * no lun attached on the target. 8014 */ 8015 if (sd_scsi_get_target_lun_count(pdip, tgt) < 1) { 8016 (void) scsi_ifsetcap(SD_ADDRESS(un), "tagged-qing", 0, 1); 8017 } 8018 8019 if (un->un_f_is_fibre == FALSE) { 8020 (void) scsi_ifsetcap(SD_ADDRESS(un), "auto-rqsense", 0, 1); 8021 } 8022 8023 spinup_failed: 8024 8025 /* Uninitialize sd_ssc_t pointer */ 8026 sd_ssc_fini(ssc); 8027 8028 mutex_enter(SD_MUTEX(un)); 8029 8030 /* Deallocate SCSI FMA memory spaces */ 8031 kmem_free(un->un_fm_private, sizeof (struct sd_fm_internal)); 8032 8033 /* Cancel callback for SD_PATH_DIRECT_PRIORITY cmd. 
restart */ 8034 if (un->un_direct_priority_timeid != NULL) { 8035 timeout_id_t temp_id = un->un_direct_priority_timeid; 8036 un->un_direct_priority_timeid = NULL; 8037 mutex_exit(SD_MUTEX(un)); 8038 (void) untimeout(temp_id); 8039 mutex_enter(SD_MUTEX(un)); 8040 } 8041 8042 /* Cancel any pending start/stop timeouts */ 8043 if (un->un_startstop_timeid != NULL) { 8044 timeout_id_t temp_id = un->un_startstop_timeid; 8045 un->un_startstop_timeid = NULL; 8046 mutex_exit(SD_MUTEX(un)); 8047 (void) untimeout(temp_id); 8048 mutex_enter(SD_MUTEX(un)); 8049 } 8050 8051 /* Cancel any pending reset-throttle timeouts */ 8052 if (un->un_reset_throttle_timeid != NULL) { 8053 timeout_id_t temp_id = un->un_reset_throttle_timeid; 8054 un->un_reset_throttle_timeid = NULL; 8055 mutex_exit(SD_MUTEX(un)); 8056 (void) untimeout(temp_id); 8057 mutex_enter(SD_MUTEX(un)); 8058 } 8059 8060 /* Cancel any pending retry timeouts */ 8061 if (un->un_retry_timeid != NULL) { 8062 timeout_id_t temp_id = un->un_retry_timeid; 8063 un->un_retry_timeid = NULL; 8064 mutex_exit(SD_MUTEX(un)); 8065 (void) untimeout(temp_id); 8066 mutex_enter(SD_MUTEX(un)); 8067 } 8068 8069 /* Cancel any pending delayed cv broadcast timeouts */ 8070 if (un->un_dcvb_timeid != NULL) { 8071 timeout_id_t temp_id = un->un_dcvb_timeid; 8072 un->un_dcvb_timeid = NULL; 8073 mutex_exit(SD_MUTEX(un)); 8074 (void) untimeout(temp_id); 8075 mutex_enter(SD_MUTEX(un)); 8076 } 8077 8078 mutex_exit(SD_MUTEX(un)); 8079 8080 /* There should not be any in-progress I/O so ASSERT this check */ 8081 ASSERT(un->un_ncmds_in_transport == 0); 8082 ASSERT(un->un_ncmds_in_driver == 0); 8083 8084 /* Do not free the softstate if the callback routine is active */ 8085 sd_sync_with_callback(un); 8086 8087 /* 8088 * Partition stats apparently are not used with removables. These would 8089 * not have been created during attach, so no need to clean them up... 8090 */ 8091 if (un->un_errstats != NULL) { 8092 kstat_delete(un->un_errstats); 8093 un->un_errstats = NULL; 8094 } 8095 8096 create_errstats_failed: 8097 8098 if (un->un_stats != NULL) { 8099 kstat_delete(un->un_stats); 8100 un->un_stats = NULL; 8101 } 8102 8103 ddi_xbuf_attr_unregister_devinfo(un->un_xbuf_attr, devi); 8104 ddi_xbuf_attr_destroy(un->un_xbuf_attr); 8105 8106 ddi_prop_remove_all(devi); 8107 sema_destroy(&un->un_semoclose); 8108 cv_destroy(&un->un_state_cv); 8109 8110 getrbuf_failed: 8111 8112 sd_free_rqs(un); 8113 8114 alloc_rqs_failed: 8115 8116 devp->sd_private = NULL; 8117 bzero(un, sizeof (struct sd_lun)); /* Clear any stale data! */ 8118 8119 get_softstate_failed: 8120 /* 8121 * Note: the man pages are unclear as to whether or not doing a 8122 * ddi_soft_state_free(sd_state, instance) is the right way to 8123 * clean up after the ddi_soft_state_zalloc() if the subsequent 8124 * ddi_get_soft_state() fails. The implication seems to be 8125 * that the get_soft_state cannot fail if the zalloc succeeds. 8126 */ 8127 #ifndef XPV_HVM_DRIVER 8128 ddi_soft_state_free(sd_state, instance); 8129 #endif /* !XPV_HVM_DRIVER */ 8130 8131 probe_failed: 8132 scsi_unprobe(devp); 8133 8134 return (DDI_FAILURE); 8135 } 8136 8137 8138 /* 8139 * Function: sd_unit_detach 8140 * 8141 * Description: Performs DDI_DETACH processing for sddetach(). 
8142 * 8143 * Return Code: DDI_SUCCESS 8144 * DDI_FAILURE 8145 * 8146 * Context: Kernel thread context 8147 */ 8148 8149 static int 8150 sd_unit_detach(dev_info_t *devi) 8151 { 8152 struct scsi_device *devp; 8153 struct sd_lun *un; 8154 int i; 8155 int tgt; 8156 dev_t dev; 8157 dev_info_t *pdip = ddi_get_parent(devi); 8158 #ifndef XPV_HVM_DRIVER 8159 int instance = ddi_get_instance(devi); 8160 #endif /* !XPV_HVM_DRIVER */ 8161 8162 mutex_enter(&sd_detach_mutex); 8163 8164 /* 8165 * Fail the detach for any of the following: 8166 * - Unable to get the sd_lun struct for the instance 8167 * - A layered driver has an outstanding open on the instance 8168 * - Another thread is already detaching this instance 8169 * - Another thread is currently performing an open 8170 */ 8171 devp = ddi_get_driver_private(devi); 8172 if ((devp == NULL) || 8173 ((un = (struct sd_lun *)devp->sd_private) == NULL) || 8174 (un->un_ncmds_in_driver != 0) || (un->un_layer_count != 0) || 8175 (un->un_detach_count != 0) || (un->un_opens_in_progress != 0)) { 8176 mutex_exit(&sd_detach_mutex); 8177 return (DDI_FAILURE); 8178 } 8179 8180 SD_TRACE(SD_LOG_ATTACH_DETACH, un, "sd_unit_detach: entry 0x%p\n", un); 8181 8182 /* 8183 * Mark this instance as currently in a detach, to inhibit any 8184 * opens from a layered driver. 8185 */ 8186 un->un_detach_count++; 8187 mutex_exit(&sd_detach_mutex); 8188 8189 tgt = ddi_prop_get_int(DDI_DEV_T_ANY, devi, DDI_PROP_DONTPASS, 8190 SCSI_ADDR_PROP_TARGET, -1); 8191 8192 dev = sd_make_device(SD_DEVINFO(un)); 8193 8194 #ifndef lint 8195 _NOTE(COMPETING_THREADS_NOW); 8196 #endif 8197 8198 mutex_enter(SD_MUTEX(un)); 8199 8200 /* 8201 * Fail the detach if there are any outstanding layered 8202 * opens on this device. 8203 */ 8204 for (i = 0; i < NDKMAP; i++) { 8205 if (un->un_ocmap.lyropen[i] != 0) { 8206 goto err_notclosed; 8207 } 8208 } 8209 8210 /* 8211 * Verify there are NO outstanding commands issued to this device. 8212 * ie, un_ncmds_in_transport == 0. 8213 * It's possible to have outstanding commands through the physio 8214 * code path, even though everything's closed. 8215 */ 8216 if ((un->un_ncmds_in_transport != 0) || (un->un_retry_timeid != NULL) || 8217 (un->un_direct_priority_timeid != NULL) || 8218 (un->un_state == SD_STATE_RWAIT)) { 8219 mutex_exit(SD_MUTEX(un)); 8220 SD_ERROR(SD_LOG_ATTACH_DETACH, un, 8221 "sd_dr_detach: Detach failure due to outstanding cmds\n"); 8222 goto err_stillbusy; 8223 } 8224 8225 /* 8226 * If we have the device reserved, release the reservation. 8227 */ 8228 if ((un->un_resvd_status & SD_RESERVE) && 8229 !(un->un_resvd_status & SD_LOST_RESERVE)) { 8230 mutex_exit(SD_MUTEX(un)); 8231 /* 8232 * Note: sd_reserve_release sends a command to the device 8233 * via the sd_ioctlcmd() path, and can sleep. 8234 */ 8235 if (sd_reserve_release(dev, SD_RELEASE) != 0) { 8236 SD_ERROR(SD_LOG_ATTACH_DETACH, un, 8237 "sd_dr_detach: Cannot release reservation \n"); 8238 } 8239 } else { 8240 mutex_exit(SD_MUTEX(un)); 8241 } 8242 8243 /* 8244 * Untimeout any reserve recover, throttle reset, restart unit 8245 * and delayed broadcast timeout threads. Protect the timeout pointer 8246 * from getting nulled by their callback functions. 
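 * The pattern used for each timeout below is deliberate: copy the id to a
 * local, NULL the field under SD_MUTEX, drop the mutex, and only then call
 * untimeout(9F), since untimeout() may wait for a callback that itself
 * needs SD_MUTEX.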
8247 */ 8248 mutex_enter(SD_MUTEX(un)); 8249 if (un->un_resvd_timeid != NULL) { 8250 timeout_id_t temp_id = un->un_resvd_timeid; 8251 un->un_resvd_timeid = NULL; 8252 mutex_exit(SD_MUTEX(un)); 8253 (void) untimeout(temp_id); 8254 mutex_enter(SD_MUTEX(un)); 8255 } 8256 8257 if (un->un_reset_throttle_timeid != NULL) { 8258 timeout_id_t temp_id = un->un_reset_throttle_timeid; 8259 un->un_reset_throttle_timeid = NULL; 8260 mutex_exit(SD_MUTEX(un)); 8261 (void) untimeout(temp_id); 8262 mutex_enter(SD_MUTEX(un)); 8263 } 8264 8265 if (un->un_startstop_timeid != NULL) { 8266 timeout_id_t temp_id = un->un_startstop_timeid; 8267 un->un_startstop_timeid = NULL; 8268 mutex_exit(SD_MUTEX(un)); 8269 (void) untimeout(temp_id); 8270 mutex_enter(SD_MUTEX(un)); 8271 } 8272 8273 if (un->un_dcvb_timeid != NULL) { 8274 timeout_id_t temp_id = un->un_dcvb_timeid; 8275 un->un_dcvb_timeid = NULL; 8276 mutex_exit(SD_MUTEX(un)); 8277 (void) untimeout(temp_id); 8278 } else { 8279 mutex_exit(SD_MUTEX(un)); 8280 } 8281 8282 /* Remove any pending reservation reclaim requests for this device */ 8283 sd_rmv_resv_reclaim_req(dev); 8284 8285 mutex_enter(SD_MUTEX(un)); 8286 8287 /* Cancel any pending callbacks for SD_PATH_DIRECT_PRIORITY cmd. */ 8288 if (un->un_direct_priority_timeid != NULL) { 8289 timeout_id_t temp_id = un->un_direct_priority_timeid; 8290 un->un_direct_priority_timeid = NULL; 8291 mutex_exit(SD_MUTEX(un)); 8292 (void) untimeout(temp_id); 8293 mutex_enter(SD_MUTEX(un)); 8294 } 8295 8296 /* Cancel any active multi-host disk watch thread requests */ 8297 if (un->un_mhd_token != NULL) { 8298 mutex_exit(SD_MUTEX(un)); 8299 _NOTE(DATA_READABLE_WITHOUT_LOCK(sd_lun::un_mhd_token)); 8300 if (scsi_watch_request_terminate(un->un_mhd_token, 8301 SCSI_WATCH_TERMINATE_NOWAIT)) { 8302 SD_ERROR(SD_LOG_ATTACH_DETACH, un, 8303 "sd_dr_detach: Cannot cancel mhd watch request\n"); 8304 /* 8305 * Note: We are returning here after having removed 8306 * some driver timeouts above. This is consistent with 8307 * the legacy implementation but perhaps the watch 8308 * terminate call should be made with the wait flag set. 8309 */ 8310 goto err_stillbusy; 8311 } 8312 mutex_enter(SD_MUTEX(un)); 8313 un->un_mhd_token = NULL; 8314 } 8315 8316 if (un->un_swr_token != NULL) { 8317 mutex_exit(SD_MUTEX(un)); 8318 _NOTE(DATA_READABLE_WITHOUT_LOCK(sd_lun::un_swr_token)); 8319 if (scsi_watch_request_terminate(un->un_swr_token, 8320 SCSI_WATCH_TERMINATE_NOWAIT)) { 8321 SD_ERROR(SD_LOG_ATTACH_DETACH, un, 8322 "sd_dr_detach: Cannot cancel swr watch request\n"); 8323 /* 8324 * Note: We are returning here after having removed 8325 * some driver timeouts above. This is consistent with 8326 * the legacy implementation but perhaps the watch 8327 * terminate call should be made with the wait flag set. 8328 */ 8329 goto err_stillbusy; 8330 } 8331 mutex_enter(SD_MUTEX(un)); 8332 un->un_swr_token = NULL; 8333 } 8334 8335 mutex_exit(SD_MUTEX(un)); 8336 8337 /* 8338 * Clear any scsi_reset_notifies. We clear the reset notifies 8339 * if we have not registered one. 8340 * Note: The sd_mhd_reset_notify_cb() fn tries to acquire SD_MUTEX! 8341 */ 8342 (void) scsi_reset_notify(SD_ADDRESS(un), SCSI_RESET_CANCEL, 8343 sd_mhd_reset_notify_cb, (caddr_t)un); 8344 8345 /* 8346 * protect the timeout pointers from getting nulled by 8347 * their callback functions during the cancellation process. 8348 * In such a scenario untimeout can be invoked with a null value. 
8349 */ 8350 _NOTE(NO_COMPETING_THREADS_NOW); 8351 8352 mutex_enter(&un->un_pm_mutex); 8353 if (un->un_pm_idle_timeid != NULL) { 8354 timeout_id_t temp_id = un->un_pm_idle_timeid; 8355 un->un_pm_idle_timeid = NULL; 8356 mutex_exit(&un->un_pm_mutex); 8357 8358 /* 8359 * Timeout is active; cancel it. 8360 * Note that it'll never be active on a device 8361 * that does not support PM therefore we don't 8362 * have to check before calling pm_idle_component. 8363 */ 8364 (void) untimeout(temp_id); 8365 (void) pm_idle_component(SD_DEVINFO(un), 0); 8366 mutex_enter(&un->un_pm_mutex); 8367 } 8368 8369 /* 8370 * Check whether there is already a timeout scheduled for power 8371 * management. If yes then don't lower the power here, that's 8372 * the timeout handler's job. 8373 */ 8374 if (un->un_pm_timeid != NULL) { 8375 timeout_id_t temp_id = un->un_pm_timeid; 8376 un->un_pm_timeid = NULL; 8377 mutex_exit(&un->un_pm_mutex); 8378 /* 8379 * Timeout is active; cancel it. 8380 * Note that it'll never be active on a device 8381 * that does not support PM therefore we don't 8382 * have to check before calling pm_idle_component. 8383 */ 8384 (void) untimeout(temp_id); 8385 (void) pm_idle_component(SD_DEVINFO(un), 0); 8386 8387 } else { 8388 mutex_exit(&un->un_pm_mutex); 8389 if ((un->un_f_pm_is_enabled == TRUE) && 8390 (pm_lower_power(SD_DEVINFO(un), 0, SD_SPINDLE_OFF) != 8391 DDI_SUCCESS)) { 8392 SD_ERROR(SD_LOG_ATTACH_DETACH, un, 8393 "sd_dr_detach: Lower power request failed, ignoring.\n"); 8394 /* 8395 * Fix for bug: 4297749, item # 13 8396 * The above test now includes a check to see if PM is 8397 * supported by this device before calling 8398 * pm_lower_power(). 8399 * Note, the following is not dead code. The call to 8400 * pm_lower_power above will generate a call back into 8401 * our sdpower routine which might result in a timeout 8402 * handler getting activated. Therefore the following 8403 * code is valid and necessary. 8404 */ 8405 mutex_enter(&un->un_pm_mutex); 8406 if (un->un_pm_timeid != NULL) { 8407 timeout_id_t temp_id = un->un_pm_timeid; 8408 un->un_pm_timeid = NULL; 8409 mutex_exit(&un->un_pm_mutex); 8410 (void) untimeout(temp_id); 8411 (void) pm_idle_component(SD_DEVINFO(un), 0); 8412 } else { 8413 mutex_exit(&un->un_pm_mutex); 8414 } 8415 } 8416 } 8417 8418 /* 8419 * Cleanup from the scsi_ifsetcap() calls (437868) 8420 * Relocated here from above to be after the call to 8421 * pm_lower_power, which was getting errors. 8422 */ 8423 (void) scsi_ifsetcap(SD_ADDRESS(un), "lun-reset", 0, 1); 8424 (void) scsi_ifsetcap(SD_ADDRESS(un), "wide-xfer", 0, 1); 8425 8426 /* 8427 * Currently, tagged queuing is supported per target by the HBA. 8428 * Setting this per lun instance actually sets the capability of this 8429 * target in the HBA, which affects those luns already attached on the 8430 * same target. So during detach, we can disable this capability 8431 * only when this is the only lun left on this target. By doing 8432 * this, we assume a target has the same tagged queuing capability 8433 * for every lun. The condition can be removed when the HBA is changed 8434 * to support per-lun based tagged queuing capability.
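 * (sd_scsi_get_target_lun_count() below reports how many sd luns are still
 * attached on this target; "tagged-qing" is cleared via scsi_ifsetcap()
 * only when this instance is the last one.)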
8435 */ 8436 if (sd_scsi_get_target_lun_count(pdip, tgt) <= 1) { 8437 (void) scsi_ifsetcap(SD_ADDRESS(un), "tagged-qing", 0, 1); 8438 } 8439 8440 if (un->un_f_is_fibre == FALSE) { 8441 (void) scsi_ifsetcap(SD_ADDRESS(un), "auto-rqsense", 0, 1); 8442 } 8443 8444 /* 8445 * Remove any event callbacks, fibre only 8446 */ 8447 if (un->un_f_is_fibre == TRUE) { 8448 if ((un->un_insert_event != NULL) && 8449 (ddi_remove_event_handler(un->un_insert_cb_id) != 8450 DDI_SUCCESS)) { 8451 /* 8452 * Note: We are returning here after having done 8453 * substantial cleanup above. This is consistent 8454 * with the legacy implementation but this may not 8455 * be the right thing to do. 8456 */ 8457 SD_ERROR(SD_LOG_ATTACH_DETACH, un, 8458 "sd_dr_detach: Cannot cancel insert event\n"); 8459 goto err_remove_event; 8460 } 8461 un->un_insert_event = NULL; 8462 8463 if ((un->un_remove_event != NULL) && 8464 (ddi_remove_event_handler(un->un_remove_cb_id) != 8465 DDI_SUCCESS)) { 8466 /* 8467 * Note: We are returning here after having done 8468 * substantial cleanup above. This is consistent 8469 * with the legacy implementation but this may not 8470 * be the right thing to do. 8471 */ 8472 SD_ERROR(SD_LOG_ATTACH_DETACH, un, 8473 "sd_dr_detach: Cannot cancel remove event\n"); 8474 goto err_remove_event; 8475 } 8476 un->un_remove_event = NULL; 8477 } 8478 8479 /* Do not free the softstate if the callback routine is active */ 8480 sd_sync_with_callback(un); 8481 8482 cmlb_detach(un->un_cmlbhandle, (void *)SD_PATH_DIRECT); 8483 cmlb_free_handle(&un->un_cmlbhandle); 8484 8485 /* 8486 * Hold the detach mutex here, to make sure that no other threads ever 8487 * can access a (partially) freed soft state structure. 8488 */ 8489 mutex_enter(&sd_detach_mutex); 8490 8491 /* 8492 * Clean up the soft state struct. 8493 * Cleanup is done in reverse order of allocs/inits. 8494 * At this point there should be no competing threads anymore. 8495 */ 8496 8497 scsi_fm_fini(devp); 8498 8499 /* 8500 * Deallocate memory for SCSI FMA. 8501 */ 8502 kmem_free(un->un_fm_private, sizeof (struct sd_fm_internal)); 8503 8504 /* 8505 * Unregister and free device id if it was not registered 8506 * by the transport. 8507 */ 8508 if (un->un_f_devid_transport_defined == FALSE) 8509 ddi_devid_unregister(devi); 8510 8511 /* 8512 * free the devid structure if allocated before (by ddi_devid_init() 8513 * or ddi_devid_get()). 8514 */ 8515 if (un->un_devid) { 8516 ddi_devid_free(un->un_devid); 8517 un->un_devid = NULL; 8518 } 8519 8520 /* 8521 * Destroy wmap cache if it exists. 8522 */ 8523 if (un->un_wm_cache != NULL) { 8524 kmem_cache_destroy(un->un_wm_cache); 8525 un->un_wm_cache = NULL; 8526 } 8527 8528 /* 8529 * kstat cleanup is done in detach for all device types (4363169). 8530 * We do not want to fail detach if the device kstats are not deleted 8531 * since there is a confusion about the devo_refcnt for the device. 8532 * We just delete the kstats and let detach complete successfully. 
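 * Note that kstat_delete() is called below without SD_MUTEX held; the I/O
 * kstats created at attach time use SD_MUTEX as their ks_lock, and
 * kstat_delete(9F) must not be called with that lock held.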
8533 */ 8534 if (un->un_stats != NULL) { 8535 kstat_delete(un->un_stats); 8536 un->un_stats = NULL; 8537 } 8538 if (un->un_errstats != NULL) { 8539 kstat_delete(un->un_errstats); 8540 un->un_errstats = NULL; 8541 } 8542 8543 /* Remove partition stats */ 8544 if (un->un_f_pkstats_enabled) { 8545 for (i = 0; i < NSDMAP; i++) { 8546 if (un->un_pstats[i] != NULL) { 8547 kstat_delete(un->un_pstats[i]); 8548 un->un_pstats[i] = NULL; 8549 } 8550 } 8551 } 8552 8553 /* Remove xbuf registration */ 8554 ddi_xbuf_attr_unregister_devinfo(un->un_xbuf_attr, devi); 8555 ddi_xbuf_attr_destroy(un->un_xbuf_attr); 8556 8557 /* Remove driver properties */ 8558 ddi_prop_remove_all(devi); 8559 8560 mutex_destroy(&un->un_pm_mutex); 8561 cv_destroy(&un->un_pm_busy_cv); 8562 8563 cv_destroy(&un->un_wcc_cv); 8564 8565 /* Open/close semaphore */ 8566 sema_destroy(&un->un_semoclose); 8567 8568 /* Removable media condvar. */ 8569 cv_destroy(&un->un_state_cv); 8570 8571 /* Suspend/resume condvar. */ 8572 cv_destroy(&un->un_suspend_cv); 8573 cv_destroy(&un->un_disk_busy_cv); 8574 8575 sd_free_rqs(un); 8576 8577 /* Free up soft state */ 8578 devp->sd_private = NULL; 8579 8580 bzero(un, sizeof (struct sd_lun)); 8581 #ifndef XPV_HVM_DRIVER 8582 ddi_soft_state_free(sd_state, instance); 8583 #endif /* !XPV_HVM_DRIVER */ 8584 8585 mutex_exit(&sd_detach_mutex); 8586 8587 /* This frees up the INQUIRY data associated with the device. */ 8588 scsi_unprobe(devp); 8589 8590 /* 8591 * After successfully detaching an instance, we update the information 8592 * of how many luns have been attached in the relative target and 8593 * controller for parallel SCSI. This information is used when sd tries 8594 * to set the tagged queuing capability in HBA. 8595 * Since un has been released, we can't use SD_IS_PARALLEL_SCSI(un) to 8596 * check if the device is parallel SCSI. However, we don't need to 8597 * check here because we've already checked during attach. No device 8598 * that is not parallel SCSI is in the chain. 8599 */ 8600 if ((tgt >= 0) && (tgt < NTARGETS_WIDE)) { 8601 sd_scsi_update_lun_on_target(pdip, tgt, SD_SCSI_LUN_DETACH); 8602 } 8603 8604 return (DDI_SUCCESS); 8605 8606 err_notclosed: 8607 mutex_exit(SD_MUTEX(un)); 8608 8609 err_stillbusy: 8610 _NOTE(NO_COMPETING_THREADS_NOW); 8611 8612 err_remove_event: 8613 mutex_enter(&sd_detach_mutex); 8614 un->un_detach_count--; 8615 mutex_exit(&sd_detach_mutex); 8616 8617 SD_TRACE(SD_LOG_ATTACH_DETACH, un, "sd_unit_detach: exit failure\n"); 8618 return (DDI_FAILURE); 8619 } 8620 8621 8622 /* 8623 * Function: sd_create_errstats 8624 * 8625 * Description: This routine instantiates the device error stats. 8626 * 8627 * Note: During attach the stats are instantiated first so they are 8628 * available for attach-time routines that utilize the driver 8629 * iopath to send commands to the device. The stats are initialized 8630 * separately so data obtained during some attach-time routines is 8631 * available. 
(4362483) 8632 * 8633 * Arguments: un - driver soft state (unit) structure 8634 * instance - driver instance 8635 * 8636 * Context: Kernel thread context 8637 */ 8638 8639 static void 8640 sd_create_errstats(struct sd_lun *un, int instance) 8641 { 8642 struct sd_errstats *stp; 8643 char kstatmodule_err[KSTAT_STRLEN]; 8644 char kstatname[KSTAT_STRLEN]; 8645 int ndata = (sizeof (struct sd_errstats) / sizeof (kstat_named_t)); 8646 8647 ASSERT(un != NULL); 8648 8649 if (un->un_errstats != NULL) { 8650 return; 8651 } 8652 8653 (void) snprintf(kstatmodule_err, sizeof (kstatmodule_err), 8654 "%serr", sd_label); 8655 (void) snprintf(kstatname, sizeof (kstatname), 8656 "%s%d,err", sd_label, instance); 8657 8658 un->un_errstats = kstat_create(kstatmodule_err, instance, kstatname, 8659 "device_error", KSTAT_TYPE_NAMED, ndata, KSTAT_FLAG_PERSISTENT); 8660 8661 if (un->un_errstats == NULL) { 8662 SD_ERROR(SD_LOG_ATTACH_DETACH, un, 8663 "sd_create_errstats: Failed kstat_create\n"); 8664 return; 8665 } 8666 8667 stp = (struct sd_errstats *)un->un_errstats->ks_data; 8668 kstat_named_init(&stp->sd_softerrs, "Soft Errors", 8669 KSTAT_DATA_UINT32); 8670 kstat_named_init(&stp->sd_harderrs, "Hard Errors", 8671 KSTAT_DATA_UINT32); 8672 kstat_named_init(&stp->sd_transerrs, "Transport Errors", 8673 KSTAT_DATA_UINT32); 8674 kstat_named_init(&stp->sd_vid, "Vendor", 8675 KSTAT_DATA_CHAR); 8676 kstat_named_init(&stp->sd_pid, "Product", 8677 KSTAT_DATA_CHAR); 8678 kstat_named_init(&stp->sd_revision, "Revision", 8679 KSTAT_DATA_CHAR); 8680 kstat_named_init(&stp->sd_serial, "Serial No", 8681 KSTAT_DATA_CHAR); 8682 kstat_named_init(&stp->sd_capacity, "Size", 8683 KSTAT_DATA_ULONGLONG); 8684 kstat_named_init(&stp->sd_rq_media_err, "Media Error", 8685 KSTAT_DATA_UINT32); 8686 kstat_named_init(&stp->sd_rq_ntrdy_err, "Device Not Ready", 8687 KSTAT_DATA_UINT32); 8688 kstat_named_init(&stp->sd_rq_nodev_err, "No Device", 8689 KSTAT_DATA_UINT32); 8690 kstat_named_init(&stp->sd_rq_recov_err, "Recoverable", 8691 KSTAT_DATA_UINT32); 8692 kstat_named_init(&stp->sd_rq_illrq_err, "Illegal Request", 8693 KSTAT_DATA_UINT32); 8694 kstat_named_init(&stp->sd_rq_pfa_err, "Predictive Failure Analysis", 8695 KSTAT_DATA_UINT32); 8696 8697 un->un_errstats->ks_private = un; 8698 un->un_errstats->ks_update = nulldev; 8699 8700 kstat_install(un->un_errstats); 8701 } 8702 8703 8704 /* 8705 * Function: sd_set_errstats 8706 * 8707 * Description: This routine sets the value of the vendor id, product id, 8708 * revision, serial number, and capacity device error stats. 8709 * 8710 * Note: During attach the stats are instantiated first so they are 8711 * available for attach-time routines that utilize the driver 8712 * iopath to send commands to the device. The stats are initialized 8713 * separately so data obtained during some attach-time routines is 8714 * available. 
(4362483) 8715 * 8716 * Arguments: un - driver soft state (unit) structure 8717 * 8718 * Context: Kernel thread context 8719 */ 8720 8721 static void 8722 sd_set_errstats(struct sd_lun *un) 8723 { 8724 struct sd_errstats *stp; 8725 8726 ASSERT(un != NULL); 8727 ASSERT(un->un_errstats != NULL); 8728 stp = (struct sd_errstats *)un->un_errstats->ks_data; 8729 ASSERT(stp != NULL); 8730 (void) strncpy(stp->sd_vid.value.c, un->un_sd->sd_inq->inq_vid, 8); 8731 (void) strncpy(stp->sd_pid.value.c, un->un_sd->sd_inq->inq_pid, 16); 8732 (void) strncpy(stp->sd_revision.value.c, 8733 un->un_sd->sd_inq->inq_revision, 4); 8734 8735 /* 8736 * All the errstats are persistent across detach/attach, 8737 * so reset all the errstats here in case of the hot 8738 * replacement of disk drives, except for unchanged 8739 * Sun-qualified drives. 8740 */ 8741 if ((bcmp(&SD_INQUIRY(un)->inq_pid[9], "SUN", 3) != 0) || 8742 (bcmp(&SD_INQUIRY(un)->inq_serial, stp->sd_serial.value.c, 8743 sizeof (SD_INQUIRY(un)->inq_serial)) != 0)) { 8744 stp->sd_softerrs.value.ui32 = 0; 8745 stp->sd_harderrs.value.ui32 = 0; 8746 stp->sd_transerrs.value.ui32 = 0; 8747 stp->sd_rq_media_err.value.ui32 = 0; 8748 stp->sd_rq_ntrdy_err.value.ui32 = 0; 8749 stp->sd_rq_nodev_err.value.ui32 = 0; 8750 stp->sd_rq_recov_err.value.ui32 = 0; 8751 stp->sd_rq_illrq_err.value.ui32 = 0; 8752 stp->sd_rq_pfa_err.value.ui32 = 0; 8753 } 8754 8755 /* 8756 * Set the "Serial No" kstat for Sun-qualified drives (indicated by 8757 * "SUN" in bytes 25-27 of the inquiry data, i.e. bytes 9-11 of the 8758 * pid). (4376302) 8759 */ 8760 if (bcmp(&SD_INQUIRY(un)->inq_pid[9], "SUN", 3) == 0) { 8761 bcopy(&SD_INQUIRY(un)->inq_serial, stp->sd_serial.value.c, 8762 sizeof (SD_INQUIRY(un)->inq_serial)); 8763 } 8764 8765 if (un->un_f_blockcount_is_valid != TRUE) { 8766 /* 8767 * Set capacity error stat to 0 for no media. This ensures 8768 * a valid capacity is displayed in response to 'iostat -E' 8769 * when no media is present in the device. 8770 */ 8771 stp->sd_capacity.value.ui64 = 0; 8772 } else { 8773 /* 8774 * Multiply un_blockcount by un->un_sys_blocksize to get 8775 * capacity. 8776 * 8777 * Note: for non-512 blocksize devices "un_blockcount" has been 8778 * "scaled" in sd_send_scsi_READ_CAPACITY by multiplying by 8779 * (un_tgt_blocksize / un->un_sys_blocksize). 8780 */ 8781 stp->sd_capacity.value.ui64 = (uint64_t) 8782 ((uint64_t)un->un_blockcount * un->un_sys_blocksize); 8783 } 8784 } 8785 8786 8787 /* 8788 * Function: sd_set_pstats 8789 * 8790 * Description: This routine instantiates and initializes the partition 8791 * stats for each partition with more than zero blocks. 8792 * (4363169) 8793 * 8794 * Arguments: un - driver soft state (unit) structure 8795 * 8796 * Context: Kernel thread context 8797 */ 8798 8799 static void 8800 sd_set_pstats(struct sd_lun *un) 8801 { 8802 char kstatname[KSTAT_STRLEN]; 8803 int instance; 8804 int i; 8805 diskaddr_t nblks = 0; 8806 char *partname = NULL; 8807 8808 ASSERT(un != NULL); 8809 8810 instance = ddi_get_instance(SD_DEVINFO(un)); 8811 8812 /* Note:x86: is this a VTOC8/VTOC16 difference?
*/ 8813 for (i = 0; i < NSDMAP; i++) { 8814 8815 if (cmlb_partinfo(un->un_cmlbhandle, i, 8816 &nblks, NULL, &partname, NULL, (void *)SD_PATH_DIRECT) != 0) 8817 continue; 8818 mutex_enter(SD_MUTEX(un)); 8819 8820 if ((un->un_pstats[i] == NULL) && 8821 (nblks != 0)) { 8822 8823 (void) snprintf(kstatname, sizeof (kstatname), 8824 "%s%d,%s", sd_label, instance, 8825 partname); 8826 8827 un->un_pstats[i] = kstat_create(sd_label, 8828 instance, kstatname, "partition", KSTAT_TYPE_IO, 8829 1, KSTAT_FLAG_PERSISTENT); 8830 if (un->un_pstats[i] != NULL) { 8831 un->un_pstats[i]->ks_lock = SD_MUTEX(un); 8832 kstat_install(un->un_pstats[i]); 8833 } 8834 } 8835 mutex_exit(SD_MUTEX(un)); 8836 } 8837 } 8838 8839 8840 #if (defined(__fibre)) 8841 /* 8842 * Function: sd_init_event_callbacks 8843 * 8844 * Description: This routine initializes the insertion and removal event 8845 * callbacks. (fibre only) 8846 * 8847 * Arguments: un - driver soft state (unit) structure 8848 * 8849 * Context: Kernel thread context 8850 */ 8851 8852 static void 8853 sd_init_event_callbacks(struct sd_lun *un) 8854 { 8855 ASSERT(un != NULL); 8856 8857 if ((un->un_insert_event == NULL) && 8858 (ddi_get_eventcookie(SD_DEVINFO(un), FCAL_INSERT_EVENT, 8859 &un->un_insert_event) == DDI_SUCCESS)) { 8860 /* 8861 * Add the callback for an insertion event 8862 */ 8863 (void) ddi_add_event_handler(SD_DEVINFO(un), 8864 un->un_insert_event, sd_event_callback, (void *)un, 8865 &(un->un_insert_cb_id)); 8866 } 8867 8868 if ((un->un_remove_event == NULL) && 8869 (ddi_get_eventcookie(SD_DEVINFO(un), FCAL_REMOVE_EVENT, 8870 &un->un_remove_event) == DDI_SUCCESS)) { 8871 /* 8872 * Add the callback for a removal event 8873 */ 8874 (void) ddi_add_event_handler(SD_DEVINFO(un), 8875 un->un_remove_event, sd_event_callback, (void *)un, 8876 &(un->un_remove_cb_id)); 8877 } 8878 } 8879 8880 8881 /* 8882 * Function: sd_event_callback 8883 * 8884 * Description: This routine handles insert/remove events (photon). The 8885 * state is changed to OFFLINE which can be used to suppress 8886 * error msgs. (fibre only) 8887 * 8888 * Arguments: un - driver soft state (unit) structure 8889 * 8890 * Context: Callout thread context 8891 */ 8892 /* ARGSUSED */ 8893 static void 8894 sd_event_callback(dev_info_t *dip, ddi_eventcookie_t event, void *arg, 8895 void *bus_impldata) 8896 { 8897 struct sd_lun *un = (struct sd_lun *)arg; 8898 8899 _NOTE(DATA_READABLE_WITHOUT_LOCK(sd_lun::un_insert_event)); 8900 if (event == un->un_insert_event) { 8901 SD_TRACE(SD_LOG_COMMON, un, "sd_event_callback: insert event"); 8902 mutex_enter(SD_MUTEX(un)); 8903 if (un->un_state == SD_STATE_OFFLINE) { 8904 if (un->un_last_state != SD_STATE_SUSPENDED) { 8905 un->un_state = un->un_last_state; 8906 } else { 8907 /* 8908 * We have gone through SUSPEND/RESUME while 8909 * we were offline. Restore the last state 8910 */ 8911 un->un_state = un->un_save_state; 8912 } 8913 } 8914 mutex_exit(SD_MUTEX(un)); 8915 8916 _NOTE(DATA_READABLE_WITHOUT_LOCK(sd_lun::un_remove_event)); 8917 } else if (event == un->un_remove_event) { 8918 SD_TRACE(SD_LOG_COMMON, un, "sd_event_callback: remove event"); 8919 mutex_enter(SD_MUTEX(un)); 8920 /* 8921 * We need to handle an event callback that occurs during 8922 * the suspend operation, since we don't prevent it.
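 * The handling below: a remove event moves a running instance to
 * SD_STATE_OFFLINE via New_state(), while an instance that is currently
 * SUSPENDED only records SD_STATE_OFFLINE as its last state, leaving the
 * resume path to apply it.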
8923 */ 8924 if (un->un_state != SD_STATE_OFFLINE) { 8925 if (un->un_state != SD_STATE_SUSPENDED) { 8926 New_state(un, SD_STATE_OFFLINE); 8927 } else { 8928 un->un_last_state = SD_STATE_OFFLINE; 8929 } 8930 } 8931 mutex_exit(SD_MUTEX(un)); 8932 } else { 8933 scsi_log(SD_DEVINFO(un), sd_label, CE_NOTE, 8934 "!Unknown event\n"); 8935 } 8936 8937 } 8938 #endif 8939 8940 /* 8941 * Function: sd_cache_control() 8942 * 8943 * Description: This routine is the driver entry point for setting 8944 * read and write caching by modifying the WCE (write cache 8945 * enable) and RCD (read cache disable) bits of mode 8946 * page 8 (MODEPAGE_CACHING). 8947 * 8948 * Arguments: ssc - ssc contains pointer to driver soft state (unit) 8949 * structure for this target. 8950 * rcd_flag - flag for controlling the read cache 8951 * wce_flag - flag for controlling the write cache 8952 * 8953 * Return Code: EIO 8954 * code returned by sd_send_scsi_MODE_SENSE and 8955 * sd_send_scsi_MODE_SELECT 8956 * 8957 * Context: Kernel Thread 8958 */ 8959 8960 static int 8961 sd_cache_control(sd_ssc_t *ssc, int rcd_flag, int wce_flag) 8962 { 8963 struct mode_caching *mode_caching_page; 8964 uchar_t *header; 8965 size_t buflen; 8966 int hdrlen; 8967 int bd_len; 8968 int rval = 0; 8969 struct mode_header_grp2 *mhp; 8970 struct sd_lun *un; 8971 int status; 8972 8973 ASSERT(ssc != NULL); 8974 un = ssc->ssc_un; 8975 ASSERT(un != NULL); 8976 8977 /* 8978 * Do a test unit ready; otherwise, a mode sense may not work if this 8979 * is the first command sent to the device after boot. 8980 */ 8981 status = sd_send_scsi_TEST_UNIT_READY(ssc, 0); 8982 if (status != 0) 8983 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 8984 8985 if (un->un_f_cfg_is_atapi == TRUE) { 8986 hdrlen = MODE_HEADER_LENGTH_GRP2; 8987 } else { 8988 hdrlen = MODE_HEADER_LENGTH; 8989 } 8990 8991 /* 8992 * Allocate memory for the retrieved mode page and its headers. Set 8993 * a pointer to the page itself. Use mode_cache_scsi3 to ensure 8994 * we get all of the mode sense data; otherwise, the mode select 8995 * will fail. mode_cache_scsi3 is a superset of mode_caching. 8996 */ 8997 buflen = hdrlen + MODE_BLK_DESC_LENGTH + 8998 sizeof (struct mode_cache_scsi3); 8999 9000 header = kmem_zalloc(buflen, KM_SLEEP); 9001 9002 /* Get the information from the device. */ 9003 if (un->un_f_cfg_is_atapi == TRUE) { 9004 rval = sd_send_scsi_MODE_SENSE(ssc, CDB_GROUP1, header, buflen, 9005 MODEPAGE_CACHING, SD_PATH_DIRECT); 9006 } else { 9007 rval = sd_send_scsi_MODE_SENSE(ssc, CDB_GROUP0, header, buflen, 9008 MODEPAGE_CACHING, SD_PATH_DIRECT); 9009 } 9010 9011 if (rval != 0) { 9012 SD_ERROR(SD_LOG_IOCTL_RMMEDIA, un, 9013 "sd_cache_control: Mode Sense Failed\n"); 9014 goto mode_sense_failed; 9015 } 9016 9017 /* 9018 * Determine size of Block Descriptors in order to locate 9019 * the mode page data. ATAPI devices return 0, SCSI devices 9020 * should return MODE_BLK_DESC_LENGTH.
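* Editorial sketch of the buffer layout parsed below, restating the code that follows: [ mode header: hdrlen bytes ][ block descriptor(s): bd_len bytes ][ mode page 8 data ], so the caching page is addressed as (header + hdrlen + bd_len); assuming the typical non-ATAPI values MODE_HEADER_LENGTH == 4 and MODE_BLK_DESC_LENGTH == 8, the page would begin at byte offset 12.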
9021 */ 9022 if (un->un_f_cfg_is_atapi == TRUE) { 9023 mhp = (struct mode_header_grp2 *)header; 9024 bd_len = (mhp->bdesc_length_hi << 8) | mhp->bdesc_length_lo; 9025 } else { 9026 bd_len = ((struct mode_header *)header)->bdesc_length; 9027 } 9028 9029 if (bd_len > MODE_BLK_DESC_LENGTH) { 9030 sd_ssc_set_info(ssc, SSC_FLAGS_INVALID_DATA, 0, 9031 "sd_cache_control: Mode Sense returned invalid block " 9032 "descriptor length\n"); 9033 rval = EIO; 9034 goto mode_sense_failed; 9035 } 9036 9037 mode_caching_page = (struct mode_caching *)(header + hdrlen + bd_len); 9038 if (mode_caching_page->mode_page.code != MODEPAGE_CACHING) { 9039 sd_ssc_set_info(ssc, SSC_FLAGS_INVALID_DATA, SD_LOG_COMMON, 9040 "sd_cache_control: Mode Sense caching page code mismatch " 9041 "%d\n", mode_caching_page->mode_page.code); 9042 rval = EIO; 9043 goto mode_sense_failed; 9044 } 9045 9046 /* Check the relevant bits on successful mode sense. */ 9047 if ((mode_caching_page->rcd && rcd_flag == SD_CACHE_ENABLE) || 9048 (!mode_caching_page->rcd && rcd_flag == SD_CACHE_DISABLE) || 9049 (mode_caching_page->wce && wce_flag == SD_CACHE_DISABLE) || 9050 (!mode_caching_page->wce && wce_flag == SD_CACHE_ENABLE)) { 9051 9052 size_t sbuflen; 9053 uchar_t save_pg; 9054 9055 /* 9056 * Construct select buffer length based on the 9057 * length of the sense data returned. 9058 */ 9059 sbuflen = hdrlen + MODE_BLK_DESC_LENGTH + 9060 sizeof (struct mode_page) + 9061 (int)mode_caching_page->mode_page.length; 9062 9063 /* 9064 * Set the caching bits as requested. 9065 */ 9066 if (rcd_flag == SD_CACHE_ENABLE) 9067 mode_caching_page->rcd = 0; 9068 else if (rcd_flag == SD_CACHE_DISABLE) 9069 mode_caching_page->rcd = 1; 9070 9071 if (wce_flag == SD_CACHE_ENABLE) 9072 mode_caching_page->wce = 1; 9073 else if (wce_flag == SD_CACHE_DISABLE) 9074 mode_caching_page->wce = 0; 9075 9076 /* 9077 * Save the page if the mode sense says the 9078 * drive supports it. 9079 */ 9080 save_pg = mode_caching_page->mode_page.ps ? 9081 SD_SAVE_PAGE : SD_DONTSAVE_PAGE; 9082 9083 /* Clear reserved bits before mode select. */ 9084 mode_caching_page->mode_page.ps = 0; 9085 9086 /* 9087 * Clear out mode header for mode select. 9088 * The rest of the retrieved page will be reused. 9089 */ 9090 bzero(header, hdrlen); 9091 9092 if (un->un_f_cfg_is_atapi == TRUE) { 9093 mhp = (struct mode_header_grp2 *)header; 9094 mhp->bdesc_length_hi = bd_len >> 8; 9095 mhp->bdesc_length_lo = (uchar_t)bd_len & 0xff; 9096 } else { 9097 ((struct mode_header *)header)->bdesc_length = bd_len; 9098 } 9099 9100 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 9101 9102 /* Issue mode select to change the cache settings */ 9103 if (un->un_f_cfg_is_atapi == TRUE) { 9104 rval = sd_send_scsi_MODE_SELECT(ssc, CDB_GROUP1, header, 9105 sbuflen, save_pg, SD_PATH_DIRECT); 9106 } else { 9107 rval = sd_send_scsi_MODE_SELECT(ssc, CDB_GROUP0, header, 9108 sbuflen, save_pg, SD_PATH_DIRECT); 9109 } 9110 9111 } 9112 9113 9114 mode_sense_failed: 9115 9116 kmem_free(header, buflen); 9117 9118 if (rval != 0) { 9119 if (rval == EIO) 9120 sd_ssc_assessment(ssc, SD_FMT_STATUS_CHECK); 9121 else 9122 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 9123 } 9124 return (rval); 9125 } 9126 9127 9128 /* 9129 * Function: sd_get_write_cache_enabled() 9130 * 9131 * Description: This routine is the driver entry point for determining if 9132 * write caching is enabled. It examines the WCE (write cache 9133 * enable) bits of mode page 8 (MODEPAGE_CACHING). 
9134 * 9135 * Arguments: ssc - ssc contains pointer to driver soft state (unit) 9136 * structure for this target. 9137 * is_enabled - pointer to int where write cache enabled state 9138 * is returned (non-zero -> write cache enabled) 9139 * 9140 * 9141 * Return Code: EIO 9142 * code returned by sd_send_scsi_MODE_SENSE 9143 * 9144 * Context: Kernel Thread 9145 * 9146 * NOTE: If an ioctl is added to disable write cache, this sequence should 9147 * be followed so that no locking is required for accesses to 9148 * un->un_f_write_cache_enabled: 9149 * do mode select to clear wce 9150 * do synchronize cache to flush cache 9151 * set un->un_f_write_cache_enabled = FALSE 9152 * 9153 * Conversely, an ioctl to enable the write cache should be done 9154 * in this order: 9155 * set un->un_f_write_cache_enabled = TRUE 9156 * do mode select to set wce 9157 */ 9158 9159 static int 9160 sd_get_write_cache_enabled(sd_ssc_t *ssc, int *is_enabled) 9161 { 9162 struct mode_caching *mode_caching_page; 9163 uchar_t *header; 9164 size_t buflen; 9165 int hdrlen; 9166 int bd_len; 9167 int rval = 0; 9168 struct sd_lun *un; 9169 int status; 9170 9171 ASSERT(ssc != NULL); 9172 un = ssc->ssc_un; 9173 ASSERT(un != NULL); 9174 ASSERT(is_enabled != NULL); 9175 9176 /* in case of error, flag as enabled */ 9177 *is_enabled = TRUE; 9178 9179 /* 9180 * Do a test unit ready; otherwise, a mode sense may not work if this 9181 * is the first command sent to the device after boot. 9182 */ 9183 status = sd_send_scsi_TEST_UNIT_READY(ssc, 0); 9184 9185 if (status != 0) 9186 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 9187 9188 if (un->un_f_cfg_is_atapi == TRUE) { 9189 hdrlen = MODE_HEADER_LENGTH_GRP2; 9190 } else { 9191 hdrlen = MODE_HEADER_LENGTH; 9192 } 9193 9194 /* 9195 * Allocate memory for the retrieved mode page and its headers. Set 9196 * a pointer to the page itself. 9197 */ 9198 buflen = hdrlen + MODE_BLK_DESC_LENGTH + sizeof (struct mode_caching); 9199 header = kmem_zalloc(buflen, KM_SLEEP); 9200 9201 /* Get the information from the device. */ 9202 if (un->un_f_cfg_is_atapi == TRUE) { 9203 rval = sd_send_scsi_MODE_SENSE(ssc, CDB_GROUP1, header, buflen, 9204 MODEPAGE_CACHING, SD_PATH_DIRECT); 9205 } else { 9206 rval = sd_send_scsi_MODE_SENSE(ssc, CDB_GROUP0, header, buflen, 9207 MODEPAGE_CACHING, SD_PATH_DIRECT); 9208 } 9209 9210 if (rval != 0) { 9211 SD_ERROR(SD_LOG_IOCTL_RMMEDIA, un, 9212 "sd_get_write_cache_enabled: Mode Sense Failed\n"); 9213 goto mode_sense_failed; 9214 } 9215 9216 /* 9217 * Determine size of Block Descriptors in order to locate 9218 * the mode page data. ATAPI devices return 0, SCSI devices 9219 * should return MODE_BLK_DESC_LENGTH.
9220 */ 9221 if (un->un_f_cfg_is_atapi == TRUE) { 9222 struct mode_header_grp2 *mhp; 9223 mhp = (struct mode_header_grp2 *)header; 9224 bd_len = (mhp->bdesc_length_hi << 8) | mhp->bdesc_length_lo; 9225 } else { 9226 bd_len = ((struct mode_header *)header)->bdesc_length; 9227 } 9228 9229 if (bd_len > MODE_BLK_DESC_LENGTH) { 9230 /* FMA should make upset complain here */ 9231 sd_ssc_set_info(ssc, SSC_FLAGS_INVALID_DATA, 0, 9232 "sd_get_write_cache_enabled: Mode Sense returned invalid " 9233 "block descriptor length\n"); 9234 rval = EIO; 9235 goto mode_sense_failed; 9236 } 9237 9238 mode_caching_page = (struct mode_caching *)(header + hdrlen + bd_len); 9239 if (mode_caching_page->mode_page.code != MODEPAGE_CACHING) { 9240 /* FMA could make upset complain here */ 9241 sd_ssc_set_info(ssc, SSC_FLAGS_INVALID_DATA, SD_LOG_COMMON, 9242 "sd_get_write_cache_enabled: Mode Sense caching page " 9243 "code mismatch %d\n", mode_caching_page->mode_page.code); 9244 rval = EIO; 9245 goto mode_sense_failed; 9246 } 9247 *is_enabled = mode_caching_page->wce; 9248 9249 mode_sense_failed: 9250 if (rval == 0) { 9251 sd_ssc_assessment(ssc, SD_FMT_STANDARD); 9252 } else if (rval == EIO) { 9253 /* 9254 * Some disks do not support mode sense(6), so we 9255 * should ignore this kind of error (sense key is 9256 * 0x5 - illegal request). 9257 */ 9258 uint8_t *sensep; 9259 int senlen; 9260 9261 sensep = (uint8_t *)ssc->ssc_uscsi_cmd->uscsi_rqbuf; 9262 senlen = (int)(ssc->ssc_uscsi_cmd->uscsi_rqlen - 9263 ssc->ssc_uscsi_cmd->uscsi_rqresid); 9264 9265 if (senlen > 0 && 9266 scsi_sense_key(sensep) == KEY_ILLEGAL_REQUEST) { 9267 sd_ssc_assessment(ssc, SD_FMT_IGNORE_COMPROMISE); 9268 } else { 9269 sd_ssc_assessment(ssc, SD_FMT_STATUS_CHECK); 9270 } 9271 } else { 9272 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 9273 } 9274 kmem_free(header, buflen); 9275 return (rval); 9276 } 9277 9278 /* 9279 * Function: sd_get_nv_sup() 9280 * 9281 * Description: This routine is the driver entry point for 9282 * determining whether non-volatile cache is supported. This 9283 * determination process works as follows: 9284 * 9285 * 1. sd first queries sd.conf as to whether the 9286 * suppress_cache_flush bit is set for this device. 9287 * 9288 * 2. if it is not set there, sd then queries the internal disk table. 9289 * 9290 * 3. if either sd.conf or the internal disk table specifies 9291 * that cache flushes be suppressed, we don't bother checking 9292 * the NV_SUP bit. 9293 * 9294 * If the SUPPRESS_CACHE_FLUSH bit is not set to 1, sd queries 9295 * the optional INQUIRY VPD page 0x86. If the device 9296 * supports VPD page 0x86, sd examines the NV_SUP 9297 * (non-volatile cache support) bit in the INQUIRY VPD page 9298 * 0x86: 9299 * o If the NV_SUP bit is set, sd assumes the device has a 9300 * non-volatile cache and sets 9301 * un_f_sync_nv_supported to TRUE. 9302 * o Otherwise the cache is not non-volatile, and 9303 * un_f_sync_nv_supported is set to FALSE. 9304 * 9305 * Arguments: ssc - ssc contains pointer to driver soft state 9306 * (unit) structure for this target. 9307 * Return Code: 9308 * 9309 * Context: Kernel Thread 9310 */ 9311 9312 static void 9313 sd_get_nv_sup(sd_ssc_t *ssc) 9314 { 9315 int rval = 0; 9316 uchar_t *inq86 = NULL; 9317 size_t inq86_len = MAX_INQUIRY_SIZE; 9318 size_t inq86_resid = 0; 9319 struct dk_callback *dkc; 9320 struct sd_lun *un; 9321 9322 ASSERT(ssc != NULL); 9323 un = ssc->ssc_un; 9324 ASSERT(un != NULL); 9325 9326 mutex_enter(SD_MUTEX(un)); 9327 9328 /* 9329 * Be conservative about the device's support of the 9330 * SYNC_NV bit: un_f_sync_nv_supported is 9331 * initialized to FALSE.
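* Editorial reference, hedged (byte meanings per SPC-3, not restated in this file): in the Extended INQUIRY Data VPD page fetched below, inq86[1] is the page code (0x86), inq86[3] the low byte of the page length, and byte 6 carries the NV_SUP bit (mask SD_VPD_NV_SUP); that is why the trace prints bytes 1, 3 and 6, and why at least 7 valid bytes (inq86_len - inq86_resid > 6) are required before the bit is examined.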
9332 */ 9333 un->un_f_sync_nv_supported = FALSE; 9334 9335 /* 9336 * If either sd.conf or the internal disk table 9337 * specifies that cache flushes be suppressed, then 9338 * we don't bother checking the NV_SUP bit. 9339 */ 9340 if (un->un_f_suppress_cache_flush == TRUE) { 9341 mutex_exit(SD_MUTEX(un)); 9342 return; 9343 } 9344 9345 if (sd_check_vpd_page_support(ssc) == 0 && 9346 un->un_vpd_page_mask & SD_VPD_EXTENDED_DATA_PG) { 9347 mutex_exit(SD_MUTEX(un)); 9348 /* collect page 86 data if available */ 9349 inq86 = kmem_zalloc(inq86_len, KM_SLEEP); 9350 9351 rval = sd_send_scsi_INQUIRY(ssc, inq86, inq86_len, 9352 0x01, 0x86, &inq86_resid); 9353 9354 if (rval == 0 && (inq86_len - inq86_resid > 6)) { 9355 SD_TRACE(SD_LOG_COMMON, un, 9356 "sd_get_nv_sup: successfully got VPD page: %x " 9357 "PAGE LENGTH: %x BYTE 6: %x\n", 9358 inq86[1], inq86[3], inq86[6]); 9359 9360 9361 mutex_enter(SD_MUTEX(un)); 9362 /* 9363 * Check the NV_SUP bit: only if the device 9364 * reports it as 1 will the 9365 * un_f_sync_nv_supported bit be set to TRUE. 9366 */ 9367 if (inq86[6] & SD_VPD_NV_SUP) { 9368 un->un_f_sync_nv_supported = TRUE; 9369 } 9370 mutex_exit(SD_MUTEX(un)); 9371 } else if (rval != 0) { 9372 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 9373 } 9374 9375 kmem_free(inq86, inq86_len); 9376 } else { 9377 mutex_exit(SD_MUTEX(un)); 9378 } 9379 9380 /* 9381 * Send a SYNC CACHE command to check whether the 9382 * SYNC_NV bit is supported. By the time this command is 9383 * issued, un_f_sync_nv_supported should hold the correct value. 9384 */ 9385 mutex_enter(SD_MUTEX(un)); 9386 if (un->un_f_sync_nv_supported) { 9387 mutex_exit(SD_MUTEX(un)); 9388 dkc = kmem_zalloc(sizeof (struct dk_callback), KM_SLEEP); 9389 dkc->dkc_flag = FLUSH_VOLATILE; 9390 (void) sd_send_scsi_SYNCHRONIZE_CACHE(un, dkc); 9391 9392 /* 9393 * Send a TEST UNIT READY command to the device. This should 9394 * clear any outstanding UNIT ATTENTION that may be present. 9395 */ 9396 rval = sd_send_scsi_TEST_UNIT_READY(ssc, SD_DONT_RETRY_TUR); 9397 if (rval != 0) 9398 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 9399 9400 kmem_free(dkc, sizeof (struct dk_callback)); 9401 } else { 9402 mutex_exit(SD_MUTEX(un)); 9403 } 9404 9405 SD_TRACE(SD_LOG_COMMON, un, "sd_get_nv_sup: " 9406 "un_f_suppress_cache_flush is set to %d\n", 9407 un->un_f_suppress_cache_flush); 9408 } 9409 9410 /* 9411 * Function: sd_make_device 9412 * 9413 * Description: Utility routine to return the Solaris device number from 9414 * the data in the device's dev_info structure. 9415 * 9416 * Return Code: The Solaris device number 9417 * 9418 * Context: Any 9419 */ 9420 9421 static dev_t 9422 sd_make_device(dev_info_t *devi) 9423 { 9424 return (makedevice(ddi_driver_major(devi), 9425 ddi_get_instance(devi) << SDUNIT_SHIFT)); 9426 } 9427 9428 9429 /* 9430 * Function: sd_pm_entry 9431 * 9432 * Description: Called at the start of a new command to manage power 9433 * and busy status of a device. This includes determining whether 9434 * the current power state of the device is sufficient for 9435 * performing the command or whether it must be changed. 9436 * The PM framework is notified appropriately. 9437 * Only with a return status of DDI_SUCCESS will the 9438 * component be marked busy to the framework. 9439 * 9440 * All callers of sd_pm_entry must check the return status 9441 * and only call sd_pm_exit if it was DDI_SUCCESS. A status 9442 * of DDI_FAILURE indicates the device failed to power up. 9443 * In this case un_pm_count has been adjusted so the result 9444 * on exit is still powered down, i.e.
count is less than 0. 9445 * Calling sd_pm_exit with this count value hits an ASSERT. 9446 * 9447 * Return Code: DDI_SUCCESS or DDI_FAILURE 9448 * 9449 * Context: Kernel thread context. 9450 */ 9451 9452 static int 9453 sd_pm_entry(struct sd_lun *un) 9454 { 9455 int return_status = DDI_SUCCESS; 9456 9457 ASSERT(!mutex_owned(SD_MUTEX(un))); 9458 ASSERT(!mutex_owned(&un->un_pm_mutex)); 9459 9460 SD_TRACE(SD_LOG_IO_PM, un, "sd_pm_entry: entry\n"); 9461 9462 if (un->un_f_pm_is_enabled == FALSE) { 9463 SD_TRACE(SD_LOG_IO_PM, un, 9464 "sd_pm_entry: exiting, PM not enabled\n"); 9465 return (return_status); 9466 } 9467 9468 /* 9469 * Just increment a counter if PM is enabled. On the transition from 9470 * 0 ==> 1, mark the device as busy. The iodone side will decrement 9471 * the count with each IO and mark the device as idle when the count 9472 * hits 0. 9473 * 9474 * If the count is less than 0 the device is powered down. If a powered 9475 * down device is successfully powered up then the count must be 9476 * incremented to reflect the power up. Note that it'll get incremented 9477 * a second time to become busy. 9478 * 9479 * Because the following has the potential to change the device state 9480 * and must release the un_pm_mutex to do so, only one thread can be 9481 * allowed through at a time. 9482 */ 9483 9484 mutex_enter(&un->un_pm_mutex); 9485 while (un->un_pm_busy == TRUE) { 9486 cv_wait(&un->un_pm_busy_cv, &un->un_pm_mutex); 9487 } 9488 un->un_pm_busy = TRUE; 9489 9490 if (un->un_pm_count < 1) { 9491 9492 SD_TRACE(SD_LOG_IO_PM, un, "sd_pm_entry: busy component\n"); 9493 9494 /* 9495 * Indicate we are now busy so the framework won't attempt to 9496 * power down the device. This call will only fail if either 9497 * we passed a bad component number or the device has no 9498 * components. Neither of these should ever happen. 9499 */ 9500 mutex_exit(&un->un_pm_mutex); 9501 return_status = pm_busy_component(SD_DEVINFO(un), 0); 9502 ASSERT(return_status == DDI_SUCCESS); 9503 9504 mutex_enter(&un->un_pm_mutex); 9505 9506 if (un->un_pm_count < 0) { 9507 mutex_exit(&un->un_pm_mutex); 9508 9509 SD_TRACE(SD_LOG_IO_PM, un, 9510 "sd_pm_entry: power up component\n"); 9511 9512 /* 9513 * pm_raise_power will cause sdpower to be called 9514 * which brings the device power level to the 9515 * desired state, ON in this case. If successful, 9516 * un_pm_count and un_power_level will be updated 9517 * appropriately. 9518 */ 9519 return_status = pm_raise_power(SD_DEVINFO(un), 0, 9520 SD_SPINDLE_ON); 9521 9522 mutex_enter(&un->un_pm_mutex); 9523 9524 if (return_status != DDI_SUCCESS) { 9525 /* 9526 * Power up failed. 9527 * Idle the device and adjust the count 9528 * so the result on exit is that we're 9529 * still powered down, ie. count is less than 0. 9530 */ 9531 SD_TRACE(SD_LOG_IO_PM, un, 9532 "sd_pm_entry: power up failed," 9533 " idle the component\n"); 9534 9535 (void) pm_idle_component(SD_DEVINFO(un), 0); 9536 un->un_pm_count--; 9537 } else { 9538 /* 9539 * Device is powered up, verify the 9540 * count is non-negative. 9541 * This is debug only. 9542 */ 9543 ASSERT(un->un_pm_count == 0); 9544 } 9545 } 9546 9547 if (return_status == DDI_SUCCESS) { 9548 /* 9549 * For performance, now that the device has been tagged 9550 * as busy, and it's known to be powered up, update the 9551 * chain types to use jump tables that do not include 9552 * pm. This significantly lowers the overhead and 9553 * therefore improves performance. 
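* Editorial sketch of the effect, assuming the chain tables defined elsewhere in this file: on the default chains each buf passes through sd_pm_iostart()/sd_pm_iodone() (see the I/O chain diagram later in this file), while the *_NO_PM variants selected below are assumed to omit that layer, so no per-command PM bookkeeping is done while the device is known to be powered up and marked busy.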
9554 */ 9555 9556 mutex_exit(&un->un_pm_mutex); 9557 mutex_enter(SD_MUTEX(un)); 9558 SD_TRACE(SD_LOG_IO_PM, un, 9559 "sd_pm_entry: changing uscsi_chain_type from %d\n", 9560 un->un_uscsi_chain_type); 9561 9562 if (un->un_f_non_devbsize_supported) { 9563 un->un_buf_chain_type = 9564 SD_CHAIN_INFO_RMMEDIA_NO_PM; 9565 } else { 9566 un->un_buf_chain_type = 9567 SD_CHAIN_INFO_DISK_NO_PM; 9568 } 9569 un->un_uscsi_chain_type = SD_CHAIN_INFO_USCSI_CMD_NO_PM; 9570 9571 SD_TRACE(SD_LOG_IO_PM, un, 9572 " changed uscsi_chain_type to %d\n", 9573 un->un_uscsi_chain_type); 9574 mutex_exit(SD_MUTEX(un)); 9575 mutex_enter(&un->un_pm_mutex); 9576 9577 if (un->un_pm_idle_timeid == NULL) { 9578 /* 300 ms. */ 9579 un->un_pm_idle_timeid = 9580 timeout(sd_pm_idletimeout_handler, un, 9581 (drv_usectohz((clock_t)300000))); 9582 /* 9583 * Include an extra call to busy which keeps the 9584 * device busy with respect to the PM layer 9585 * until the timer fires, at which time it'll 9586 * get the extra idle call. 9587 */ 9588 (void) pm_busy_component(SD_DEVINFO(un), 0); 9589 } 9590 } 9591 9592 un->un_pm_busy = FALSE; 9593 /* Next... */ 9594 cv_signal(&un->un_pm_busy_cv); 9595 9596 un->un_pm_count++; 9597 9598 SD_TRACE(SD_LOG_IO_PM, un, 9599 "sd_pm_entry: exiting, un_pm_count = %d\n", un->un_pm_count); 9600 9601 mutex_exit(&un->un_pm_mutex); 9602 9603 return (return_status); 9604 } 9605 9606 9607 /* 9608 * Function: sd_pm_exit 9609 * 9610 * Description: Called at the completion of a command to manage busy 9611 * status for the device. If the device becomes idle, the 9612 * PM framework is notified. 9613 * 9614 * Context: Kernel thread context 9615 */ 9616 9617 static void 9618 sd_pm_exit(struct sd_lun *un) 9619 { 9620 ASSERT(!mutex_owned(SD_MUTEX(un))); 9621 ASSERT(!mutex_owned(&un->un_pm_mutex)); 9622 9623 SD_TRACE(SD_LOG_IO_PM, un, "sd_pm_exit: entry\n"); 9624 9625 /* 9626 * After attach the following flag is only read, so don't 9627 * take the penalty of acquiring a mutex for it. 9628 */ 9629 if (un->un_f_pm_is_enabled == TRUE) { 9630 9631 mutex_enter(&un->un_pm_mutex); 9632 un->un_pm_count--; 9633 9634 SD_TRACE(SD_LOG_IO_PM, un, 9635 "sd_pm_exit: un_pm_count = %d\n", un->un_pm_count); 9636 9637 ASSERT(un->un_pm_count >= 0); 9638 if (un->un_pm_count == 0) { 9639 mutex_exit(&un->un_pm_mutex); 9640 9641 SD_TRACE(SD_LOG_IO_PM, un, 9642 "sd_pm_exit: idle component\n"); 9643 9644 (void) pm_idle_component(SD_DEVINFO(un), 0); 9645 9646 } else { 9647 mutex_exit(&un->un_pm_mutex); 9648 } 9649 } 9650 9651 SD_TRACE(SD_LOG_IO_PM, un, "sd_pm_exit: exiting\n"); 9652 } 9653 9654 9655 /* 9656 * Function: sdopen 9657 * 9658 * Description: Driver's open(9e) entry point function.
9659 * 9660 * Arguments: dev_p - pointer to device number 9661 * flag - how to open file (FEXCL, FNDELAY, FREAD, FWRITE) 9662 * otyp - open type (OTYP_BLK, OTYP_CHR, OTYP_LYR) 9663 * cred_p - user credential pointer 9664 * 9665 * Return Code: EINVAL 9666 * ENXIO 9667 * EIO 9668 * EROFS 9669 * EBUSY 9670 * 9671 * Context: Kernel thread context 9672 */ 9673 /* ARGSUSED */ 9674 static int 9675 sdopen(dev_t *dev_p, int flag, int otyp, cred_t *cred_p) 9676 { 9677 struct sd_lun *un; 9678 int nodelay; 9679 int part; 9680 uint64_t partmask; 9681 int instance; 9682 dev_t dev; 9683 int rval = EIO; 9684 diskaddr_t nblks = 0; 9685 diskaddr_t label_cap; 9686 9687 /* Validate the open type */ 9688 if (otyp >= OTYPCNT) { 9689 return (EINVAL); 9690 } 9691 9692 dev = *dev_p; 9693 instance = SDUNIT(dev); 9694 mutex_enter(&sd_detach_mutex); 9695 9696 /* 9697 * Fail the open if there is no softstate for the instance, or 9698 * if another thread somewhere is trying to detach the instance. 9699 */ 9700 if (((un = ddi_get_soft_state(sd_state, instance)) == NULL) || 9701 (un->un_detach_count != 0)) { 9702 mutex_exit(&sd_detach_mutex); 9703 /* 9704 * The probe cache only needs to be cleared when open (9e) fails 9705 * with ENXIO (4238046). 9706 */ 9707 /* 9708 * Unconditionally clearing the probe cache is OK with 9709 * separate sd/ssd binaries; on the x86 platform it can 9710 * be an issue with both parallel and fibre 9711 * in one binary. 9712 */ 9713 sd_scsi_clear_probe_cache(); 9714 return (ENXIO); 9715 } 9716 9717 /* 9718 * The un_layer_count is to prevent another thread in specfs from 9719 * trying to detach the instance, which can happen when we are 9720 * called from a higher-layer driver instead of thru specfs. 9721 * This will not be needed when DDI provides a layered driver 9722 * interface that allows specfs to know that an instance is in 9723 * use by a layered driver & should not be detached. 9724 * 9725 * Note: the semantics for layered driver opens are exactly one 9726 * close for every open. 9727 */ 9728 if (otyp == OTYP_LYR) { 9729 un->un_layer_count++; 9730 } 9731 9732 /* 9733 * Keep a count of the current # of opens in progress. This is because 9734 * some layered drivers try to call us as a regular open. This can 9735 * cause problems that we cannot prevent; however, by keeping this 9736 * count we can at least keep our open and detach routines from racing 9737 * against each other under such conditions. 9738 */ 9739 un->un_opens_in_progress++; 9740 mutex_exit(&sd_detach_mutex); 9741 9742 nodelay = (flag & (FNDELAY | FNONBLOCK)); 9743 part = SDPART(dev); 9744 partmask = 1 << part; 9745 9746 /* 9747 * We use a semaphore here in order to serialize 9748 * open and close requests on the device. 9749 */ 9750 sema_p(&un->un_semoclose); 9751 9752 mutex_enter(SD_MUTEX(un)); 9753 9754 /* 9755 * All device accesses go thru sdstrategy(), where we check 9756 * on suspend status, but there could be a scsi_poll command, 9757 * which bypasses sdstrategy(), so we need to check pm 9758 * status.
9759 */ 9760 9761 if (!nodelay) { 9762 while ((un->un_state == SD_STATE_SUSPENDED) || 9763 (un->un_state == SD_STATE_PM_CHANGING)) { 9764 cv_wait(&un->un_suspend_cv, SD_MUTEX(un)); 9765 } 9766 9767 mutex_exit(SD_MUTEX(un)); 9768 if (sd_pm_entry(un) != DDI_SUCCESS) { 9769 rval = EIO; 9770 SD_ERROR(SD_LOG_OPEN_CLOSE, un, 9771 "sdopen: sd_pm_entry failed\n"); 9772 goto open_failed_with_pm; 9773 } 9774 mutex_enter(SD_MUTEX(un)); 9775 } 9776 9777 /* check for previous exclusive open */ 9778 SD_TRACE(SD_LOG_OPEN_CLOSE, un, "sdopen: un=%p\n", (void *)un); 9779 SD_TRACE(SD_LOG_OPEN_CLOSE, un, 9780 "sdopen: exclopen=%x, flag=%x, regopen=%x\n", 9781 un->un_exclopen, flag, un->un_ocmap.regopen[otyp]); 9782 9783 if (un->un_exclopen & (partmask)) { 9784 goto excl_open_fail; 9785 } 9786 9787 if (flag & FEXCL) { 9788 int i; 9789 if (un->un_ocmap.lyropen[part]) { 9790 goto excl_open_fail; 9791 } 9792 for (i = 0; i < (OTYPCNT - 1); i++) { 9793 if (un->un_ocmap.regopen[i] & (partmask)) { 9794 goto excl_open_fail; 9795 } 9796 } 9797 } 9798 9799 /* 9800 * Check the write permission if this is a removable media device, 9801 * NDELAY has not been set, and writable permission is requested. 9802 * 9803 * Note: If NDELAY was set and this is write-protected media the WRITE 9804 * attempt will fail with EIO as part of the I/O processing. This is a 9805 * more permissive implementation that allows the open to succeed and 9806 * WRITE attempts to fail when appropriate. 9807 */ 9808 if (un->un_f_chk_wp_open) { 9809 if ((flag & FWRITE) && (!nodelay)) { 9810 mutex_exit(SD_MUTEX(un)); 9811 /* 9812 * For a writable DVD drive, defer the write-permission 9813 * check until sdstrategy, and do not fail the open even 9814 * if FWRITE is set, as the device can be writable 9815 * depending upon the media, and the media can change 9816 * after the call to open(). 9817 */ 9818 if (un->un_f_dvdram_writable_device == FALSE) { 9819 if (ISCD(un) || sr_check_wp(dev)) { 9820 rval = EROFS; 9821 mutex_enter(SD_MUTEX(un)); 9822 SD_ERROR(SD_LOG_OPEN_CLOSE, un, "sdopen: " 9823 "write to cd or write protected media\n"); 9824 goto open_fail; 9825 } 9826 } 9827 mutex_enter(SD_MUTEX(un)); 9828 } 9829 } 9830 9831 /* 9832 * If opening in NDELAY/NONBLOCK mode, just return. 9833 * Check if disk is ready and has a valid geometry later. 9834 */ 9835 if (!nodelay) { 9836 sd_ssc_t *ssc; 9837 9838 mutex_exit(SD_MUTEX(un)); 9839 ssc = sd_ssc_init(un); 9840 rval = sd_ready_and_valid(ssc, part); 9841 sd_ssc_fini(ssc); 9842 mutex_enter(SD_MUTEX(un)); 9843 /* 9844 * Fail if device is not ready or if the number of disk 9845 * blocks is zero or negative for non-CD devices. 9846 */ 9847 9848 nblks = 0; 9849 9850 if (rval == SD_READY_VALID && (!ISCD(un))) { 9851 /* if cmlb_partinfo fails, nblks remains 0 */ 9852 mutex_exit(SD_MUTEX(un)); 9853 (void) cmlb_partinfo(un->un_cmlbhandle, part, &nblks, 9854 NULL, NULL, NULL, (void *)SD_PATH_DIRECT); 9855 mutex_enter(SD_MUTEX(un)); 9856 } 9857 9858 if ((rval != SD_READY_VALID) || 9859 (!ISCD(un) && nblks <= 0)) { 9860 rval = un->un_f_has_removable_media ? ENXIO : EIO; 9861 SD_ERROR(SD_LOG_OPEN_CLOSE, un, "sdopen: " 9862 "device not ready or invalid disk block value\n"); 9863 goto open_fail; 9864 } 9865 #if defined(__i386) || defined(__amd64) 9866 } else { 9867 uchar_t *cp; 9868 /* 9869 * x86 requires special nodelay handling, so that p0 is 9870 * always defined and accessible. 9871 * Invalidate the geometry only if the device is not already open.
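* Editorial note, hedged: un_ocmap is assumed (per sddef.h) to be a union in which the chkd[] byte array overlays both the lyropen[] counts and the regopen[] masks, so the byte scan below is a single-pass test for "no opens of any type on any partition" without walking each map separately.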
9872 */ 9873 cp = &un->un_ocmap.chkd[0]; 9874 while (cp < &un->un_ocmap.chkd[OCSIZE]) { 9875 if (*cp != (uchar_t)0) { 9876 break; 9877 } 9878 cp++; 9879 } 9880 if (cp == &un->un_ocmap.chkd[OCSIZE]) { 9881 mutex_exit(SD_MUTEX(un)); 9882 cmlb_invalidate(un->un_cmlbhandle, 9883 (void *)SD_PATH_DIRECT); 9884 mutex_enter(SD_MUTEX(un)); 9885 } 9886 9887 #endif 9888 } 9889 9890 if (otyp == OTYP_LYR) { 9891 un->un_ocmap.lyropen[part]++; 9892 } else { 9893 un->un_ocmap.regopen[otyp] |= partmask; 9894 } 9895 9896 /* Set up open and exclusive open flags */ 9897 if (flag & FEXCL) { 9898 un->un_exclopen |= (partmask); 9899 } 9900 9901 /* 9902 * If the lun is EFI labeled and lun capacity is greater than the 9903 * capacity contained in the label, log a sys-event to notify the 9904 * interested module. 9905 * To avoid an infinite loop of logging sys-event, we only log the 9906 * event when the lun is not opened in NDELAY mode. The event handler 9907 * should open the lun in NDELAY mode. 9908 */ 9909 if (!(flag & FNDELAY)) { 9910 mutex_exit(SD_MUTEX(un)); 9911 if (cmlb_efi_label_capacity(un->un_cmlbhandle, &label_cap, 9912 (void*)SD_PATH_DIRECT) == 0) { 9913 mutex_enter(SD_MUTEX(un)); 9914 if (un->un_f_blockcount_is_valid && 9915 un->un_blockcount > label_cap) { 9916 mutex_exit(SD_MUTEX(un)); 9917 sd_log_lun_expansion_event(un, 9918 (nodelay ? KM_NOSLEEP : KM_SLEEP)); 9919 mutex_enter(SD_MUTEX(un)); 9920 } 9921 } else { 9922 mutex_enter(SD_MUTEX(un)); 9923 } 9924 } 9925 9926 SD_TRACE(SD_LOG_OPEN_CLOSE, un, "sdopen: " 9927 "open of part %d type %d\n", part, otyp); 9928 9929 mutex_exit(SD_MUTEX(un)); 9930 if (!nodelay) { 9931 sd_pm_exit(un); 9932 } 9933 9934 sema_v(&un->un_semoclose); 9935 9936 mutex_enter(&sd_detach_mutex); 9937 un->un_opens_in_progress--; 9938 mutex_exit(&sd_detach_mutex); 9939 9940 SD_TRACE(SD_LOG_OPEN_CLOSE, un, "sdopen: exit success\n"); 9941 return (DDI_SUCCESS); 9942 9943 excl_open_fail: 9944 SD_ERROR(SD_LOG_OPEN_CLOSE, un, "sdopen: fail exclusive open\n"); 9945 rval = EBUSY; 9946 9947 open_fail: 9948 mutex_exit(SD_MUTEX(un)); 9949 9950 /* 9951 * On a failed open we must exit the pm management. 9952 */ 9953 if (!nodelay) { 9954 sd_pm_exit(un); 9955 } 9956 open_failed_with_pm: 9957 sema_v(&un->un_semoclose); 9958 9959 mutex_enter(&sd_detach_mutex); 9960 un->un_opens_in_progress--; 9961 if (otyp == OTYP_LYR) { 9962 un->un_layer_count--; 9963 } 9964 mutex_exit(&sd_detach_mutex); 9965 9966 return (rval); 9967 } 9968 9969 9970 /* 9971 * Function: sdclose 9972 * 9973 * Description: Driver's close(9e) entry point function. 9974 * 9975 * Arguments: dev - device number 9976 * flag - file status flag, informational only 9977 * otyp - close type (OTYP_BLK, OTYP_CHR, OTYP_LYR) 9978 * cred_p - user credential pointer 9979 * 9980 * Return Code: ENXIO 9981 * 9982 * Context: Kernel thread context 9983 */ 9984 /* ARGSUSED */ 9985 static int 9986 sdclose(dev_t dev, int flag, int otyp, cred_t *cred_p) 9987 { 9988 struct sd_lun *un; 9989 uchar_t *cp; 9990 int part; 9991 int nodelay; 9992 int rval = 0; 9993 9994 /* Validate the open type */ 9995 if (otyp >= OTYPCNT) { 9996 return (ENXIO); 9997 } 9998 9999 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 10000 return (ENXIO); 10001 } 10002 10003 part = SDPART(dev); 10004 nodelay = flag & (FNDELAY | FNONBLOCK); 10005 10006 SD_TRACE(SD_LOG_OPEN_CLOSE, un, 10007 "sdclose: close of part %d type %d\n", part, otyp); 10008 10009 /* 10010 * We use a semaphore here in order to serialize 10011 * open and close requests on the device. 
10012 */ 10013 sema_p(&un->un_semoclose); 10014 10015 mutex_enter(SD_MUTEX(un)); 10016 10017 /* Don't proceed if power is being changed. */ 10018 while (un->un_state == SD_STATE_PM_CHANGING) { 10019 cv_wait(&un->un_suspend_cv, SD_MUTEX(un)); 10020 } 10021 10022 if (un->un_exclopen & (1 << part)) { 10023 un->un_exclopen &= ~(1 << part); 10024 } 10025 10026 /* Update the open partition map */ 10027 if (otyp == OTYP_LYR) { 10028 un->un_ocmap.lyropen[part] -= 1; 10029 } else { 10030 un->un_ocmap.regopen[otyp] &= ~(1 << part); 10031 } 10032 10033 cp = &un->un_ocmap.chkd[0]; 10034 while (cp < &un->un_ocmap.chkd[OCSIZE]) { 10035 if (*cp != NULL) { 10036 break; 10037 } 10038 cp++; 10039 } 10040 10041 if (cp == &un->un_ocmap.chkd[OCSIZE]) { 10042 SD_TRACE(SD_LOG_OPEN_CLOSE, un, "sdclose: last close\n"); 10043 10044 /* 10045 * We avoid persistence upon the last close, and set 10046 * the throttle back to the maximum. 10047 */ 10048 un->un_throttle = un->un_saved_throttle; 10049 10050 if (un->un_state == SD_STATE_OFFLINE) { 10051 if (un->un_f_is_fibre == FALSE) { 10052 scsi_log(SD_DEVINFO(un), sd_label, 10053 CE_WARN, "offline\n"); 10054 } 10055 mutex_exit(SD_MUTEX(un)); 10056 cmlb_invalidate(un->un_cmlbhandle, 10057 (void *)SD_PATH_DIRECT); 10058 mutex_enter(SD_MUTEX(un)); 10059 10060 } else { 10061 /* 10062 * Flush any outstanding writes in NVRAM cache. 10063 * Note: SYNCHRONIZE CACHE is an optional SCSI-2 10064 * cmd; it may not work for non-Pluto devices. 10065 * SYNCHRONIZE CACHE is not required for removables, 10066 * except DVD-RAM drives. 10067 * 10068 * Also note: because SYNCHRONIZE CACHE is currently 10069 * the only command issued here that requires the 10070 * drive be powered up, only do the power up before 10071 * sending the Sync Cache command. If additional 10072 * commands are added which require a powered up 10073 * drive, the following sequence may have to change. 10074 * 10075 * And finally, note that parallel SCSI on SPARC 10076 * only issues a Sync Cache to DVD-RAM, a newly 10077 * supported device. 10078 */ 10079 #if defined(__i386) || defined(__amd64) 10080 if ((un->un_f_sync_cache_supported && 10081 un->un_f_sync_cache_required) || 10082 un->un_f_dvdram_writable_device == TRUE) { 10083 #else 10084 if (un->un_f_dvdram_writable_device == TRUE) { 10085 #endif 10086 mutex_exit(SD_MUTEX(un)); 10087 if (sd_pm_entry(un) == DDI_SUCCESS) { 10088 rval = 10089 sd_send_scsi_SYNCHRONIZE_CACHE(un, 10090 NULL); 10091 /* ignore error if not supported */ 10092 if (rval == ENOTSUP) { 10093 rval = 0; 10094 } else if (rval != 0) { 10095 rval = EIO; 10096 } 10097 sd_pm_exit(un); 10098 } else { 10099 rval = EIO; 10100 } 10101 mutex_enter(SD_MUTEX(un)); 10102 } 10103 10104 /* 10105 * For devices which support DOOR_LOCK, send an ALLOW 10106 * MEDIA REMOVAL command, but don't get upset if it 10107 * fails.
We need to raise the power of the drive before 10108 * we can call sd_send_scsi_DOORLOCK(). 10109 */ 10110 if (un->un_f_doorlock_supported) { 10111 mutex_exit(SD_MUTEX(un)); 10112 if (sd_pm_entry(un) == DDI_SUCCESS) { 10113 sd_ssc_t *ssc; 10114 10115 ssc = sd_ssc_init(un); 10116 rval = sd_send_scsi_DOORLOCK(ssc, 10117 SD_REMOVAL_ALLOW, SD_PATH_DIRECT); 10118 if (rval != 0) 10119 sd_ssc_assessment(ssc, 10120 SD_FMT_IGNORE); 10121 sd_ssc_fini(ssc); 10122 10123 sd_pm_exit(un); 10124 if (ISCD(un) && (rval != 0) && 10125 (nodelay != 0)) { 10126 rval = ENXIO; 10127 } 10128 } else { 10129 rval = EIO; 10130 } 10131 mutex_enter(SD_MUTEX(un)); 10132 } 10133 10134 /* 10135 * If a device has removable media, invalidate all 10136 * parameters related to media, such as geometry, 10137 * blocksize, and blockcount. 10138 */ 10139 if (un->un_f_has_removable_media) { 10140 sr_ejected(un); 10141 } 10142 10143 /* 10144 * Destroy the cache (if it exists) which was 10145 * allocated for the write maps since this is 10146 * the last close for this media. 10147 */ 10148 if (un->un_wm_cache) { 10149 /* 10150 * Check if there are pending commands; 10151 * if there are, give a warning and 10152 * do not destroy the cache. 10153 */ 10154 if (un->un_ncmds_in_driver > 0) { 10155 scsi_log(SD_DEVINFO(un), 10156 sd_label, CE_WARN, 10157 "Unable to clean up memory " 10158 "because of pending I/O\n"); 10159 } else { 10160 kmem_cache_destroy( 10161 un->un_wm_cache); 10162 un->un_wm_cache = NULL; 10163 } 10164 } 10165 } 10166 } 10167 10168 mutex_exit(SD_MUTEX(un)); 10169 sema_v(&un->un_semoclose); 10170 10171 if (otyp == OTYP_LYR) { 10172 mutex_enter(&sd_detach_mutex); 10173 /* 10174 * The detach routine may run when the layer count 10175 * drops to zero. 10176 */ 10177 un->un_layer_count--; 10178 mutex_exit(&sd_detach_mutex); 10179 } 10180 10181 return (rval); 10182 } 10183 10184 10185 /* 10186 * Function: sd_ready_and_valid 10187 * 10188 * Description: Test if device is ready and has a valid geometry. 10189 * 10190 * Arguments: ssc - sd_ssc_t which will contain un, the driver 10191 * soft state (unit) structure 10192 * 10193 * Return Code: SD_READY_VALID ready and valid label 10194 * SD_NOT_READY_VALID not ready, no label 10195 * SD_RESERVED_BY_OTHERS reservation conflict 10196 * 10197 * Context: Never called at interrupt context. 10198 */ 10199 10200 static int 10201 sd_ready_and_valid(sd_ssc_t *ssc, int part) 10202 { 10203 struct sd_errstats *stp; 10204 uint64_t capacity; 10205 uint_t lbasize; 10206 int rval = SD_READY_VALID; 10207 char name_str[48]; 10208 boolean_t is_valid; 10209 struct sd_lun *un; 10210 int status; 10211 10212 ASSERT(ssc != NULL); 10213 un = ssc->ssc_un; 10214 ASSERT(un != NULL); 10215 ASSERT(!mutex_owned(SD_MUTEX(un))); 10216 10217 mutex_enter(SD_MUTEX(un)); 10218 /* 10219 * If a device has removable media, we must also check whether the 10220 * media is ready when deciding whether this device is ready and valid.
10221 */ 10222 if (un->un_f_has_removable_media) { 10223 mutex_exit(SD_MUTEX(un)); 10224 status = sd_send_scsi_TEST_UNIT_READY(ssc, 0); 10225 10226 if (status != 0) { 10227 rval = SD_NOT_READY_VALID; 10228 mutex_enter(SD_MUTEX(un)); 10229 10230 /* Ignore all failed status for removable media */ 10231 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 10232 10233 goto done; 10234 } 10235 10236 is_valid = SD_IS_VALID_LABEL(un); 10237 mutex_enter(SD_MUTEX(un)); 10238 if (!is_valid || 10239 (un->un_f_blockcount_is_valid == FALSE) || 10240 (un->un_f_tgt_blocksize_is_valid == FALSE)) { 10241 10242 /* Capacity has to be read on every open. */ 10243 mutex_exit(SD_MUTEX(un)); 10244 status = sd_send_scsi_READ_CAPACITY(ssc, &capacity, 10245 &lbasize, SD_PATH_DIRECT); 10246 10247 if (status != 0) { 10248 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 10249 10250 cmlb_invalidate(un->un_cmlbhandle, 10251 (void *)SD_PATH_DIRECT); 10252 mutex_enter(SD_MUTEX(un)); 10253 rval = SD_NOT_READY_VALID; 10254 10255 goto done; 10256 } else { 10257 mutex_enter(SD_MUTEX(un)); 10258 sd_update_block_info(un, lbasize, capacity); 10259 } 10260 } 10261 10262 /* 10263 * Check if the media in the device is writable or not. 10264 */ 10265 if (!is_valid && ISCD(un)) { 10266 sd_check_for_writable_cd(ssc, SD_PATH_DIRECT); 10267 } 10268 10269 } else { 10270 /* 10271 * Do a test unit ready to clear any unit attention from non-CD 10272 * devices. 10273 */ 10274 mutex_exit(SD_MUTEX(un)); 10275 10276 status = sd_send_scsi_TEST_UNIT_READY(ssc, 0); 10277 if (status != 0) { 10278 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 10279 } 10280 10281 mutex_enter(SD_MUTEX(un)); 10282 } 10283 10284 10285 /* 10286 * If this is a non-512 block device, allocate space for 10287 * the wmap cache. This is done here since this routine is 10288 * called every time the media is changed, and the block 10289 * size is a function of the media rather than the device. 10290 */ 10291 if (un->un_f_non_devbsize_supported && NOT_DEVBSIZE(un)) { 10292 if (!(un->un_wm_cache)) { 10293 (void) snprintf(name_str, sizeof (name_str), 10294 "%s%d_cache", 10295 ddi_driver_name(SD_DEVINFO(un)), 10296 ddi_get_instance(SD_DEVINFO(un))); 10297 un->un_wm_cache = kmem_cache_create( 10298 name_str, sizeof (struct sd_w_map), 10299 8, sd_wm_cache_constructor, 10300 sd_wm_cache_destructor, NULL, 10301 (void *)un, NULL, 0); 10302 if (!(un->un_wm_cache)) { 10303 rval = ENOMEM; 10304 goto done; 10305 } 10306 } 10307 } 10308 10309 if (un->un_state == SD_STATE_NORMAL) { 10310 /* 10311 * If the target is not yet ready here (defined by a TUR 10312 * failure), invalidate the geometry and print an 'offline' 10313 * message. This is a legacy message, as the state of the 10314 * target is not actually changed to SD_STATE_OFFLINE. 10315 * 10316 * If the TUR fails for EACCES (Reservation Conflict), 10317 * SD_RESERVED_BY_OTHERS will be returned to indicate 10318 * reservation conflict. If the TUR fails for other 10319 * reasons, SD_NOT_READY_VALID will be returned.
10320 */ 10321 int err; 10322 10323 mutex_exit(SD_MUTEX(un)); 10324 err = sd_send_scsi_TEST_UNIT_READY(ssc, 0); 10325 mutex_enter(SD_MUTEX(un)); 10326 10327 if (err != 0) { 10328 mutex_exit(SD_MUTEX(un)); 10329 cmlb_invalidate(un->un_cmlbhandle, 10330 (void *)SD_PATH_DIRECT); 10331 mutex_enter(SD_MUTEX(un)); 10332 if (err == EACCES) { 10333 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 10334 "reservation conflict\n"); 10335 rval = SD_RESERVED_BY_OTHERS; 10336 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 10337 } else { 10338 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 10339 "drive offline\n"); 10340 rval = SD_NOT_READY_VALID; 10341 sd_ssc_assessment(ssc, SD_FMT_STATUS_CHECK); 10342 } 10343 goto done; 10344 } 10345 } 10346 10347 if (un->un_f_format_in_progress == FALSE) { 10348 mutex_exit(SD_MUTEX(un)); 10349 10350 (void) cmlb_validate(un->un_cmlbhandle, 0, 10351 (void *)SD_PATH_DIRECT); 10352 if (cmlb_partinfo(un->un_cmlbhandle, part, NULL, NULL, NULL, 10353 NULL, (void *) SD_PATH_DIRECT) != 0) { 10354 rval = SD_NOT_READY_VALID; 10355 mutex_enter(SD_MUTEX(un)); 10356 10357 goto done; 10358 } 10359 if (un->un_f_pkstats_enabled) { 10360 sd_set_pstats(un); 10361 SD_TRACE(SD_LOG_IO_PARTITION, un, 10362 "sd_ready_and_valid: un:0x%p pstats created and " 10363 "set\n", un); 10364 } 10365 mutex_enter(SD_MUTEX(un)); 10366 } 10367 10368 /* 10369 * If this device supports the DOOR_LOCK command, try to send it 10370 * to PREVENT MEDIA REMOVAL, but don't get upset 10371 * if it fails. For a CD, however, a failure is an error. 10372 */ 10373 if (un->un_f_doorlock_supported) { 10374 mutex_exit(SD_MUTEX(un)); 10375 status = sd_send_scsi_DOORLOCK(ssc, SD_REMOVAL_PREVENT, 10376 SD_PATH_DIRECT); 10377 10378 if ((status != 0) && ISCD(un)) { 10379 rval = SD_NOT_READY_VALID; 10380 mutex_enter(SD_MUTEX(un)); 10381 10382 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 10383 10384 goto done; 10385 } else if (status != 0) 10386 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 10387 mutex_enter(SD_MUTEX(un)); 10388 } 10389 10390 /* The state has changed; inform the media watch routines */ 10391 un->un_mediastate = DKIO_INSERTED; 10392 cv_broadcast(&un->un_state_cv); 10393 rval = SD_READY_VALID; 10394 10395 done: 10396 10397 /* 10398 * Initialize the capacity kstat value if there was no media 10399 * previously (capacity kstat is 0) and media has now been 10400 * inserted (un_blockcount > 0). 10401 */ 10402 if (un->un_errstats != NULL) { 10403 stp = (struct sd_errstats *)un->un_errstats->ks_data; 10404 if ((stp->sd_capacity.value.ui64 == 0) && 10405 (un->un_f_blockcount_is_valid == TRUE)) { 10406 stp->sd_capacity.value.ui64 = 10407 (uint64_t)((uint64_t)un->un_blockcount * 10408 un->un_sys_blocksize); 10409 } 10410 } 10411 10412 mutex_exit(SD_MUTEX(un)); 10413 return (rval); 10414 } 10415 10416 10417 /* 10418 * Function: sdmin 10419 * 10420 * Description: Routine to limit the size of a data transfer. Used in 10421 * conjunction with physio(9F). 10422 * 10423 * Arguments: bp - pointer to the indicated buf(9S) struct. 10424 * 10425 * Context: Kernel thread context. 10426 */ 10427 10428 static void 10429 sdmin(struct buf *bp) 10430 { 10431 struct sd_lun *un; 10432 int instance; 10433 10434 instance = SDUNIT(bp->b_edev); 10435 10436 un = ddi_get_soft_state(sd_state, instance); 10437 ASSERT(un != NULL); 10438 10439 /* 10440 * We depend on DMA partial or buf breakup to restrict 10441 * IO size if either of them is enabled.
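* Editorial usage note (values hypothetical): sdmin() is passed to physio(9F)/aphysio(9F) by sdread/sdwrite/sdaread/sdawrite below as the transfer-limiting routine; with un_max_xfer_size of 1 MB, an 8 MB request would be clamped to 1 MB per pass, and physio iterates until the full count has been transferred.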
10442 */ 10443 if (un->un_partial_dma_supported || 10444 un->un_buf_breakup_supported) { 10445 return; 10446 } 10447 10448 if (bp->b_bcount > un->un_max_xfer_size) { 10449 bp->b_bcount = un->un_max_xfer_size; 10450 } 10451 } 10452 10453 10454 /* 10455 * Function: sdread 10456 * 10457 * Description: Driver's read(9e) entry point function. 10458 * 10459 * Arguments: dev - device number 10460 * uio - structure pointer describing where data is to be stored 10461 * in user's space 10462 * cred_p - user credential pointer 10463 * 10464 * Return Code: ENXIO 10465 * EIO 10466 * EINVAL 10467 * value returned by physio 10468 * 10469 * Context: Kernel thread context. 10470 */ 10471 /* ARGSUSED */ 10472 static int 10473 sdread(dev_t dev, struct uio *uio, cred_t *cred_p) 10474 { 10475 struct sd_lun *un = NULL; 10476 int secmask; 10477 int err = 0; 10478 sd_ssc_t *ssc; 10479 10480 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 10481 return (ENXIO); 10482 } 10483 10484 ASSERT(!mutex_owned(SD_MUTEX(un))); 10485 10486 10487 if (!SD_IS_VALID_LABEL(un) && !ISCD(un)) { 10488 mutex_enter(SD_MUTEX(un)); 10489 /* 10490 * Because the call to sd_ready_and_valid will issue I/O, we 10491 * must wait here if either the device is suspended or 10492 * its power level is changing. 10493 */ 10494 while ((un->un_state == SD_STATE_SUSPENDED) || 10495 (un->un_state == SD_STATE_PM_CHANGING)) { 10496 cv_wait(&un->un_suspend_cv, SD_MUTEX(un)); 10497 } 10498 un->un_ncmds_in_driver++; 10499 mutex_exit(SD_MUTEX(un)); 10500 10501 /* Initialize sd_ssc_t for internal uscsi commands */ 10502 ssc = sd_ssc_init(un); 10503 if ((sd_ready_and_valid(ssc, SDPART(dev))) != SD_READY_VALID) { 10504 err = EIO; 10505 } else { 10506 err = 0; 10507 } 10508 sd_ssc_fini(ssc); 10509 10510 mutex_enter(SD_MUTEX(un)); 10511 un->un_ncmds_in_driver--; 10512 ASSERT(un->un_ncmds_in_driver >= 0); 10513 mutex_exit(SD_MUTEX(un)); 10514 if (err != 0) 10515 return (err); 10516 } 10517 10518 /* 10519 * Read requests are restricted to multiples of the system block size. 10520 */ 10521 secmask = un->un_sys_blocksize - 1; 10522 10523 if (uio->uio_loffset & ((offset_t)(secmask))) { 10524 SD_ERROR(SD_LOG_READ_WRITE, un, 10525 "sdread: file offset not modulo %d\n", 10526 un->un_sys_blocksize); 10527 err = EINVAL; 10528 } else if (uio->uio_iov->iov_len & (secmask)) { 10529 SD_ERROR(SD_LOG_READ_WRITE, un, 10530 "sdread: transfer length not modulo %d\n", 10531 un->un_sys_blocksize); 10532 err = EINVAL; 10533 } else { 10534 err = physio(sdstrategy, NULL, dev, B_READ, sdmin, uio); 10535 } 10536 10537 return (err); 10538 } 10539 10540 10541 /* 10542 * Function: sdwrite 10543 * 10544 * Description: Driver's write(9e) entry point function. 10545 * 10546 * Arguments: dev - device number 10547 * uio - structure pointer describing where data is stored in 10548 * user's space 10549 * cred_p - user credential pointer 10550 * 10551 * Return Code: ENXIO 10552 * EIO 10553 * EINVAL 10554 * value returned by physio 10555 * 10556 * Context: Kernel thread context.
10557 */ 10558 /* ARGSUSED */ 10559 static int 10560 sdwrite(dev_t dev, struct uio *uio, cred_t *cred_p) 10561 { 10562 struct sd_lun *un = NULL; 10563 int secmask; 10564 int err = 0; 10565 sd_ssc_t *ssc; 10566 10567 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 10568 return (ENXIO); 10569 } 10570 10571 ASSERT(!mutex_owned(SD_MUTEX(un))); 10572 10573 if (!SD_IS_VALID_LABEL(un) && !ISCD(un)) { 10574 mutex_enter(SD_MUTEX(un)); 10575 /* 10576 * Because the call to sd_ready_and_valid will issue I/O, we 10577 * must wait here if either the device is suspended or 10578 * its power level is changing. 10579 */ 10580 while ((un->un_state == SD_STATE_SUSPENDED) || 10581 (un->un_state == SD_STATE_PM_CHANGING)) { 10582 cv_wait(&un->un_suspend_cv, SD_MUTEX(un)); 10583 } 10584 un->un_ncmds_in_driver++; 10585 mutex_exit(SD_MUTEX(un)); 10586 10587 /* Initialize sd_ssc_t for internal uscsi commands */ 10588 ssc = sd_ssc_init(un); 10589 if ((sd_ready_and_valid(ssc, SDPART(dev))) != SD_READY_VALID) { 10590 err = EIO; 10591 } else { 10592 err = 0; 10593 } 10594 sd_ssc_fini(ssc); 10595 10596 mutex_enter(SD_MUTEX(un)); 10597 un->un_ncmds_in_driver--; 10598 ASSERT(un->un_ncmds_in_driver >= 0); 10599 mutex_exit(SD_MUTEX(un)); 10600 if (err != 0) 10601 return (err); 10602 } 10603 10604 /* 10605 * Write requests are restricted to multiples of the system block size. 10606 */ 10607 secmask = un->un_sys_blocksize - 1; 10608 10609 if (uio->uio_loffset & ((offset_t)(secmask))) { 10610 SD_ERROR(SD_LOG_READ_WRITE, un, 10611 "sdwrite: file offset not modulo %d\n", 10612 un->un_sys_blocksize); 10613 err = EINVAL; 10614 } else if (uio->uio_iov->iov_len & (secmask)) { 10615 SD_ERROR(SD_LOG_READ_WRITE, un, 10616 "sdwrite: transfer length not modulo %d\n", 10617 un->un_sys_blocksize); 10618 err = EINVAL; 10619 } else { 10620 err = physio(sdstrategy, NULL, dev, B_WRITE, sdmin, uio); 10621 } 10622 10623 return (err); 10624 } 10625 10626 10627 /* 10628 * Function: sdaread 10629 * 10630 * Description: Driver's aread(9e) entry point function. 10631 * 10632 * Arguments: dev - device number 10633 * aio - structure pointer describing where data is to be stored 10634 * cred_p - user credential pointer 10635 * 10636 * Return Code: ENXIO 10637 * EIO 10638 * EINVAL 10639 * value returned by aphysio 10640 * 10641 * Context: Kernel thread context. 10642 */ 10643 /* ARGSUSED */ 10644 static int 10645 sdaread(dev_t dev, struct aio_req *aio, cred_t *cred_p) 10646 { 10647 struct sd_lun *un = NULL; 10648 struct uio *uio = aio->aio_uio; 10649 int secmask; 10650 int err = 0; 10651 sd_ssc_t *ssc; 10652 10653 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 10654 return (ENXIO); 10655 } 10656 10657 ASSERT(!mutex_owned(SD_MUTEX(un))); 10658 10659 if (!SD_IS_VALID_LABEL(un) && !ISCD(un)) { 10660 mutex_enter(SD_MUTEX(un)); 10661 /* 10662 * Because the call to sd_ready_and_valid will issue I/O, we 10663 * must wait here if either the device is suspended or 10664 * its power level is changing.
10665 */ 10666 while ((un->un_state == SD_STATE_SUSPENDED) || 10667 (un->un_state == SD_STATE_PM_CHANGING)) { 10668 cv_wait(&un->un_suspend_cv, SD_MUTEX(un)); 10669 } 10670 un->un_ncmds_in_driver++; 10671 mutex_exit(SD_MUTEX(un)); 10672 10673 /* Initialize sd_ssc_t for internal uscsi commands */ 10674 ssc = sd_ssc_init(un); 10675 if ((sd_ready_and_valid(ssc, SDPART(dev))) != SD_READY_VALID) { 10676 err = EIO; 10677 } else { 10678 err = 0; 10679 } 10680 sd_ssc_fini(ssc); 10681 10682 mutex_enter(SD_MUTEX(un)); 10683 un->un_ncmds_in_driver--; 10684 ASSERT(un->un_ncmds_in_driver >= 0); 10685 mutex_exit(SD_MUTEX(un)); 10686 if (err != 0) 10687 return (err); 10688 } 10689 10690 /* 10691 * Read requests are restricted to multiples of the system block size. 10692 */ 10693 secmask = un->un_sys_blocksize - 1; 10694 10695 if (uio->uio_loffset & ((offset_t)(secmask))) { 10696 SD_ERROR(SD_LOG_READ_WRITE, un, 10697 "sdaread: file offset not modulo %d\n", 10698 un->un_sys_blocksize); 10699 err = EINVAL; 10700 } else if (uio->uio_iov->iov_len & (secmask)) { 10701 SD_ERROR(SD_LOG_READ_WRITE, un, 10702 "sdaread: transfer length not modulo %d\n", 10703 un->un_sys_blocksize); 10704 err = EINVAL; 10705 } else { 10706 err = aphysio(sdstrategy, anocancel, dev, B_READ, sdmin, aio); 10707 } 10708 10709 return (err); 10710 } 10711 10712 10713 /* 10714 * Function: sdawrite 10715 * 10716 * Description: Driver's awrite(9e) entry point function. 10717 * 10718 * Arguments: dev - device number 10719 * aio - structure pointer describing where data is stored 10720 * cred_p - user credential pointer 10721 * 10722 * Return Code: ENXIO 10723 * EIO 10724 * EINVAL 10725 * value returned by aphysio 10726 * 10727 * Context: Kernel thread context. 10728 */ 10729 /* ARGSUSED */ 10730 static int 10731 sdawrite(dev_t dev, struct aio_req *aio, cred_t *cred_p) 10732 { 10733 struct sd_lun *un = NULL; 10734 struct uio *uio = aio->aio_uio; 10735 int secmask; 10736 int err = 0; 10737 sd_ssc_t *ssc; 10738 10739 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 10740 return (ENXIO); 10741 } 10742 10743 ASSERT(!mutex_owned(SD_MUTEX(un))); 10744 10745 if (!SD_IS_VALID_LABEL(un) && !ISCD(un)) { 10746 mutex_enter(SD_MUTEX(un)); 10747 /* 10748 * Because the call to sd_ready_and_valid will issue I/O, we 10749 * must wait here if either the device is suspended or 10750 * its power level is changing. 10751 */ 10752 while ((un->un_state == SD_STATE_SUSPENDED) || 10753 (un->un_state == SD_STATE_PM_CHANGING)) { 10754 cv_wait(&un->un_suspend_cv, SD_MUTEX(un)); 10755 } 10756 un->un_ncmds_in_driver++; 10757 mutex_exit(SD_MUTEX(un)); 10758 10759 /* Initialize sd_ssc_t for internal uscsi commands */ 10760 ssc = sd_ssc_init(un); 10761 if ((sd_ready_and_valid(ssc, SDPART(dev))) != SD_READY_VALID) { 10762 err = EIO; 10763 } else { 10764 err = 0; 10765 } 10766 sd_ssc_fini(ssc); 10767 10768 mutex_enter(SD_MUTEX(un)); 10769 un->un_ncmds_in_driver--; 10770 ASSERT(un->un_ncmds_in_driver >= 0); 10771 mutex_exit(SD_MUTEX(un)); 10772 if (err != 0) 10773 return (err); 10774 } 10775 10776 /* 10777 * Write requests are restricted to multiples of the system block size.
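* Editorial worked example, assuming the common 512-byte system block size: secmask is then 0x1FF; an offset of 1536 (3 * 512) has no low-order bits set and passes, while an offset of 1000 leaves 1000 & 0x1FF == 488 nonzero and fails with EINVAL, as does any iov_len that is not a multiple of 512.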
10778 */ 10779 secmask = un->un_sys_blocksize - 1; 10780 10781 if (uio->uio_loffset & ((offset_t)(secmask))) { 10782 SD_ERROR(SD_LOG_READ_WRITE, un, 10783 "sdawrite: file offset not modulo %d\n", 10784 un->un_sys_blocksize); 10785 err = EINVAL; 10786 } else if (uio->uio_iov->iov_len & (secmask)) { 10787 SD_ERROR(SD_LOG_READ_WRITE, un, 10788 "sdawrite: transfer length not modulo %d\n", 10789 un->un_sys_blocksize); 10790 err = EINVAL; 10791 } else { 10792 err = aphysio(sdstrategy, anocancel, dev, B_WRITE, sdmin, aio); 10793 } 10794 10795 return (err); 10796 } 10797 10798 10799 10800 10801 10802 /* 10803 * Driver IO processing follows the following sequence: 10804 * 10805 * sdioctl(9E) sdstrategy(9E) biodone(9F) 10806 * | | ^ 10807 * v v | 10808 * sd_send_scsi_cmd() ddi_xbuf_qstrategy() +-------------------+ 10809 * | | | | 10810 * v | | | 10811 * sd_uscsi_strategy() sd_xbuf_strategy() sd_buf_iodone() sd_uscsi_iodone() 10812 * | | ^ ^ 10813 * v v | | 10814 * SD_BEGIN_IOSTART() SD_BEGIN_IOSTART() | | 10815 * | | | | 10816 * +---+ | +------------+ +-------+ 10817 * | | | | 10818 * | SD_NEXT_IOSTART()| SD_NEXT_IODONE()| | 10819 * | v | | 10820 * | sd_mapblockaddr_iostart() sd_mapblockaddr_iodone() | 10821 * | | ^ | 10822 * | SD_NEXT_IOSTART()| SD_NEXT_IODONE()| | 10823 * | v | | 10824 * | sd_mapblocksize_iostart() sd_mapblocksize_iodone() | 10825 * | | ^ | 10826 * | SD_NEXT_IOSTART()| SD_NEXT_IODONE()| | 10827 * | v | | 10828 * | sd_checksum_iostart() sd_checksum_iodone() | 10829 * | | ^ | 10830 * +-> SD_NEXT_IOSTART()| SD_NEXT_IODONE()+------------->+ 10831 * | v | | 10832 * | sd_pm_iostart() sd_pm_iodone() | 10833 * | | ^ | 10834 * | | | | 10835 * +-> SD_NEXT_IOSTART()| SD_BEGIN_IODONE()--+--------------+ 10836 * | ^ 10837 * v | 10838 * sd_core_iostart() | 10839 * | | 10840 * | +------>(*destroypkt)() 10841 * +-> sd_start_cmds() <-+ | | 10842 * | | | v 10843 * | | | scsi_destroy_pkt(9F) 10844 * | | | 10845 * +->(*initpkt)() +- sdintr() 10846 * | | | | 10847 * | +-> scsi_init_pkt(9F) | +-> sd_handle_xxx() 10848 * | +-> scsi_setup_cdb(9F) | 10849 * | | 10850 * +--> scsi_transport(9F) | 10851 * | | 10852 * +----> SCSA ---->+ 10853 * 10854 * 10855 * This code is based upon the following presumptions: 10856 * 10857 * - iostart and iodone functions operate on buf(9S) structures. These 10858 * functions perform the necessary operations on the buf(9S) and pass 10859 * them along to the next function in the chain by using the macros 10860 * SD_NEXT_IOSTART() (for iostart side functions) and SD_NEXT_IODONE() 10861 * (for iodone side functions). 10862 * 10863 * - The iostart side functions may sleep. The iodone side functions 10864 * are called under interrupt context and may NOT sleep. Therefore 10865 * iodone side functions also may not call iostart side functions. 10866 * (NOTE: iostart side functions should NOT sleep for memory, as 10867 * this could result in deadlock.) 10868 * 10869 * - An iostart side function may call its corresponding iodone side 10870 * function directly (if necessary). 10871 * 10872 * - In the event of an error, an iostart side function can return a buf(9S) 10873 * to its caller by calling SD_BEGIN_IODONE() (after setting B_ERROR and 10874 * b_error in the usual way of course). 10875 * 10876 * - The taskq mechanism may be used by the iodone side functions to dispatch 10877 * requests to the iostart side functions. The iostart side functions in 10878 * this case would be called under the context of a taskq thread, so it's 10879 * OK for them to block/sleep/spin in this case. 
10880 * 10881 * - iostart side functions may allocate "shadow" buf(9S) structs and 10882 * pass them along to the next function in the chain. The corresponding 10883 * iodone side functions must coalesce the "shadow" bufs and return 10884 * the "original" buf to the next higher layer. 10885 * 10886 * - The b_private field of the buf(9S) struct holds a pointer to 10887 * an sd_xbuf struct, which contains information needed to 10888 * construct the scsi_pkt for the command. 10889 * 10890 * - The SD_MUTEX(un) is NOT held across calls to the next layer. Each 10891 * layer must acquire & release the SD_MUTEX(un) as needed. 10892 */ 10893 10894 10895 /* 10896 * Create taskq for all targets in the system. This is created at 10897 * _init(9E) and destroyed at _fini(9E). 10898 * 10899 * Note: here we set the minalloc to a reasonably high number to ensure that 10900 * we will have an adequate supply of task entries available at interrupt time. 10901 * This is used in conjunction with the TASKQ_PREPOPULATE flag in 10902 * sd_taskq_create(). Since we do not want to sleep for allocations at 10903 * interrupt time, set maxalloc equal to minalloc. That way we will just fail 10904 * the command if we ever try to dispatch more than SD_TASKQ_MAXALLOC taskq 10905 * requests at any one instant in time. 10906 */ 10907 #define SD_TASKQ_NUMTHREADS 8 10908 #define SD_TASKQ_MINALLOC 256 10909 #define SD_TASKQ_MAXALLOC 256 10910 10911 static taskq_t *sd_tq = NULL; 10912 _NOTE(SCHEME_PROTECTS_DATA("stable data", sd_tq)) 10913 10914 static int sd_taskq_minalloc = SD_TASKQ_MINALLOC; 10915 static int sd_taskq_maxalloc = SD_TASKQ_MAXALLOC; 10916 10917 /* 10918 * The following task queue is created for the write part of 10919 * read-modify-write on devices with a non-512 block size. 10920 * The number of threads is limited to 1 for now, because the queue 10921 * currently applies only to DVD-RAM and MO drives, for which 10922 * performance is not the main criterion at this stage. 10923 * Note: whether a single taskq could be used instead remains to be explored. 10924 */ 10925 #define SD_WMR_TASKQ_NUMTHREADS 1 10926 static taskq_t *sd_wmr_tq = NULL; 10927 _NOTE(SCHEME_PROTECTS_DATA("stable data", sd_wmr_tq)) 10928 10929 /* 10930 * Function: sd_taskq_create 10931 * 10932 * Description: Create taskq thread(s) and preallocate task entries 10933 * 10934 * Return Code: None; the created taskqs are stored in sd_tq and sd_wmr_tq. 10935 * 10936 * Context: Can sleep. Requires blockable context. 10937 * 10938 * Notes: - The taskq() facility currently is NOT part of the DDI. 10939 * (definitely NOT recommended for 3rd-party drivers!) :-) 10940 * - taskq_create() will block for memory; also, it will panic 10941 * if it cannot create the requested number of threads. 10942 * - Currently taskq_create() creates threads that cannot be 10943 * swapped.
10944 * - We use TASKQ_PREPOPULATE to ensure we have an adequate 10945 * supply of taskq entries at interrupt time (ie, so that we 10946 * do not have to sleep for memory) 10947 */ 10948 10949 static void 10950 sd_taskq_create(void) 10951 { 10952 char taskq_name[TASKQ_NAMELEN]; 10953 10954 ASSERT(sd_tq == NULL); 10955 ASSERT(sd_wmr_tq == NULL); 10956 10957 (void) snprintf(taskq_name, sizeof (taskq_name), 10958 "%s_drv_taskq", sd_label); 10959 sd_tq = (taskq_create(taskq_name, SD_TASKQ_NUMTHREADS, 10960 (v.v_maxsyspri - 2), sd_taskq_minalloc, sd_taskq_maxalloc, 10961 TASKQ_PREPOPULATE)); 10962 10963 (void) snprintf(taskq_name, sizeof (taskq_name), 10964 "%s_rmw_taskq", sd_label); 10965 sd_wmr_tq = (taskq_create(taskq_name, SD_WMR_TASKQ_NUMTHREADS, 10966 (v.v_maxsyspri - 2), sd_taskq_minalloc, sd_taskq_maxalloc, 10967 TASKQ_PREPOPULATE)); 10968 } 10969 10970 10971 /* 10972 * Function: sd_taskq_delete 10973 * 10974 * Description: Complementary cleanup routine for sd_taskq_create(). 10975 * 10976 * Context: Kernel thread context. 10977 */ 10978 10979 static void 10980 sd_taskq_delete(void) 10981 { 10982 ASSERT(sd_tq != NULL); 10983 ASSERT(sd_wmr_tq != NULL); 10984 taskq_destroy(sd_tq); 10985 taskq_destroy(sd_wmr_tq); 10986 sd_tq = NULL; 10987 sd_wmr_tq = NULL; 10988 } 10989 10990 10991 /* 10992 * Function: sdstrategy 10993 * 10994 * Description: Driver's strategy (9E) entry point function. 10995 * 10996 * Arguments: bp - pointer to buf(9S) 10997 * 10998 * Return Code: Always returns zero 10999 * 11000 * Context: Kernel thread context. 11001 */ 11002 11003 static int 11004 sdstrategy(struct buf *bp) 11005 { 11006 struct sd_lun *un; 11007 11008 un = ddi_get_soft_state(sd_state, SD_GET_INSTANCE_FROM_BUF(bp)); 11009 if (un == NULL) { 11010 bioerror(bp, EIO); 11011 bp->b_resid = bp->b_bcount; 11012 biodone(bp); 11013 return (0); 11014 } 11015 /* As was done in the past, fail new cmds if the state is dumping. */ 11016 if (un->un_state == SD_STATE_DUMPING) { 11017 bioerror(bp, ENXIO); 11018 bp->b_resid = bp->b_bcount; 11019 biodone(bp); 11020 return (0); 11021 } 11022 11023 ASSERT(!mutex_owned(SD_MUTEX(un))); 11024 11025 /* 11026 * Commands may sneak in while we released the mutex in 11027 * DDI_SUSPEND, so we should block new commands. However, old 11028 * commands that are still in the driver at this point should 11029 * still be allowed to drain. 11030 */ 11031 mutex_enter(SD_MUTEX(un)); 11032 /* 11033 * Must wait here if either the device is suspended or 11034 * if its power level is changing. 11035 */ 11036 while ((un->un_state == SD_STATE_SUSPENDED) || 11037 (un->un_state == SD_STATE_PM_CHANGING)) { 11038 cv_wait(&un->un_suspend_cv, SD_MUTEX(un)); 11039 } 11040 11041 un->un_ncmds_in_driver++; 11042 11043 /* 11044 * atapi: Since we are running the CD in PIO mode for now, we need to 11045 * call bp_mapin here to avoid bp_mapin being called in interrupt 11046 * context under the HBA's init_pkt routine. 11047 */ 11048 if (un->un_f_cfg_is_atapi == TRUE) { 11049 mutex_exit(SD_MUTEX(un)); 11050 bp_mapin(bp); 11051 mutex_enter(SD_MUTEX(un)); 11052 } 11053 SD_INFO(SD_LOG_IO, un, "sdstrategy: un_ncmds_in_driver = %ld\n", 11054 un->un_ncmds_in_driver); 11055 11056 if (bp->b_flags & B_WRITE) 11057 un->un_f_sync_cache_required = TRUE; 11058 11059 mutex_exit(SD_MUTEX(un)); 11060 11061 /* 11062 * This will (eventually) allocate the sd_xbuf area and 11063 * call sd_xbuf_strategy(). We just want to return the 11064 * result of ddi_xbuf_qstrategy so that we have an 11065 * optimized tail call which saves us a stack frame.
11066 */ 11067 return (ddi_xbuf_qstrategy(bp, un->un_xbuf_attr)); 11068 } 11069 11070 11071 /* 11072 * Function: sd_xbuf_strategy 11073 * 11074 * Description: Function for initiating IO operations via the 11075 * ddi_xbuf_qstrategy() mechanism. 11076 * 11077 * Context: Kernel thread context. 11078 */ 11079 11080 static void 11081 sd_xbuf_strategy(struct buf *bp, ddi_xbuf_t xp, void *arg) 11082 { 11083 struct sd_lun *un = arg; 11084 11085 ASSERT(bp != NULL); 11086 ASSERT(xp != NULL); 11087 ASSERT(un != NULL); 11088 ASSERT(!mutex_owned(SD_MUTEX(un))); 11089 11090 /* 11091 * Initialize the fields in the xbuf and save a pointer to the 11092 * xbuf in bp->b_private. 11093 */ 11094 sd_xbuf_init(un, bp, xp, SD_CHAIN_BUFIO, NULL); 11095 11096 /* Send the buf down the iostart chain */ 11097 SD_BEGIN_IOSTART(((struct sd_xbuf *)xp)->xb_chain_iostart, un, bp); 11098 } 11099 11100 11101 /* 11102 * Function: sd_xbuf_init 11103 * 11104 * Description: Prepare the given sd_xbuf struct for use. 11105 * 11106 * Arguments: un - ptr to softstate 11107 * bp - ptr to associated buf(9S) 11108 * xp - ptr to associated sd_xbuf 11109 * chain_type - IO chain type to use: 11110 * SD_CHAIN_NULL 11111 * SD_CHAIN_BUFIO 11112 * SD_CHAIN_USCSI 11113 * SD_CHAIN_DIRECT 11114 * SD_CHAIN_DIRECT_PRIORITY 11115 * pktinfop - ptr to private data struct for scsi_pkt(9S) 11116 * initialization; may be NULL if none. 11117 * 11118 * Context: Kernel thread context 11119 */ 11120 11121 static void 11122 sd_xbuf_init(struct sd_lun *un, struct buf *bp, struct sd_xbuf *xp, 11123 uchar_t chain_type, void *pktinfop) 11124 { 11125 int index; 11126 11127 ASSERT(un != NULL); 11128 ASSERT(bp != NULL); 11129 ASSERT(xp != NULL); 11130 11131 SD_INFO(SD_LOG_IO, un, "sd_xbuf_init: buf:0x%p chain type:0x%x\n", 11132 bp, chain_type); 11133 11134 xp->xb_un = un; 11135 xp->xb_pktp = NULL; 11136 xp->xb_pktinfo = pktinfop; 11137 xp->xb_private = bp->b_private; 11138 xp->xb_blkno = (daddr_t)bp->b_blkno; 11139 11140 /* 11141 * Set up the iostart and iodone chain indexes in the xbuf, based 11142 * upon the specified chain type to use. 11143 */ 11144 switch (chain_type) { 11145 case SD_CHAIN_NULL: 11146 /* 11147 * Fall through to just use the values for the buf type, even 11148 * though for the NULL chain these values will never be used. 11149 */ 11150 /* FALLTHRU */ 11151 case SD_CHAIN_BUFIO: 11152 index = un->un_buf_chain_type; 11153 break; 11154 case SD_CHAIN_USCSI: 11155 index = un->un_uscsi_chain_type; 11156 break; 11157 case SD_CHAIN_DIRECT: 11158 index = un->un_direct_chain_type; 11159 break; 11160 case SD_CHAIN_DIRECT_PRIORITY: 11161 index = un->un_priority_chain_type; 11162 break; 11163 default: 11164 /* We're really broken if we ever get here... */ 11165 panic("sd_xbuf_init: illegal chain type!"); 11166 /*NOTREACHED*/ 11167 } 11168 11169 xp->xb_chain_iostart = sd_chain_index_map[index].sci_iostart_index; 11170 xp->xb_chain_iodone = sd_chain_index_map[index].sci_iodone_index; 11171 11172 /* 11173 * It might be a bit easier to simply bzero the entire xbuf above, 11174 * but it turns out that since we init a fair number of members anyway, 11175 * we save a fair number of cycles by doing explicit assignments of zero.
11176 */ 11177 xp->xb_pkt_flags = 0; 11178 xp->xb_dma_resid = 0; 11179 xp->xb_retry_count = 0; 11180 xp->xb_victim_retry_count = 0; 11181 xp->xb_ua_retry_count = 0; 11182 xp->xb_nr_retry_count = 0; 11183 xp->xb_sense_bp = NULL; 11184 xp->xb_sense_status = 0; 11185 xp->xb_sense_state = 0; 11186 xp->xb_sense_resid = 0; 11187 xp->xb_ena = 0; 11188 11189 bp->b_private = xp; 11190 bp->b_flags &= ~(B_DONE | B_ERROR); 11191 bp->b_resid = 0; 11192 bp->av_forw = NULL; 11193 bp->av_back = NULL; 11194 bioerror(bp, 0); 11195 11196 SD_INFO(SD_LOG_IO, un, "sd_xbuf_init: done.\n"); 11197 } 11198 11199 11200 /* 11201 * Function: sd_uscsi_strategy 11202 * 11203 * Description: Wrapper for calling into the USCSI chain via physio(9F) 11204 * 11205 * Arguments: bp - buf struct ptr 11206 * 11207 * Return Code: Always returns 0 11208 * 11209 * Context: Kernel thread context 11210 */ 11211 11212 static int 11213 sd_uscsi_strategy(struct buf *bp) 11214 { 11215 struct sd_lun *un; 11216 struct sd_uscsi_info *uip; 11217 struct sd_xbuf *xp; 11218 uchar_t chain_type; 11219 uchar_t cmd; 11220 11221 ASSERT(bp != NULL); 11222 11223 un = ddi_get_soft_state(sd_state, SD_GET_INSTANCE_FROM_BUF(bp)); 11224 if (un == NULL) { 11225 bioerror(bp, EIO); 11226 bp->b_resid = bp->b_bcount; 11227 biodone(bp); 11228 return (0); 11229 } 11230 11231 ASSERT(!mutex_owned(SD_MUTEX(un))); 11232 11233 SD_TRACE(SD_LOG_IO, un, "sd_uscsi_strategy: entry: buf:0x%p\n", bp); 11234 11235 /* 11236 * A pointer to a struct sd_uscsi_info is expected in bp->b_private 11237 */ 11238 ASSERT(bp->b_private != NULL); 11239 uip = (struct sd_uscsi_info *)bp->b_private; 11240 cmd = ((struct uscsi_cmd *)(uip->ui_cmdp))->uscsi_cdb[0]; 11241 11242 mutex_enter(SD_MUTEX(un)); 11243 /* 11244 * atapi: Since we are running the CD in PIO mode for now, we need to 11245 * call bp_mapin here to avoid bp_mapin being called in interrupt 11246 * context under the HBA's init_pkt routine. 11247 */ 11248 if (un->un_f_cfg_is_atapi == TRUE) { 11249 mutex_exit(SD_MUTEX(un)); 11250 bp_mapin(bp); 11251 mutex_enter(SD_MUTEX(un)); 11252 } 11253 un->un_ncmds_in_driver++; 11254 SD_INFO(SD_LOG_IO, un, "sd_uscsi_strategy: un_ncmds_in_driver = %ld\n", 11255 un->un_ncmds_in_driver); 11256 11257 if ((bp->b_flags & B_WRITE) && (bp->b_bcount != 0) && 11258 (cmd != SCMD_MODE_SELECT) && (cmd != SCMD_MODE_SELECT_G1)) 11259 un->un_f_sync_cache_required = TRUE; 11260 11261 mutex_exit(SD_MUTEX(un)); 11262 11263 switch (uip->ui_flags) { 11264 case SD_PATH_DIRECT: 11265 chain_type = SD_CHAIN_DIRECT; 11266 break; 11267 case SD_PATH_DIRECT_PRIORITY: 11268 chain_type = SD_CHAIN_DIRECT_PRIORITY; 11269 break; 11270 default: 11271 chain_type = SD_CHAIN_USCSI; 11272 break; 11273 } 11274 11275 /* 11276 * We may allocate an extra buf for external USCSI commands. If the 11277 * application asks for more than 20 bytes of sense data via USCSI, 11278 * the SCSA layer will allocate a 252-byte sense buf for that command.
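 *
 * As a hedged illustration (a sketch, not driver code), a caller
 * requesting such extended sense through USCSI might set up its
 * uscsi_cmd roughly as follows; the values are examples only:
 *
 *	struct uscsi_cmd ucmd;
 *	char rqbuf[MAX_SENSE_LENGTH];		(252 bytes)
 *
 *	bzero(&ucmd, sizeof (ucmd));
 *	ucmd.uscsi_flags = USCSI_READ | USCSI_RQENABLE;
 *	ucmd.uscsi_rqbuf = rqbuf;
 *	ucmd.uscsi_rqlen = sizeof (rqbuf);	(> SENSE_LENGTH, ie > 20)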
11279 */ 11280 if (((struct uscsi_cmd *)(uip->ui_cmdp))->uscsi_rqlen > 11281 SENSE_LENGTH) { 11282 xp = kmem_zalloc(sizeof (struct sd_xbuf) - SENSE_LENGTH + 11283 MAX_SENSE_LENGTH, KM_SLEEP); 11284 } else { 11285 xp = kmem_zalloc(sizeof (struct sd_xbuf), KM_SLEEP); 11286 } 11287 11288 sd_xbuf_init(un, bp, xp, chain_type, uip->ui_cmdp); 11289 11290 /* Use the index obtained within xbuf_init */ 11291 SD_BEGIN_IOSTART(xp->xb_chain_iostart, un, bp); 11292 11293 SD_TRACE(SD_LOG_IO, un, "sd_uscsi_strategy: exit: buf:0x%p\n", bp); 11294 11295 return (0); 11296 } 11297 11298 /* 11299 * Function: sd_send_scsi_cmd 11300 * 11301 * Description: Runs a USCSI command for user (when called through sdioctl), 11302 * or for the driver 11303 * 11304 * Arguments: dev - the dev_t for the device 11305 * incmd - ptr to a valid uscsi_cmd struct 11306 * flag - bit flag, indicating open settings, 32/64 bit type 11307 * dataspace - UIO_USERSPACE or UIO_SYSSPACE 11308 * path_flag - SD_PATH_DIRECT to use the USCSI "direct" chain and 11309 * the normal command waitq, or SD_PATH_DIRECT_PRIORITY 11310 * to use the USCSI "direct" chain and bypass the normal 11311 * command waitq. 11312 * 11313 * Return Code: 0 - successful completion of the given command 11314 * EIO - scsi_uscsi_handle_command() failed 11315 * ENXIO - soft state not found for specified dev 11316 * EINVAL 11317 * EFAULT - copyin/copyout error 11318 * return code of scsi_uscsi_handle_command(): 11319 * EIO 11320 * ENXIO 11321 * EACCES 11322 * 11323 * Context: Waits for command to complete. Can sleep. 11324 */ 11325 11326 static int 11327 sd_send_scsi_cmd(dev_t dev, struct uscsi_cmd *incmd, int flag, 11328 enum uio_seg dataspace, int path_flag) 11329 { 11330 struct sd_lun *un; 11331 sd_ssc_t *ssc; 11332 int rval; 11333 11334 un = ddi_get_soft_state(sd_state, SDUNIT(dev)); 11335 if (un == NULL) { 11336 return (ENXIO); 11337 } 11338 11339 /* 11340 * Use sd_ssc_send to handle the uscsi cmd 11341 */ 11342 ssc = sd_ssc_init(un); 11343 rval = sd_ssc_send(ssc, incmd, flag, dataspace, path_flag); 11344 sd_ssc_fini(ssc); 11345 11346 return (rval); 11347 } 11348 11349 /* 11350 * Function: sd_ssc_init 11351 * 11352 * Description: USCSI end users call this function to initialize the 11353 * necessary fields, such as the uscsi_cmd and sd_uscsi_info structs. 11354 * 11355 * The return value of sd_send_scsi_cmd will be treated as a 11356 * fault in various conditions. Even when it is not zero, some 11357 * callers may ignore the return value. That is to say, we 11358 * cannot make an accurate assessment in sdintr, since a 11359 * command failing in sdintr does not mean the caller of 11360 * sd_send_scsi_cmd will treat it as a real failure. 11361 * 11362 * To avoid printing too many error logs for a failed uscsi 11363 * packet that the caller may not treat as a failure, the 11364 * sd driver keeps silent while handling all uscsi commands. 11365 * 11366 * During detach->attach and attach-open, for some types of 11367 * problems, the driver should provide information about 11368 * the problem encountered. Devices use USCSI_SILENT, which 11369 * suppresses all driver information. The result is that no 11370 * information about the problem is available. Being 11371 * completely silent during this time is inappropriate. The 11372 * driver needs a more selective filter than USCSI_SILENT, so 11373 * that information related to faults is provided.
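 *
 * As a hedged sketch (an illustration, not verbatim driver code) of
 * how a caller pairs these routines; error handling is elided:
 *
 *	sd_ssc_t	*ssc;
 *	int		rval;
 *
 *	ssc = sd_ssc_init(un);
 *	rval = sd_ssc_send(ssc, &ucmd, FKIOCTL, UIO_SYSSPACE,
 *	    SD_PATH_DIRECT);
 *	if (rval == 0)
 *		sd_ssc_assessment(ssc, SD_FMT_STANDARD);
 *	else
 *		sd_ssc_assessment(ssc, SD_FMT_STATUS_CHECK);
 *	sd_ssc_fini(ssc);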
11374 * 11375 * To make an accurate assessment, the caller of 11376 * sd_send_scsi_USCSI_CMD should take ownership and 11377 * gather the necessary information to print error messages. 11378 * 11379 * If we want to print the necessary info for a uscsi command, we need to 11380 * keep the uscsi_cmd and sd_uscsi_info until we can make the 11381 * assessment. We use sd_ssc_init to allocate the necessary 11382 * structs for sending a uscsi command and we are also 11383 * responsible for freeing the memory by calling 11384 * sd_ssc_fini. 11385 * 11386 * The calling sequence will look like: 11387 * sd_ssc_init-> 11388 * 11389 * ... 11390 * 11391 * sd_send_scsi_USCSI_CMD-> 11392 * sd_ssc_send-> - - - sdintr 11393 * ... 11394 * 11395 * if we think the return value should be treated as a 11396 * failure, we make the assessment here and print out the 11397 * necessary information by retrieving the uscsi_cmd and sd_uscsi_info 11398 * 11399 * ... 11400 * 11401 * sd_ssc_fini 11402 * 11403 * 11404 * Arguments: un - pointer to driver soft state (unit) structure for this 11405 * target. 11406 * 11407 * Return code: sd_ssc_t - pointer to allocated sd_ssc_t struct, it contains 11408 * uscsi_cmd and sd_uscsi_info. 11409 * NULL - if memory for the sd_ssc_t struct cannot be allocated 11410 * 11411 * Context: Kernel Thread. 11412 */ 11413 static sd_ssc_t * 11414 sd_ssc_init(struct sd_lun *un) 11415 { 11416 sd_ssc_t *ssc; 11417 struct uscsi_cmd *ucmdp; 11418 struct sd_uscsi_info *uip; 11419 11420 ASSERT(un != NULL); 11421 ASSERT(!mutex_owned(SD_MUTEX(un))); 11422 11423 /* 11424 * Allocate sd_ssc_t structure 11425 */ 11426 ssc = kmem_zalloc(sizeof (sd_ssc_t), KM_SLEEP); 11427 11428 /* 11429 * Allocate uscsi_cmd by calling scsi_uscsi_alloc common routine 11430 */ 11431 ucmdp = scsi_uscsi_alloc(); 11432 11433 /* 11434 * Allocate sd_uscsi_info structure 11435 */ 11436 uip = kmem_zalloc(sizeof (struct sd_uscsi_info), KM_SLEEP); 11437 11438 ssc->ssc_uscsi_cmd = ucmdp; 11439 ssc->ssc_uscsi_info = uip; 11440 ssc->ssc_un = un; 11441 11442 return (ssc); 11443 } 11444 11445 /* 11446 * Function: sd_ssc_fini 11447 * 11448 * Description: Free the sd_ssc_t struct and everything hanging off it. 11449 * 11450 * Arguments: ssc - struct pointer of sd_ssc_t. 11451 */ 11452 static void 11453 sd_ssc_fini(sd_ssc_t *ssc) 11454 { 11455 scsi_uscsi_free(ssc->ssc_uscsi_cmd); 11456 11457 if (ssc->ssc_uscsi_info != NULL) { 11458 kmem_free(ssc->ssc_uscsi_info, sizeof (struct sd_uscsi_info)); 11459 ssc->ssc_uscsi_info = NULL; 11460 } 11461 11462 kmem_free(ssc, sizeof (sd_ssc_t)); 11463 ssc = NULL; 11464 } 11465 11466 /* 11467 * Function: sd_ssc_send 11468 * 11469 * Description: Runs a USCSI command for user when called through sdioctl, 11470 * or for the driver. 11471 * 11472 * Arguments: ssc - ptr to the sd_ssc_t struct, which carries the 11473 * uscsi_cmd and sd_uscsi_info. 11474 * incmd - ptr to a valid uscsi_cmd struct 11475 * flag - bit flag, indicating open settings, 32/64 bit type 11476 * dataspace - UIO_USERSPACE or UIO_SYSSPACE 11477 * path_flag - SD_PATH_DIRECT to use the USCSI "direct" chain and 11478 * the normal command waitq, or SD_PATH_DIRECT_PRIORITY 11479 * to use the USCSI "direct" chain and bypass the normal 11480 * command waitq.
11481 * 11482 * Return Code: 0 - successful completion of the given command 11483 * EIO - scsi_uscsi_handle_command() failed 11484 * ENXIO - soft state not found for specified dev 11485 * EINVAL 11486 * EFAULT - copyin/copyout error 11487 * return code of scsi_uscsi_handle_command(): 11488 * EIO 11489 * ENXIO 11490 * EACCES 11491 * 11492 * Context: Kernel Thread; 11493 * Waits for command to complete. Can sleep. 11494 */ 11495 static int 11496 sd_ssc_send(sd_ssc_t *ssc, struct uscsi_cmd *incmd, int flag, 11497 enum uio_seg dataspace, int path_flag) 11498 { 11499 struct sd_uscsi_info *uip; 11500 struct uscsi_cmd *uscmd; 11501 struct sd_lun *un; 11502 dev_t dev; 11503 11504 int format = 0; 11505 int rval; 11506 11507 ASSERT(ssc != NULL); 11508 un = ssc->ssc_un; 11509 ASSERT(un != NULL); 11510 uscmd = ssc->ssc_uscsi_cmd; 11511 ASSERT(uscmd != NULL); 11512 ASSERT(!mutex_owned(SD_MUTEX(un))); 11513 if (ssc->ssc_flags & SSC_FLAGS_NEED_ASSESSMENT) { 11514 /* 11515 * If we enter here, the previous uscsi 11516 * command has not been processed by sd_ssc_assessment. 11517 * This violates our rules for FMA telemetry processing, so 11518 * we print out this message along with the last undisposed 11519 * uscsi command. 11520 */ 11521 if (uscmd->uscsi_cdb != NULL) { 11522 SD_INFO(SD_LOG_SDTEST, un, 11523 "sd_ssc_send is missing the alternative " 11524 "sd_ssc_assessment when running command 0x%x.\n", 11525 uscmd->uscsi_cdb[0]); 11526 } 11527 /* 11528 * Set the ssc_flags to SSC_FLAGS_UNKNOWN, which should be 11529 * the initial status. 11530 */ 11531 ssc->ssc_flags = SSC_FLAGS_UNKNOWN; 11532 } 11533 11534 /* 11535 * We need to make sure sd_ssc_send is always followed by 11536 * sd_ssc_assessment to avoid missing FMA telemetries. 11537 */ 11538 ssc->ssc_flags |= SSC_FLAGS_NEED_ASSESSMENT; 11539 11540 #ifdef SDDEBUG 11541 switch (dataspace) { 11542 case UIO_USERSPACE: 11543 SD_TRACE(SD_LOG_IO, un, 11544 "sd_ssc_send: entry: un:0x%p UIO_USERSPACE\n", un); 11545 break; 11546 case UIO_SYSSPACE: 11547 SD_TRACE(SD_LOG_IO, un, 11548 "sd_ssc_send: entry: un:0x%p UIO_SYSSPACE\n", un); 11549 break; 11550 default: 11551 SD_TRACE(SD_LOG_IO, un, 11552 "sd_ssc_send: entry: un:0x%p UNEXPECTED SPACE\n", un); 11553 break; 11554 } 11555 #endif 11556 11557 rval = scsi_uscsi_copyin((intptr_t)incmd, flag, 11558 SD_ADDRESS(un), &uscmd); 11559 if (rval != 0) { 11560 SD_TRACE(SD_LOG_IO, un, "sd_ssc_send: " 11561 "scsi_uscsi_copyin failed\n"); 11562 return (rval); 11563 } 11564 11565 if ((uscmd->uscsi_cdb != NULL) && 11566 (uscmd->uscsi_cdb[0] == SCMD_FORMAT)) { 11567 mutex_enter(SD_MUTEX(un)); 11568 un->un_f_format_in_progress = TRUE; 11569 mutex_exit(SD_MUTEX(un)); 11570 format = 1; 11571 } 11572 11573 /* 11574 * Allocate an sd_uscsi_info struct and fill it with the info 11575 * needed by sd_initpkt_for_uscsi(). Then put the pointer into 11576 * b_private in the buf for sd_initpkt_for_uscsi(). Note that 11577 * since we allocate the buf here in this function, we do not 11578 * need to preserve the prior contents of b_private. 11579 * The sd_uscsi_info struct is also used by sd_uscsi_strategy() 11580 */ 11581 uip = ssc->ssc_uscsi_info; 11582 uip->ui_flags = path_flag; 11583 uip->ui_cmdp = uscmd; 11584 11585 /* 11586 * Commands sent with priority are intended for error recovery 11587 * situations, and do not have retries performed.
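 *
 * (Illustrative note: the USCSI_DIAGNOSE flag set just below is what
 * implements this no-retry behavior, and sd_ssc_assessment() likewise
 * declines to post ereports for such non-retryable commands.)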
11588 */ 11589 if (path_flag == SD_PATH_DIRECT_PRIORITY) { 11590 uscmd->uscsi_flags |= USCSI_DIAGNOSE; 11591 } 11592 uscmd->uscsi_flags &= ~USCSI_NOINTR; 11593 11594 dev = SD_GET_DEV(un); 11595 rval = scsi_uscsi_handle_cmd(dev, dataspace, uscmd, 11596 sd_uscsi_strategy, NULL, uip); 11597 11598 /* 11599 * Mark ssc_flags right after handle_cmd to make sure 11600 * the uscsi command has been sent 11601 */ 11602 ssc->ssc_flags |= SSC_FLAGS_CMD_ISSUED; 11603 11604 #ifdef SDDEBUG 11605 SD_INFO(SD_LOG_IO, un, "sd_ssc_send: " 11606 "uscsi_status: 0x%02x uscsi_resid:0x%x\n", 11607 uscmd->uscsi_status, uscmd->uscsi_resid); 11608 if (uscmd->uscsi_bufaddr != NULL) { 11609 SD_INFO(SD_LOG_IO, un, "sd_ssc_send: " 11610 "uscmd->uscsi_bufaddr: 0x%p uscmd->uscsi_buflen:%d\n", 11611 uscmd->uscsi_bufaddr, uscmd->uscsi_buflen); 11612 if (dataspace == UIO_SYSSPACE) { 11613 SD_DUMP_MEMORY(un, SD_LOG_IO, 11614 "data", (uchar_t *)uscmd->uscsi_bufaddr, 11615 uscmd->uscsi_buflen, SD_LOG_HEX); 11616 } 11617 } 11618 #endif 11619 11620 if (format == 1) { 11621 mutex_enter(SD_MUTEX(un)); 11622 un->un_f_format_in_progress = FALSE; 11623 mutex_exit(SD_MUTEX(un)); 11624 } 11625 11626 (void) scsi_uscsi_copyout((intptr_t)incmd, uscmd); 11627 11628 return (rval); 11629 } 11630 11631 /* 11632 * Function: sd_ssc_print 11633 * 11634 * Description: Print information available to the console. 11635 * 11636 * Arguments: ssc - ptr to the sd_ssc_t struct, which carries the 11637 * uscsi_cmd and sd_uscsi_info. 11638 * sd_severity - log level. 11639 * Context: Kernel thread or interrupt context. 11640 */ 11641 static void 11642 sd_ssc_print(sd_ssc_t *ssc, int sd_severity) 11643 { 11644 struct uscsi_cmd *ucmdp; 11645 struct scsi_device *devp; 11646 dev_info_t *devinfo; 11647 uchar_t *sensep; 11648 int senlen; 11649 union scsi_cdb *cdbp; 11650 uchar_t com; 11651 extern struct scsi_key_strings scsi_cmds[]; 11652 11653 ASSERT(ssc != NULL); 11654 ASSERT(ssc->ssc_un != NULL); 11655 11656 if (SD_FM_LOG(ssc->ssc_un) != SD_FM_LOG_EREPORT) 11657 return; 11658 ucmdp = ssc->ssc_uscsi_cmd; 11659 devp = SD_SCSI_DEVP(ssc->ssc_un); 11660 devinfo = SD_DEVINFO(ssc->ssc_un); 11661 ASSERT(ucmdp != NULL); 11662 ASSERT(devp != NULL); 11663 ASSERT(devinfo != NULL); 11664 sensep = (uint8_t *)ucmdp->uscsi_rqbuf; 11665 senlen = ucmdp->uscsi_rqlen - ucmdp->uscsi_rqresid; 11666 cdbp = (union scsi_cdb *)ucmdp->uscsi_cdb; 11667 11668 /* In certain cases (like DOORLOCK), the cdb could be NULL. */ 11669 if (cdbp == NULL) 11670 return; 11671 /* We don't print a log if no sense data is available. */ 11672 if (senlen == 0) 11673 sensep = NULL; 11674 com = cdbp->scc_cmd; 11675 scsi_generic_errmsg(devp, sd_label, sd_severity, 0, 0, com, 11676 scsi_cmds, sensep, ssc->ssc_un->un_additional_codes, NULL); 11677 } 11678 11679 /* 11680 * Function: sd_ssc_assessment 11681 * 11682 * Description: We use this function to make an assessment at the point 11683 * where the SD driver may encounter a potential error. 11684 * 11685 * Arguments: ssc - ptr to the sd_ssc_t struct, which carries the 11686 * uscsi_cmd and sd_uscsi_info. 11687 * tp_assess - a hint of the strategy for ereport posting. 11688 * Possible values of tp_assess include: 11689 * SD_FMT_IGNORE - we don't post any ereport because we're 11690 * sure that it is ok to ignore the underlying problems. 11691 * SD_FMT_IGNORE_COMPROMISE - we don't post any ereport for now 11692 * but it might not be correct to ignore the underlying hardware 11693 * error.
11694 * SD_FMT_STATUS_CHECK - we will post an ereport with the 11695 * payload driver-assessment of value "fail" or 11696 * "fatal" (depending on what information we have here). This 11697 * assessment value is usually set when the SD driver thinks 11698 * a potential error has occurred (typically, when the return 11699 * value of the SCSI command is EIO). 11700 * SD_FMT_STANDARD - we will post an ereport with the payload 11701 * driver-assessment of value "info". This assessment value is 11702 * set when the SCSI command returned successfully and with 11703 * sense data sent back. 11704 * 11705 * Context: Kernel thread. 11706 */ 11707 static void 11708 sd_ssc_assessment(sd_ssc_t *ssc, enum sd_type_assessment tp_assess) 11709 { 11710 int senlen = 0; 11711 struct uscsi_cmd *ucmdp = NULL; 11712 struct sd_lun *un; 11713 11714 ASSERT(ssc != NULL); 11715 un = ssc->ssc_un; 11716 ASSERT(un != NULL); 11717 ucmdp = ssc->ssc_uscsi_cmd; 11718 ASSERT(ucmdp != NULL); 11719 11720 if (ssc->ssc_flags & SSC_FLAGS_NEED_ASSESSMENT) { 11721 ssc->ssc_flags &= ~SSC_FLAGS_NEED_ASSESSMENT; 11722 } else { 11723 /* 11724 * If we enter here, it indicates a wrong calling sequence: 11725 * sd_ssc_send and sd_ssc_assessment should be 11726 * called in pairs, otherwise FMA 11727 * telemetries will be lost. 11728 */ 11729 if (ucmdp->uscsi_cdb != NULL) { 11730 SD_INFO(SD_LOG_SDTEST, un, 11731 "sd_ssc_assessment is missing the " 11732 "alternative sd_ssc_send when running 0x%x, " 11733 "or there were superfluous sd_ssc_assessment calls " 11734 "for the same sd_ssc_send.\n", 11735 ucmdp->uscsi_cdb[0]); 11736 } 11737 /* 11738 * Set the ssc_flags to the initial value to avoid passing 11739 * down dirty flags to the following sd_ssc_send function. 11740 */ 11741 ssc->ssc_flags = SSC_FLAGS_UNKNOWN; 11742 return; 11743 } 11744 11745 /* 11746 * Only handle an issued command which is waiting for assessment. 11747 * A command which is not issued will not have 11748 * SSC_FLAGS_INVALID_DATA set, so it's OK to just return here. 11749 */ 11750 if (!(ssc->ssc_flags & SSC_FLAGS_CMD_ISSUED)) { 11751 sd_ssc_print(ssc, SCSI_ERR_INFO); 11752 return; 11753 } else { 11754 /* 11755 * For an issued command, we should clear this flag in 11756 * order to allow the sd_ssc_t structure to be reused 11757 * across multiple uscsi commands. 11758 */ 11759 ssc->ssc_flags &= ~SSC_FLAGS_CMD_ISSUED; 11760 } 11761 11762 /* 11763 * We will not deal with non-retryable commands (flag USCSI_DIAGNOSE 11764 * set) here; we should clear the ssc_flags before returning. 11765 */ 11766 if (ucmdp->uscsi_flags & USCSI_DIAGNOSE) { 11767 ssc->ssc_flags = SSC_FLAGS_UNKNOWN; 11768 return; 11769 } 11770 11771 switch (tp_assess) { 11772 case SD_FMT_IGNORE: 11773 case SD_FMT_IGNORE_COMPROMISE: 11774 break; 11775 case SD_FMT_STATUS_CHECK: 11776 /* 11777 * For a failed command (including a succeeded command 11778 * with invalid data sent back). 11779 */ 11780 sd_ssc_post(ssc, SD_FM_DRV_FATAL); 11781 break; 11782 case SD_FMT_STANDARD: 11783 /* 11784 * Always for succeeded commands, possibly with sense 11785 * data sent back. 11786 * Limitation: 11787 * We can only handle a succeeded command with sense 11788 * data sent back when auto-request-sense is enabled.
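 *
 * (Worked example, for illustration: with a uscsi_rqlen of 252 and
 * a returned uscsi_rqresid of 244, senlen below evaluates to 8
 * valid sense bytes, and an ereport is posted.)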
11789 */ 11790 senlen = ssc->ssc_uscsi_cmd->uscsi_rqlen - 11791 ssc->ssc_uscsi_cmd->uscsi_rqresid; 11792 if ((ssc->ssc_uscsi_info->ui_pkt_state & STATE_ARQ_DONE) && 11793 (un->un_f_arq_enabled == TRUE) && 11794 senlen > 0 && 11795 ssc->ssc_uscsi_cmd->uscsi_rqbuf != NULL) { 11796 sd_ssc_post(ssc, SD_FM_DRV_NOTICE); 11797 } 11798 break; 11799 default: 11800 /* 11801 * Should not have other type of assessment. 11802 */ 11803 scsi_log(SD_DEVINFO(un), sd_label, CE_CONT, 11804 "sd_ssc_assessment got wrong " 11805 "sd_type_assessment %d.\n", tp_assess); 11806 break; 11807 } 11808 /* 11809 * Clear up the ssc_flags before return. 11810 */ 11811 ssc->ssc_flags = SSC_FLAGS_UNKNOWN; 11812 } 11813 11814 /* 11815 * Function: sd_ssc_post 11816 * 11817 * Description: 1. Read the driver property to get the fm-scsi-log flag. 11818 * 2. Print a log if fm_log_capable is non-zero. 11819 * 3. Call sd_ssc_ereport_post to post an ereport if possible. 11820 * 11821 * Context: May be called from kernel thread or interrupt context. 11822 */ 11823 static void 11824 sd_ssc_post(sd_ssc_t *ssc, enum sd_driver_assessment sd_assess) 11825 { 11826 struct sd_lun *un; 11827 int sd_severity; 11828 11829 ASSERT(ssc != NULL); 11830 un = ssc->ssc_un; 11831 ASSERT(un != NULL); 11832 11833 /* 11834 * We may enter here from sd_ssc_assessment (for a USCSI command) or 11835 * directly from sdintr context. 11836 * We don't handle non-disk drives (CD-ROM, removable media). 11837 * Clear the ssc_flags before returning in case we've set 11838 * SSC_FLAGS_INVALID_XXX, which should be skipped for a non-disk 11839 * drive. 11840 */ 11841 if (ISCD(un) || un->un_f_has_removable_media) { 11842 ssc->ssc_flags = SSC_FLAGS_UNKNOWN; 11843 return; 11844 } 11845 11846 switch (sd_assess) { 11847 case SD_FM_DRV_FATAL: 11848 sd_severity = SCSI_ERR_FATAL; 11849 break; 11850 case SD_FM_DRV_RECOVERY: 11851 sd_severity = SCSI_ERR_RECOVERED; 11852 break; 11853 case SD_FM_DRV_RETRY: 11854 sd_severity = SCSI_ERR_RETRYABLE; 11855 break; 11856 case SD_FM_DRV_NOTICE: 11857 sd_severity = SCSI_ERR_INFO; 11858 break; 11859 default: 11860 sd_severity = SCSI_ERR_UNKNOWN; 11861 } 11862 /* print log */ 11863 sd_ssc_print(ssc, sd_severity); 11864 11865 /* always post ereport */ 11866 sd_ssc_ereport_post(ssc, sd_assess); 11867 } 11868 11869 /* 11870 * Function: sd_ssc_set_info 11871 * 11872 * Description: Mark ssc_flags and set ssc_info, which will be the 11873 * payload of a uderr ereport. This function will cause 11874 * sd_ssc_ereport_post to post a uderr ereport only. 11875 * Besides, when ssc_flags == SSC_FLAGS_INVALID_DATA (USCSI), 11876 * the function will also call SD_ERROR or scsi_log for a 11877 * CDROM/removable-media/DDI_FM_NOT_CAPABLE device. 11878 * 11879 * Arguments: ssc - ptr to the sd_ssc_t struct, which carries the 11880 * uscsi_cmd and sd_uscsi_info. 11881 * ssc_flags - indicate the sub-category of a uderr. 11882 * comp - this argument is meaningful only when 11883 * ssc_flags == SSC_FLAGS_INVALID_DATA, and its possible 11884 * values include: 11885 * > 0, SD_ERROR is used with comp as the driver logging 11886 * component; 11887 * = 0, scsi-log is used to log error telemetries; 11888 * < 0, no log available for this telemetry. 11889 * 11890 * Context: Kernel thread or interrupt context 11891 */ 11892 static void 11893 sd_ssc_set_info(sd_ssc_t *ssc, int ssc_flags, uint_t comp, const char *fmt, ...)
11894 { 11895 va_list ap; 11896 11897 ASSERT(ssc != NULL); 11898 ASSERT(ssc->ssc_un != NULL); 11899 11900 ssc->ssc_flags |= ssc_flags; 11901 va_start(ap, fmt); 11902 (void) vsnprintf(ssc->ssc_info, sizeof (ssc->ssc_info), fmt, ap); 11903 va_end(ap); 11904 11905 /* 11906 * If SSC_FLAGS_INVALID_DATA is set, it should be a uscsi command 11907 * with invalid data sent back. For non-uscsi commands, the 11908 * following code will be bypassed. 11909 */ 11910 if (ssc_flags & SSC_FLAGS_INVALID_DATA) { 11911 if (SD_FM_LOG(ssc->ssc_un) == SD_FM_LOG_NSUP) { 11912 /* 11913 * If the error belongs to a certain component and we 11914 * do not want it to show up on the console, we 11915 * will use SD_ERROR; otherwise scsi_log is 11916 * preferred. 11917 */ 11918 if (comp > 0) { 11919 SD_ERROR(comp, ssc->ssc_un, ssc->ssc_info); 11920 } else if (comp == 0) { 11921 scsi_log(SD_DEVINFO(ssc->ssc_un), sd_label, 11922 CE_WARN, ssc->ssc_info); 11923 } 11924 } 11925 } 11926 } 11927 11928 /* 11929 * Function: sd_buf_iodone 11930 * 11931 * Description: Frees the sd_xbuf & returns the buf to its originator. 11932 * 11933 * Context: May be called from interrupt context. 11934 */ 11935 /* ARGSUSED */ 11936 static void 11937 sd_buf_iodone(int index, struct sd_lun *un, struct buf *bp) 11938 { 11939 struct sd_xbuf *xp; 11940 11941 ASSERT(un != NULL); 11942 ASSERT(bp != NULL); 11943 ASSERT(!mutex_owned(SD_MUTEX(un))); 11944 11945 SD_TRACE(SD_LOG_IO_CORE, un, "sd_buf_iodone: entry.\n"); 11946 11947 xp = SD_GET_XBUF(bp); 11948 ASSERT(xp != NULL); 11949 11950 /* xbuf is gone after this */ 11951 if (ddi_xbuf_done(bp, un->un_xbuf_attr)) { 11952 mutex_enter(SD_MUTEX(un)); 11953 11954 /* 11955 * Grab the time when the cmd completed. 11956 * This is used for determining if the system has been 11957 * idle long enough to make the device idle to the PM 11958 * framework. This lowers the overhead, and therefore 11959 * improves performance per I/O operation. 11960 */ 11961 un->un_pm_idle_time = ddi_get_time(); 11962 11963 un->un_ncmds_in_driver--; 11964 ASSERT(un->un_ncmds_in_driver >= 0); 11965 SD_INFO(SD_LOG_IO, un, 11966 "sd_buf_iodone: un_ncmds_in_driver = %ld\n", 11967 un->un_ncmds_in_driver); 11968 11969 mutex_exit(SD_MUTEX(un)); 11970 } 11971 11972 biodone(bp); /* bp is gone after this */ 11973 11974 SD_TRACE(SD_LOG_IO_CORE, un, "sd_buf_iodone: exit.\n"); 11975 } 11976 11977 11978 /* 11979 * Function: sd_uscsi_iodone 11980 * 11981 * Description: Frees the sd_xbuf & returns the buf to its originator. 11982 * 11983 * Context: May be called from interrupt context. 11984 */ 11985 /* ARGSUSED */ 11986 static void 11987 sd_uscsi_iodone(int index, struct sd_lun *un, struct buf *bp) 11988 { 11989 struct sd_xbuf *xp; 11990 11991 ASSERT(un != NULL); 11992 ASSERT(bp != NULL); 11993 11994 xp = SD_GET_XBUF(bp); 11995 ASSERT(xp != NULL); 11996 ASSERT(!mutex_owned(SD_MUTEX(un))); 11997 11998 SD_INFO(SD_LOG_IO, un, "sd_uscsi_iodone: entry.\n"); 11999 12000 bp->b_private = xp->xb_private; 12001 12002 mutex_enter(SD_MUTEX(un)); 12003 12004 /* 12005 * Grab the time when the cmd completed. 12006 * This is used for determining if the system has been 12007 * idle long enough to make the device idle to the PM 12008 * framework. This lowers the overhead, and therefore 12009 * improves performance per I/O operation.
12010 */ 12011 un->un_pm_idle_time = ddi_get_time(); 12012 12013 un->un_ncmds_in_driver--; 12014 ASSERT(un->un_ncmds_in_driver >= 0); 12015 SD_INFO(SD_LOG_IO, un, "sd_uscsi_iodone: un_ncmds_in_driver = %ld\n", 12016 un->un_ncmds_in_driver); 12017 12018 mutex_exit(SD_MUTEX(un)); 12019 12020 if (((struct uscsi_cmd *)(xp->xb_pktinfo))->uscsi_rqlen > 12021 SENSE_LENGTH) { 12022 kmem_free(xp, sizeof (struct sd_xbuf) - SENSE_LENGTH + 12023 MAX_SENSE_LENGTH); 12024 } else { 12025 kmem_free(xp, sizeof (struct sd_xbuf)); 12026 } 12027 12028 biodone(bp); 12029 12030 SD_INFO(SD_LOG_IO, un, "sd_uscsi_iodone: exit.\n"); 12031 } 12032 12033 12034 /* 12035 * Function: sd_mapblockaddr_iostart 12036 * 12037 * Description: Verify request lies within the partition limits for 12038 * the indicated minor device. Issue "overrun" buf if 12039 * request would exceed partition range. Converts 12040 * partition-relative block address to absolute. 12041 * 12042 * Context: Can sleep 12043 * 12044 * Issues: This follows what the old code did, in terms of accessing 12045 * some of the partition info in the unit struct without holding 12046 * the mutex. This is a general issue: if the partition info 12047 * can be altered while IO is in progress, then as soon as we send 12048 * a buf, its partitioning can be invalid before it gets to the 12049 * device. Probably the right fix is to move partitioning out 12050 * of the driver entirely. 12051 */ 12052 12053 static void 12054 sd_mapblockaddr_iostart(int index, struct sd_lun *un, struct buf *bp) 12055 { 12056 diskaddr_t nblocks; /* #blocks in the given partition */ 12057 daddr_t blocknum; /* Block number specified by the buf */ 12058 size_t requested_nblocks; 12059 size_t available_nblocks; 12060 int partition; 12061 diskaddr_t partition_offset; 12062 struct sd_xbuf *xp; 12063 12064 ASSERT(un != NULL); 12065 ASSERT(bp != NULL); 12066 ASSERT(!mutex_owned(SD_MUTEX(un))); 12067 12068 SD_TRACE(SD_LOG_IO_PARTITION, un, 12069 "sd_mapblockaddr_iostart: entry: buf:0x%p\n", bp); 12070 12071 xp = SD_GET_XBUF(bp); 12072 ASSERT(xp != NULL); 12073 12074 /* 12075 * If the geometry is not indicated as valid, attempt to access 12076 * the unit & verify the geometry/label. This can be the case for 12077 * removable-media devices, or if the device was opened in 12078 * NDELAY/NONBLOCK mode. 12079 */ 12080 partition = SDPART(bp->b_edev); 12081 12082 if (!SD_IS_VALID_LABEL(un)) { 12083 sd_ssc_t *ssc; 12084 /* 12085 * Initialize sd_ssc_t for internal uscsi commands. 12086 * To avoid a potential performance issue, we allocate 12087 * memory only if the label is invalid. 12088 */ 12089 ssc = sd_ssc_init(un); 12090 12091 if (sd_ready_and_valid(ssc, partition) != SD_READY_VALID) { 12092 /* 12093 * For removable devices it is possible to start an 12094 * I/O without media by opening the device in nodelay 12095 * mode. Also for writable CDs there can be many 12096 * scenarios where there is no geometry yet but the volume 12097 * manager is trying to issue a read() just because 12098 * it can see a TOC on the CD. So do not print a message 12099 * for removables.
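 *
 * (Sketch of such a nodelay open from user land, for illustration
 * only; the device path is hypothetical:
 *
 *	fd = open("/dev/rdsk/c0t0d0s2", O_RDONLY | O_NDELAY);
 *
 * this succeeds even with no media present, and a subsequent read()
 * arrives here before any geometry exists.)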
12100 */ 12101 if (!un->un_f_has_removable_media) { 12102 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 12103 "i/o to invalid geometry\n"); 12104 } 12105 bioerror(bp, EIO); 12106 bp->b_resid = bp->b_bcount; 12107 SD_BEGIN_IODONE(index, un, bp); 12108 12109 sd_ssc_fini(ssc); 12110 return; 12111 } 12112 sd_ssc_fini(ssc); 12113 } 12114 12115 nblocks = 0; 12116 (void) cmlb_partinfo(un->un_cmlbhandle, partition, 12117 &nblocks, &partition_offset, NULL, NULL, (void *)SD_PATH_DIRECT); 12118 12119 /* 12120 * blocknum is the starting block number of the request. At this 12121 * point it is still relative to the start of the minor device. 12122 */ 12123 blocknum = xp->xb_blkno; 12124 12125 /* 12126 * Legacy: If the starting block number is one past the last block 12127 * in the partition, do not set B_ERROR in the buf. 12128 */ 12129 if (blocknum == nblocks) { 12130 goto error_exit; 12131 } 12132 12133 /* 12134 * Confirm that the first block of the request lies within the 12135 * partition limits. Also the requested number of bytes must be 12136 * a multiple of the system block size. 12137 */ 12138 if ((blocknum < 0) || (blocknum >= nblocks) || 12139 ((bp->b_bcount & (un->un_sys_blocksize - 1)) != 0)) { 12140 bp->b_flags |= B_ERROR; 12141 goto error_exit; 12142 } 12143 12144 /* 12145 * If the requested # blocks exceeds the available # blocks, that 12146 * is an overrun of the partition. 12147 */ 12148 requested_nblocks = SD_BYTES2SYSBLOCKS(un, bp->b_bcount); 12149 available_nblocks = (size_t)(nblocks - blocknum); 12150 ASSERT(nblocks >= blocknum); 12151 12152 if (requested_nblocks > available_nblocks) { 12153 /* 12154 * Allocate an "overrun" buf to allow the request to proceed 12155 * for the amount of space available in the partition. The 12156 * amount not transferred will be added into the b_resid 12157 * when the operation is complete. The overrun buf 12158 * replaces the original buf here, and the original buf 12159 * is saved inside the overrun buf, for later use. (Worked example, for illustration: in a 1000-block partition, a 4-block request starting at block 998 is clipped to a 2-block overrun buf, and the 2 untransferred system blocks show up in b_resid.) 12160 */ 12161 size_t resid = SD_SYSBLOCKS2BYTES(un, 12162 (offset_t)(requested_nblocks - available_nblocks)); 12163 size_t count = bp->b_bcount - resid; 12164 /* 12165 * Note: count is an unsigned entity, thus it can NEVER 12166 * be less than 0, so we ASSERT the original values are 12167 * correct. 12168 */ 12169 ASSERT(bp->b_bcount >= resid); 12170 12171 bp = sd_bioclone_alloc(bp, count, blocknum, 12172 (int (*)(struct buf *)) sd_mapblockaddr_iodone); 12173 xp = SD_GET_XBUF(bp); /* Update for 'new' bp! */ 12174 ASSERT(xp != NULL); 12175 } 12176 12177 /* At this point there should be no residual for this buf. */ 12178 ASSERT(bp->b_resid == 0); 12179 12180 /* Convert the block number to an absolute address. */ 12181 xp->xb_blkno += partition_offset; 12182 12183 SD_NEXT_IOSTART(index, un, bp); 12184 12185 SD_TRACE(SD_LOG_IO_PARTITION, un, 12186 "sd_mapblockaddr_iostart: exit 0: buf:0x%p\n", bp); 12187 12188 return; 12189 12190 error_exit: 12191 bp->b_resid = bp->b_bcount; 12192 SD_BEGIN_IODONE(index, un, bp); 12193 SD_TRACE(SD_LOG_IO_PARTITION, un, 12194 "sd_mapblockaddr_iostart: exit 1: buf:0x%p\n", bp); 12195 } 12196 12197 12198 /* 12199 * Function: sd_mapblockaddr_iodone 12200 * 12201 * Description: Completion-side processing for partition management. 12202 * 12203 * Context: May be called under interrupt context 12204 */ 12205 12206 static void 12207 sd_mapblockaddr_iodone(int index, struct sd_lun *un, struct buf *bp) 12208 { 12209 /* int partition; */ /* Not used, see below.
*/ 12210 ASSERT(un != NULL); 12211 ASSERT(bp != NULL); 12212 ASSERT(!mutex_owned(SD_MUTEX(un))); 12213 12214 SD_TRACE(SD_LOG_IO_PARTITION, un, 12215 "sd_mapblockaddr_iodone: entry: buf:0x%p\n", bp); 12216 12217 if (bp->b_iodone == (int (*)(struct buf *)) sd_mapblockaddr_iodone) { 12218 /* 12219 * We have an "overrun" buf to deal with... 12220 */ 12221 struct sd_xbuf *xp; 12222 struct buf *obp; /* ptr to the original buf */ 12223 12224 xp = SD_GET_XBUF(bp); 12225 ASSERT(xp != NULL); 12226 12227 /* Retrieve the pointer to the original buf */ 12228 obp = (struct buf *)xp->xb_private; 12229 ASSERT(obp != NULL); 12230 12231 obp->b_resid = obp->b_bcount - (bp->b_bcount - bp->b_resid); 12232 bioerror(obp, bp->b_error); 12233 12234 sd_bioclone_free(bp); 12235 12236 /* 12237 * Get back the original buf. 12238 * Note that since the restoration of xb_blkno below 12239 * was removed, the sd_xbuf is not needed. 12240 */ 12241 bp = obp; 12242 /* 12243 * xp = SD_GET_XBUF(bp); 12244 * ASSERT(xp != NULL); 12245 */ 12246 } 12247 12248 /* 12249 * Convert xp->xb_blkno back to a minor-device relative value. 12250 * Note: this has been commented out, as it is not needed in the 12251 * current implementation of the driver (ie, since this function 12252 * is at the top of the layering chains, the info will be 12253 * discarded) and it is in the "hot" IO path. 12254 * 12255 * partition = getminor(bp->b_edev) & SDPART_MASK; 12256 * xp->xb_blkno -= un->un_offset[partition]; 12257 */ 12258 12259 SD_NEXT_IODONE(index, un, bp); 12260 12261 SD_TRACE(SD_LOG_IO_PARTITION, un, 12262 "sd_mapblockaddr_iodone: exit: buf:0x%p\n", bp); 12263 } 12264 12265 12266 /* 12267 * Function: sd_mapblocksize_iostart 12268 * 12269 * Description: Convert between system block size (un->un_sys_blocksize) 12270 * and target block size (un->un_tgt_blocksize). 12271 * 12272 * Context: Can sleep to allocate resources. 12273 * 12274 * Assumptions: A higher layer has already performed any partition validation, 12275 * and converted the xp->xb_blkno to an absolute value relative 12276 * to the start of the device. 12277 * 12278 * It is also assumed that the higher layer has implemented 12279 * an "overrun" mechanism for the case where the request would 12280 * read/write beyond the end of a partition. In this case we 12281 * assume (and ASSERT) that bp->b_resid == 0. 12282 * 12283 * Note: The implementation for this routine assumes the target 12284 * block size remains constant between allocation and transport. 12285 */ 12286 12287 static void 12288 sd_mapblocksize_iostart(int index, struct sd_lun *un, struct buf *bp) 12289 { 12290 struct sd_mapblocksize_info *bsp; 12291 struct sd_xbuf *xp; 12292 offset_t first_byte; 12293 daddr_t start_block, end_block; 12294 daddr_t request_bytes; 12295 ushort_t is_aligned = FALSE; 12296 12297 ASSERT(un != NULL); 12298 ASSERT(bp != NULL); 12299 ASSERT(!mutex_owned(SD_MUTEX(un))); 12300 ASSERT(bp->b_resid == 0); 12301 12302 SD_TRACE(SD_LOG_IO_RMMEDIA, un, 12303 "sd_mapblocksize_iostart: entry: buf:0x%p\n", bp); 12304 12305 /* 12306 * For a non-writable CD, a write request is an error 12307 */ 12308 if (ISCD(un) && ((bp->b_flags & B_READ) == 0) && 12309 (un->un_f_mmc_writable_media == FALSE)) { 12310 bioerror(bp, EIO); 12311 bp->b_resid = bp->b_bcount; 12312 SD_BEGIN_IODONE(index, un, bp); 12313 return; 12314 } 12315 12316 /* 12317 * We do not need a shadow buf if the device is using 12318 * un->un_sys_blocksize as its block size or if bcount == 0.
12319 * In this case there is no layer-private data block allocated. 12320 */ 12321 if ((un->un_tgt_blocksize == un->un_sys_blocksize) || 12322 (bp->b_bcount == 0)) { 12323 goto done; 12324 } 12325 12326 #if defined(__i386) || defined(__amd64) 12327 /* We do not support non-block-aligned transfers for ROD devices */ 12328 ASSERT(!ISROD(un)); 12329 #endif 12330 12331 xp = SD_GET_XBUF(bp); 12332 ASSERT(xp != NULL); 12333 12334 SD_INFO(SD_LOG_IO_RMMEDIA, un, "sd_mapblocksize_iostart: " 12335 "tgt_blocksize:0x%x sys_blocksize: 0x%x\n", 12336 un->un_tgt_blocksize, un->un_sys_blocksize); 12337 SD_INFO(SD_LOG_IO_RMMEDIA, un, "sd_mapblocksize_iostart: " 12338 "request start block:0x%x\n", xp->xb_blkno); 12339 SD_INFO(SD_LOG_IO_RMMEDIA, un, "sd_mapblocksize_iostart: " 12340 "request len:0x%x\n", bp->b_bcount); 12341 12342 /* 12343 * Allocate the layer-private data area for the mapblocksize layer. 12344 * Layers are allowed to use the xp_private member of the sd_xbuf 12345 * struct to store the pointer to their layer-private data block, but 12346 * each layer also has the responsibility of restoring the prior 12347 * contents of xb_private before returning the buf/xbuf to the 12348 * higher layer that sent it. 12349 * 12350 * Here we save the prior contents of xp->xb_private into the 12351 * bsp->mbs_oprivate field of our layer-private data area. This value 12352 * is restored by sd_mapblocksize_iodone() just prior to freeing up 12353 * the layer-private area and returning the buf/xbuf to the layer 12354 * that sent it. 12355 * 12356 * Note that here we use kmem_zalloc for the allocation as there are 12357 * parts of the mapblocksize code that expect certain fields to be 12358 * zero unless explicitly set to a required value. 12359 */ 12360 bsp = kmem_zalloc(sizeof (struct sd_mapblocksize_info), KM_SLEEP); 12361 bsp->mbs_oprivate = xp->xb_private; 12362 xp->xb_private = bsp; 12363 12364 /* 12365 * This treats the data on the disk (target) as an array of bytes. 12366 * first_byte is the byte offset, from the beginning of the device, 12367 * to the location of the request. This is converted from a 12368 * un->un_sys_blocksize block address to a byte offset, and then back 12369 * to a block address based upon a un->un_tgt_blocksize block size. 12370 * 12371 * xp->xb_blkno should be absolute upon entry into this function, 12372 * but it is based upon partitions that use the "system" 12373 * block size. It must be adjusted to reflect the block size of 12374 * the target. 12375 * 12376 * Note that end_block is actually the block that follows the last 12377 * block of the request, but that's what is needed for the computation. (Worked example, for illustration: with un_sys_blocksize 512, un_tgt_blocksize 2048, xb_blkno 3, and b_bcount 1024, first_byte is 1536, start_block is 0, end_block is 2, and request_bytes is 4096; the request is unaligned, with a copy_offset of 1536.) 12378 */ 12379 first_byte = SD_SYSBLOCKS2BYTES(un, (offset_t)xp->xb_blkno); 12380 start_block = xp->xb_blkno = first_byte / un->un_tgt_blocksize; 12381 end_block = (first_byte + bp->b_bcount + un->un_tgt_blocksize - 1) / 12382 un->un_tgt_blocksize; 12383 12384 /* request_bytes is rounded up to a multiple of the target block size */ 12385 request_bytes = (end_block - start_block) * un->un_tgt_blocksize; 12386 12387 /* 12388 * See if the starting address of the request and the request 12389 * length are aligned on a un->un_tgt_blocksize boundary. If aligned 12390 * then we do not need to allocate a shadow buf to handle the request. 12391 */ 12392 if (((first_byte % un->un_tgt_blocksize) == 0) && 12393 ((bp->b_bcount % un->un_tgt_blocksize) == 0)) { 12394 is_aligned = TRUE; 12395 } 12396 12397 if ((bp->b_flags & B_READ) == 0) { 12398 /* 12399 * Lock the range for a write operation.
An aligned request is 12400 * considered a simple write; otherwise the request must be a 12401 * read-modify-write. 12402 */ 12403 bsp->mbs_wmp = sd_range_lock(un, start_block, end_block - 1, 12404 (is_aligned == TRUE) ? SD_WTYPE_SIMPLE : SD_WTYPE_RMW); 12405 } 12406 12407 /* 12408 * Alloc a shadow buf if the request is not aligned. Also, this is 12409 * where the READ command is generated for a read-modify-write. (The 12410 * write phase is deferred until after the read completes.) 12411 */ 12412 if (is_aligned == FALSE) { 12413 12414 struct sd_mapblocksize_info *shadow_bsp; 12415 struct sd_xbuf *shadow_xp; 12416 struct buf *shadow_bp; 12417 12418 /* 12419 * Allocate the shadow buf and its associated xbuf. Note that 12420 * after this call the xb_blkno value in both the original 12421 * buf's sd_xbuf _and_ the shadow buf's sd_xbuf will be the 12422 * same: absolute relative to the start of the device, and 12423 * adjusted for the target block size. The b_blkno in the 12424 * shadow buf will also be set to this value. We should never 12425 * change b_blkno in the original bp however. 12426 * 12427 * Note also that the shadow buf will always need to be a 12428 * READ command, regardless of whether the incoming command 12429 * is a READ or a WRITE. 12430 */ 12431 shadow_bp = sd_shadow_buf_alloc(bp, request_bytes, B_READ, 12432 xp->xb_blkno, 12433 (int (*)(struct buf *)) sd_mapblocksize_iodone); 12434 12435 shadow_xp = SD_GET_XBUF(shadow_bp); 12436 12437 /* 12438 * Allocate the layer-private data for the shadow buf. 12439 * (No need to preserve xb_private in the shadow xbuf.) 12440 */ 12441 shadow_xp->xb_private = shadow_bsp = 12442 kmem_zalloc(sizeof (struct sd_mapblocksize_info), KM_SLEEP); 12443 12444 /* 12445 * bsp->mbs_copy_offset is used later by sd_mapblocksize_iodone 12446 * to figure out where the start of the user data is (based upon 12447 * the system block size) in the data returned by the READ 12448 * command (which will be based upon the target blocksize). Note 12449 * that this is only really used if the request is unaligned. 12450 */ 12451 bsp->mbs_copy_offset = (ssize_t)(first_byte - 12452 ((offset_t)xp->xb_blkno * un->un_tgt_blocksize)); 12453 ASSERT((bsp->mbs_copy_offset >= 0) && 12454 (bsp->mbs_copy_offset < un->un_tgt_blocksize)); 12455 12456 shadow_bsp->mbs_copy_offset = bsp->mbs_copy_offset; 12457 12458 shadow_bsp->mbs_layer_index = bsp->mbs_layer_index = index; 12459 12460 /* Transfer the wmap (if any) to the shadow buf */ 12461 shadow_bsp->mbs_wmp = bsp->mbs_wmp; 12462 bsp->mbs_wmp = NULL; 12463 12464 /* 12465 * The shadow buf goes on from here in place of the 12466 * original buf. 12467 */ 12468 shadow_bsp->mbs_orig_bp = bp; 12469 bp = shadow_bp; 12470 } 12471 12472 SD_INFO(SD_LOG_IO_RMMEDIA, un, 12473 "sd_mapblocksize_iostart: tgt start block:0x%x\n", xp->xb_blkno); 12474 SD_INFO(SD_LOG_IO_RMMEDIA, un, 12475 "sd_mapblocksize_iostart: tgt request len:0x%x\n", 12476 request_bytes); 12477 SD_INFO(SD_LOG_IO_RMMEDIA, un, 12478 "sd_mapblocksize_iostart: shadow buf:0x%x\n", bp); 12479 12480 done: 12481 SD_NEXT_IOSTART(index, un, bp); 12482 12483 SD_TRACE(SD_LOG_IO_RMMEDIA, un, 12484 "sd_mapblocksize_iostart: exit: buf:0x%p\n", bp); 12485 } 12486 12487 12488 /* 12489 * Function: sd_mapblocksize_iodone 12490 * 12491 * Description: Completion side processing for block-size mapping.
12492 * 12493 * Context: May be called under interrupt context 12494 */ 12495 12496 static void 12497 sd_mapblocksize_iodone(int index, struct sd_lun *un, struct buf *bp) 12498 { 12499 struct sd_mapblocksize_info *bsp; 12500 struct sd_xbuf *xp; 12501 struct sd_xbuf *orig_xp; /* sd_xbuf for the original buf */ 12502 struct buf *orig_bp; /* ptr to the original buf */ 12503 offset_t shadow_end; 12504 offset_t request_end; 12505 offset_t shadow_start; 12506 ssize_t copy_offset; 12507 size_t copy_length; 12508 size_t shortfall; 12509 uint_t is_write; /* TRUE if this bp is a WRITE */ 12510 uint_t has_wmap; /* TRUE if this bp has a wmap */ 12511 12512 ASSERT(un != NULL); 12513 ASSERT(bp != NULL); 12514 12515 SD_TRACE(SD_LOG_IO_RMMEDIA, un, 12516 "sd_mapblocksize_iodone: entry: buf:0x%p\n", bp); 12517 12518 /* 12519 * There is no shadow buf or layer-private data if the target is 12520 * using un->un_sys_blocksize as its block size or if bcount == 0. 12521 */ 12522 if ((un->un_tgt_blocksize == un->un_sys_blocksize) || 12523 (bp->b_bcount == 0)) { 12524 goto exit; 12525 } 12526 12527 xp = SD_GET_XBUF(bp); 12528 ASSERT(xp != NULL); 12529 12530 /* Retrieve the pointer to the layer-private data area from the xbuf. */ 12531 bsp = xp->xb_private; 12532 12533 is_write = ((bp->b_flags & B_READ) == 0) ? TRUE : FALSE; 12534 has_wmap = (bsp->mbs_wmp != NULL) ? TRUE : FALSE; 12535 12536 if (is_write) { 12537 /* 12538 * For a WRITE request we must free up the block range that 12539 * we have locked up. This holds regardless of whether this is 12540 * an aligned write request or a read-modify-write request. 12541 */ 12542 sd_range_unlock(un, bsp->mbs_wmp); 12543 bsp->mbs_wmp = NULL; 12544 } 12545 12546 if ((bp->b_iodone != (int(*)(struct buf *))sd_mapblocksize_iodone)) { 12547 /* 12548 * An aligned read or write command will have no shadow buf; 12549 * there is not much else to do with it. 12550 */ 12551 goto done; 12552 } 12553 12554 orig_bp = bsp->mbs_orig_bp; 12555 ASSERT(orig_bp != NULL); 12556 orig_xp = SD_GET_XBUF(orig_bp); 12557 ASSERT(orig_xp != NULL); 12558 ASSERT(!mutex_owned(SD_MUTEX(un))); 12559 12560 if (!is_write && has_wmap) { 12561 /* 12562 * A READ with a wmap means this is the READ phase of a 12563 * read-modify-write. If an error occurred on the READ then 12564 * we do not proceed with the WRITE phase or copy any data. 12565 * Just release the write maps and return with an error. 12566 */ 12567 if ((bp->b_resid != 0) || (bp->b_error != 0)) { 12568 orig_bp->b_resid = orig_bp->b_bcount; 12569 bioerror(orig_bp, bp->b_error); 12570 sd_range_unlock(un, bsp->mbs_wmp); 12571 goto freebuf_done; 12572 } 12573 } 12574 12575 /* 12576 * Here is where we set up to copy the data from the shadow buf 12577 * into the space associated with the original buf. 12578 * 12579 * To deal with the conversion between block sizes, these 12580 * computations treat the data as an array of bytes, with the 12581 * first byte (byte 0) corresponding to the first byte in the 12582 * first block on the disk. 12583 */ 12584 12585 /* 12586 * shadow_start and shadow_len indicate the location and size of 12587 * the data returned with the shadow IO request. 12588 */ 12589 shadow_start = SD_TGTBLOCKS2BYTES(un, (offset_t)xp->xb_blkno); 12590 shadow_end = shadow_start + bp->b_bcount - bp->b_resid; 12591 12592 /* 12593 * copy_offset gives the offset (in bytes) from the start of the first 12594 * block of the READ request to the beginning of the data.
We retrieve 12595 * this value from mbs_copy_offset in the layer-private area of the 12596 * xbuf, where it was saved by sd_mapblocksize_iostart(). copy_length gives the amount of 12597 * data to be copied (in bytes). 12598 */ 12599 copy_offset = bsp->mbs_copy_offset; 12600 ASSERT((copy_offset >= 0) && (copy_offset < un->un_tgt_blocksize)); 12601 copy_length = orig_bp->b_bcount; 12602 request_end = shadow_start + copy_offset + orig_bp->b_bcount; 12603 12604 /* 12605 * Set up the resid and error fields of orig_bp as appropriate. 12606 */ 12607 if (shadow_end >= request_end) { 12608 /* We got all the requested data; set resid to zero */ 12609 orig_bp->b_resid = 0; 12610 } else { 12611 /* 12612 * We failed to get enough data to fully satisfy the original 12613 * request. Just copy back whatever data we got and set 12614 * up the residual and error code as required. 12615 * 12616 * 'shortfall' is the amount by which the data received with the 12617 * shadow buf has "fallen short" of the requested amount. 12618 */ 12619 shortfall = (size_t)(request_end - shadow_end); 12620 12621 if (shortfall > orig_bp->b_bcount) { 12622 /* 12623 * We did not get enough data to even partially 12624 * fulfill the original request. The residual is 12625 * equal to the amount requested. 12626 */ 12627 orig_bp->b_resid = orig_bp->b_bcount; 12628 } else { 12629 /* 12630 * We did not get all the data that we requested 12631 * from the device, but we will try to return what 12632 * portion we did get. 12633 */ 12634 orig_bp->b_resid = shortfall; 12635 } 12636 ASSERT(copy_length >= orig_bp->b_resid); 12637 copy_length -= orig_bp->b_resid; 12638 } 12639 12640 /* Propagate the error code from the shadow buf to the original buf */ 12641 bioerror(orig_bp, bp->b_error); 12642 12643 if (is_write) { 12644 goto freebuf_done; /* No data copying for a WRITE */ 12645 } 12646 12647 if (has_wmap) { 12648 /* 12649 * This is a READ command from the READ phase of a 12650 * read-modify-write request. We have to copy the data given 12651 * by the user OVER the data returned by the READ command, 12652 * then convert the command from a READ to a WRITE and send 12653 * it back to the target. 12654 */ 12655 bcopy(orig_bp->b_un.b_addr, bp->b_un.b_addr + copy_offset, 12656 copy_length); 12657 12658 bp->b_flags &= ~((int)B_READ); /* Convert to a WRITE */ 12659 12660 /* 12661 * Dispatch the WRITE command to the taskq thread, which 12662 * will in turn send the command to the target. When the 12663 * WRITE command completes, we (sd_mapblocksize_iodone()) 12664 * will get called again as part of the iodone chain 12665 * processing for it. Note that we will still be dealing 12666 * with the shadow buf at that point. 12667 */ 12668 if (taskq_dispatch(sd_wmr_tq, sd_read_modify_write_task, bp, 12669 KM_NOSLEEP) != 0) { 12670 /* 12671 * Dispatch was successful so we are done. Return 12672 * without going any higher up the iodone chain. Do 12673 * not free up any layer-private data until after the 12674 * WRITE completes. 12675 */ 12676 return; 12677 } 12678 12679 /* 12680 * Dispatch of the WRITE command failed; set up the error 12681 * condition and send this IO back up the iodone chain. 12682 */ 12683 bioerror(orig_bp, EIO); 12684 orig_bp->b_resid = orig_bp->b_bcount; 12685 12686 } else { 12687 /* 12688 * This is a regular READ request (ie, not a RMW). Copy the 12689 * data from the shadow buf into the original buf.
The 12690 * copy_offset compensates for any "misalignment" between the 12691 * shadow buf (with its un->un_tgt_blocksize blocks) and the 12692 * original buf (with its un->un_sys_blocksize blocks). 12693 */ 12694 bcopy(bp->b_un.b_addr + copy_offset, orig_bp->b_un.b_addr, 12695 copy_length); 12696 } 12697 12698 freebuf_done: 12699 12700 /* 12701 * At this point we still have both the shadow buf AND the original 12702 * buf to deal with, as well as the layer-private data area in each. 12703 * Local variables are as follows: 12704 * 12705 * bp -- points to shadow buf 12706 * xp -- points to xbuf of shadow buf 12707 * bsp -- points to layer-private data area of shadow buf 12708 * orig_bp -- points to original buf 12709 * 12710 * First free the shadow buf and its associated xbuf, then free the 12711 * layer-private data area from the shadow buf. There is no need to 12712 * restore xb_private in the shadow xbuf. 12713 */ 12714 sd_shadow_buf_free(bp); 12715 kmem_free(bsp, sizeof (struct sd_mapblocksize_info)); 12716 12717 /* 12718 * Now update the local variables to point to the original buf, xbuf, 12719 * and layer-private area. 12720 */ 12721 bp = orig_bp; 12722 xp = SD_GET_XBUF(bp); 12723 ASSERT(xp != NULL); 12724 ASSERT(xp == orig_xp); 12725 bsp = xp->xb_private; 12726 ASSERT(bsp != NULL); 12727 12728 done: 12729 /* 12730 * Restore xb_private to whatever it was set to by the next higher 12731 * layer in the chain, then free the layer-private data area. 12732 */ 12733 xp->xb_private = bsp->mbs_oprivate; 12734 kmem_free(bsp, sizeof (struct sd_mapblocksize_info)); 12735 12736 exit: 12737 SD_TRACE(SD_LOG_IO_RMMEDIA, SD_GET_UN(bp), 12738 "sd_mapblocksize_iodone: calling SD_NEXT_IODONE: buf:0x%p\n", bp); 12739 12740 SD_NEXT_IODONE(index, un, bp); 12741 } 12742 12743 12744 /* 12745 * Function: sd_checksum_iostart 12746 * 12747 * Description: A stub function for a layer that's currently not used. 12748 * For now just a placeholder. 12749 * 12750 * Context: Kernel thread context 12751 */ 12752 12753 static void 12754 sd_checksum_iostart(int index, struct sd_lun *un, struct buf *bp) 12755 { 12756 ASSERT(un != NULL); 12757 ASSERT(bp != NULL); 12758 ASSERT(!mutex_owned(SD_MUTEX(un))); 12759 SD_NEXT_IOSTART(index, un, bp); 12760 } 12761 12762 12763 /* 12764 * Function: sd_checksum_iodone 12765 * 12766 * Description: A stub function for a layer that's currently not used. 12767 * For now just a placeholder. 12768 * 12769 * Context: May be called under interrupt context 12770 */ 12771 12772 static void 12773 sd_checksum_iodone(int index, struct sd_lun *un, struct buf *bp) 12774 { 12775 ASSERT(un != NULL); 12776 ASSERT(bp != NULL); 12777 ASSERT(!mutex_owned(SD_MUTEX(un))); 12778 SD_NEXT_IODONE(index, un, bp); 12779 } 12780 12781 12782 /* 12783 * Function: sd_checksum_uscsi_iostart 12784 * 12785 * Description: A stub function for a layer that's currently not used. 12786 * For now just a placeholder. 12787 * 12788 * Context: Kernel thread context 12789 */ 12790 12791 static void 12792 sd_checksum_uscsi_iostart(int index, struct sd_lun *un, struct buf *bp) 12793 { 12794 ASSERT(un != NULL); 12795 ASSERT(bp != NULL); 12796 ASSERT(!mutex_owned(SD_MUTEX(un))); 12797 SD_NEXT_IOSTART(index, un, bp); 12798 } 12799 12800 12801 /* 12802 * Function: sd_checksum_uscsi_iodone 12803 * 12804 * Description: A stub function for a layer that's currently not used. 12805 * For now just a placeholder. 
 *
 * Context: May be called under interrupt context
 */

static void
sd_checksum_uscsi_iodone(int index, struct sd_lun *un, struct buf *bp)
{
	ASSERT(un != NULL);
	ASSERT(bp != NULL);
	ASSERT(!mutex_owned(SD_MUTEX(un)));
	SD_NEXT_IODONE(index, un, bp);
}


/*
 * Function: sd_pm_iostart
 *
 * Description: iostart-side routine for power management.
 *
 * Context: Kernel thread context
 */

static void
sd_pm_iostart(int index, struct sd_lun *un, struct buf *bp)
{
	ASSERT(un != NULL);
	ASSERT(bp != NULL);
	ASSERT(!mutex_owned(SD_MUTEX(un)));
	ASSERT(!mutex_owned(&un->un_pm_mutex));

	SD_TRACE(SD_LOG_IO_PM, un, "sd_pm_iostart: entry\n");

	if (sd_pm_entry(un) != DDI_SUCCESS) {
		/*
		 * Set up to return the failed buf back up the 'iodone'
		 * side of the calling chain.
		 */
		bioerror(bp, EIO);
		bp->b_resid = bp->b_bcount;

		SD_BEGIN_IODONE(index, un, bp);

		SD_TRACE(SD_LOG_IO_PM, un, "sd_pm_iostart: exit\n");
		return;
	}

	SD_NEXT_IOSTART(index, un, bp);

	SD_TRACE(SD_LOG_IO_PM, un, "sd_pm_iostart: exit\n");
}


/*
 * Function: sd_pm_iodone
 *
 * Description: iodone-side routine for power management.
 *
 * Context: May be called from interrupt context
 */

static void
sd_pm_iodone(int index, struct sd_lun *un, struct buf *bp)
{
	ASSERT(un != NULL);
	ASSERT(bp != NULL);
	ASSERT(!mutex_owned(&un->un_pm_mutex));

	SD_TRACE(SD_LOG_IO_PM, un, "sd_pm_iodone: entry\n");

	/*
	 * After attach the following flag is only read, so don't
	 * take the penalty of acquiring a mutex for it.
	 */
	if (un->un_f_pm_is_enabled == TRUE) {
		sd_pm_exit(un);
	}

	SD_NEXT_IODONE(index, un, bp);

	SD_TRACE(SD_LOG_IO_PM, un, "sd_pm_iodone: exit\n");
}


/*
 * Function: sd_core_iostart
 *
 * Description: Primary driver function for enqueuing buf(9S) structs from
 *		the system and initiating IO to the target device
 *
 * Context: Kernel thread context. Can sleep.
 *
 * Assumptions: - The given xp->xb_blkno is absolute
 *		  (ie, relative to the start of the device).
 *		- The IO is to be done using the native blocksize of
 *		  the device, as specified in un->un_tgt_blocksize.
 */
/* ARGSUSED */
static void
sd_core_iostart(int index, struct sd_lun *un, struct buf *bp)
{
	struct sd_xbuf *xp;

	ASSERT(un != NULL);
	ASSERT(bp != NULL);
	ASSERT(!mutex_owned(SD_MUTEX(un)));
	ASSERT(bp->b_resid == 0);

	SD_TRACE(SD_LOG_IO_CORE, un, "sd_core_iostart: entry: bp:0x%p\n", bp);

	xp = SD_GET_XBUF(bp);
	ASSERT(xp != NULL);

	mutex_enter(SD_MUTEX(un));

	/*
	 * If we are currently in the failfast state, fail any new IO
	 * that has B_FAILFAST set, then return.
	 */
	if ((bp->b_flags & B_FAILFAST) &&
	    (un->un_failfast_state == SD_FAILFAST_ACTIVE)) {
		mutex_exit(SD_MUTEX(un));
		bioerror(bp, EIO);
		bp->b_resid = bp->b_bcount;
		SD_BEGIN_IODONE(index, un, bp);
		return;
	}

	if (SD_IS_DIRECT_PRIORITY(xp)) {
		/*
		 * Priority command -- transport it immediately.
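 */

/*
 * [Editorial note -- condensed pairing sketch, not original code.]
 * Taken together, the two PM routines above bracket every command:
 * sd_pm_entry() on the iostart side powers the device up and takes a
 * PM busy count (failing the buf with EIO if power-up fails), and
 * sd_pm_exit() on the iodone side drops that count again:
 */
#if 0	/* sketch only; not compiled with the driver */
	if (sd_pm_entry(un) != DDI_SUCCESS) {	/* iostart side */
		bioerror(bp, EIO);		/* could not power up */
		bp->b_resid = bp->b_bcount;
		SD_BEGIN_IODONE(index, un, bp);	/* fail up the chain */
		return;
	}
	/* ... the command is transported and completes ... */
	if (un->un_f_pm_is_enabled == TRUE)	/* iodone side */
		sd_pm_exit(un);			/* drop the PM busy count */
#endif

/*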
12936 * 12937 * Note: We may want to assert that USCSI_DIAGNOSE is set, 12938 * because all direct priority commands should be associated 12939 * with error recovery actions which we don't want to retry. 12940 */ 12941 sd_start_cmds(un, bp); 12942 } else { 12943 /* 12944 * Normal command -- add it to the wait queue, then start 12945 * transporting commands from the wait queue. 12946 */ 12947 sd_add_buf_to_waitq(un, bp); 12948 SD_UPDATE_KSTATS(un, kstat_waitq_enter, bp); 12949 sd_start_cmds(un, NULL); 12950 } 12951 12952 mutex_exit(SD_MUTEX(un)); 12953 12954 SD_TRACE(SD_LOG_IO_CORE, un, "sd_core_iostart: exit: bp:0x%p\n", bp); 12955 } 12956 12957 12958 /* 12959 * Function: sd_init_cdb_limits 12960 * 12961 * Description: This is to handle scsi_pkt initialization differences 12962 * between the driver platforms. 12963 * 12964 * Legacy behaviors: 12965 * 12966 * If the block number or the sector count exceeds the 12967 * capabilities of a Group 0 command, shift over to a 12968 * Group 1 command. We don't blindly use Group 1 12969 * commands because a) some drives (CDC Wren IVs) get a 12970 * bit confused, and b) there is probably a fair amount 12971 * of speed difference for a target to receive and decode 12972 * a 10 byte command instead of a 6 byte command. 12973 * 12974 * The xfer time difference of 6 vs 10 byte CDBs is 12975 * still significant so this code is still worthwhile. 12976 * 10 byte CDBs are very inefficient with the fas HBA driver 12977 * and older disks. Each CDB byte took 1 usec with some 12978 * popular disks. 12979 * 12980 * Context: Must be called at attach time 12981 */ 12982 12983 static void 12984 sd_init_cdb_limits(struct sd_lun *un) 12985 { 12986 int hba_cdb_limit; 12987 12988 /* 12989 * Use CDB_GROUP1 commands for most devices except for 12990 * parallel SCSI fixed drives in which case we get better 12991 * performance using CDB_GROUP0 commands (where applicable). 12992 */ 12993 un->un_mincdb = SD_CDB_GROUP1; 12994 #if !defined(__fibre) 12995 if (!un->un_f_is_fibre && !un->un_f_cfg_is_atapi && !ISROD(un) && 12996 !un->un_f_has_removable_media) { 12997 un->un_mincdb = SD_CDB_GROUP0; 12998 } 12999 #endif 13000 13001 /* 13002 * Try to read the max-cdb-length supported by HBA. 13003 */ 13004 un->un_max_hba_cdb = scsi_ifgetcap(SD_ADDRESS(un), "max-cdb-length", 1); 13005 if (0 >= un->un_max_hba_cdb) { 13006 un->un_max_hba_cdb = CDB_GROUP4; 13007 hba_cdb_limit = SD_CDB_GROUP4; 13008 } else if (0 < un->un_max_hba_cdb && 13009 un->un_max_hba_cdb < CDB_GROUP1) { 13010 hba_cdb_limit = SD_CDB_GROUP0; 13011 } else if (CDB_GROUP1 <= un->un_max_hba_cdb && 13012 un->un_max_hba_cdb < CDB_GROUP5) { 13013 hba_cdb_limit = SD_CDB_GROUP1; 13014 } else if (CDB_GROUP5 <= un->un_max_hba_cdb && 13015 un->un_max_hba_cdb < CDB_GROUP4) { 13016 hba_cdb_limit = SD_CDB_GROUP5; 13017 } else { 13018 hba_cdb_limit = SD_CDB_GROUP4; 13019 } 13020 13021 /* 13022 * Use CDB_GROUP5 commands for removable devices. Use CDB_GROUP4 13023 * commands for fixed disks unless we are building for a 32 bit 13024 * kernel. 13025 */ 13026 #ifdef _LP64 13027 un->un_maxcdb = (un->un_f_has_removable_media) ? SD_CDB_GROUP5 : 13028 min(hba_cdb_limit, SD_CDB_GROUP4); 13029 #else 13030 un->un_maxcdb = (un->un_f_has_removable_media) ? SD_CDB_GROUP5 : 13031 min(hba_cdb_limit, SD_CDB_GROUP1); 13032 #endif 13033 13034 un->un_status_len = (int)((un->un_f_arq_enabled == TRUE) 13035 ? sizeof (struct scsi_arq_status) : 1); 13036 un->un_cmd_timeout = (ushort_t)sd_io_time; 13037 un->un_uscsi_timeout = ((ISCD(un)) ? 
    2 : 1) * un->un_cmd_timeout;
}


/*
 * Function: sd_initpkt_for_buf
 *
 * Description: Allocate and initialize for transport a scsi_pkt struct,
 *		based upon the info specified in the given buf struct.
 *
 *		Assumes the xb_blkno in the request is absolute (ie,
 *		relative to the start of the device (NOT partition!)).
 *		Also assumes that the request is using the native block
 *		size of the device (as returned by the READ CAPACITY
 *		command).
 *
 * Return Code: SD_PKT_ALLOC_SUCCESS
 *		SD_PKT_ALLOC_FAILURE
 *		SD_PKT_ALLOC_FAILURE_NO_DMA
 *		SD_PKT_ALLOC_FAILURE_CDB_TOO_SMALL
 *
 * Context: Kernel thread and may be called from software interrupt context
 *	    as part of a sdrunout callback. This function may not block or
 *	    call routines that block.
 */

static int
sd_initpkt_for_buf(struct buf *bp, struct scsi_pkt **pktpp)
{
	struct sd_xbuf	*xp;
	struct scsi_pkt	*pktp = NULL;
	struct sd_lun	*un;
	size_t		blockcount;
	daddr_t		startblock;
	int		rval;
	int		cmd_flags;

	ASSERT(bp != NULL);
	ASSERT(pktpp != NULL);
	xp = SD_GET_XBUF(bp);
	ASSERT(xp != NULL);
	un = SD_GET_UN(bp);
	ASSERT(un != NULL);
	ASSERT(mutex_owned(SD_MUTEX(un)));
	ASSERT(bp->b_resid == 0);

	SD_TRACE(SD_LOG_IO_CORE, un,
	    "sd_initpkt_for_buf: entry: buf:0x%p\n", bp);

	mutex_exit(SD_MUTEX(un));

#if defined(__i386) || defined(__amd64)	/* DMAFREE for x86 only */
	if (xp->xb_pkt_flags & SD_XB_DMA_FREED) {
		/*
		 * Already have a scsi_pkt -- just need DMA resources.
		 * We must recompute the CDB in case the mapping returns
		 * a nonzero pkt_resid.
		 * Note: if this is a portion of a PKT_DMA_PARTIAL transfer
		 * that is being retried, the unmap/remap of the DMA resources
		 * will result in the entire transfer starting over again
		 * from the very first block.
		 */
		ASSERT(xp->xb_pktp != NULL);
		pktp = xp->xb_pktp;
	} else {
		pktp = NULL;
	}
#endif /* __i386 || __amd64 */

	startblock = xp->xb_blkno;	/* Absolute block num. */
	blockcount = SD_BYTES2TGTBLOCKS(un, bp->b_bcount);

	cmd_flags = un->un_pkt_flags | (xp->xb_pkt_flags & SD_XB_INITPKT_MASK);

	/*
	 * sd_setup_rw_pkt will determine the appropriate CDB group to use,
	 * call scsi_init_pkt, and build the CDB.
	 */
	rval = sd_setup_rw_pkt(un, &pktp, bp,
	    cmd_flags, sdrunout, (caddr_t)un,
	    startblock, blockcount);

	if (rval == 0) {
		/*
		 * Success.
		 *
		 * If partial DMA is being used and required for this transfer,
		 * set it up here.
13125 */ 13126 if ((un->un_pkt_flags & PKT_DMA_PARTIAL) != 0 && 13127 (pktp->pkt_resid != 0)) { 13128 13129 /* 13130 * Save the CDB length and pkt_resid for the 13131 * next xfer 13132 */ 13133 xp->xb_dma_resid = pktp->pkt_resid; 13134 13135 /* rezero resid */ 13136 pktp->pkt_resid = 0; 13137 13138 } else { 13139 xp->xb_dma_resid = 0; 13140 } 13141 13142 pktp->pkt_flags = un->un_tagflags; 13143 pktp->pkt_time = un->un_cmd_timeout; 13144 pktp->pkt_comp = sdintr; 13145 13146 pktp->pkt_private = bp; 13147 *pktpp = pktp; 13148 13149 SD_TRACE(SD_LOG_IO_CORE, un, 13150 "sd_initpkt_for_buf: exit: buf:0x%p\n", bp); 13151 13152 #if defined(__i386) || defined(__amd64) /* DMAFREE for x86 only */ 13153 xp->xb_pkt_flags &= ~SD_XB_DMA_FREED; 13154 #endif 13155 13156 mutex_enter(SD_MUTEX(un)); 13157 return (SD_PKT_ALLOC_SUCCESS); 13158 13159 } 13160 13161 /* 13162 * SD_PKT_ALLOC_FAILURE is the only expected failure code 13163 * from sd_setup_rw_pkt. 13164 */ 13165 ASSERT(rval == SD_PKT_ALLOC_FAILURE); 13166 13167 if (rval == SD_PKT_ALLOC_FAILURE) { 13168 *pktpp = NULL; 13169 /* 13170 * Set the driver state to RWAIT to indicate the driver 13171 * is waiting on resource allocations. The driver will not 13172 * suspend, pm_suspend, or detatch while the state is RWAIT. 13173 */ 13174 mutex_enter(SD_MUTEX(un)); 13175 New_state(un, SD_STATE_RWAIT); 13176 13177 SD_ERROR(SD_LOG_IO_CORE, un, 13178 "sd_initpkt_for_buf: No pktp. exit bp:0x%p\n", bp); 13179 13180 if ((bp->b_flags & B_ERROR) != 0) { 13181 return (SD_PKT_ALLOC_FAILURE_NO_DMA); 13182 } 13183 return (SD_PKT_ALLOC_FAILURE); 13184 } else { 13185 /* 13186 * PKT_ALLOC_FAILURE_CDB_TOO_SMALL 13187 * 13188 * This should never happen. Maybe someone messed with the 13189 * kernel's minphys? 13190 */ 13191 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 13192 "Request rejected: too large for CDB: " 13193 "lba:0x%08lx len:0x%08lx\n", startblock, blockcount); 13194 SD_ERROR(SD_LOG_IO_CORE, un, 13195 "sd_initpkt_for_buf: No cp. exit bp:0x%p\n", bp); 13196 mutex_enter(SD_MUTEX(un)); 13197 return (SD_PKT_ALLOC_FAILURE_CDB_TOO_SMALL); 13198 13199 } 13200 } 13201 13202 13203 /* 13204 * Function: sd_destroypkt_for_buf 13205 * 13206 * Description: Free the scsi_pkt(9S) for the given bp (buf IO processing). 13207 * 13208 * Context: Kernel thread or interrupt context 13209 */ 13210 13211 static void 13212 sd_destroypkt_for_buf(struct buf *bp) 13213 { 13214 ASSERT(bp != NULL); 13215 ASSERT(SD_GET_UN(bp) != NULL); 13216 13217 SD_TRACE(SD_LOG_IO_CORE, SD_GET_UN(bp), 13218 "sd_destroypkt_for_buf: entry: buf:0x%p\n", bp); 13219 13220 ASSERT(SD_GET_PKTP(bp) != NULL); 13221 scsi_destroy_pkt(SD_GET_PKTP(bp)); 13222 13223 SD_TRACE(SD_LOG_IO_CORE, SD_GET_UN(bp), 13224 "sd_destroypkt_for_buf: exit: buf:0x%p\n", bp); 13225 } 13226 13227 /* 13228 * Function: sd_setup_rw_pkt 13229 * 13230 * Description: Determines appropriate CDB group for the requested LBA 13231 * and transfer length, calls scsi_init_pkt, and builds 13232 * the CDB. Do not use for partial DMA transfers except 13233 * for the initial transfer since the CDB size must 13234 * remain constant. 13235 * 13236 * Context: Kernel thread and may be called from software interrupt 13237 * context as part of a sdrunout callback. 
This function may not 13238 * block or call routines that block 13239 */ 13240 13241 13242 int 13243 sd_setup_rw_pkt(struct sd_lun *un, 13244 struct scsi_pkt **pktpp, struct buf *bp, int flags, 13245 int (*callback)(caddr_t), caddr_t callback_arg, 13246 diskaddr_t lba, uint32_t blockcount) 13247 { 13248 struct scsi_pkt *return_pktp; 13249 union scsi_cdb *cdbp; 13250 struct sd_cdbinfo *cp = NULL; 13251 int i; 13252 13253 /* 13254 * See which size CDB to use, based upon the request. 13255 */ 13256 for (i = un->un_mincdb; i <= un->un_maxcdb; i++) { 13257 13258 /* 13259 * Check lba and block count against sd_cdbtab limits. 13260 * In the partial DMA case, we have to use the same size 13261 * CDB for all the transfers. Check lba + blockcount 13262 * against the max LBA so we know that segment of the 13263 * transfer can use the CDB we select. 13264 */ 13265 if ((lba + blockcount - 1 <= sd_cdbtab[i].sc_maxlba) && 13266 (blockcount <= sd_cdbtab[i].sc_maxlen)) { 13267 13268 /* 13269 * The command will fit into the CDB type 13270 * specified by sd_cdbtab[i]. 13271 */ 13272 cp = sd_cdbtab + i; 13273 13274 /* 13275 * Call scsi_init_pkt so we can fill in the 13276 * CDB. 13277 */ 13278 return_pktp = scsi_init_pkt(SD_ADDRESS(un), *pktpp, 13279 bp, cp->sc_grpcode, un->un_status_len, 0, 13280 flags, callback, callback_arg); 13281 13282 if (return_pktp != NULL) { 13283 13284 /* 13285 * Return new value of pkt 13286 */ 13287 *pktpp = return_pktp; 13288 13289 /* 13290 * To be safe, zero the CDB insuring there is 13291 * no leftover data from a previous command. 13292 */ 13293 bzero(return_pktp->pkt_cdbp, cp->sc_grpcode); 13294 13295 /* 13296 * Handle partial DMA mapping 13297 */ 13298 if (return_pktp->pkt_resid != 0) { 13299 13300 /* 13301 * Not going to xfer as many blocks as 13302 * originally expected 13303 */ 13304 blockcount -= 13305 SD_BYTES2TGTBLOCKS(un, 13306 return_pktp->pkt_resid); 13307 } 13308 13309 cdbp = (union scsi_cdb *)return_pktp->pkt_cdbp; 13310 13311 /* 13312 * Set command byte based on the CDB 13313 * type we matched. 13314 */ 13315 cdbp->scc_cmd = cp->sc_grpmask | 13316 ((bp->b_flags & B_READ) ? 13317 SCMD_READ : SCMD_WRITE); 13318 13319 SD_FILL_SCSI1_LUN(un, return_pktp); 13320 13321 /* 13322 * Fill in LBA and length 13323 */ 13324 ASSERT((cp->sc_grpcode == CDB_GROUP1) || 13325 (cp->sc_grpcode == CDB_GROUP4) || 13326 (cp->sc_grpcode == CDB_GROUP0) || 13327 (cp->sc_grpcode == CDB_GROUP5)); 13328 13329 if (cp->sc_grpcode == CDB_GROUP1) { 13330 FORMG1ADDR(cdbp, lba); 13331 FORMG1COUNT(cdbp, blockcount); 13332 return (0); 13333 } else if (cp->sc_grpcode == CDB_GROUP4) { 13334 FORMG4LONGADDR(cdbp, lba); 13335 FORMG4COUNT(cdbp, blockcount); 13336 return (0); 13337 } else if (cp->sc_grpcode == CDB_GROUP0) { 13338 FORMG0ADDR(cdbp, lba); 13339 FORMG0COUNT(cdbp, blockcount); 13340 return (0); 13341 } else if (cp->sc_grpcode == CDB_GROUP5) { 13342 FORMG5ADDR(cdbp, lba); 13343 FORMG5COUNT(cdbp, blockcount); 13344 return (0); 13345 } 13346 13347 /* 13348 * It should be impossible to not match one 13349 * of the CDB types above, so we should never 13350 * reach this point. Set the CDB command byte 13351 * to test-unit-ready to avoid writing 13352 * to somewhere we don't intend. 13353 */ 13354 cdbp->scc_cmd = SCMD_TEST_UNIT_READY; 13355 return (SD_PKT_ALLOC_FAILURE_CDB_TOO_SMALL); 13356 } else { 13357 /* 13358 * Couldn't get scsi_pkt 13359 */ 13360 return (SD_PKT_ALLOC_FAILURE); 13361 } 13362 } 13363 } 13364 13365 /* 13366 * None of the available CDB types were suitable. 
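 */

/*
 * [Editorial illustration -- not part of the original driver source.]
 * The limits encoded in sd_cdbtab follow from the standard SCSI CDB
 * formats (the actual sd_cdbinfo table lives in sddef.h). A hedged,
 * self-contained sketch of the same "smallest CDB that fits" choice
 * made by the loop above:
 */
#if 0	/* sketch only; not compiled with the driver */
static int
pick_cdb_group(unsigned long long lba, unsigned long long nblk)
{
	/* Group 0 (6-byte): 21-bit LBA, 8-bit count (count 0 means 256). */
	if (lba + nblk - 1 <= 0x1FFFFFULL && nblk <= 0x100)
		return (0);
	/* Group 1 (10-byte): 32-bit LBA, 16-bit count. */
	if (lba + nblk - 1 <= 0xFFFFFFFFULL && nblk <= 0xFFFF)
		return (1);
	/* Group 5 (12-byte): 32-bit LBA, 32-bit count. */
	if (lba + nblk - 1 <= 0xFFFFFFFFULL)
		return (5);
	/* Group 4 (16-byte): 64-bit LBA, 32-bit count. */
	return (4);
}
#endif

/*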
This really 13367 * should never happen: on a 64 bit system we support 13368 * READ16/WRITE16 which will hold an entire 64 bit disk address 13369 * and on a 32 bit system we will refuse to bind to a device 13370 * larger than 2TB so addresses will never be larger than 32 bits. 13371 */ 13372 return (SD_PKT_ALLOC_FAILURE_CDB_TOO_SMALL); 13373 } 13374 13375 /* 13376 * Function: sd_setup_next_rw_pkt 13377 * 13378 * Description: Setup packet for partial DMA transfers, except for the 13379 * initial transfer. sd_setup_rw_pkt should be used for 13380 * the initial transfer. 13381 * 13382 * Context: Kernel thread and may be called from interrupt context. 13383 */ 13384 13385 int 13386 sd_setup_next_rw_pkt(struct sd_lun *un, 13387 struct scsi_pkt *pktp, struct buf *bp, 13388 diskaddr_t lba, uint32_t blockcount) 13389 { 13390 uchar_t com; 13391 union scsi_cdb *cdbp; 13392 uchar_t cdb_group_id; 13393 13394 ASSERT(pktp != NULL); 13395 ASSERT(pktp->pkt_cdbp != NULL); 13396 13397 cdbp = (union scsi_cdb *)pktp->pkt_cdbp; 13398 com = cdbp->scc_cmd; 13399 cdb_group_id = CDB_GROUPID(com); 13400 13401 ASSERT((cdb_group_id == CDB_GROUPID_0) || 13402 (cdb_group_id == CDB_GROUPID_1) || 13403 (cdb_group_id == CDB_GROUPID_4) || 13404 (cdb_group_id == CDB_GROUPID_5)); 13405 13406 /* 13407 * Move pkt to the next portion of the xfer. 13408 * func is NULL_FUNC so we do not have to release 13409 * the disk mutex here. 13410 */ 13411 if (scsi_init_pkt(SD_ADDRESS(un), pktp, bp, 0, 0, 0, 0, 13412 NULL_FUNC, NULL) == pktp) { 13413 /* Success. Handle partial DMA */ 13414 if (pktp->pkt_resid != 0) { 13415 blockcount -= 13416 SD_BYTES2TGTBLOCKS(un, pktp->pkt_resid); 13417 } 13418 13419 cdbp->scc_cmd = com; 13420 SD_FILL_SCSI1_LUN(un, pktp); 13421 if (cdb_group_id == CDB_GROUPID_1) { 13422 FORMG1ADDR(cdbp, lba); 13423 FORMG1COUNT(cdbp, blockcount); 13424 return (0); 13425 } else if (cdb_group_id == CDB_GROUPID_4) { 13426 FORMG4LONGADDR(cdbp, lba); 13427 FORMG4COUNT(cdbp, blockcount); 13428 return (0); 13429 } else if (cdb_group_id == CDB_GROUPID_0) { 13430 FORMG0ADDR(cdbp, lba); 13431 FORMG0COUNT(cdbp, blockcount); 13432 return (0); 13433 } else if (cdb_group_id == CDB_GROUPID_5) { 13434 FORMG5ADDR(cdbp, lba); 13435 FORMG5COUNT(cdbp, blockcount); 13436 return (0); 13437 } 13438 13439 /* Unreachable */ 13440 return (SD_PKT_ALLOC_FAILURE_CDB_TOO_SMALL); 13441 } 13442 13443 /* 13444 * Error setting up next portion of cmd transfer. 13445 * Something is definitely very wrong and this 13446 * should not happen. 13447 */ 13448 return (SD_PKT_ALLOC_FAILURE); 13449 } 13450 13451 /* 13452 * Function: sd_initpkt_for_uscsi 13453 * 13454 * Description: Allocate and initialize for transport a scsi_pkt struct, 13455 * based upon the info specified in the given uscsi_cmd struct. 13456 * 13457 * Return Code: SD_PKT_ALLOC_SUCCESS 13458 * SD_PKT_ALLOC_FAILURE 13459 * SD_PKT_ALLOC_FAILURE_NO_DMA 13460 * SD_PKT_ALLOC_FAILURE_CDB_TOO_SMALL 13461 * 13462 * Context: Kernel thread and may be called from software interrupt context 13463 * as part of a sdrunout callback. 
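 */

/*
 * [Editorial illustration -- not part of the original driver source.]
 * How sd_setup_rw_pkt() and sd_setup_next_rw_pkt() cooperate for a
 * PKT_DMA_PARTIAL transfer. The real sequencing is driven from the
 * iostart/iodone chain and sdintr(), not from a single loop; this is
 * only a condensed view under that assumption:
 */
#if 0	/* sketch only; not compiled with the driver */
	lba = xp->xb_blkno;
	blockcount = SD_BYTES2TGTBLOCKS(un, bp->b_bcount);

	/* Initial window: picks the CDB group and allocates the pkt. */
	(void) sd_setup_rw_pkt(un, &pktp, bp, cmd_flags,
	    sdrunout, (caddr_t)un, lba, blockcount);

	while (xp->xb_dma_resid != 0) {
		/* Advance past the blocks moved by the previous window. */
		done = blockcount - SD_BYTES2TGTBLOCKS(un, xp->xb_dma_resid);
		lba += done;
		blockcount -= done;

		/* Same CDB size as before, next DMA window. */
		(void) sd_setup_next_rw_pkt(un, pktp, bp, lba, blockcount);
	}
#endif

/*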
This function may not block or
 *	    call routines that block.
 */

static int
sd_initpkt_for_uscsi(struct buf *bp, struct scsi_pkt **pktpp)
{
	struct uscsi_cmd *uscmd;
	struct sd_xbuf	*xp;
	struct scsi_pkt	*pktp;
	struct sd_lun	*un;
	uint32_t	flags = 0;

	ASSERT(bp != NULL);
	ASSERT(pktpp != NULL);
	xp = SD_GET_XBUF(bp);
	ASSERT(xp != NULL);
	un = SD_GET_UN(bp);
	ASSERT(un != NULL);
	ASSERT(mutex_owned(SD_MUTEX(un)));

	/* The pointer to the uscsi_cmd struct is expected in xb_pktinfo */
	uscmd = (struct uscsi_cmd *)xp->xb_pktinfo;
	ASSERT(uscmd != NULL);

	SD_TRACE(SD_LOG_IO_CORE, un,
	    "sd_initpkt_for_uscsi: entry: buf:0x%p\n", bp);

	/*
	 * Allocate the scsi_pkt for the command.
	 * Note: If PKT_DMA_PARTIAL flag is set, scsi_vhci binds a path
	 *	 during scsi_init_pkt time and will continue to use the
	 *	 same path as long as the same scsi_pkt is used without
	 *	 intervening scsi_dmafree(). Since a uscsi command does
	 *	 not call scsi_dmafree() before retrying a failed command,
	 *	 it is necessary to make sure the PKT_DMA_PARTIAL flag is
	 *	 NOT set so that scsi_vhci can use another available path
	 *	 for retry. Besides, a uscsi command does not allow DMA
	 *	 breakup, so there is no need to set PKT_DMA_PARTIAL.
	 */
	if (uscmd->uscsi_rqlen > SENSE_LENGTH) {
		pktp = scsi_init_pkt(SD_ADDRESS(un), NULL,
		    ((bp->b_bcount != 0) ? bp : NULL), uscmd->uscsi_cdblen,
		    ((int)(uscmd->uscsi_rqlen) + sizeof (struct scsi_arq_status)
		    - sizeof (struct scsi_extended_sense)), 0,
		    (un->un_pkt_flags & ~PKT_DMA_PARTIAL) | PKT_XARQ,
		    sdrunout, (caddr_t)un);
	} else {
		pktp = scsi_init_pkt(SD_ADDRESS(un), NULL,
		    ((bp->b_bcount != 0) ? bp : NULL), uscmd->uscsi_cdblen,
		    sizeof (struct scsi_arq_status), 0,
		    (un->un_pkt_flags & ~PKT_DMA_PARTIAL),
		    sdrunout, (caddr_t)un);
	}

	if (pktp == NULL) {
		*pktpp = NULL;
		/*
		 * Set the driver state to RWAIT to indicate the driver
		 * is waiting on resource allocations. The driver will not
		 * suspend, pm_suspend, or detach while the state is RWAIT.
		 */
		New_state(un, SD_STATE_RWAIT);

		SD_ERROR(SD_LOG_IO_CORE, un,
		    "sd_initpkt_for_uscsi: No pktp. exit bp:0x%p\n", bp);

		if ((bp->b_flags & B_ERROR) != 0) {
			return (SD_PKT_ALLOC_FAILURE_NO_DMA);
		}
		return (SD_PKT_ALLOC_FAILURE);
	}

	/*
	 * We do not do DMA breakup for USCSI commands, so return failure
	 * here if all the needed DMA resources were not allocated.
	 */
	if ((un->un_pkt_flags & PKT_DMA_PARTIAL) &&
	    (bp->b_bcount != 0) && (pktp->pkt_resid != 0)) {
		scsi_destroy_pkt(pktp);
		SD_ERROR(SD_LOG_IO_CORE, un, "sd_initpkt_for_uscsi: "
		    "No partial DMA for USCSI. exit: buf:0x%p\n", bp);
		return (SD_PKT_ALLOC_FAILURE_PKT_TOO_SMALL);
	}

	/* Init the cdb from the given uscsi struct */
	(void) scsi_setup_cdb((union scsi_cdb *)pktp->pkt_cdbp,
	    uscmd->uscsi_cdb[0], 0, 0, 0);

	SD_FILL_SCSI1_LUN(un, pktp);

	/*
	 * Set up the optional USCSI flags. See the uscsi(7I) man page
	 * for a listing of the supported flags.
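 */

/*
 * [Editorial illustration -- not part of the original driver source.]
 * Where these flags originate: a userland caller fills in a uscsi_cmd
 * and issues the USCSICMD ioctl against the raw device node. A minimal
 * hypothetical example (SCSI INQUIRY, 36 bytes); the fd and buffer are
 * assumptions of the example:
 */
#if 0	/* sketch only; not compiled with the driver */
#include <sys/scsi/impl/uscsi.h>
#include <string.h>
#include <stropts.h>

int
do_inquiry(int fd, char *buf)	/* fd from open("/dev/rdsk/...", O_RDONLY) */
{
	struct uscsi_cmd ucmd;
	char cdb[6] = { 0x12, 0, 0, 0, 36, 0 };	/* INQUIRY, CDB_GROUP0 */

	(void) memset(&ucmd, 0, sizeof (ucmd));
	ucmd.uscsi_cdb = cdb;
	ucmd.uscsi_cdblen = 6;
	ucmd.uscsi_bufaddr = buf;
	ucmd.uscsi_buflen = 36;
	ucmd.uscsi_flags = USCSI_READ | USCSI_SILENT;	/* -> FLAG_SILENT */
	ucmd.uscsi_timeout = 30;	/* 0 would mean un_uscsi_timeout */
	return (ioctl(fd, USCSICMD, &ucmd));
}
#endif

/*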
13557 */ 13558 13559 if (uscmd->uscsi_flags & USCSI_SILENT) { 13560 flags |= FLAG_SILENT; 13561 } 13562 13563 if (uscmd->uscsi_flags & USCSI_DIAGNOSE) { 13564 flags |= FLAG_DIAGNOSE; 13565 } 13566 13567 if (uscmd->uscsi_flags & USCSI_ISOLATE) { 13568 flags |= FLAG_ISOLATE; 13569 } 13570 13571 if (un->un_f_is_fibre == FALSE) { 13572 if (uscmd->uscsi_flags & USCSI_RENEGOT) { 13573 flags |= FLAG_RENEGOTIATE_WIDE_SYNC; 13574 } 13575 } 13576 13577 /* 13578 * Set the pkt flags here so we save time later. 13579 * Note: These flags are NOT in the uscsi man page!!! 13580 */ 13581 if (uscmd->uscsi_flags & USCSI_HEAD) { 13582 flags |= FLAG_HEAD; 13583 } 13584 13585 if (uscmd->uscsi_flags & USCSI_NOINTR) { 13586 flags |= FLAG_NOINTR; 13587 } 13588 13589 /* 13590 * For tagged queueing, things get a bit complicated. 13591 * Check first for head of queue and last for ordered queue. 13592 * If neither head nor order, use the default driver tag flags. 13593 */ 13594 if ((uscmd->uscsi_flags & USCSI_NOTAG) == 0) { 13595 if (uscmd->uscsi_flags & USCSI_HTAG) { 13596 flags |= FLAG_HTAG; 13597 } else if (uscmd->uscsi_flags & USCSI_OTAG) { 13598 flags |= FLAG_OTAG; 13599 } else { 13600 flags |= un->un_tagflags & FLAG_TAGMASK; 13601 } 13602 } 13603 13604 if (uscmd->uscsi_flags & USCSI_NODISCON) { 13605 flags = (flags & ~FLAG_TAGMASK) | FLAG_NODISCON; 13606 } 13607 13608 pktp->pkt_flags = flags; 13609 13610 /* Transfer uscsi information to scsi_pkt */ 13611 (void) scsi_uscsi_pktinit(uscmd, pktp); 13612 13613 /* Copy the caller's CDB into the pkt... */ 13614 bcopy(uscmd->uscsi_cdb, pktp->pkt_cdbp, uscmd->uscsi_cdblen); 13615 13616 if (uscmd->uscsi_timeout == 0) { 13617 pktp->pkt_time = un->un_uscsi_timeout; 13618 } else { 13619 pktp->pkt_time = uscmd->uscsi_timeout; 13620 } 13621 13622 /* need it later to identify USCSI request in sdintr */ 13623 xp->xb_pkt_flags |= SD_XB_USCSICMD; 13624 13625 xp->xb_sense_resid = uscmd->uscsi_rqresid; 13626 13627 pktp->pkt_private = bp; 13628 pktp->pkt_comp = sdintr; 13629 *pktpp = pktp; 13630 13631 SD_TRACE(SD_LOG_IO_CORE, un, 13632 "sd_initpkt_for_uscsi: exit: buf:0x%p\n", bp); 13633 13634 return (SD_PKT_ALLOC_SUCCESS); 13635 } 13636 13637 13638 /* 13639 * Function: sd_destroypkt_for_uscsi 13640 * 13641 * Description: Free the scsi_pkt(9S) struct for the given bp, for uscsi 13642 * IOs.. Also saves relevant info into the associated uscsi_cmd 13643 * struct. 
 *
 * Context: May be called under interrupt context
 */

static void
sd_destroypkt_for_uscsi(struct buf *bp)
{
	struct uscsi_cmd *uscmd;
	struct sd_xbuf	*xp;
	struct scsi_pkt	*pktp;
	struct sd_lun	*un;
	struct sd_uscsi_info *suip;

	ASSERT(bp != NULL);
	xp = SD_GET_XBUF(bp);
	ASSERT(xp != NULL);
	un = SD_GET_UN(bp);
	ASSERT(un != NULL);
	ASSERT(!mutex_owned(SD_MUTEX(un)));
	pktp = SD_GET_PKTP(bp);
	ASSERT(pktp != NULL);

	SD_TRACE(SD_LOG_IO_CORE, un,
	    "sd_destroypkt_for_uscsi: entry: buf:0x%p\n", bp);

	/* The pointer to the uscsi_cmd struct is expected in xb_pktinfo */
	uscmd = (struct uscsi_cmd *)xp->xb_pktinfo;
	ASSERT(uscmd != NULL);

	/* Save the status and the residual into the uscsi_cmd struct */
	uscmd->uscsi_status = ((*(pktp)->pkt_scbp) & STATUS_MASK);
	uscmd->uscsi_resid = bp->b_resid;

	/* Transfer scsi_pkt information to uscsi */
	(void) scsi_uscsi_pktfini(pktp, uscmd);

	/*
	 * If enabled, copy any saved sense data into the area specified
	 * by the uscsi command.
	 */
	if (((uscmd->uscsi_flags & USCSI_RQENABLE) != 0) &&
	    (uscmd->uscsi_rqlen != 0) && (uscmd->uscsi_rqbuf != NULL)) {
		/*
		 * Note: uscmd->uscsi_rqbuf should always point to a buffer
		 * at least SENSE_LENGTH bytes in size (see sd_send_scsi_cmd())
		 */
		uscmd->uscsi_rqstatus = xp->xb_sense_status;
		uscmd->uscsi_rqresid = xp->xb_sense_resid;
		if (uscmd->uscsi_rqlen > SENSE_LENGTH) {
			bcopy(xp->xb_sense_data, uscmd->uscsi_rqbuf,
			    MAX_SENSE_LENGTH);
		} else {
			bcopy(xp->xb_sense_data, uscmd->uscsi_rqbuf,
			    SENSE_LENGTH);
		}
	}
	/*
	 * The following assignments are for SCSI FMA.
	 */
	ASSERT(xp->xb_private != NULL);
	suip = (struct sd_uscsi_info *)xp->xb_private;
	suip->ui_pkt_reason = pktp->pkt_reason;
	suip->ui_pkt_state = pktp->pkt_state;
	suip->ui_pkt_statistics = pktp->pkt_statistics;
	suip->ui_lba = (uint64_t)SD_GET_BLKNO(bp);

	/* We are done with the scsi_pkt; free it now */
	ASSERT(SD_GET_PKTP(bp) != NULL);
	scsi_destroy_pkt(SD_GET_PKTP(bp));

	SD_TRACE(SD_LOG_IO_CORE, un,
	    "sd_destroypkt_for_uscsi: exit: buf:0x%p\n", bp);
}


/*
 * Function: sd_bioclone_alloc
 *
 * Description: Allocate a buf(9S) and init it as per the given buf
 *		and the various arguments. The associated sd_xbuf
 *		struct is (nearly) duplicated. The struct buf *bp
 *		argument is saved in new_xp->xb_private.
 *
 * Arguments: bp - ptr to the buf(9S) to be "shadowed"
 *	      datalen - size of data area for the shadow bp
 *	      blkno - starting LBA
 *	      func - function pointer for b_iodone in the shadow buf. (May
 *		     be NULL if none.)
 *
 * Return Code: Pointer to the allocated buf(9S) struct
 *
 * Context: Can sleep.
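 */

/*
 * [Editorial note.] The resulting pointer relationship, for both this
 * routine and sd_shadow_buf_alloc() below, is:
 *
 *	orig bp -> b_private -> orig xbuf
 *	new  bp -> b_private -> new xbuf -> xb_private -> orig bp
 *
 * so an iodone routine handed the shadow bp can always recover the
 * original request via SD_GET_XBUF(shadow_bp)->xb_private.
 */

/*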
 */

static struct buf *
sd_bioclone_alloc(struct buf *bp, size_t datalen,
    daddr_t blkno, int (*func)(struct buf *))
{
	struct sd_lun	*un;
	struct sd_xbuf	*xp;
	struct sd_xbuf	*new_xp;
	struct buf	*new_bp;

	ASSERT(bp != NULL);
	xp = SD_GET_XBUF(bp);
	ASSERT(xp != NULL);
	un = SD_GET_UN(bp);
	ASSERT(un != NULL);
	ASSERT(!mutex_owned(SD_MUTEX(un)));

	new_bp = bioclone(bp, 0, datalen, SD_GET_DEV(un), blkno, func,
	    NULL, KM_SLEEP);

	new_bp->b_lblkno = blkno;

	/*
	 * Allocate an xbuf for the shadow bp and copy the contents of the
	 * original xbuf into it.
	 */
	new_xp = kmem_alloc(sizeof (struct sd_xbuf), KM_SLEEP);
	bcopy(xp, new_xp, sizeof (struct sd_xbuf));

	/*
	 * The given bp is automatically saved in the xb_private member
	 * of the new xbuf. Callers are allowed to depend on this.
	 */
	new_xp->xb_private = bp;

	new_bp->b_private = new_xp;

	return (new_bp);
}

/*
 * Function: sd_shadow_buf_alloc
 *
 * Description: Allocate a buf(9S) and init it as per the given buf
 *		and the various arguments. The associated sd_xbuf
 *		struct is (nearly) duplicated. The struct buf *bp
 *		argument is saved in new_xp->xb_private.
 *
 * Arguments: bp - ptr to the buf(9S) to be "shadowed"
 *	      datalen - size of data area for the shadow bp
 *	      bflags - B_READ or B_WRITE (pseudo flag)
 *	      blkno - starting LBA
 *	      func - function pointer for b_iodone in the shadow buf. (May
 *		     be NULL if none.)
 *
 * Return Code: Pointer to the allocated buf(9S) struct
 *
 * Context: Can sleep.
 */

static struct buf *
sd_shadow_buf_alloc(struct buf *bp, size_t datalen, uint_t bflags,
    daddr_t blkno, int (*func)(struct buf *))
{
	struct sd_lun	*un;
	struct sd_xbuf	*xp;
	struct sd_xbuf	*new_xp;
	struct buf	*new_bp;

	ASSERT(bp != NULL);
	xp = SD_GET_XBUF(bp);
	ASSERT(xp != NULL);
	un = SD_GET_UN(bp);
	ASSERT(un != NULL);
	ASSERT(!mutex_owned(SD_MUTEX(un)));

	if (bp->b_flags & (B_PAGEIO | B_PHYS)) {
		bp_mapin(bp);
	}

	bflags &= (B_READ | B_WRITE);
#if defined(__i386) || defined(__amd64)
	new_bp = getrbuf(KM_SLEEP);
	new_bp->b_un.b_addr = kmem_zalloc(datalen, KM_SLEEP);
	new_bp->b_bcount = datalen;
	new_bp->b_flags = bflags |
	    (bp->b_flags & ~(B_PAGEIO | B_PHYS | B_REMAPPED | B_SHADOW));
#else
	new_bp = scsi_alloc_consistent_buf(SD_ADDRESS(un), NULL,
	    datalen, bflags, SLEEP_FUNC, NULL);
#endif
	new_bp->av_forw	= NULL;
	new_bp->av_back	= NULL;
	new_bp->b_dev	= bp->b_dev;
	new_bp->b_blkno	= blkno;
	new_bp->b_iodone = func;
	new_bp->b_edev	= bp->b_edev;
	new_bp->b_resid	= 0;

	/* We need to preserve the B_FAILFAST flag */
	if (bp->b_flags & B_FAILFAST) {
		new_bp->b_flags |= B_FAILFAST;
	}

	/*
	 * Allocate an xbuf for the shadow bp and copy the contents of the
	 * original xbuf into it.
	 */
	new_xp = kmem_alloc(sizeof (struct sd_xbuf), KM_SLEEP);
	bcopy(xp, new_xp, sizeof (struct sd_xbuf));

	/* Need later to copy data between the shadow buf & original buf!
*/ 13849 new_xp->xb_pkt_flags |= PKT_CONSISTENT; 13850 13851 /* 13852 * The given bp is automatically saved in the xb_private member 13853 * of the new xbuf. Callers are allowed to depend on this. 13854 */ 13855 new_xp->xb_private = bp; 13856 13857 new_bp->b_private = new_xp; 13858 13859 return (new_bp); 13860 } 13861 13862 /* 13863 * Function: sd_bioclone_free 13864 * 13865 * Description: Deallocate a buf(9S) that was used for 'shadow' IO operations 13866 * in the larger than partition operation. 13867 * 13868 * Context: May be called under interrupt context 13869 */ 13870 13871 static void 13872 sd_bioclone_free(struct buf *bp) 13873 { 13874 struct sd_xbuf *xp; 13875 13876 ASSERT(bp != NULL); 13877 xp = SD_GET_XBUF(bp); 13878 ASSERT(xp != NULL); 13879 13880 /* 13881 * Call bp_mapout() before freeing the buf, in case a lower 13882 * layer or HBA had done a bp_mapin(). we must do this here 13883 * as we are the "originator" of the shadow buf. 13884 */ 13885 bp_mapout(bp); 13886 13887 /* 13888 * Null out b_iodone before freeing the bp, to ensure that the driver 13889 * never gets confused by a stale value in this field. (Just a little 13890 * extra defensiveness here.) 13891 */ 13892 bp->b_iodone = NULL; 13893 13894 freerbuf(bp); 13895 13896 kmem_free(xp, sizeof (struct sd_xbuf)); 13897 } 13898 13899 /* 13900 * Function: sd_shadow_buf_free 13901 * 13902 * Description: Deallocate a buf(9S) that was used for 'shadow' IO operations. 13903 * 13904 * Context: May be called under interrupt context 13905 */ 13906 13907 static void 13908 sd_shadow_buf_free(struct buf *bp) 13909 { 13910 struct sd_xbuf *xp; 13911 13912 ASSERT(bp != NULL); 13913 xp = SD_GET_XBUF(bp); 13914 ASSERT(xp != NULL); 13915 13916 #if defined(__sparc) 13917 /* 13918 * Call bp_mapout() before freeing the buf, in case a lower 13919 * layer or HBA had done a bp_mapin(). we must do this here 13920 * as we are the "originator" of the shadow buf. 13921 */ 13922 bp_mapout(bp); 13923 #endif 13924 13925 /* 13926 * Null out b_iodone before freeing the bp, to ensure that the driver 13927 * never gets confused by a stale value in this field. (Just a little 13928 * extra defensiveness here.) 13929 */ 13930 bp->b_iodone = NULL; 13931 13932 #if defined(__i386) || defined(__amd64) 13933 kmem_free(bp->b_un.b_addr, bp->b_bcount); 13934 freerbuf(bp); 13935 #else 13936 scsi_free_consistent_buf(bp); 13937 #endif 13938 13939 kmem_free(xp, sizeof (struct sd_xbuf)); 13940 } 13941 13942 13943 /* 13944 * Function: sd_print_transport_rejected_message 13945 * 13946 * Description: This implements the ludicrously complex rules for printing 13947 * a "transport rejected" message. This is to address the 13948 * specific problem of having a flood of this error message 13949 * produced when a failover occurs. 13950 * 13951 * Context: Any. 13952 */ 13953 13954 static void 13955 sd_print_transport_rejected_message(struct sd_lun *un, struct sd_xbuf *xp, 13956 int code) 13957 { 13958 ASSERT(un != NULL); 13959 ASSERT(mutex_owned(SD_MUTEX(un))); 13960 ASSERT(xp != NULL); 13961 13962 /* 13963 * Print the "transport rejected" message under the following 13964 * conditions: 13965 * 13966 * - Whenever the SD_LOGMASK_DIAG bit of sd_level_mask is set 13967 * - The error code from scsi_transport() is NOT a TRAN_FATAL_ERROR. 13968 * - If the error code IS a TRAN_FATAL_ERROR, then the message is 13969 * printed the FIRST time a TRAN_FATAL_ERROR is returned from 13970 * scsi_transport(9F) (which indicates that the target might have 13971 * gone off-line). 
This uses the un->un_tran_fatal_count
 *	count, which is incremented whenever a TRAN_FATAL_ERROR is
 *	received, and reset to zero whenever a TRAN_ACCEPT is returned
 *	from scsi_transport().
 *
 *	The FLAG_SILENT in the scsi_pkt must be CLEARED in ALL of
 *	the preceding cases in order for the message to be printed.
 */
if (((xp->xb_pktp->pkt_flags & FLAG_SILENT) == 0) &&
    (SD_FM_LOG(un) == SD_FM_LOG_NSUP)) {
	if ((sd_level_mask & SD_LOGMASK_DIAG) ||
	    (code != TRAN_FATAL_ERROR) ||
	    (un->un_tran_fatal_count == 1)) {
		switch (code) {
		case TRAN_BADPKT:
			scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
			    "transport rejected bad packet\n");
			break;
		case TRAN_FATAL_ERROR:
			scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
			    "transport rejected fatal error\n");
			break;
		default:
			scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
			    "transport rejected (%d)\n", code);
			break;
		}
	}
}
}


/*
 * Function: sd_add_buf_to_waitq
 *
 * Description: Add the given buf(9S) struct to the wait queue for the
 *		instance. If sorting is enabled, then the buf is added
 *		to the queue via an elevator sort algorithm (a la
 *		disksort(9F)). The SD_GET_BLKNO(bp) is used as the sort key.
 *		If sorting is not enabled, then the buf is just added
 *		to the end of the wait queue.
 *
 * Return Code: void
 *
 * Context: Does not sleep/block, therefore technically can be called
 *	    from any context. However if sorting is enabled then the
 *	    execution time is indeterminate, and may take a long time if
 *	    the wait queue grows large.
 */

static void
sd_add_buf_to_waitq(struct sd_lun *un, struct buf *bp)
{
	struct buf *ap;

	ASSERT(bp != NULL);
	ASSERT(un != NULL);
	ASSERT(mutex_owned(SD_MUTEX(un)));

	/* If the queue is empty, add the buf as the only entry & return. */
	if (un->un_waitq_headp == NULL) {
		ASSERT(un->un_waitq_tailp == NULL);
		un->un_waitq_headp = un->un_waitq_tailp = bp;
		bp->av_forw = NULL;
		return;
	}

	ASSERT(un->un_waitq_tailp != NULL);

	/*
	 * If sorting is disabled, just add the buf to the tail end of
	 * the wait queue and return.
	 */
	if (un->un_f_disksort_disabled) {
		un->un_waitq_tailp->av_forw = bp;
		un->un_waitq_tailp = bp;
		bp->av_forw = NULL;
		return;
	}

	/*
	 * Sort through the list of requests currently on the wait queue
	 * and add the new buf request at the appropriate position.
	 *
	 * The un->un_waitq_headp is an activity chain pointer on which
	 * we keep two queues, sorted in ascending SD_GET_BLKNO() order. The
	 * first queue holds those requests which are positioned after
	 * the current SD_GET_BLKNO() (in the first request); the second holds
	 * requests which came in after their SD_GET_BLKNO() number was passed.
	 * Thus we implement a one-way scan, retracting after reaching
	 * the end of the drive to the first request on the second
	 * queue, at which time it becomes the first queue.
	 * A one-way scan is natural because of the way UNIX read-ahead
	 * blocks are allocated.
	 *
	 * If we lie after the first request, then we must locate the
	 * second request list and add ourselves to it.
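 */

/*
 * [Editorial illustration -- worked example, not original code.]
 * Suppose the head request is at block 100 and the waitq holds
 *
 *	100 -> 180 -> 260 | 20 -> 60
 *
 * where '|' marks the "inversion" that starts the second (wrapped)
 * list. A new request for block 200 sorts into the first list:
 *
 *	100 -> 180 -> 200 -> 260 | 20 -> 60
 *
 * while a new request for block 40, whose position has already been
 * passed by the sweep, sorts into the second list:
 *
 *	100 -> 180 -> 260 | 20 -> 40 -> 60
 *
 * This is the classic one-way elevator: the head sweeps forward, then
 * retracts to the lowest pending block and sweeps forward again.
 */

/*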
14068 */ 14069 ap = un->un_waitq_headp; 14070 if (SD_GET_BLKNO(bp) < SD_GET_BLKNO(ap)) { 14071 while (ap->av_forw != NULL) { 14072 /* 14073 * Look for an "inversion" in the (normally 14074 * ascending) block numbers. This indicates 14075 * the start of the second request list. 14076 */ 14077 if (SD_GET_BLKNO(ap->av_forw) < SD_GET_BLKNO(ap)) { 14078 /* 14079 * Search the second request list for the 14080 * first request at a larger block number. 14081 * We go before that; however if there is 14082 * no such request, we go at the end. 14083 */ 14084 do { 14085 if (SD_GET_BLKNO(bp) < 14086 SD_GET_BLKNO(ap->av_forw)) { 14087 goto insert; 14088 } 14089 ap = ap->av_forw; 14090 } while (ap->av_forw != NULL); 14091 goto insert; /* after last */ 14092 } 14093 ap = ap->av_forw; 14094 } 14095 14096 /* 14097 * No inversions... we will go after the last, and 14098 * be the first request in the second request list. 14099 */ 14100 goto insert; 14101 } 14102 14103 /* 14104 * Request is at/after the current request... 14105 * sort in the first request list. 14106 */ 14107 while (ap->av_forw != NULL) { 14108 /* 14109 * We want to go after the current request (1) if 14110 * there is an inversion after it (i.e. it is the end 14111 * of the first request list), or (2) if the next 14112 * request is a larger block no. than our request. 14113 */ 14114 if ((SD_GET_BLKNO(ap->av_forw) < SD_GET_BLKNO(ap)) || 14115 (SD_GET_BLKNO(bp) < SD_GET_BLKNO(ap->av_forw))) { 14116 goto insert; 14117 } 14118 ap = ap->av_forw; 14119 } 14120 14121 /* 14122 * Neither a second list nor a larger request, therefore 14123 * we go at the end of the first list (which is the same 14124 * as the end of the whole schebang). 14125 */ 14126 insert: 14127 bp->av_forw = ap->av_forw; 14128 ap->av_forw = bp; 14129 14130 /* 14131 * If we inserted onto the tail end of the waitq, make sure the 14132 * tail pointer is updated. 14133 */ 14134 if (ap == un->un_waitq_tailp) { 14135 un->un_waitq_tailp = bp; 14136 } 14137 } 14138 14139 14140 /* 14141 * Function: sd_start_cmds 14142 * 14143 * Description: Remove and transport cmds from the driver queues. 14144 * 14145 * Arguments: un - pointer to the unit (soft state) struct for the target. 14146 * 14147 * immed_bp - ptr to a buf to be transported immediately. Only 14148 * the immed_bp is transported; bufs on the waitq are not 14149 * processed and the un_retry_bp is not checked. If immed_bp is 14150 * NULL, then normal queue processing is performed. 14151 * 14152 * Context: May be called from kernel thread context, interrupt context, 14153 * or runout callback context. This function may not block or 14154 * call routines that block. 14155 */ 14156 14157 static void 14158 sd_start_cmds(struct sd_lun *un, struct buf *immed_bp) 14159 { 14160 struct sd_xbuf *xp; 14161 struct buf *bp; 14162 void (*statp)(kstat_io_t *); 14163 #if defined(__i386) || defined(__amd64) /* DMAFREE for x86 only */ 14164 void (*saved_statp)(kstat_io_t *); 14165 #endif 14166 int rval; 14167 struct sd_fm_internal *sfip = NULL; 14168 14169 ASSERT(un != NULL); 14170 ASSERT(mutex_owned(SD_MUTEX(un))); 14171 ASSERT(un->un_ncmds_in_transport >= 0); 14172 ASSERT(un->un_throttle >= 0); 14173 14174 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sd_start_cmds: entry\n"); 14175 14176 do { 14177 #if defined(__i386) || defined(__amd64) /* DMAFREE for x86 only */ 14178 saved_statp = NULL; 14179 #endif 14180 14181 /* 14182 * If we are syncing or dumping, fail the command to 14183 * avoid recursively calling back into scsi_transport(). 
14184 * The dump I/O itself uses a separate code path so this 14185 * only prevents non-dump I/O from being sent while dumping. 14186 * File system sync takes place before dumping begins. 14187 * During panic, filesystem I/O is allowed provided 14188 * un_in_callback is <= 1. This is to prevent recursion 14189 * such as sd_start_cmds -> scsi_transport -> sdintr -> 14190 * sd_start_cmds and so on. See panic.c for more information 14191 * about the states the system can be in during panic. 14192 */ 14193 if ((un->un_state == SD_STATE_DUMPING) || 14194 (ddi_in_panic() && (un->un_in_callback > 1))) { 14195 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14196 "sd_start_cmds: panicking\n"); 14197 goto exit; 14198 } 14199 14200 if ((bp = immed_bp) != NULL) { 14201 /* 14202 * We have a bp that must be transported immediately. 14203 * It's OK to transport the immed_bp here without doing 14204 * the throttle limit check because the immed_bp is 14205 * always used in a retry/recovery case. This means 14206 * that we know we are not at the throttle limit by 14207 * virtue of the fact that to get here we must have 14208 * already gotten a command back via sdintr(). This also 14209 * relies on (1) the command on un_retry_bp preventing 14210 * further commands from the waitq from being issued; 14211 * and (2) the code in sd_retry_command checking the 14212 * throttle limit before issuing a delayed or immediate 14213 * retry. This holds even if the throttle limit is 14214 * currently ratcheted down from its maximum value. 14215 */ 14216 statp = kstat_runq_enter; 14217 if (bp == un->un_retry_bp) { 14218 ASSERT((un->un_retry_statp == NULL) || 14219 (un->un_retry_statp == kstat_waitq_enter) || 14220 (un->un_retry_statp == 14221 kstat_runq_back_to_waitq)); 14222 /* 14223 * If the waitq kstat was incremented when 14224 * sd_set_retry_bp() queued this bp for a retry, 14225 * then we must set up statp so that the waitq 14226 * count will get decremented correctly below. 14227 * Also we must clear un->un_retry_statp to 14228 * ensure that we do not act on a stale value 14229 * in this field. 14230 */ 14231 if ((un->un_retry_statp == kstat_waitq_enter) || 14232 (un->un_retry_statp == 14233 kstat_runq_back_to_waitq)) { 14234 statp = kstat_waitq_to_runq; 14235 } 14236 #if defined(__i386) || defined(__amd64) /* DMAFREE for x86 only */ 14237 saved_statp = un->un_retry_statp; 14238 #endif 14239 un->un_retry_statp = NULL; 14240 14241 SD_TRACE(SD_LOG_IO | SD_LOG_ERROR, un, 14242 "sd_start_cmds: un:0x%p: GOT retry_bp:0x%p " 14243 "un_throttle:%d un_ncmds_in_transport:%d\n", 14244 un, un->un_retry_bp, un->un_throttle, 14245 un->un_ncmds_in_transport); 14246 } else { 14247 SD_TRACE(SD_LOG_IO_CORE, un, "sd_start_cmds: " 14248 "processing priority bp:0x%p\n", bp); 14249 } 14250 14251 } else if ((bp = un->un_waitq_headp) != NULL) { 14252 /* 14253 * A command on the waitq is ready to go, but do not 14254 * send it if: 14255 * 14256 * (1) the throttle limit has been reached, or 14257 * (2) a retry is pending, or 14258 * (3) a START_STOP_UNIT callback pending, or 14259 * (4) a callback for a SD_PATH_DIRECT_PRIORITY 14260 * command is pending. 14261 * 14262 * For all of these conditions, IO processing will 14263 * restart after the condition is cleared. 
 */
			if (un->un_ncmds_in_transport >= un->un_throttle) {
				SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
				    "sd_start_cmds: exiting, "
				    "throttle limit reached!\n");
				goto exit;
			}
			if (un->un_retry_bp != NULL) {
				SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
				    "sd_start_cmds: exiting, retry pending!\n");
				goto exit;
			}
			if (un->un_startstop_timeid != NULL) {
				SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
				    "sd_start_cmds: exiting, "
				    "START_STOP pending!\n");
				goto exit;
			}
			if (un->un_direct_priority_timeid != NULL) {
				SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
				    "sd_start_cmds: exiting, "
				    "SD_PATH_DIRECT_PRIORITY cmd. pending!\n");
				goto exit;
			}

			/* Dequeue the command */
			un->un_waitq_headp = bp->av_forw;
			if (un->un_waitq_headp == NULL) {
				un->un_waitq_tailp = NULL;
			}
			bp->av_forw = NULL;
			statp = kstat_waitq_to_runq;
			SD_TRACE(SD_LOG_IO_CORE, un,
			    "sd_start_cmds: processing waitq bp:0x%p\n", bp);

		} else {
			/* No work to do so bail out now */
			SD_TRACE(SD_LOG_IO_CORE, un,
			    "sd_start_cmds: no more work, exiting!\n");
			goto exit;
		}

		/*
		 * Reset the state to normal. This is the mechanism by which
		 * the state transitions from either SD_STATE_RWAIT or
		 * SD_STATE_OFFLINE to SD_STATE_NORMAL.
		 * If state is SD_STATE_PM_CHANGING then this command is
		 * part of the device power control and the state must
		 * not be put back to normal. Doing so would allow new
		 * commands to proceed when they shouldn't; the device
		 * may be going off.
		 */
		if ((un->un_state != SD_STATE_SUSPENDED) &&
		    (un->un_state != SD_STATE_PM_CHANGING)) {
			New_state(un, SD_STATE_NORMAL);
		}

		xp = SD_GET_XBUF(bp);
		ASSERT(xp != NULL);

#if defined(__i386) || defined(__amd64)	/* DMAFREE for x86 only */
		/*
		 * Allocate the scsi_pkt if we need one, or attach DMA
		 * resources if we have a scsi_pkt that needs them. The
		 * latter should only occur for commands that are being
		 * retried.
		 */
		if ((xp->xb_pktp == NULL) ||
		    ((xp->xb_pkt_flags & SD_XB_DMA_FREED) != 0)) {
#else
		if (xp->xb_pktp == NULL) {
#endif
			/*
			 * There is no scsi_pkt allocated for this buf. Call
			 * the initpkt function to allocate & init one.
			 *
			 * The scsi_init_pkt runout callback functionality is
			 * implemented as follows:
			 *
			 * 1) The initpkt function always calls
			 *    scsi_init_pkt(9F) with sdrunout specified as the
			 *    callback routine.
			 * 2) A successful packet allocation is initialized and
			 *    the I/O is transported.
			 * 3) The I/O associated with an allocation resource
			 *    failure is left on its queue to be retried via
			 *    runout or the next I/O.
			 * 4) The I/O associated with a DMA error is removed
			 *    from the queue and failed with EIO. Processing of
			 *    the transport queues is also halted to be
			 *    restarted via runout or the next I/O.
			 * 5) The I/O associated with a CDB size or packet
			 *    size error is removed from the queue and failed
			 *    with EIO. Processing of the transport queues is
			 *    continued.
			 *
			 * Note: there is no interface for canceling a runout
			 * callback.
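 */

/*
 * [Editorial illustration -- not part of the original driver source.]
 * Shape of the runout protocol described above: the allocation attempt
 * registers sdrunout with scsi_init_pkt(9F); on a NULL return the buf
 * stays queued and the state goes to SD_STATE_RWAIT; when resources
 * free up, the framework invokes the callback, which only needs to
 * restart queue processing. A hypothetical condensation of that
 * callback (the real sdrunout lives elsewhere in this file and may
 * differ):
 */
#if 0	/* sketch only; not compiled with the driver */
static int
sdrunout_sketch(caddr_t arg)
{
	struct sd_lun *un = (struct sd_lun *)arg;

	mutex_enter(SD_MUTEX(un));
	sd_start_cmds(un, NULL);	/* retry whatever is queued */
	mutex_exit(SD_MUTEX(un));
	return (1);	/* see scsi_init_pkt(9F) for callback conventions */
}
#endif

/*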
To prevent the driver from detaching or 14362 * suspending while a runout is pending the driver 14363 * state is set to SD_STATE_RWAIT 14364 * 14365 * Note: using the scsi_init_pkt callback facility can 14366 * result in an I/O request persisting at the head of 14367 * the list which cannot be satisfied even after 14368 * multiple retries. In the future the driver may 14369 * implement some kind of maximum runout count before 14370 * failing an I/O. 14371 * 14372 * Note: the use of funcp below may seem superfluous, 14373 * but it helps warlock figure out the correct 14374 * initpkt function calls (see [s]sd.wlcmd). 14375 */ 14376 struct scsi_pkt *pktp; 14377 int (*funcp)(struct buf *bp, struct scsi_pkt **pktp); 14378 14379 ASSERT(bp != un->un_rqs_bp); 14380 14381 funcp = sd_initpkt_map[xp->xb_chain_iostart]; 14382 switch ((*funcp)(bp, &pktp)) { 14383 case SD_PKT_ALLOC_SUCCESS: 14384 xp->xb_pktp = pktp; 14385 SD_TRACE(SD_LOG_IO_CORE, un, 14386 "sd_start_cmd: SD_PKT_ALLOC_SUCCESS 0x%p\n", 14387 pktp); 14388 goto got_pkt; 14389 14390 case SD_PKT_ALLOC_FAILURE: 14391 /* 14392 * Temporary (hopefully) resource depletion. 14393 * Since retries and RQS commands always have a 14394 * scsi_pkt allocated, these cases should never 14395 * get here. So the only cases this needs to 14396 * handle is a bp from the waitq (which we put 14397 * back onto the waitq for sdrunout), or a bp 14398 * sent as an immed_bp (which we just fail). 14399 */ 14400 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14401 "sd_start_cmds: SD_PKT_ALLOC_FAILURE\n"); 14402 14403 #if defined(__i386) || defined(__amd64) /* DMAFREE for x86 only */ 14404 14405 if (bp == immed_bp) { 14406 /* 14407 * If SD_XB_DMA_FREED is clear, then 14408 * this is a failure to allocate a 14409 * scsi_pkt, and we must fail the 14410 * command. 14411 */ 14412 if ((xp->xb_pkt_flags & 14413 SD_XB_DMA_FREED) == 0) { 14414 break; 14415 } 14416 14417 /* 14418 * If this immediate command is NOT our 14419 * un_retry_bp, then we must fail it. 14420 */ 14421 if (bp != un->un_retry_bp) { 14422 break; 14423 } 14424 14425 /* 14426 * We get here if this cmd is our 14427 * un_retry_bp that was DMAFREED, but 14428 * scsi_init_pkt() failed to reallocate 14429 * DMA resources when we attempted to 14430 * retry it. This can happen when an 14431 * mpxio failover is in progress, but 14432 * we don't want to just fail the 14433 * command in this case. 14434 * 14435 * Use timeout(9F) to restart it after 14436 * a 100ms delay. We don't want to 14437 * let sdrunout() restart it, because 14438 * sdrunout() is just supposed to start 14439 * commands that are sitting on the 14440 * wait queue. The un_retry_bp stays 14441 * set until the command completes, but 14442 * sdrunout can be called many times 14443 * before that happens. Since sdrunout 14444 * cannot tell if the un_retry_bp is 14445 * already in the transport, it could 14446 * end up calling scsi_transport() for 14447 * the un_retry_bp multiple times. 14448 * 14449 * Also: don't schedule the callback 14450 * if some other callback is already 14451 * pending. 14452 */ 14453 if (un->un_retry_statp == NULL) { 14454 /* 14455 * restore the kstat pointer to 14456 * keep kstat counts coherent 14457 * when we do retry the command. 
14458 */ 14459 un->un_retry_statp = 14460 saved_statp; 14461 } 14462 14463 if ((un->un_startstop_timeid == NULL) && 14464 (un->un_retry_timeid == NULL) && 14465 (un->un_direct_priority_timeid == 14466 NULL)) { 14467 14468 un->un_retry_timeid = 14469 timeout( 14470 sd_start_retry_command, 14471 un, SD_RESTART_TIMEOUT); 14472 } 14473 goto exit; 14474 } 14475 14476 #else 14477 if (bp == immed_bp) { 14478 break; /* Just fail the command */ 14479 } 14480 #endif 14481 14482 /* Add the buf back to the head of the waitq */ 14483 bp->av_forw = un->un_waitq_headp; 14484 un->un_waitq_headp = bp; 14485 if (un->un_waitq_tailp == NULL) { 14486 un->un_waitq_tailp = bp; 14487 } 14488 goto exit; 14489 14490 case SD_PKT_ALLOC_FAILURE_NO_DMA: 14491 /* 14492 * HBA DMA resource failure. Fail the command 14493 * and continue processing of the queues. 14494 */ 14495 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14496 "sd_start_cmds: " 14497 "SD_PKT_ALLOC_FAILURE_NO_DMA\n"); 14498 break; 14499 14500 case SD_PKT_ALLOC_FAILURE_PKT_TOO_SMALL: 14501 /* 14502 * Note:x86: Partial DMA mapping not supported 14503 * for USCSI commands, and all the needed DMA 14504 * resources were not allocated. 14505 */ 14506 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14507 "sd_start_cmds: " 14508 "SD_PKT_ALLOC_FAILURE_PKT_TOO_SMALL\n"); 14509 break; 14510 14511 case SD_PKT_ALLOC_FAILURE_CDB_TOO_SMALL: 14512 /* 14513 * Note:x86: Request cannot fit into CDB based 14514 * on lba and len. 14515 */ 14516 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14517 "sd_start_cmds: " 14518 "SD_PKT_ALLOC_FAILURE_CDB_TOO_SMALL\n"); 14519 break; 14520 14521 default: 14522 /* Should NEVER get here! */ 14523 panic("scsi_initpkt error"); 14524 /*NOTREACHED*/ 14525 } 14526 14527 /* 14528 * Fatal error in allocating a scsi_pkt for this buf. 14529 * Update kstats & return the buf with an error code. 14530 * We must use sd_return_failed_command_no_restart() to 14531 * avoid a recursive call back into sd_start_cmds(). 14532 * However this also means that we must keep processing 14533 * the waitq here in order to avoid stalling. 14534 */ 14535 if (statp == kstat_waitq_to_runq) { 14536 SD_UPDATE_KSTATS(un, kstat_waitq_exit, bp); 14537 } 14538 sd_return_failed_command_no_restart(un, bp, EIO); 14539 if (bp == immed_bp) { 14540 /* immed_bp is gone by now, so clear this */ 14541 immed_bp = NULL; 14542 } 14543 continue; 14544 } 14545 got_pkt: 14546 if (bp == immed_bp) { 14547 /* goto the head of the class.... */ 14548 xp->xb_pktp->pkt_flags |= FLAG_HEAD; 14549 } 14550 14551 un->un_ncmds_in_transport++; 14552 SD_UPDATE_KSTATS(un, statp, bp); 14553 14554 /* 14555 * Call scsi_transport() to send the command to the target. 14556 * According to SCSA architecture, we must drop the mutex here 14557 * before calling scsi_transport() in order to avoid deadlock. 14558 * Note that the scsi_pkt's completion routine can be executed 14559 * (from interrupt context) even before the call to 14560 * scsi_transport() returns. 
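 */

/*
 * [Editorial note -- the locking shape used below, shown in isolation.]
 * Because pkt_comp (sdintr) can fire from interrupt context before
 * scsi_transport() even returns, the per-unit mutex must be dropped
 * around the call and all cached state re-evaluated afterwards:
 */
#if 0	/* sketch only; not compiled with the driver */
	un->un_ncmds_in_transport++;	/* account first, under the mutex */
	mutex_exit(SD_MUTEX(un));	/* SCSA: no locks across transport */
	rval = scsi_transport(xp->xb_pktp);
	mutex_enter(SD_MUTEX(un));
	if (rval != TRAN_ACCEPT)
		un->un_ncmds_in_transport--;	/* undo on rejection */
#endif

/*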
14561 */ 14562 SD_TRACE(SD_LOG_IO_CORE, un, 14563 "sd_start_cmds: calling scsi_transport()\n"); 14564 DTRACE_PROBE1(scsi__transport__dispatch, struct buf *, bp); 14565 14566 mutex_exit(SD_MUTEX(un)); 14567 rval = scsi_transport(xp->xb_pktp); 14568 mutex_enter(SD_MUTEX(un)); 14569 14570 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14571 "sd_start_cmds: scsi_transport() returned %d\n", rval); 14572 14573 switch (rval) { 14574 case TRAN_ACCEPT: 14575 /* Clear this with every pkt accepted by the HBA */ 14576 un->un_tran_fatal_count = 0; 14577 break; /* Success; try the next cmd (if any) */ 14578 14579 case TRAN_BUSY: 14580 un->un_ncmds_in_transport--; 14581 ASSERT(un->un_ncmds_in_transport >= 0); 14582 14583 /* 14584 * Don't retry request sense, the sense data 14585 * is lost when another request is sent. 14586 * Free up the rqs buf and retry 14587 * the original failed cmd. Update kstat. 14588 */ 14589 if (bp == un->un_rqs_bp) { 14590 SD_UPDATE_KSTATS(un, kstat_runq_exit, bp); 14591 bp = sd_mark_rqs_idle(un, xp); 14592 sd_retry_command(un, bp, SD_RETRIES_STANDARD, 14593 NULL, NULL, EIO, un->un_busy_timeout / 500, 14594 kstat_waitq_enter); 14595 goto exit; 14596 } 14597 14598 #if defined(__i386) || defined(__amd64) /* DMAFREE for x86 only */ 14599 /* 14600 * Free the DMA resources for the scsi_pkt. This will 14601 * allow mpxio to select another path the next time 14602 * we call scsi_transport() with this scsi_pkt. 14603 * See sdintr() for the rationalization behind this. 14604 */ 14605 if ((un->un_f_is_fibre == TRUE) && 14606 ((xp->xb_pkt_flags & SD_XB_USCSICMD) == 0) && 14607 ((xp->xb_pktp->pkt_flags & FLAG_SENSING) == 0)) { 14608 scsi_dmafree(xp->xb_pktp); 14609 xp->xb_pkt_flags |= SD_XB_DMA_FREED; 14610 } 14611 #endif 14612 14613 if (SD_IS_DIRECT_PRIORITY(SD_GET_XBUF(bp))) { 14614 /* 14615 * Commands that are SD_PATH_DIRECT_PRIORITY 14616 * are for error recovery situations. These do 14617 * not use the normal command waitq, so if they 14618 * get a TRAN_BUSY we cannot put them back onto 14619 * the waitq for later retry. One possible 14620 * problem is that there could already be some 14621 * other command on un_retry_bp that is waiting 14622 * for this one to complete, so we would be 14623 * deadlocked if we put this command back onto 14624 * the waitq for later retry (since un_retry_bp 14625 * must complete before the driver gets back to 14626 * commands on the waitq). 14627 * 14628 * To avoid deadlock we must schedule a callback 14629 * that will restart this command after a set 14630 * interval. This should keep retrying for as 14631 * long as the underlying transport keeps 14632 * returning TRAN_BUSY (just like for other 14633 * commands). Use the same timeout interval as 14634 * for the ordinary TRAN_BUSY retry. 14635 */ 14636 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14637 "sd_start_cmds: scsi_transport() returned " 14638 "TRAN_BUSY for DIRECT_PRIORITY cmd!\n"); 14639 14640 SD_UPDATE_KSTATS(un, kstat_runq_exit, bp); 14641 un->un_direct_priority_timeid = 14642 timeout(sd_start_direct_priority_command, 14643 bp, un->un_busy_timeout / 500); 14644 14645 goto exit; 14646 } 14647 14648 /* 14649 * For TRAN_BUSY, we want to reduce the throttle value, 14650 * unless we are retrying a command. 14651 */ 14652 if (bp != un->un_retry_bp) { 14653 sd_reduce_throttle(un, SD_THROTTLE_TRAN_BUSY); 14654 } 14655 14656 /* 14657 * Set up the bp to be tried again 10 ms later. 14658 * Note:x86: Is there a timeout value in the sd_lun 14659 * for this condition? 
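*
* (Illustrative arithmetic: assuming the default
* un_busy_timeout of 5 seconds, the expression
* un_busy_timeout / 500 used below works out to
* 5 s / 500 = 10 ms, which is where the "10 ms"
* above comes from.)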
14660 */ 14661 sd_set_retry_bp(un, bp, un->un_busy_timeout / 500, 14662 kstat_runq_back_to_waitq); 14663 goto exit; 14664 14665 case TRAN_FATAL_ERROR: 14666 un->un_tran_fatal_count++; 14667 /* FALLTHRU */ 14668 14669 case TRAN_BADPKT: 14670 default: 14671 un->un_ncmds_in_transport--; 14672 ASSERT(un->un_ncmds_in_transport >= 0); 14673 14674 /* 14675 * If this is our REQUEST SENSE command with a 14676 * transport error, we must get back the pointers 14677 * to the original buf, and mark the REQUEST 14678 * SENSE command as "available". 14679 */ 14680 if (bp == un->un_rqs_bp) { 14681 bp = sd_mark_rqs_idle(un, xp); 14682 xp = SD_GET_XBUF(bp); 14683 } else { 14684 /* 14685 * Legacy behavior: do not update transport 14686 * error count for request sense commands. 14687 */ 14688 SD_UPDATE_ERRSTATS(un, sd_transerrs); 14689 } 14690 14691 SD_UPDATE_KSTATS(un, kstat_runq_exit, bp); 14692 sd_print_transport_rejected_message(un, xp, rval); 14693 14694 /* 14695 * This command will be terminated by SD driver due 14696 * to a fatal transport error. We should post 14697 * ereport.io.scsi.cmd.disk.tran with driver-assessment 14698 * of "fail" for any command to indicate this 14699 * situation. 14700 */ 14701 if (xp->xb_ena > 0) { 14702 ASSERT(un->un_fm_private != NULL); 14703 sfip = un->un_fm_private; 14704 sfip->fm_ssc.ssc_flags |= SSC_FLAGS_TRAN_ABORT; 14705 sd_ssc_extract_info(&sfip->fm_ssc, un, 14706 xp->xb_pktp, bp, xp); 14707 sd_ssc_post(&sfip->fm_ssc, SD_FM_DRV_FATAL); 14708 } 14709 14710 /* 14711 * We must use sd_return_failed_command_no_restart() to 14712 * avoid a recursive call back into sd_start_cmds(). 14713 * However this also means that we must keep processing 14714 * the waitq here in order to avoid stalling. 14715 */ 14716 sd_return_failed_command_no_restart(un, bp, EIO); 14717 14718 /* 14719 * Notify any threads waiting in sd_ddi_suspend() that 14720 * a command completion has occurred. 14721 */ 14722 if (un->un_state == SD_STATE_SUSPENDED) { 14723 cv_broadcast(&un->un_disk_busy_cv); 14724 } 14725 14726 if (bp == immed_bp) { 14727 /* immed_bp is gone by now, so clear this */ 14728 immed_bp = NULL; 14729 } 14730 break; 14731 } 14732 14733 } while (immed_bp == NULL); 14734 14735 exit: 14736 ASSERT(mutex_owned(SD_MUTEX(un))); 14737 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sd_start_cmds: exit\n"); 14738 } 14739 14740 14741 /* 14742 * Function: sd_return_command 14743 * 14744 * Description: Returns a command to its originator (with or without an 14745 * error). Also starts commands waiting to be transported 14746 * to the target. 14747 * 14748 * Context: May be called from interrupt, kernel, or timeout context 14749 */ 14750 14751 static void 14752 sd_return_command(struct sd_lun *un, struct buf *bp) 14753 { 14754 struct sd_xbuf *xp; 14755 struct scsi_pkt *pktp; 14756 struct sd_fm_internal *sfip; 14757 14758 ASSERT(bp != NULL); 14759 ASSERT(un != NULL); 14760 ASSERT(mutex_owned(SD_MUTEX(un))); 14761 ASSERT(bp != un->un_rqs_bp); 14762 xp = SD_GET_XBUF(bp); 14763 ASSERT(xp != NULL); 14764 14765 pktp = SD_GET_PKTP(bp); 14766 sfip = (struct sd_fm_internal *)un->un_fm_private; 14767 ASSERT(sfip != NULL); 14768 14769 SD_TRACE(SD_LOG_IO_CORE, un, "sd_return_command: entry\n"); 14770 14771 /* 14772 * Note: check for the "sdrestart failed" case. 
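*
* (Sketch of the case tested below: a request that was
* broken into DMA windows has completed one window --
* pkt_resid == 0 -- while xb_dma_resid shows that part
* of the original transfer is still outstanding, so
* sd_setup_next_xfer() programs the next window and the
* command is re-issued instead of being completed.)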
14773 */
14774 if ((un->un_partial_dma_supported == 1) &&
14775 ((xp->xb_pkt_flags & SD_XB_USCSICMD) != SD_XB_USCSICMD) &&
14776 (geterror(bp) == 0) && (xp->xb_dma_resid != 0) &&
14777 (xp->xb_pktp->pkt_resid == 0)) {
14778
14779 if (sd_setup_next_xfer(un, bp, pktp, xp) != 0) {
14780 /*
14781 * Successfully set up next portion of cmd
14782 * transfer, try sending it
14783 */
14784 sd_retry_command(un, bp, SD_RETRIES_NOCHECK,
14785 NULL, NULL, 0, (clock_t)0, NULL);
14786 sd_start_cmds(un, NULL);
14787 return; /* Note:x86: need a return here? */
14788 }
14789 }
14790
14791 /*
14792 * If this is the failfast bp, clear it from un_failfast_bp. This
14793 * can happen if upon being re-tried the failfast bp either
14794 * succeeded or encountered another error (possibly even a different
14795 * error than the one that precipitated the failfast state, but in
14796 * that case it would have had to exhaust retries as well). Regardless,
14797 * this should not occur whenever the instance is in the active
14798 * failfast state.
14799 */
14800 if (bp == un->un_failfast_bp) {
14801 ASSERT(un->un_failfast_state == SD_FAILFAST_INACTIVE);
14802 un->un_failfast_bp = NULL;
14803 }
14804
14805 /*
14806 * Clear the failfast state upon successful completion of ANY cmd.
14807 */
14808 if (bp->b_error == 0) {
14809 un->un_failfast_state = SD_FAILFAST_INACTIVE;
14810 /*
14811 * If this is a successful command that was previously
14812 * retried, we will take it as a recovered command and
14813 * post an ereport with a driver-assessment of "recovered".
14814 */
14815 if (xp->xb_ena > 0) {
14816 sd_ssc_extract_info(&sfip->fm_ssc, un, pktp, bp, xp);
14817 sd_ssc_post(&sfip->fm_ssc, SD_FM_DRV_RECOVERY);
14818 }
14819 } else {
14820 /*
14821 * If this is a failed non-USCSI command we will post an
14822 * ereport with the driver-assessment set accordingly
14823 * ("fail" or "fatal").
14824 */
14825 if (!(xp->xb_pkt_flags & SD_XB_USCSICMD)) {
14826 sd_ssc_extract_info(&sfip->fm_ssc, un, pktp, bp, xp);
14827 sd_ssc_post(&sfip->fm_ssc, SD_FM_DRV_FATAL);
14828 }
14829 }
14830
14831 /*
14832 * This is used if the command was retried one or more times. Show that
14833 * we are done with it, and allow processing of the waitq to resume.
14834 */
14835 if (bp == un->un_retry_bp) {
14836 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
14837 "sd_return_command: un:0x%p: "
14838 "RETURNING retry_bp:0x%p\n", un, un->un_retry_bp);
14839 un->un_retry_bp = NULL;
14840 un->un_retry_statp = NULL;
14841 }
14842
14843 SD_UPDATE_RDWR_STATS(un, bp);
14844 SD_UPDATE_PARTITION_STATS(un, bp);
14845
14846 switch (un->un_state) {
14847 case SD_STATE_SUSPENDED:
14848 /*
14849 * Notify any threads waiting in sd_ddi_suspend() that
14850 * a command completion has occurred.
14851 */
14852 cv_broadcast(&un->un_disk_busy_cv);
14853 break;
14854 default:
14855 sd_start_cmds(un, NULL);
14856 break;
14857 }
14858
14859 /* Return this command up the iodone chain to its originator. */
14860 mutex_exit(SD_MUTEX(un));
14861
14862 (*(sd_destroypkt_map[xp->xb_chain_iodone]))(bp);
14863 xp->xb_pktp = NULL;
14864
14865 SD_BEGIN_IODONE(xp->xb_chain_iodone, un, bp);
14866
14867 ASSERT(!mutex_owned(SD_MUTEX(un)));
14868 mutex_enter(SD_MUTEX(un));
14869
14870 SD_TRACE(SD_LOG_IO_CORE, un, "sd_return_command: exit\n");
14871 }
14872
14873
14874 /*
14875 * Function: sd_return_failed_command
14876 *
14877 * Description: Command completion when an error occurred.
14878 *
14879 * Context: May be called from interrupt context
14880 */
14881
14882 static void
14883 sd_return_failed_command(struct sd_lun *un, struct buf *bp, int errcode)
14884 {
14885 ASSERT(bp != NULL);
14886 ASSERT(un != NULL);
14887 ASSERT(mutex_owned(SD_MUTEX(un)));
14888
14889 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
14890 "sd_return_failed_command: entry\n");
14891
14892 /*
14893 * b_resid could already be nonzero due to a partial data
14894 * transfer, so do not change it here.
14895 */
14896 SD_BIOERROR(bp, errcode);
14897
14898 sd_return_command(un, bp);
14899 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
14900 "sd_return_failed_command: exit\n");
14901 }
14902
14903
14904 /*
14905 * Function: sd_return_failed_command_no_restart
14906 *
14907 * Description: Same as sd_return_failed_command, but ensures that no
14908 * call back into sd_start_cmds will be issued.
14909 *
14910 * Context: May be called from interrupt context
14911 */
14912
14913 static void
14914 sd_return_failed_command_no_restart(struct sd_lun *un, struct buf *bp,
14915 int errcode)
14916 {
14917 struct sd_xbuf *xp;
14918
14919 ASSERT(bp != NULL);
14920 ASSERT(un != NULL);
14921 ASSERT(mutex_owned(SD_MUTEX(un)));
14922 xp = SD_GET_XBUF(bp);
14923 ASSERT(xp != NULL);
14924 ASSERT(errcode != 0);
14925
14926 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
14927 "sd_return_failed_command_no_restart: entry\n");
14928
14929 /*
14930 * b_resid could already be nonzero due to a partial data
14931 * transfer, so do not change it here.
14932 */
14933 SD_BIOERROR(bp, errcode);
14934
14935 /*
14936 * If this is the failfast bp, clear it. This can happen if the
14937 * failfast bp encountered a fatal error when we attempted to
14938 * re-try it (such as a scsi_transport(9F) failure). However
14939 * we should NOT be in an active failfast state if the failfast
14940 * bp is not NULL.
14941 */
14942 if (bp == un->un_failfast_bp) {
14943 ASSERT(un->un_failfast_state == SD_FAILFAST_INACTIVE);
14944 un->un_failfast_bp = NULL;
14945 }
14946
14947 if (bp == un->un_retry_bp) {
14948 /*
14949 * This command was retried one or more times. Show that we are
14950 * done with it, and allow processing of the waitq to resume.
14951 */
14952 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
14953 "sd_return_failed_command_no_restart: "
14954 " un:0x%p: RETURNING retry_bp:0x%p\n", un, un->un_retry_bp);
14955 un->un_retry_bp = NULL;
14956 un->un_retry_statp = NULL;
14957 }
14958
14959 SD_UPDATE_RDWR_STATS(un, bp);
14960 SD_UPDATE_PARTITION_STATS(un, bp);
14961
14962 mutex_exit(SD_MUTEX(un));
14963
14964 if (xp->xb_pktp != NULL) {
14965 (*(sd_destroypkt_map[xp->xb_chain_iodone]))(bp);
14966 xp->xb_pktp = NULL;
14967 }
14968
14969 SD_BEGIN_IODONE(xp->xb_chain_iodone, un, bp);
14970
14971 mutex_enter(SD_MUTEX(un));
14972
14973 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
14974 "sd_return_failed_command_no_restart: exit\n");
14975 }
14976
14977
14978 /*
14979 * Function: sd_retry_command
14980 *
14981 * Description: queue up a command for retry, or (optionally) fail it
14982 * if retry counts are exhausted.
14983 *
14984 * Arguments: un - Pointer to the sd_lun struct for the target.
14985 *
14986 * bp - Pointer to the buf for the command to be retried.
14987 *
14988 * retry_check_flag - Flag to see which (if any) of the retry
14989 * counts should be decremented/checked. If the indicated
14990 * retry count is exhausted, then the command will not be
14991 * retried; it will be failed instead.
This should use a
14992 * value equal to one of the following:
14993 *
14994 * SD_RETRIES_NOCHECK
14995 * SD_RETRIES_STANDARD
14996 * SD_RETRIES_VICTIM
14997 *
14998 * Optionally may be bitwise-OR'ed with SD_RETRIES_ISOLATE
14999 * if the check should be made to see if FLAG_ISOLATE is set
15000 * in the pkt. If FLAG_ISOLATE is set, then the command is
15001 * not retried, it is simply failed.
15002 *
15003 * user_funcp - Ptr to function to call before dispatching the
15004 * command. May be NULL if no action needs to be performed.
15005 * (Primarily intended for printing messages.)
15006 *
15007 * user_arg - Optional argument to be passed along to
15008 * the user_funcp call.
15009 *
15010 * failure_code - errno return code to set in the bp if the
15011 * command is going to be failed.
15012 *
15013 * retry_delay - Retry delay interval in (clock_t) units. May
15014 * be zero, which indicates that the retry should be issued
15015 * immediately (ie, without an intervening delay).
15016 *
15017 * statp - Ptr to kstat function to be updated if the command
15018 * is queued for a delayed retry. May be NULL if no kstat
15019 * update is desired.
15020 *
15021 * Context: May be called from interrupt context.
15022 */
15023
15024 static void
15025 sd_retry_command(struct sd_lun *un, struct buf *bp, int retry_check_flag,
15026 void (*user_funcp)(struct sd_lun *un, struct buf *bp, void *argp, int
15027 code), void *user_arg, int failure_code, clock_t retry_delay,
15028 void (*statp)(kstat_io_t *))
15029 {
15030 struct sd_xbuf *xp;
15031 struct scsi_pkt *pktp;
15032 struct sd_fm_internal *sfip;
15033
15034 ASSERT(un != NULL);
15035 ASSERT(mutex_owned(SD_MUTEX(un)));
15036 ASSERT(bp != NULL);
15037 xp = SD_GET_XBUF(bp);
15038 ASSERT(xp != NULL);
15039 pktp = SD_GET_PKTP(bp);
15040 ASSERT(pktp != NULL);
15041
15042 sfip = (struct sd_fm_internal *)un->un_fm_private;
15043 ASSERT(sfip != NULL);
15044
15045 SD_TRACE(SD_LOG_IO | SD_LOG_ERROR, un,
15046 "sd_retry_command: entry: bp:0x%p xp:0x%p\n", bp, xp);
15047
15048 /*
15049 * If we are syncing or dumping, fail the command to avoid
15050 * recursively calling back into scsi_transport().
15051 */
15052 if (ddi_in_panic()) {
15053 goto fail_command_no_log;
15054 }
15055
15056 /*
15057 * We should never be retrying a command with FLAG_DIAGNOSE set, so
15058 * log an error and fail the command.
15059 */
15060 if ((pktp->pkt_flags & FLAG_DIAGNOSE) != 0) {
15061 scsi_log(SD_DEVINFO(un), sd_label, CE_NOTE,
15062 "ERROR, retrying FLAG_DIAGNOSE command.\n");
15063 sd_dump_memory(un, SD_LOG_IO, "CDB",
15064 (uchar_t *)pktp->pkt_cdbp, CDB_SIZE, SD_LOG_HEX);
15065 sd_dump_memory(un, SD_LOG_IO, "Sense Data",
15066 (uchar_t *)xp->xb_sense_data, SENSE_LENGTH, SD_LOG_HEX);
15067 goto fail_command;
15068 }
15069
15070 /*
15071 * If we are suspended, then put the command onto the head of the
15072 * wait queue since we don't want to start any more commands, and
15073 * clear un_retry_bp. When we are resumed, we will handle the
15074 * commands in the wait queue.
15075 */
15076 switch (un->un_state) {
15077 case SD_STATE_SUSPENDED:
15078 case SD_STATE_DUMPING:
15079 bp->av_forw = un->un_waitq_headp;
15080 un->un_waitq_headp = bp;
15081 if (un->un_waitq_tailp == NULL) {
15082 un->un_waitq_tailp = bp;
15083 }
15084 if (bp == un->un_retry_bp) {
15085 un->un_retry_bp = NULL;
15086 un->un_retry_statp = NULL;
15087 }
15088 SD_UPDATE_KSTATS(un, kstat_waitq_enter, bp);
15089 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sd_retry_command: "
15090 "exiting; cmd bp:0x%p requeued for SUSPEND/DUMP\n", bp);
15091 return;
15092 default:
15093 break;
15094 }
15095
15096 /*
15097 * If the caller wants us to check FLAG_ISOLATE, then see if that
15098 * is set; if it is then we do not want to retry the command.
15099 * Normally, FLAG_ISOLATE is only used with USCSI cmds.
15100 */
15101 if ((retry_check_flag & SD_RETRIES_ISOLATE) != 0) {
15102 if ((pktp->pkt_flags & FLAG_ISOLATE) != 0) {
15103 goto fail_command;
15104 }
15105 }
15106
15107
15108 /*
15109 * If SD_RETRIES_FAILFAST is set, it indicates that either a
15110 * command timeout or a selection timeout has occurred. This means
15111 * that we were unable to establish any kind of communication with
15112 * the target, and subsequent retries and/or commands are likely
15113 * to encounter similar results and take a long time to complete.
15114 *
15115 * If this is a failfast error condition, we need to update the
15116 * failfast state, even if this bp does not have B_FAILFAST set.
15117 */
15118 if (retry_check_flag & SD_RETRIES_FAILFAST) {
15119 if (un->un_failfast_state == SD_FAILFAST_ACTIVE) {
15120 ASSERT(un->un_failfast_bp == NULL);
15121 /*
15122 * If we are already in the active failfast state, and
15123 * another failfast error condition has been detected,
15124 * then fail this command if it has B_FAILFAST set.
15125 * If B_FAILFAST is clear, then maintain the legacy
15126 * behavior of retrying heroically, even though this
15127 * will take a lot more time to fail the command.
15128 */
15129 if (bp->b_flags & B_FAILFAST) {
15130 goto fail_command;
15131 }
15132 } else {
15133 /*
15134 * We're not in the active failfast state, but we
15135 * have a failfast error condition, so we must begin
15136 * transition to the next state. We do this regardless
15137 * of whether or not this bp has B_FAILFAST set.
15138 */
15139 if (un->un_failfast_bp == NULL) {
15140 /*
15141 * This is the first bp to meet a failfast
15142 * condition so save it on un_failfast_bp &
15143 * do normal retry processing. Do not enter
15144 * active failfast state yet. This marks
15145 * entry into the "failfast pending" state.
15146 */
15147 un->un_failfast_bp = bp;
15148
15149 } else if (un->un_failfast_bp == bp) {
15150 /*
15151 * This is the second time *this* bp has
15152 * encountered a failfast error condition,
15153 * so enter active failfast state & flush
15154 * queues as appropriate.
15155 */
15156 un->un_failfast_state = SD_FAILFAST_ACTIVE;
15157 un->un_failfast_bp = NULL;
15158 sd_failfast_flushq(un);
15159
15160 /*
15161 * Fail this bp now if B_FAILFAST set;
15162 * otherwise continue with retries. (It would
15163 * be pretty ironic if this bp succeeded on a
15164 * subsequent retry after we just flushed all
15165 * the queues).
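*
* (Summary sketch of the failfast bookkeeping, for
* reference:
*	state INACTIVE, un_failfast_bp == NULL
*		-> no failfast condition outstanding
*	state INACTIVE, un_failfast_bp != NULL
*		-> "failfast pending": one bp has hit a
*		   failfast error and is being retried
*	state ACTIVE
*		-> queues flushed; bps with B_FAILFAST
*		   fail immediately on further errors.)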
15166 */
15167 if (bp->b_flags & B_FAILFAST) {
15168 goto fail_command;
15169 }
15170
15171 #if !defined(lint) && !defined(__lint)
15172 } else {
15173 /*
15174 * If neither of the preceding conditionals
15175 * was true, it means that there is some
15176 * *other* bp that has met an initial failfast
15177 * condition and is currently either being
15178 * retried or is waiting to be retried. In
15179 * that case we should perform normal retry
15180 * processing on *this* bp, since there is a
15181 * chance that the current failfast condition
15182 * is transient and recoverable. If that does
15183 * not turn out to be the case, then retries
15184 * will be cleared when the wait queue is
15185 * flushed anyway.
15186 */
15187 #endif
15188 }
15189 }
15190 } else {
15191 /*
15192 * SD_RETRIES_FAILFAST is clear, which indicates that we
15193 * likely were able to at least establish some level of
15194 * communication with the target and subsequent commands
15195 * and/or retries are likely to get through to the target.
15196 * In this case we want to be aggressive about clearing
15197 * the failfast state. Note that this does not affect
15198 * the "failfast pending" condition.
15199 */
15200 un->un_failfast_state = SD_FAILFAST_INACTIVE;
15201 }
15202
15203
15204 /*
15205 * Check the specified retry count to see if we can still do
15206 * any retries with this pkt before we should fail it.
15207 */
15208 switch (retry_check_flag & SD_RETRIES_MASK) {
15209 case SD_RETRIES_VICTIM:
15210 /*
15211 * Check the victim retry count. If exhausted, then fall
15212 * thru & check against the standard retry count.
15213 */
15214 if (xp->xb_victim_retry_count < un->un_victim_retry_count) {
15215 /* Increment count & proceed with the retry */
15216 xp->xb_victim_retry_count++;
15217 break;
15218 }
15219 /* Victim retries exhausted, fall back to std. retries... */
15220 /* FALLTHRU */
15221
15222 case SD_RETRIES_STANDARD:
15223 if (xp->xb_retry_count >= un->un_retry_count) {
15224 /* Retries exhausted, fail the command */
15225 SD_TRACE(SD_LOG_IO_CORE, un,
15226 "sd_retry_command: retries exhausted!\n");
15227 /*
15228 * update b_resid for failed SCMD_READ & SCMD_WRITE
15229 * commands with nonzero pkt_resid.
15230 */
15231 if ((pktp->pkt_reason == CMD_CMPLT) &&
15232 (SD_GET_PKT_STATUS(pktp) == STATUS_GOOD) &&
15233 (pktp->pkt_resid != 0)) {
15234 uchar_t op = SD_GET_PKT_OPCODE(pktp) & 0x1F;
15235 if ((op == SCMD_READ) || (op == SCMD_WRITE)) {
15236 SD_UPDATE_B_RESID(bp, pktp);
15237 }
15238 }
15239 goto fail_command;
15240 }
15241 xp->xb_retry_count++;
15242 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
15243 "sd_retry_command: retry count:%d\n", xp->xb_retry_count);
15244 break;
15245
15246 case SD_RETRIES_UA:
15247 if (xp->xb_ua_retry_count >= sd_ua_retry_count) {
15248 /* Retries exhausted, fail the command */
15249 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
15250 "Unit Attention retries exhausted. "
" 15251 "Check the target.\n"); 15252 goto fail_command; 15253 } 15254 xp->xb_ua_retry_count++; 15255 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15256 "sd_retry_command: retry count:%d\n", 15257 xp->xb_ua_retry_count); 15258 break; 15259 15260 case SD_RETRIES_BUSY: 15261 if (xp->xb_retry_count >= un->un_busy_retry_count) { 15262 /* Retries exhausted, fail the command */ 15263 SD_TRACE(SD_LOG_IO_CORE, un, 15264 "sd_retry_command: retries exhausted!\n"); 15265 goto fail_command; 15266 } 15267 xp->xb_retry_count++; 15268 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15269 "sd_retry_command: retry count:%d\n", xp->xb_retry_count); 15270 break; 15271 15272 case SD_RETRIES_NOCHECK: 15273 default: 15274 /* No retry count to check. Just proceed with the retry */ 15275 break; 15276 } 15277 15278 xp->xb_pktp->pkt_flags |= FLAG_HEAD; 15279 15280 /* 15281 * If this is a non-USCSI command being retried 15282 * during execution last time, we should post an ereport with 15283 * driver-assessment of the value "retry". 15284 * For partial DMA, request sense and STATUS_QFULL, there are no 15285 * hardware errors, we bypass ereport posting. 15286 */ 15287 if (failure_code != 0) { 15288 if (!(xp->xb_pkt_flags & SD_XB_USCSICMD)) { 15289 sd_ssc_extract_info(&sfip->fm_ssc, un, pktp, bp, xp); 15290 sd_ssc_post(&sfip->fm_ssc, SD_FM_DRV_RETRY); 15291 } 15292 } 15293 15294 /* 15295 * If we were given a zero timeout, we must attempt to retry the 15296 * command immediately (ie, without a delay). 15297 */ 15298 if (retry_delay == 0) { 15299 /* 15300 * Check some limiting conditions to see if we can actually 15301 * do the immediate retry. If we cannot, then we must 15302 * fall back to queueing up a delayed retry. 15303 */ 15304 if (un->un_ncmds_in_transport >= un->un_throttle) { 15305 /* 15306 * We are at the throttle limit for the target, 15307 * fall back to delayed retry. 15308 */ 15309 retry_delay = un->un_busy_timeout; 15310 statp = kstat_waitq_enter; 15311 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15312 "sd_retry_command: immed. retry hit " 15313 "throttle!\n"); 15314 } else { 15315 /* 15316 * We're clear to proceed with the immediate retry. 15317 * First call the user-provided function (if any) 15318 */ 15319 if (user_funcp != NULL) { 15320 (*user_funcp)(un, bp, user_arg, 15321 SD_IMMEDIATE_RETRY_ISSUED); 15322 #ifdef __lock_lint 15323 sd_print_incomplete_msg(un, bp, user_arg, 15324 SD_IMMEDIATE_RETRY_ISSUED); 15325 sd_print_cmd_incomplete_msg(un, bp, user_arg, 15326 SD_IMMEDIATE_RETRY_ISSUED); 15327 sd_print_sense_failed_msg(un, bp, user_arg, 15328 SD_IMMEDIATE_RETRY_ISSUED); 15329 #endif 15330 } 15331 15332 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15333 "sd_retry_command: issuing immediate retry\n"); 15334 15335 /* 15336 * Call sd_start_cmds() to transport the command to 15337 * the target. 15338 */ 15339 sd_start_cmds(un, bp); 15340 15341 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15342 "sd_retry_command exit\n"); 15343 return; 15344 } 15345 } 15346 15347 /* 15348 * Set up to retry the command after a delay. 
15349 * First call the user-provided function (if any)
15350 */
15351 if (user_funcp != NULL) {
15352 (*user_funcp)(un, bp, user_arg, SD_DELAYED_RETRY_ISSUED);
15353 }
15354
15355 sd_set_retry_bp(un, bp, retry_delay, statp);
15356
15357 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sd_retry_command: exit\n");
15358 return;
15359
15360 fail_command:
15361
15362 if (user_funcp != NULL) {
15363 (*user_funcp)(un, bp, user_arg, SD_NO_RETRY_ISSUED);
15364 }
15365
15366 fail_command_no_log:
15367
15368 SD_INFO(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
15369 "sd_retry_command: returning failed command\n");
15370
15371 sd_return_failed_command(un, bp, failure_code);
15372
15373 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sd_retry_command: exit\n");
15374 }
15375
15376
15377 /*
15378 * Function: sd_set_retry_bp
15379 *
15380 * Description: Set up the given bp for retry.
15381 *
15382 * Arguments: un - ptr to associated softstate
15383 * bp - ptr to buf(9S) for the command
15384 * retry_delay - time interval before issuing retry (may be 0)
15385 * statp - optional pointer to kstat function
15386 *
15387 * Context: May be called under interrupt context
15388 */
15389
15390 static void
15391 sd_set_retry_bp(struct sd_lun *un, struct buf *bp, clock_t retry_delay,
15392 void (*statp)(kstat_io_t *))
15393 {
15394 ASSERT(un != NULL);
15395 ASSERT(mutex_owned(SD_MUTEX(un)));
15396 ASSERT(bp != NULL);
15397
15398 SD_TRACE(SD_LOG_IO | SD_LOG_ERROR, un,
15399 "sd_set_retry_bp: entry: un:0x%p bp:0x%p\n", un, bp);
15400
15401 /*
15402 * Indicate that the command is being retried. This will not allow any
15403 * other commands on the wait queue to be transported to the target
15404 * until this command has been completed (success or failure). The
15405 * "retry command" is not transported to the target until the given
15406 * time delay expires, unless the user specified a 0 retry_delay.
15407 *
15408 * Note: the timeout(9F) callback routine is what actually calls
15409 * sd_start_cmds() to transport the command, with the exception of a
15410 * zero retry_delay. The only current user of a zero retry delay
15411 * is the case where a START_STOP_UNIT is sent to spin up a device.
15412 */
15413 if (un->un_retry_bp == NULL) {
15414 ASSERT(un->un_retry_statp == NULL);
15415 un->un_retry_bp = bp;
15416
15417 /*
15418 * If the user has not specified a delay, the command should
15419 * be queued and no timeout should be scheduled.
15420 */
15421 if (retry_delay == 0) {
15422 /*
15423 * Save the kstat pointer that will be used in the
15424 * call to SD_UPDATE_KSTATS() below, so that
15425 * sd_start_cmds() can correctly decrement the waitq
15426 * count when it is time to transport this command.
15427 */
15428 un->un_retry_statp = statp;
15429 goto done;
15430 }
15431 }
15432
15433 if (un->un_retry_bp == bp) {
15434 /*
15435 * Save the kstat pointer that will be used in the call to
15436 * SD_UPDATE_KSTATS() below, so that sd_start_cmds() can
15437 * correctly decrement the waitq count when it is time to
15438 * transport this command.
15439 */
15440 un->un_retry_statp = statp;
15441
15442 /*
15443 * Schedule a timeout if:
15444 * 1) The user has specified a delay.
15445 * 2) There is not a START_STOP_UNIT callback pending.
15446 *
15447 * If no delay has been specified, then it is up to the caller
15448 * to ensure that IO processing continues without stalling.
15449 * Effectively, this means that the caller will issue the
15450 * required call to sd_start_cmds().
The START_STOP_UNIT
15451 * callback does this after the START STOP UNIT command has
15452 * completed. In either of these cases we should not schedule
15453 * a timeout callback here. Also don't schedule the timeout if
15454 * an SD_PATH_DIRECT_PRIORITY command is waiting to restart.
15455 */
15456 if ((retry_delay != 0) && (un->un_startstop_timeid == NULL) &&
15457 (un->un_direct_priority_timeid == NULL)) {
15458 un->un_retry_timeid =
15459 timeout(sd_start_retry_command, un, retry_delay);
15460 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
15461 "sd_set_retry_bp: setting timeout: un: 0x%p"
15462 " bp:0x%p un_retry_timeid:0x%p\n",
15463 un, bp, un->un_retry_timeid);
15464 }
15465 } else {
15466 /*
15467 * We only get in here if there is already another command
15468 * waiting to be retried. In this case, we just put the
15469 * given command onto the wait queue, so it can be transported
15470 * after the current retry command has completed.
15471 *
15472 * Also we have to make sure that if the command at the head
15473 * of the wait queue is the un_failfast_bp, we do not put
15474 * ahead of it any other commands that are to be retried.
15475 */
15476 if ((un->un_failfast_bp != NULL) &&
15477 (un->un_failfast_bp == un->un_waitq_headp)) {
15478 /*
15479 * Enqueue this command AFTER the first command on
15480 * the wait queue (which is also un_failfast_bp).
15481 */
15482 bp->av_forw = un->un_waitq_headp->av_forw;
15483 un->un_waitq_headp->av_forw = bp;
15484 if (un->un_waitq_headp == un->un_waitq_tailp) {
15485 un->un_waitq_tailp = bp;
15486 }
15487 } else {
15488 /* Enqueue this command at the head of the waitq. */
15489 bp->av_forw = un->un_waitq_headp;
15490 un->un_waitq_headp = bp;
15491 if (un->un_waitq_tailp == NULL) {
15492 un->un_waitq_tailp = bp;
15493 }
15494 }
15495
15496 if (statp == NULL) {
15497 statp = kstat_waitq_enter;
15498 }
15499 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
15500 "sd_set_retry_bp: un:0x%p already delayed retry\n", un);
15501 }
15502
15503 done:
15504 if (statp != NULL) {
15505 SD_UPDATE_KSTATS(un, statp, bp);
15506 }
15507
15508 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
15509 "sd_set_retry_bp: exit un:0x%p\n", un);
15510 }
15511
15512
15513 /*
15514 * Function: sd_start_retry_command
15515 *
15516 * Description: Start the command that has been waiting on the target's
15517 * retry queue. Called from timeout(9F) context after the
15518 * retry delay interval has expired.
15519 *
15520 * Arguments: arg - pointer to associated softstate for the device.
15521 *
15522 * Context: timeout(9F) thread context. May not sleep.
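*
* (For reference, a sketch of how a pending instance of
* this callback is typically cancelled elsewhere in the
* driver, assuming standard timeout(9F)/untimeout(9F)
* semantics and that SD_MUTEX protects un_retry_timeid:
*
*	timeout_id_t temp_id = un->un_retry_timeid;
*	un->un_retry_timeid = NULL;
*	mutex_exit(SD_MUTEX(un));
*	(void) untimeout(temp_id);
*	mutex_enter(SD_MUTEX(un));
*
* The mutex must be dropped around untimeout() because
* this callback itself acquires SD_MUTEX.)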
15523 */
15524
15525 static void
15526 sd_start_retry_command(void *arg)
15527 {
15528 struct sd_lun *un = arg;
15529
15530 ASSERT(un != NULL);
15531 ASSERT(!mutex_owned(SD_MUTEX(un)));
15532
15533 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
15534 "sd_start_retry_command: entry\n");
15535
15536 mutex_enter(SD_MUTEX(un));
15537
15538 un->un_retry_timeid = NULL;
15539
15540 if (un->un_retry_bp != NULL) {
15541 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
15542 "sd_start_retry_command: un:0x%p STARTING bp:0x%p\n",
15543 un, un->un_retry_bp);
15544 sd_start_cmds(un, un->un_retry_bp);
15545 }
15546
15547 mutex_exit(SD_MUTEX(un));
15548
15549 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
15550 "sd_start_retry_command: exit\n");
15551 }
15552
15553
15554 /*
15555 * Function: sd_start_direct_priority_command
15556 *
15557 * Description: Used to re-start an SD_PATH_DIRECT_PRIORITY command that had
15558 * received TRAN_BUSY when we called scsi_transport() to send it
15559 * to the underlying HBA. This function is called from timeout(9F)
15560 * context after the delay interval has expired.
15561 *
15562 * Arguments: arg - pointer to associated buf(9S) to be restarted.
15563 *
15564 * Context: timeout(9F) thread context. May not sleep.
15565 */
15566
15567 static void
15568 sd_start_direct_priority_command(void *arg)
15569 {
15570 struct buf *priority_bp = arg;
15571 struct sd_lun *un;
15572
15573 ASSERT(priority_bp != NULL);
15574 un = SD_GET_UN(priority_bp);
15575 ASSERT(un != NULL);
15576 ASSERT(!mutex_owned(SD_MUTEX(un)));
15577
15578 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
15579 "sd_start_direct_priority_command: entry\n");
15580
15581 mutex_enter(SD_MUTEX(un));
15582 un->un_direct_priority_timeid = NULL;
15583 sd_start_cmds(un, priority_bp);
15584 mutex_exit(SD_MUTEX(un));
15585
15586 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
15587 "sd_start_direct_priority_command: exit\n");
15588 }
15589
15590
15591 /*
15592 * Function: sd_send_request_sense_command
15593 *
15594 * Description: Sends a REQUEST SENSE command to the target
15595 *
15596 * Context: May be called from interrupt context.
15597 */
15598
15599 static void
15600 sd_send_request_sense_command(struct sd_lun *un, struct buf *bp,
15601 struct scsi_pkt *pktp)
15602 {
15603 ASSERT(bp != NULL);
15604 ASSERT(un != NULL);
15605 ASSERT(mutex_owned(SD_MUTEX(un)));
15606
15607 SD_TRACE(SD_LOG_IO | SD_LOG_ERROR, un, "sd_send_request_sense_command: "
15608 "entry: buf:0x%p\n", bp);
15609
15610 /*
15611 * If we are syncing or dumping, then fail the command to avoid a
15612 * recursive callback into scsi_transport(). Also fail the command
15613 * if we are suspended (legacy behavior).
15614 */
15615 if (ddi_in_panic() || (un->un_state == SD_STATE_SUSPENDED) ||
15616 (un->un_state == SD_STATE_DUMPING)) {
15617 sd_return_failed_command(un, bp, EIO);
15618 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
15619 "sd_send_request_sense_command: syncing/dumping, exit\n");
15620 return;
15621 }
15622
15623 /*
15624 * Retry the failed command and don't issue the request sense if:
15625 * 1) the sense buf is busy
15626 * 2) we have 1 or more outstanding commands on the target
15627 * (the sense data will be cleared or invalidated anyway)
15628 *
15629 * Note: There could be an issue with not checking a retry limit here;
15630 * the problem is determining which retry limit to check.
15631 */
15632 if ((un->un_sense_isbusy != 0) || (un->un_ncmds_in_transport > 0)) {
15633 /* Don't retry if the command is flagged as non-retryable */
15634 if ((pktp->pkt_flags & FLAG_DIAGNOSE) == 0) {
15635 sd_retry_command(un, bp, SD_RETRIES_NOCHECK,
15636 NULL, NULL, 0, un->un_busy_timeout,
15637 kstat_waitq_enter);
15638 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
15639 "sd_send_request_sense_command: "
15640 "at full throttle, retrying exit\n");
15641 } else {
15642 sd_return_failed_command(un, bp, EIO);
15643 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
15644 "sd_send_request_sense_command: "
15645 "at full throttle, non-retryable exit\n");
15646 }
15647 return;
15648 }
15649
15650 sd_mark_rqs_busy(un, bp);
15651 sd_start_cmds(un, un->un_rqs_bp);
15652
15653 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
15654 "sd_send_request_sense_command: exit\n");
15655 }
15656
15657
15658 /*
15659 * Function: sd_mark_rqs_busy
15660 *
15661 * Description: Indicate that the request sense bp for this instance is
15662 * in use.
15663 *
15664 * Context: May be called under interrupt context
15665 */
15666
15667 static void
15668 sd_mark_rqs_busy(struct sd_lun *un, struct buf *bp)
15669 {
15670 struct sd_xbuf *sense_xp;
15671
15672 ASSERT(un != NULL);
15673 ASSERT(bp != NULL);
15674 ASSERT(mutex_owned(SD_MUTEX(un)));
15675 ASSERT(un->un_sense_isbusy == 0);
15676
15677 SD_TRACE(SD_LOG_IO_CORE, un, "sd_mark_rqs_busy: entry: "
15678 "buf:0x%p xp:0x%p un:0x%p\n", bp, SD_GET_XBUF(bp), un);
15679
15680 sense_xp = SD_GET_XBUF(un->un_rqs_bp);
15681 ASSERT(sense_xp != NULL);
15682
15683 SD_INFO(SD_LOG_IO, un,
15684 "sd_mark_rqs_busy: entry: sense_xp:0x%p\n", sense_xp);
15685
15686 ASSERT(sense_xp->xb_pktp != NULL);
15687 ASSERT((sense_xp->xb_pktp->pkt_flags & (FLAG_SENSING | FLAG_HEAD))
15688 == (FLAG_SENSING | FLAG_HEAD));
15689
15690 un->un_sense_isbusy = 1;
15691 un->un_rqs_bp->b_resid = 0;
15692 sense_xp->xb_pktp->pkt_resid = 0;
15693 sense_xp->xb_pktp->pkt_reason = 0;
15694
15695 /* So we can get back the bp at interrupt time! */
15696 sense_xp->xb_sense_bp = bp;
15697
15698 bzero(un->un_rqs_bp->b_un.b_addr, SENSE_LENGTH);
15699
15700 /*
15701 * Mark this buf as awaiting sense data. (This is already set in
15702 * the pkt_flags for the RQS packet.)
15703 */
15704 ((SD_GET_XBUF(bp))->xb_pktp)->pkt_flags |= FLAG_SENSING;
15705
15706 /* Send the request sense down the same path as the failed command */
15707 if (scsi_pkt_allocated_correctly((SD_GET_XBUF(bp))->xb_pktp) &&
15708 ((SD_GET_XBUF(bp))->xb_pktp)->pkt_path_instance)
15709 sense_xp->xb_pktp->pkt_path_instance =
15710 ((SD_GET_XBUF(bp))->xb_pktp)->pkt_path_instance;
15711
15712 sense_xp->xb_retry_count = 0;
15713 sense_xp->xb_victim_retry_count = 0;
15714 sense_xp->xb_ua_retry_count = 0;
15715 sense_xp->xb_nr_retry_count = 0;
15716 sense_xp->xb_dma_resid = 0;
15717
15718 /* Clean up the fields for auto-request sense */
15719 sense_xp->xb_sense_status = 0;
15720 sense_xp->xb_sense_state = 0;
15721 sense_xp->xb_sense_resid = 0;
15722 bzero(sense_xp->xb_sense_data, sizeof (sense_xp->xb_sense_data));
15723
15724 SD_TRACE(SD_LOG_IO_CORE, un, "sd_mark_rqs_busy: exit\n");
15725 }
15726
15727
15728 /*
15729 * Function: sd_mark_rqs_idle
15730 *
15731 * Description: SD_MUTEX must be held continuously through this routine
15732 * to prevent reuse of the rqs struct before the caller can
15733 * complete its processing.
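*
* (Lifecycle sketch of the per-instance RQS buf, for
* reference: sd_send_request_sense_command() calls
* sd_mark_rqs_busy() to claim the buf and then issues
* un_rqs_bp via sd_start_cmds(); when sdintr() sees
* FLAG_SENSING on the completed pkt it routes the sense
* data back to the original command, and
* sd_mark_rqs_idle() releases the buf and returns the
* original command's bp to the caller.)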
15734 *
15735 * Return Code: Pointer to the RQS buf
15736 *
15737 * Context: May be called under interrupt context
15738 */
15739
15740 static struct buf *
15741 sd_mark_rqs_idle(struct sd_lun *un, struct sd_xbuf *sense_xp)
15742 {
15743 struct buf *bp;
15744 ASSERT(un != NULL);
15745 ASSERT(sense_xp != NULL);
15746 ASSERT(mutex_owned(SD_MUTEX(un)));
15747 ASSERT(un->un_sense_isbusy != 0);
15748
15749 un->un_sense_isbusy = 0;
15750 bp = sense_xp->xb_sense_bp;
15751 sense_xp->xb_sense_bp = NULL;
15752
15753 /* This pkt is no longer interested in getting sense data */
15754 ((SD_GET_XBUF(bp))->xb_pktp)->pkt_flags &= ~FLAG_SENSING;
15755
15756 return (bp);
15757 }
15758
15759
15760
15761 /*
15762 * Function: sd_alloc_rqs
15763 *
15764 * Description: Set up the unit to receive auto request sense data
15765 *
15766 * Return Code: DDI_SUCCESS or DDI_FAILURE
15767 *
15768 * Context: Called under attach(9E) context
15769 */
15770
15771 static int
15772 sd_alloc_rqs(struct scsi_device *devp, struct sd_lun *un)
15773 {
15774 struct sd_xbuf *xp;
15775
15776 ASSERT(un != NULL);
15777 ASSERT(!mutex_owned(SD_MUTEX(un)));
15778 ASSERT(un->un_rqs_bp == NULL);
15779 ASSERT(un->un_rqs_pktp == NULL);
15780
15781 /*
15782 * First allocate the required buf and scsi_pkt structs, then set up
15783 * the CDB in the scsi_pkt for a REQUEST SENSE command.
15784 */
15785 un->un_rqs_bp = scsi_alloc_consistent_buf(&devp->sd_address, NULL,
15786 MAX_SENSE_LENGTH, B_READ, SLEEP_FUNC, NULL);
15787 if (un->un_rqs_bp == NULL) {
15788 return (DDI_FAILURE);
15789 }
15790
15791 un->un_rqs_pktp = scsi_init_pkt(&devp->sd_address, NULL, un->un_rqs_bp,
15792 CDB_GROUP0, 1, 0, PKT_CONSISTENT, SLEEP_FUNC, NULL);
15793
15794 if (un->un_rqs_pktp == NULL) {
15795 sd_free_rqs(un);
15796 return (DDI_FAILURE);
15797 }
15798
15799 /* Set up the CDB in the scsi_pkt for a REQUEST SENSE command. */
15800 (void) scsi_setup_cdb((union scsi_cdb *)un->un_rqs_pktp->pkt_cdbp,
15801 SCMD_REQUEST_SENSE, 0, MAX_SENSE_LENGTH, 0);
15802
15803 SD_FILL_SCSI1_LUN(un, un->un_rqs_pktp);
15804
15805 /* Set up the other needed members in the ARQ scsi_pkt. */
15806 un->un_rqs_pktp->pkt_comp = sdintr;
15807 un->un_rqs_pktp->pkt_time = sd_io_time;
15808 un->un_rqs_pktp->pkt_flags |=
15809 (FLAG_SENSING | FLAG_HEAD); /* (1222170) */
15810
15811 /*
15812 * Allocate & init the sd_xbuf struct for the RQS command. Do not
15813 * provide any initpkt, destroypkt routines as we take care of
15814 * scsi_pkt allocation/freeing here and in sd_free_rqs().
15815 */
15816 xp = kmem_alloc(sizeof (struct sd_xbuf), KM_SLEEP);
15817 sd_xbuf_init(un, un->un_rqs_bp, xp, SD_CHAIN_NULL, NULL);
15818 xp->xb_pktp = un->un_rqs_pktp;
15819 SD_INFO(SD_LOG_ATTACH_DETACH, un,
15820 "sd_alloc_rqs: un 0x%p, rqs xp 0x%p, pkt 0x%p, buf 0x%p\n",
15821 un, xp, un->un_rqs_pktp, un->un_rqs_bp);
15822
15823 /*
15824 * Save the pointer to the request sense private bp so it can
15825 * be retrieved in sdintr.
15826 */
15827 un->un_rqs_pktp->pkt_private = un->un_rqs_bp;
15828 ASSERT(un->un_rqs_bp->b_private == xp);
15829
15830 /*
15831 * See if the HBA supports auto-request sense for the specified
15832 * target/lun. If it does, then try to enable it (if not already
15833 * enabled).
15834 *
15835 * Note: For some HBAs (ifp & sf), scsi_ifsetcap will always return
15836 * failure, while for other HBAs (pln) scsi_ifsetcap will always
15837 * return success. However, in both of these cases ARQ is always
15838 * enabled and scsi_ifgetcap will always return true.
The best approach
15839 * is to issue the scsi_ifgetcap() first, then try the scsi_ifsetcap().
15840 *
15841 * The 3rd case is an HBA (adp) that always returns enabled on
15842 * scsi_ifgetcap even when ARQ is not enabled; there the best approach
15843 * is to issue a scsi_ifsetcap followed by a scsi_ifgetcap.
15844 * Note: this case exists to circumvent the Adaptec bug. (x86 only)
15845 */
15846
15847 if (un->un_f_is_fibre == TRUE) {
15848 un->un_f_arq_enabled = TRUE;
15849 } else {
15850 #if defined(__i386) || defined(__amd64)
15851 /*
15852 * Circumvent the Adaptec bug, remove this code when
15853 * the bug is fixed
15854 */
15855 (void) scsi_ifsetcap(SD_ADDRESS(un), "auto-rqsense", 1, 1);
15856 #endif
15857 switch (scsi_ifgetcap(SD_ADDRESS(un), "auto-rqsense", 1)) {
15858 case 0:
15859 SD_INFO(SD_LOG_ATTACH_DETACH, un,
15860 "sd_alloc_rqs: HBA supports ARQ\n");
15861 /*
15862 * ARQ is supported by this HBA but currently is not
15863 * enabled. Attempt to enable it and if successful then
15864 * mark this instance as ARQ enabled.
15865 */
15866 if (scsi_ifsetcap(SD_ADDRESS(un), "auto-rqsense", 1, 1)
15867 == 1) {
15868 /* Successfully enabled ARQ in the HBA */
15869 SD_INFO(SD_LOG_ATTACH_DETACH, un,
15870 "sd_alloc_rqs: ARQ enabled\n");
15871 un->un_f_arq_enabled = TRUE;
15872 } else {
15873 /* Could not enable ARQ in the HBA */
15874 SD_INFO(SD_LOG_ATTACH_DETACH, un,
15875 "sd_alloc_rqs: failed ARQ enable\n");
15876 un->un_f_arq_enabled = FALSE;
15877 }
15878 break;
15879 case 1:
15880 /*
15881 * ARQ is supported by this HBA and is already enabled.
15882 * Just mark ARQ as enabled for this instance.
15883 */
15884 SD_INFO(SD_LOG_ATTACH_DETACH, un,
15885 "sd_alloc_rqs: ARQ already enabled\n");
15886 un->un_f_arq_enabled = TRUE;
15887 break;
15888 default:
15889 /*
15890 * ARQ is not supported by this HBA; disable it for this
15891 * instance.
15892 */
15893 SD_INFO(SD_LOG_ATTACH_DETACH, un,
15894 "sd_alloc_rqs: HBA does not support ARQ\n");
15895 un->un_f_arq_enabled = FALSE;
15896 break;
15897 }
15898 }
15899
15900 return (DDI_SUCCESS);
15901 }
15902
15903
15904 /*
15905 * Function: sd_free_rqs
15906 *
15907 * Description: Cleanup for the per-instance RQS command.
15908 *
15909 * Context: Kernel thread context
15910 */
15911
15912 static void
15913 sd_free_rqs(struct sd_lun *un)
15914 {
15915 ASSERT(un != NULL);
15916
15917 SD_TRACE(SD_LOG_IO_CORE, un, "sd_free_rqs: entry\n");
15918
15919 /*
15920 * If consistent memory is bound to a scsi_pkt, the pkt
15921 * has to be destroyed *before* freeing the consistent memory.
15922 * Don't change the sequence of these operations.
15923 * scsi_destroy_pkt() might access the memory, which isn't allowed
15924 * after it was freed in scsi_free_consistent_buf().
15925 */
15926 if (un->un_rqs_pktp != NULL) {
15927 scsi_destroy_pkt(un->un_rqs_pktp);
15928 un->un_rqs_pktp = NULL;
15929 }
15930
15931 if (un->un_rqs_bp != NULL) {
15932 struct sd_xbuf *xp = SD_GET_XBUF(un->un_rqs_bp);
15933 if (xp != NULL) {
15934 kmem_free(xp, sizeof (struct sd_xbuf));
15935 }
15936 scsi_free_consistent_buf(un->un_rqs_bp);
15937 un->un_rqs_bp = NULL;
15938 }
15939 SD_TRACE(SD_LOG_IO_CORE, un, "sd_free_rqs: exit\n");
15940 }
15941
15942
15943
15944 /*
15945 * Function: sd_reduce_throttle
15946 *
15947 * Description: Reduces the maximum # of outstanding commands on a
15948 * target to the current number of outstanding commands.
15949 * Queues a timeout(9F) callback to restore the limit
15950 * after a specified interval has elapsed.
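* (Illustrative numbers: with un_throttle == 256 and
* three commands still in transport when the failure
* is seen, un_throttle drops to 3; sd_restore_throttle()
* later either restores the value saved in
* un_busy_throttle, or ramps the limit back up by
* roughly 10% per interval in the QFULL case.)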
15951 * Typically used when we get a TRAN_BUSY return code
15952 * back from scsi_transport().
15953 *
15954 * Arguments: un - ptr to the sd_lun softstate struct
15955 * throttle_type: SD_THROTTLE_TRAN_BUSY or SD_THROTTLE_QFULL
15956 *
15957 * Context: May be called from interrupt context
15958 */
15959
15960 static void
15961 sd_reduce_throttle(struct sd_lun *un, int throttle_type)
15962 {
15963 ASSERT(un != NULL);
15964 ASSERT(mutex_owned(SD_MUTEX(un)));
15965 ASSERT(un->un_ncmds_in_transport >= 0);
15966
15967 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sd_reduce_throttle: "
15968 "entry: un:0x%p un_throttle:%d un_ncmds_in_transport:%d\n",
15969 un, un->un_throttle, un->un_ncmds_in_transport);
15970
15971 if (un->un_throttle > 1) {
15972 if (un->un_f_use_adaptive_throttle == TRUE) {
15973 switch (throttle_type) {
15974 case SD_THROTTLE_TRAN_BUSY:
15975 if (un->un_busy_throttle == 0) {
15976 un->un_busy_throttle = un->un_throttle;
15977 }
15978 break;
15979 case SD_THROTTLE_QFULL:
15980 un->un_busy_throttle = 0;
15981 break;
15982 default:
15983 ASSERT(FALSE);
15984 }
15985
15986 if (un->un_ncmds_in_transport > 0) {
15987 un->un_throttle = un->un_ncmds_in_transport;
15988 }
15989
15990 } else {
15991 if (un->un_ncmds_in_transport == 0) {
15992 un->un_throttle = 1;
15993 } else {
15994 un->un_throttle = un->un_ncmds_in_transport;
15995 }
15996 }
15997 }
15998
15999 /* Reschedule the timeout if none is currently active */
16000 if (un->un_reset_throttle_timeid == NULL) {
16001 un->un_reset_throttle_timeid = timeout(sd_restore_throttle,
16002 un, SD_THROTTLE_RESET_INTERVAL);
16003 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
16004 "sd_reduce_throttle: timeout scheduled!\n");
16005 }
16006
16007 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sd_reduce_throttle: "
16008 "exit: un:0x%p un_throttle:%d\n", un, un->un_throttle);
16009 }
16010
16011
16012
16013 /*
16014 * Function: sd_restore_throttle
16015 *
16016 * Description: Callback function for timeout(9F). Resets the current
16017 * value of un->un_throttle to its default.
16018 *
16019 * Arguments: arg - pointer to associated softstate for the device.
16020 *
16021 * Context: May be called from interrupt context
16022 */
16023
16024 static void
16025 sd_restore_throttle(void *arg)
16026 {
16027 struct sd_lun *un = arg;
16028
16029 ASSERT(un != NULL);
16030 ASSERT(!mutex_owned(SD_MUTEX(un)));
16031
16032 mutex_enter(SD_MUTEX(un));
16033
16034 SD_TRACE(SD_LOG_IO | SD_LOG_ERROR, un, "sd_restore_throttle: "
16035 "entry: un:0x%p un_throttle:%d\n", un, un->un_throttle);
16036
16037 un->un_reset_throttle_timeid = NULL;
16038
16039 if (un->un_f_use_adaptive_throttle == TRUE) {
16040 /*
16041 * If un_busy_throttle is nonzero, then it contains the
16042 * value that un_throttle had when we got a TRAN_BUSY back
16043 * from scsi_transport(). We want to revert to this
16044 * value.
16045 *
16046 * In the QFULL case, the throttle limit will incrementally
16047 * increase until it reaches the saved throttle.
16048 */
16049 if (un->un_busy_throttle > 0) {
16050 un->un_throttle = un->un_busy_throttle;
16051 un->un_busy_throttle = 0;
16052 } else {
16053 /*
16054 * Increase the throttle by 10% to open the gate
16055 * slowly; schedule another restore if the saved
16056 * throttle has not yet been reached.
16057 */
16058 short throttle;
16059 if (sd_qfull_throttle_enable) {
16060 throttle = un->un_throttle +
16061 max((un->un_throttle / 10), 1);
16062 un->un_throttle =
16063 (throttle < un->un_saved_throttle) ?
16064 throttle : un->un_saved_throttle;
16065 if (un->un_throttle < un->un_saved_throttle) {
16066 un->un_reset_throttle_timeid =
16067 timeout(sd_restore_throttle,
16068 un,
16069 SD_QFULL_THROTTLE_RESET_INTERVAL);
16070 }
16071 }
16072 }
16073
16074 /*
16075 * If un_throttle has fallen below the low-water mark, we
16076 * restore the maximum value here (and allow it to ratchet
16077 * down again if necessary).
16078 */
16079 if (un->un_throttle < un->un_min_throttle) {
16080 un->un_throttle = un->un_saved_throttle;
16081 }
16082 } else {
16083 SD_TRACE(SD_LOG_IO | SD_LOG_ERROR, un, "sd_restore_throttle: "
16084 "restoring limit from 0x%x to 0x%x\n",
16085 un->un_throttle, un->un_saved_throttle);
16086 un->un_throttle = un->un_saved_throttle;
16087 }
16088
16089 SD_TRACE(SD_LOG_IO | SD_LOG_ERROR, un,
16090 "sd_restore_throttle: calling sd_start_cmds!\n");
16091
16092 sd_start_cmds(un, NULL);
16093
16094 SD_TRACE(SD_LOG_IO | SD_LOG_ERROR, un,
16095 "sd_restore_throttle: exit: un:0x%p un_throttle:%d\n",
16096 un, un->un_throttle);
16097
16098 mutex_exit(SD_MUTEX(un));
16099
16100 SD_TRACE(SD_LOG_IO | SD_LOG_ERROR, un, "sd_restore_throttle: exit\n");
16101 }
16102
16103 /*
16104 * Function: sdrunout
16105 *
16106 * Description: Callback routine for scsi_init_pkt when a resource allocation
16107 * fails.
16108 *
16109 * Arguments: arg - a pointer to the sd_lun unit struct for the particular
16110 * soft state instance.
16111 *
16112 * Return Code: The scsi_init_pkt routine allows for the callback function to
16113 * return a 0 indicating the callback should be rescheduled or a 1
16114 * indicating not to reschedule. This routine always returns 1
16115 * because the driver always provides a callback function to
16116 * scsi_init_pkt. This results in a callback always being scheduled
16117 * (via the scsi_init_pkt callback implementation) if a resource
16118 * failure occurs.
16119 *
16120 * Context: This callback function may not block or call routines that block
16121 *
16122 * Note: Using the scsi_init_pkt callback facility can result in an I/O
16123 * request persisting at the head of the list which cannot be
16124 * satisfied even after multiple retries. In the future the driver
16125 * may implement some kind of maximum runout count before failing
16126 * an I/O.
16127 */
16128
16129 static int
16130 sdrunout(caddr_t arg)
16131 {
16132 struct sd_lun *un = (struct sd_lun *)arg;
16133
16134 ASSERT(un != NULL);
16135 ASSERT(!mutex_owned(SD_MUTEX(un)));
16136
16137 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sdrunout: entry\n");
16138
16139 mutex_enter(SD_MUTEX(un));
16140 sd_start_cmds(un, NULL);
16141 mutex_exit(SD_MUTEX(un));
16142 /*
16143 * This callback routine always returns 1 (i.e. do not reschedule)
16144 * because we always specify sdrunout as the callback handler for
16145 * scsi_init_pkt inside the call to sd_start_cmds.
16146 */
16147 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sdrunout: exit\n");
16148 return (1);
16149 }
16150
16151
16152 /*
16153 * Function: sdintr
16154 *
16155 * Description: Completion callback routine for scsi_pkt(9S) structs
16156 * sent to the HBA driver via scsi_transport(9F).
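*
* (Dispatch order sketch, for reference: sdintr() first
* fails CMD_DEV_GONE pkts, then routes auto-request-sense
* completions (STATE_ARQ_DONE), then completions of the
* driver's own REQUEST SENSE pkt (FLAG_SENSING), then the
* hot path of CMD_CMPLT with STATUS_GOOD, and finally the
* error/retry processing for everything else.)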
16157 * 16158 * Context: Interrupt context 16159 */ 16160 16161 static void 16162 sdintr(struct scsi_pkt *pktp) 16163 { 16164 struct buf *bp; 16165 struct sd_xbuf *xp; 16166 struct sd_lun *un; 16167 size_t actual_len; 16168 sd_ssc_t *sscp; 16169 16170 ASSERT(pktp != NULL); 16171 bp = (struct buf *)pktp->pkt_private; 16172 ASSERT(bp != NULL); 16173 xp = SD_GET_XBUF(bp); 16174 ASSERT(xp != NULL); 16175 ASSERT(xp->xb_pktp != NULL); 16176 un = SD_GET_UN(bp); 16177 ASSERT(un != NULL); 16178 ASSERT(!mutex_owned(SD_MUTEX(un))); 16179 16180 #ifdef SD_FAULT_INJECTION 16181 16182 SD_INFO(SD_LOG_IOERR, un, "sdintr: sdintr calling Fault injection\n"); 16183 /* SD FaultInjection */ 16184 sd_faultinjection(pktp); 16185 16186 #endif /* SD_FAULT_INJECTION */ 16187 16188 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sdintr: entry: buf:0x%p," 16189 " xp:0x%p, un:0x%p\n", bp, xp, un); 16190 16191 mutex_enter(SD_MUTEX(un)); 16192 16193 ASSERT(un->un_fm_private != NULL); 16194 sscp = &((struct sd_fm_internal *)(un->un_fm_private))->fm_ssc; 16195 ASSERT(sscp != NULL); 16196 16197 /* Reduce the count of the #commands currently in transport */ 16198 un->un_ncmds_in_transport--; 16199 ASSERT(un->un_ncmds_in_transport >= 0); 16200 16201 /* Increment counter to indicate that the callback routine is active */ 16202 un->un_in_callback++; 16203 16204 SD_UPDATE_KSTATS(un, kstat_runq_exit, bp); 16205 16206 #ifdef SDDEBUG 16207 if (bp == un->un_retry_bp) { 16208 SD_TRACE(SD_LOG_IO | SD_LOG_ERROR, un, "sdintr: " 16209 "un:0x%p: GOT retry_bp:0x%p un_ncmds_in_transport:%d\n", 16210 un, un->un_retry_bp, un->un_ncmds_in_transport); 16211 } 16212 #endif 16213 16214 /* 16215 * If pkt_reason is CMD_DEV_GONE, fail the command, and update the media 16216 * state if needed. 16217 */ 16218 if (pktp->pkt_reason == CMD_DEV_GONE) { 16219 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 16220 "Command failed to complete...Device is gone\n"); 16221 if (un->un_mediastate != DKIO_DEV_GONE) { 16222 un->un_mediastate = DKIO_DEV_GONE; 16223 cv_broadcast(&un->un_state_cv); 16224 } 16225 sd_return_failed_command(un, bp, EIO); 16226 goto exit; 16227 } 16228 16229 if (pktp->pkt_state & STATE_XARQ_DONE) { 16230 SD_TRACE(SD_LOG_COMMON, un, 16231 "sdintr: extra sense data received. pkt=%p\n", pktp); 16232 } 16233 16234 /* 16235 * First see if the pkt has auto-request sense data with it.... 16236 * Look at the packet state first so we don't take a performance 16237 * hit looking at the arq enabled flag unless absolutely necessary. 16238 */ 16239 if ((pktp->pkt_state & STATE_ARQ_DONE) && 16240 (un->un_f_arq_enabled == TRUE)) { 16241 /* 16242 * The HBA did an auto request sense for this command so check 16243 * for FLAG_DIAGNOSE. If set this indicates a uscsi or internal 16244 * driver command that should not be retried. 16245 */ 16246 if ((pktp->pkt_flags & FLAG_DIAGNOSE) != 0) { 16247 /* 16248 * Save the relevant sense info into the xp for the 16249 * original cmd. 
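*
* (Illustrative arithmetic, assuming MAX_SENSE_LENGTH
* is 252 (0xFC): in the STATE_XARQ_DONE case below, a
* sts_rqpkt_resid of 200 yields
* actual_len = 252 - 200 = 52 valid sense bytes.)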
16250 */ 16251 struct scsi_arq_status *asp; 16252 asp = (struct scsi_arq_status *)(pktp->pkt_scbp); 16253 xp->xb_sense_status = 16254 *((uchar_t *)(&(asp->sts_rqpkt_status))); 16255 xp->xb_sense_state = asp->sts_rqpkt_state; 16256 xp->xb_sense_resid = asp->sts_rqpkt_resid; 16257 if (pktp->pkt_state & STATE_XARQ_DONE) { 16258 actual_len = MAX_SENSE_LENGTH - 16259 xp->xb_sense_resid; 16260 bcopy(&asp->sts_sensedata, xp->xb_sense_data, 16261 MAX_SENSE_LENGTH); 16262 } else { 16263 if (xp->xb_sense_resid > SENSE_LENGTH) { 16264 actual_len = MAX_SENSE_LENGTH - 16265 xp->xb_sense_resid; 16266 } else { 16267 actual_len = SENSE_LENGTH - 16268 xp->xb_sense_resid; 16269 } 16270 if (xp->xb_pkt_flags & SD_XB_USCSICMD) { 16271 if ((((struct uscsi_cmd *) 16272 (xp->xb_pktinfo))->uscsi_rqlen) > 16273 actual_len) { 16274 xp->xb_sense_resid = 16275 (((struct uscsi_cmd *) 16276 (xp->xb_pktinfo))-> 16277 uscsi_rqlen) - actual_len; 16278 } else { 16279 xp->xb_sense_resid = 0; 16280 } 16281 } 16282 bcopy(&asp->sts_sensedata, xp->xb_sense_data, 16283 SENSE_LENGTH); 16284 } 16285 16286 /* fail the command */ 16287 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 16288 "sdintr: arq done and FLAG_DIAGNOSE set\n"); 16289 sd_return_failed_command(un, bp, EIO); 16290 goto exit; 16291 } 16292 16293 #if (defined(__i386) || defined(__amd64)) /* DMAFREE for x86 only */ 16294 /* 16295 * We want to either retry or fail this command, so free 16296 * the DMA resources here. If we retry the command then 16297 * the DMA resources will be reallocated in sd_start_cmds(). 16298 * Note that when PKT_DMA_PARTIAL is used, this reallocation 16299 * causes the *entire* transfer to start over again from the 16300 * beginning of the request, even for PARTIAL chunks that 16301 * have already transferred successfully. 16302 */ 16303 if ((un->un_f_is_fibre == TRUE) && 16304 ((xp->xb_pkt_flags & SD_XB_USCSICMD) == 0) && 16305 ((pktp->pkt_flags & FLAG_SENSING) == 0)) { 16306 scsi_dmafree(pktp); 16307 xp->xb_pkt_flags |= SD_XB_DMA_FREED; 16308 } 16309 #endif 16310 16311 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 16312 "sdintr: arq done, sd_handle_auto_request_sense\n"); 16313 16314 sd_handle_auto_request_sense(un, bp, xp, pktp); 16315 goto exit; 16316 } 16317 16318 /* Next see if this is the REQUEST SENSE pkt for the instance */ 16319 if (pktp->pkt_flags & FLAG_SENSING) { 16320 /* This pktp is from the unit's REQUEST_SENSE command */ 16321 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 16322 "sdintr: sd_handle_request_sense\n"); 16323 sd_handle_request_sense(un, bp, xp, pktp); 16324 goto exit; 16325 } 16326 16327 /* 16328 * Check to see if the command successfully completed as requested; 16329 * this is the most common case (and also the hot performance path). 16330 * 16331 * Requirements for successful completion are: 16332 * pkt_reason is CMD_CMPLT and packet status is status good. 16333 * In addition: 16334 * - A residual of zero indicates successful completion no matter what 16335 * the command is. 16336 * - If the residual is not zero and the command is not a read or 16337 * write, then it's still defined as successful completion. In other 16338 * words, if the command is a read or write the residual must be 16339 * zero for successful completion. 16340 * - If the residual is not zero and the command is a read or 16341 * write, and it's a USCSICMD, then it's still defined as 16342 * successful completion. 
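*
* (Equivalent decision table, with resid == pkt_resid:
*	resid == 0, any opcode		-> success
*	resid != 0, not READ/WRITE	-> success (b_resid updated)
*	resid != 0, READ/WRITE, USCSI	-> success (b_resid updated)
*	resid != 0, READ/WRITE, !USCSI	-> not successful)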
16343 */ 16344 if ((pktp->pkt_reason == CMD_CMPLT) && 16345 (SD_GET_PKT_STATUS(pktp) == STATUS_GOOD)) { 16346 16347 /* 16348 * Since this command is returned with a good status, we 16349 * can reset the count for Sonoma failover. 16350 */ 16351 un->un_sonoma_failure_count = 0; 16352 16353 /* 16354 * Return all USCSI commands on good status 16355 */ 16356 if (pktp->pkt_resid == 0) { 16357 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 16358 "sdintr: returning command for resid == 0\n"); 16359 } else if (((SD_GET_PKT_OPCODE(pktp) & 0x1F) != SCMD_READ) && 16360 ((SD_GET_PKT_OPCODE(pktp) & 0x1F) != SCMD_WRITE)) { 16361 SD_UPDATE_B_RESID(bp, pktp); 16362 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 16363 "sdintr: returning command for resid != 0\n"); 16364 } else if (xp->xb_pkt_flags & SD_XB_USCSICMD) { 16365 SD_UPDATE_B_RESID(bp, pktp); 16366 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 16367 "sdintr: returning uscsi command\n"); 16368 } else { 16369 goto not_successful; 16370 } 16371 sd_return_command(un, bp); 16372 16373 /* 16374 * Decrement counter to indicate that the callback routine 16375 * is done. 16376 */ 16377 un->un_in_callback--; 16378 ASSERT(un->un_in_callback >= 0); 16379 mutex_exit(SD_MUTEX(un)); 16380 16381 return; 16382 } 16383 16384 not_successful: 16385 16386 #if (defined(__i386) || defined(__amd64)) /* DMAFREE for x86 only */ 16387 /* 16388 * The following is based upon knowledge of the underlying transport 16389 * and its use of DMA resources. This code should be removed when 16390 * PKT_DMA_PARTIAL support is taken out of the disk driver in favor 16391 * of the new PKT_CMD_BREAKUP protocol. See also sd_initpkt_for_buf() 16392 * and sd_start_cmds(). 16393 * 16394 * Free any DMA resources associated with this command if there 16395 * is a chance it could be retried or enqueued for later retry. 16396 * If we keep the DMA binding then mpxio cannot reissue the 16397 * command on another path whenever a path failure occurs. 16398 * 16399 * Note that when PKT_DMA_PARTIAL is used, free/reallocation 16400 * causes the *entire* transfer to start over again from the 16401 * beginning of the request, even for PARTIAL chunks that 16402 * have already transferred successfully. 16403 * 16404 * This is only done for non-uscsi commands (and also skipped for the 16405 * driver's internal RQS command). Also just do this for Fibre Channel 16406 * devices as these are the only ones that support mpxio. 16407 */ 16408 if ((un->un_f_is_fibre == TRUE) && 16409 ((xp->xb_pkt_flags & SD_XB_USCSICMD) == 0) && 16410 ((pktp->pkt_flags & FLAG_SENSING) == 0)) { 16411 scsi_dmafree(pktp); 16412 xp->xb_pkt_flags |= SD_XB_DMA_FREED; 16413 } 16414 #endif 16415 16416 /* 16417 * The command did not successfully complete as requested so check 16418 * for FLAG_DIAGNOSE. If set this indicates a uscsi or internal 16419 * driver command that should not be retried so just return. If 16420 * FLAG_DIAGNOSE is not set the error will be processed below. 16421 */ 16422 if ((pktp->pkt_flags & FLAG_DIAGNOSE) != 0) { 16423 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 16424 "sdintr: FLAG_DIAGNOSE: sd_return_failed_command\n"); 16425 /* 16426 * Issue a request sense if a check condition caused the error 16427 * (we handle the auto request sense case above), otherwise 16428 * just fail the command. 
16429 	 */
16430 	if ((pktp->pkt_reason == CMD_CMPLT) &&
16431 	    (SD_GET_PKT_STATUS(pktp) == STATUS_CHECK)) {
16432 		sd_send_request_sense_command(un, bp, pktp);
16433 	} else {
16434 		sd_return_failed_command(un, bp, EIO);
16435 	}
16436 	goto exit;
16437 	}
16438 
16439 	/*
16440 	 * The command did not successfully complete as requested so process
16441 	 * the error, retry, and/or attempt recovery.
16442 	 */
16443 	switch (pktp->pkt_reason) {
16444 	case CMD_CMPLT:
16445 		switch (SD_GET_PKT_STATUS(pktp)) {
16446 		case STATUS_GOOD:
16447 			/*
16448 			 * The command completed successfully with a non-zero
16449 			 * residual
16450 			 */
16451 			SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
16452 			    "sdintr: STATUS_GOOD \n");
16453 			sd_pkt_status_good(un, bp, xp, pktp);
16454 			break;
16455 
16456 		case STATUS_CHECK:
16457 		case STATUS_TERMINATED:
16458 			SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
16459 			    "sdintr: STATUS_TERMINATED | STATUS_CHECK\n");
16460 			sd_pkt_status_check_condition(un, bp, xp, pktp);
16461 			break;
16462 
16463 		case STATUS_BUSY:
16464 			SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
16465 			    "sdintr: STATUS_BUSY\n");
16466 			sd_pkt_status_busy(un, bp, xp, pktp);
16467 			break;
16468 
16469 		case STATUS_RESERVATION_CONFLICT:
16470 			SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
16471 			    "sdintr: STATUS_RESERVATION_CONFLICT\n");
16472 			sd_pkt_status_reservation_conflict(un, bp, xp, pktp);
16473 			break;
16474 
16475 		case STATUS_QFULL:
16476 			SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
16477 			    "sdintr: STATUS_QFULL\n");
16478 			sd_pkt_status_qfull(un, bp, xp, pktp);
16479 			break;
16480 
16481 		case STATUS_MET:
16482 		case STATUS_INTERMEDIATE:
16483 		case STATUS_SCSI2:
16484 		case STATUS_INTERMEDIATE_MET:
16485 		case STATUS_ACA_ACTIVE:
16486 			scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
16487 			    "Unexpected SCSI status received: 0x%x\n",
16488 			    SD_GET_PKT_STATUS(pktp));
16489 			/*
16490 			 * Mark the ssc_flags when an invalid status code is
16491 			 * detected for a non-USCSI command.
16492 			 */
16493 			if (!(xp->xb_pkt_flags & SD_XB_USCSICMD)) {
16494 				sd_ssc_set_info(sscp, SSC_FLAGS_INVALID_STATUS,
16495 				    0, "stat-code");
16496 			}
16497 			sd_return_failed_command(un, bp, EIO);
16498 			break;
16499 
16500 		default:
16501 			scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
16502 			    "Invalid SCSI status received: 0x%x\n",
16503 			    SD_GET_PKT_STATUS(pktp));
16504 			if (!(xp->xb_pkt_flags & SD_XB_USCSICMD)) {
16505 				sd_ssc_set_info(sscp, SSC_FLAGS_INVALID_STATUS,
16506 				    0, "stat-code");
16507 			}
16508 			sd_return_failed_command(un, bp, EIO);
16509 			break;
16510 
16511 		}
16512 		break;
16513 
16514 	case CMD_INCOMPLETE:
16515 		SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
16516 		    "sdintr:  CMD_INCOMPLETE\n");
16517 		sd_pkt_reason_cmd_incomplete(un, bp, xp, pktp);
16518 		break;
16519 	case CMD_TRAN_ERR:
16520 		SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
16521 		    "sdintr: CMD_TRAN_ERR\n");
16522 		sd_pkt_reason_cmd_tran_err(un, bp, xp, pktp);
16523 		break;
16524 	case CMD_RESET:
16525 		SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
16526 		    "sdintr: CMD_RESET \n");
16527 		sd_pkt_reason_cmd_reset(un, bp, xp, pktp);
16528 		break;
16529 	case CMD_ABORTED:
16530 		SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
16531 		    "sdintr: CMD_ABORTED \n");
16532 		sd_pkt_reason_cmd_aborted(un, bp, xp, pktp);
16533 		break;
16534 	case CMD_TIMEOUT:
16535 		SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
16536 		    "sdintr: CMD_TIMEOUT\n");
16537 		sd_pkt_reason_cmd_timeout(un, bp, xp, pktp);
16538 		break;
16539 	case CMD_UNX_BUS_FREE:
16540 		SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
16541 		    "sdintr: CMD_UNX_BUS_FREE \n");
16542 		sd_pkt_reason_cmd_unx_bus_free(un, bp, xp, pktp);
16543 		break;
16544 	case CMD_TAG_REJECT:
16545 		SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
16546 		    "sdintr: CMD_TAG_REJECT\n");
16547 		sd_pkt_reason_cmd_tag_reject(un, bp, xp, pktp);
16548 		break;
16549 	default:
16550 		SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
16551 		    "sdintr: default\n");
16552 		/*
16553 		 * Mark the ssc_flags for detecting an invalid pkt_reason.
16554 		 */
16555 		if (!(xp->xb_pkt_flags & SD_XB_USCSICMD)) {
16556 			sd_ssc_set_info(sscp, SSC_FLAGS_INVALID_PKT_REASON,
16557 			    0, "pkt-reason");
16558 		}
16559 		sd_pkt_reason_default(un, bp, xp, pktp);
16560 		break;
16561 	}
16562 
16563 exit:
16564 	SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sdintr: exit\n");
16565 
16566 	/* Decrement counter to indicate that the callback routine is done. */
16567 	un->un_in_callback--;
16568 	ASSERT(un->un_in_callback >= 0);
16569 
16570 	/*
16571 	 * At this point, the pkt has been dispatched, i.e., it is either
16572 	 * being re-tried or has been returned to its caller and should
16573 	 * not be referenced.
16574 	 */
16575 
16576 	mutex_exit(SD_MUTEX(un));
16577 }
16578 
16579 
16580 /*
16581  * Function: sd_print_incomplete_msg
16582  *
16583  * Description: Prints the error message for a CMD_INCOMPLETE error.
16584  *
16585  * Arguments: un - ptr to associated softstate for the device.
16586  *		bp - ptr to the buf(9S) for the command.
16587  *		arg - message string ptr
16588  *		code - SD_DELAYED_RETRY_ISSUED, SD_IMMEDIATE_RETRY_ISSUED,
16589  *		or SD_NO_RETRY_ISSUED.
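 *
 *		As an illustrative example (the device path shown is
 *		hypothetical; the message text follows the scsi_log()
 *		call below), a failed read with code == SD_NO_RETRY_ISSUED
 *		would log something like:
 *
 *		WARNING: /pci@0,0/pci1000,30@10/sd@0,0 (sd0):
 *			incomplete read- giving up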
16590  *
16591  * Context: May be called under interrupt context
16592  */
16593 
16594 static void
16595 sd_print_incomplete_msg(struct sd_lun *un, struct buf *bp, void *arg, int code)
16596 {
16597 	struct scsi_pkt *pktp;
16598 	char *msgp;
16599 	char *cmdp = arg;
16600 
16601 	ASSERT(un != NULL);
16602 	ASSERT(mutex_owned(SD_MUTEX(un)));
16603 	ASSERT(bp != NULL);
16604 	ASSERT(arg != NULL);
16605 	pktp = SD_GET_PKTP(bp);
16606 	ASSERT(pktp != NULL);
16607 
16608 	switch (code) {
16609 	case SD_DELAYED_RETRY_ISSUED:
16610 	case SD_IMMEDIATE_RETRY_ISSUED:
16611 		msgp = "retrying";
16612 		break;
16613 	case SD_NO_RETRY_ISSUED:
16614 	default:
16615 		msgp = "giving up";
16616 		break;
16617 	}
16618 
16619 	if ((pktp->pkt_flags & FLAG_SILENT) == 0) {
16620 		scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
16621 		    "incomplete %s- %s\n", cmdp, msgp);
16622 	}
16623 }
16624 
16625 
16626 
16627 /*
16628  * Function: sd_pkt_status_good
16629  *
16630  * Description: Processing for a STATUS_GOOD code in pkt_status.
16631  *
16632  * Context: May be called under interrupt context
16633  */
16634 
16635 static void
16636 sd_pkt_status_good(struct sd_lun *un, struct buf *bp,
16637     struct sd_xbuf *xp, struct scsi_pkt *pktp)
16638 {
16639 	char *cmdp;
16640 
16641 	ASSERT(un != NULL);
16642 	ASSERT(mutex_owned(SD_MUTEX(un)));
16643 	ASSERT(bp != NULL);
16644 	ASSERT(xp != NULL);
16645 	ASSERT(pktp != NULL);
16646 	ASSERT(pktp->pkt_reason == CMD_CMPLT);
16647 	ASSERT(SD_GET_PKT_STATUS(pktp) == STATUS_GOOD);
16648 	ASSERT(pktp->pkt_resid != 0);
16649 
16650 	SD_TRACE(SD_LOG_IO_CORE, un, "sd_pkt_status_good: entry\n");
16651 
16652 	SD_UPDATE_ERRSTATS(un, sd_harderrs);
16653 	switch (SD_GET_PKT_OPCODE(pktp) & 0x1F) {
16654 	case SCMD_READ:
16655 		cmdp = "read";
16656 		break;
16657 	case SCMD_WRITE:
16658 		cmdp = "write";
16659 		break;
16660 	default:
16661 		SD_UPDATE_B_RESID(bp, pktp);
16662 		sd_return_command(un, bp);
16663 		SD_TRACE(SD_LOG_IO_CORE, un, "sd_pkt_status_good: exit\n");
16664 		return;
16665 	}
16666 
16667 	/*
16668 	 * See if we can retry the read/write, preferably immediately.
16669 	 * If retries are exhausted, then sd_retry_command() will update
16670 	 * the b_resid count.
16671 	 */
16672 	sd_retry_command(un, bp, SD_RETRIES_STANDARD, sd_print_incomplete_msg,
16673 	    cmdp, EIO, (clock_t)0, NULL);
16674 
16675 	SD_TRACE(SD_LOG_IO_CORE, un, "sd_pkt_status_good: exit\n");
16676 }
16677 
16678 
16679 
16680 
16681 
16682 /*
16683  * Function: sd_handle_request_sense
16684  *
16685  * Description: Processing for non-auto Request Sense command.
16686  *
16687  * Arguments: un - ptr to associated softstate
16688  *		sense_bp - ptr to buf(9S) for the RQS command
16689  *		sense_xp - ptr to the sd_xbuf for the RQS command
16690  *		sense_pktp - ptr to the scsi_pkt(9S) for the RQS command
16691  *
16692  * Context: May be called under interrupt context
16693  */
16694 
16695 static void
16696 sd_handle_request_sense(struct sd_lun *un, struct buf *sense_bp,
16697     struct sd_xbuf *sense_xp, struct scsi_pkt *sense_pktp)
16698 {
16699 	struct buf *cmd_bp;	/* buf for the original command */
16700 	struct sd_xbuf *cmd_xp;	/* sd_xbuf for the original command */
16701 	struct scsi_pkt *cmd_pktp;	/* pkt for the original command */
16702 	size_t actual_len;	/* actual sense data length */
16703 
16704 	ASSERT(un != NULL);
16705 	ASSERT(mutex_owned(SD_MUTEX(un)));
16706 	ASSERT(sense_bp != NULL);
16707 	ASSERT(sense_xp != NULL);
16708 	ASSERT(sense_pktp != NULL);
16709 
16710 	/*
16711 	 * Note the sense_bp, sense_xp, and sense_pktp here are for the
16712 	 * RQS command and not the original command.
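	 *
	 * A sketch of how the original command is recovered from the
	 * RQS xbuf (this mirrors the assignments just below):
	 *
	 *	cmd_bp   = sense_xp->xb_sense_bp;   -- original cmd's buf
	 *	cmd_xp   = SD_GET_XBUF(cmd_bp);
	 *	cmd_pktp = SD_GET_PKTP(cmd_bp);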
16713 */ 16714 ASSERT(sense_pktp == un->un_rqs_pktp); 16715 ASSERT(sense_bp == un->un_rqs_bp); 16716 ASSERT((sense_pktp->pkt_flags & (FLAG_SENSING | FLAG_HEAD)) == 16717 (FLAG_SENSING | FLAG_HEAD)); 16718 ASSERT((((SD_GET_XBUF(sense_xp->xb_sense_bp))->xb_pktp->pkt_flags) & 16719 FLAG_SENSING) == FLAG_SENSING); 16720 16721 /* These are the bp, xp, and pktp for the original command */ 16722 cmd_bp = sense_xp->xb_sense_bp; 16723 cmd_xp = SD_GET_XBUF(cmd_bp); 16724 cmd_pktp = SD_GET_PKTP(cmd_bp); 16725 16726 if (sense_pktp->pkt_reason != CMD_CMPLT) { 16727 /* 16728 * The REQUEST SENSE command failed. Release the REQUEST 16729 * SENSE command for re-use, get back the bp for the original 16730 * command, and attempt to re-try the original command if 16731 * FLAG_DIAGNOSE is not set in the original packet. 16732 */ 16733 SD_UPDATE_ERRSTATS(un, sd_harderrs); 16734 if ((cmd_pktp->pkt_flags & FLAG_DIAGNOSE) == 0) { 16735 cmd_bp = sd_mark_rqs_idle(un, sense_xp); 16736 sd_retry_command(un, cmd_bp, SD_RETRIES_STANDARD, 16737 NULL, NULL, EIO, (clock_t)0, NULL); 16738 return; 16739 } 16740 } 16741 16742 /* 16743 * Save the relevant sense info into the xp for the original cmd. 16744 * 16745 * Note: if the request sense failed the state info will be zero 16746 * as set in sd_mark_rqs_busy() 16747 */ 16748 cmd_xp->xb_sense_status = *(sense_pktp->pkt_scbp); 16749 cmd_xp->xb_sense_state = sense_pktp->pkt_state; 16750 actual_len = MAX_SENSE_LENGTH - sense_pktp->pkt_resid; 16751 if ((cmd_xp->xb_pkt_flags & SD_XB_USCSICMD) && 16752 (((struct uscsi_cmd *)cmd_xp->xb_pktinfo)->uscsi_rqlen > 16753 SENSE_LENGTH)) { 16754 bcopy(sense_bp->b_un.b_addr, cmd_xp->xb_sense_data, 16755 MAX_SENSE_LENGTH); 16756 cmd_xp->xb_sense_resid = sense_pktp->pkt_resid; 16757 } else { 16758 bcopy(sense_bp->b_un.b_addr, cmd_xp->xb_sense_data, 16759 SENSE_LENGTH); 16760 if (actual_len < SENSE_LENGTH) { 16761 cmd_xp->xb_sense_resid = SENSE_LENGTH - actual_len; 16762 } else { 16763 cmd_xp->xb_sense_resid = 0; 16764 } 16765 } 16766 16767 /* 16768 * Free up the RQS command.... 16769 * NOTE: 16770 * Must do this BEFORE calling sd_validate_sense_data! 16771 * sd_validate_sense_data may return the original command in 16772 * which case the pkt will be freed and the flags can no 16773 * longer be touched. 16774 * SD_MUTEX is held through this process until the command 16775 * is dispatched based upon the sense data, so there are 16776 * no race conditions. 16777 */ 16778 (void) sd_mark_rqs_idle(un, sense_xp); 16779 16780 /* 16781 * For a retryable command see if we have valid sense data, if so then 16782 * turn it over to sd_decode_sense() to figure out the right course of 16783 * action. Just fail a non-retryable command. 16784 */ 16785 if ((cmd_pktp->pkt_flags & FLAG_DIAGNOSE) == 0) { 16786 if (sd_validate_sense_data(un, cmd_bp, cmd_xp, actual_len) == 16787 SD_SENSE_DATA_IS_VALID) { 16788 sd_decode_sense(un, cmd_bp, cmd_xp, cmd_pktp); 16789 } 16790 } else { 16791 SD_DUMP_MEMORY(un, SD_LOG_IO_CORE, "Failed CDB", 16792 (uchar_t *)cmd_pktp->pkt_cdbp, CDB_SIZE, SD_LOG_HEX); 16793 SD_DUMP_MEMORY(un, SD_LOG_IO_CORE, "Sense Data", 16794 (uchar_t *)cmd_xp->xb_sense_data, SENSE_LENGTH, SD_LOG_HEX); 16795 sd_return_failed_command(un, cmd_bp, EIO); 16796 } 16797 } 16798 16799 16800 16801 16802 /* 16803 * Function: sd_handle_auto_request_sense 16804 * 16805 * Description: Processing for auto-request sense information. 
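 *
 *		With auto request sense the HBA returns a struct
 *		scsi_arq_status through pkt_scbp; a sketch of the access
 *		pattern used below:
 *
 *		    asp = (struct scsi_arq_status *)(pktp->pkt_scbp);
 *		    RQS status:   asp->sts_rqpkt_status
 *		    RQS state:    asp->sts_rqpkt_state
 *		    RQS residual: asp->sts_rqpkt_resid
 *		    sense bytes:  asp->sts_sensedata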
16806 * 16807 * Arguments: un - ptr to associated softstate 16808 * bp - ptr to buf(9S) for the command 16809 * xp - ptr to the sd_xbuf for the command 16810 * pktp - ptr to the scsi_pkt(9S) for the command 16811 * 16812 * Context: May be called under interrupt context 16813 */ 16814 16815 static void 16816 sd_handle_auto_request_sense(struct sd_lun *un, struct buf *bp, 16817 struct sd_xbuf *xp, struct scsi_pkt *pktp) 16818 { 16819 struct scsi_arq_status *asp; 16820 size_t actual_len; 16821 16822 ASSERT(un != NULL); 16823 ASSERT(mutex_owned(SD_MUTEX(un))); 16824 ASSERT(bp != NULL); 16825 ASSERT(xp != NULL); 16826 ASSERT(pktp != NULL); 16827 ASSERT(pktp != un->un_rqs_pktp); 16828 ASSERT(bp != un->un_rqs_bp); 16829 16830 /* 16831 * For auto-request sense, we get a scsi_arq_status back from 16832 * the HBA, with the sense data in the sts_sensedata member. 16833 * The pkt_scbp of the packet points to this scsi_arq_status. 16834 */ 16835 asp = (struct scsi_arq_status *)(pktp->pkt_scbp); 16836 16837 if (asp->sts_rqpkt_reason != CMD_CMPLT) { 16838 /* 16839 * The auto REQUEST SENSE failed; see if we can re-try 16840 * the original command. 16841 */ 16842 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 16843 "auto request sense failed (reason=%s)\n", 16844 scsi_rname(asp->sts_rqpkt_reason)); 16845 16846 sd_reset_target(un, pktp); 16847 16848 sd_retry_command(un, bp, SD_RETRIES_STANDARD, 16849 NULL, NULL, EIO, (clock_t)0, NULL); 16850 return; 16851 } 16852 16853 /* Save the relevant sense info into the xp for the original cmd. */ 16854 xp->xb_sense_status = *((uchar_t *)(&(asp->sts_rqpkt_status))); 16855 xp->xb_sense_state = asp->sts_rqpkt_state; 16856 xp->xb_sense_resid = asp->sts_rqpkt_resid; 16857 if (xp->xb_sense_state & STATE_XARQ_DONE) { 16858 actual_len = MAX_SENSE_LENGTH - xp->xb_sense_resid; 16859 bcopy(&asp->sts_sensedata, xp->xb_sense_data, 16860 MAX_SENSE_LENGTH); 16861 } else { 16862 if (xp->xb_sense_resid > SENSE_LENGTH) { 16863 actual_len = MAX_SENSE_LENGTH - xp->xb_sense_resid; 16864 } else { 16865 actual_len = SENSE_LENGTH - xp->xb_sense_resid; 16866 } 16867 if (xp->xb_pkt_flags & SD_XB_USCSICMD) { 16868 if ((((struct uscsi_cmd *) 16869 (xp->xb_pktinfo))->uscsi_rqlen) > actual_len) { 16870 xp->xb_sense_resid = (((struct uscsi_cmd *) 16871 (xp->xb_pktinfo))->uscsi_rqlen) - 16872 actual_len; 16873 } else { 16874 xp->xb_sense_resid = 0; 16875 } 16876 } 16877 bcopy(&asp->sts_sensedata, xp->xb_sense_data, SENSE_LENGTH); 16878 } 16879 16880 /* 16881 * See if we have valid sense data, if so then turn it over to 16882 * sd_decode_sense() to figure out the right course of action. 16883 */ 16884 if (sd_validate_sense_data(un, bp, xp, actual_len) == 16885 SD_SENSE_DATA_IS_VALID) { 16886 sd_decode_sense(un, bp, xp, pktp); 16887 } 16888 } 16889 16890 16891 /* 16892 * Function: sd_print_sense_failed_msg 16893 * 16894 * Description: Print log message when RQS has failed. 
16895 * 16896 * Arguments: un - ptr to associated softstate 16897 * bp - ptr to buf(9S) for the command 16898 * arg - generic message string ptr 16899 * code - SD_IMMEDIATE_RETRY_ISSUED, SD_DELAYED_RETRY_ISSUED, 16900 * or SD_NO_RETRY_ISSUED 16901 * 16902 * Context: May be called from interrupt context 16903 */ 16904 16905 static void 16906 sd_print_sense_failed_msg(struct sd_lun *un, struct buf *bp, void *arg, 16907 int code) 16908 { 16909 char *msgp = arg; 16910 16911 ASSERT(un != NULL); 16912 ASSERT(mutex_owned(SD_MUTEX(un))); 16913 ASSERT(bp != NULL); 16914 16915 if ((code == SD_NO_RETRY_ISSUED) && (msgp != NULL)) { 16916 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, msgp); 16917 } 16918 } 16919 16920 16921 /* 16922 * Function: sd_validate_sense_data 16923 * 16924 * Description: Check the given sense data for validity. 16925 * If the sense data is not valid, the command will 16926 * be either failed or retried! 16927 * 16928 * Return Code: SD_SENSE_DATA_IS_INVALID 16929 * SD_SENSE_DATA_IS_VALID 16930 * 16931 * Context: May be called from interrupt context 16932 */ 16933 16934 static int 16935 sd_validate_sense_data(struct sd_lun *un, struct buf *bp, struct sd_xbuf *xp, 16936 size_t actual_len) 16937 { 16938 struct scsi_extended_sense *esp; 16939 struct scsi_pkt *pktp; 16940 char *msgp = NULL; 16941 sd_ssc_t *sscp; 16942 16943 ASSERT(un != NULL); 16944 ASSERT(mutex_owned(SD_MUTEX(un))); 16945 ASSERT(bp != NULL); 16946 ASSERT(bp != un->un_rqs_bp); 16947 ASSERT(xp != NULL); 16948 ASSERT(un->un_fm_private != NULL); 16949 16950 pktp = SD_GET_PKTP(bp); 16951 ASSERT(pktp != NULL); 16952 16953 sscp = &((struct sd_fm_internal *)(un->un_fm_private))->fm_ssc; 16954 ASSERT(sscp != NULL); 16955 16956 /* 16957 * Check the status of the RQS command (auto or manual). 16958 */ 16959 switch (xp->xb_sense_status & STATUS_MASK) { 16960 case STATUS_GOOD: 16961 break; 16962 16963 case STATUS_RESERVATION_CONFLICT: 16964 sd_pkt_status_reservation_conflict(un, bp, xp, pktp); 16965 return (SD_SENSE_DATA_IS_INVALID); 16966 16967 case STATUS_BUSY: 16968 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 16969 "Busy Status on REQUEST SENSE\n"); 16970 sd_retry_command(un, bp, SD_RETRIES_BUSY, NULL, 16971 NULL, EIO, un->un_busy_timeout / 500, kstat_waitq_enter); 16972 return (SD_SENSE_DATA_IS_INVALID); 16973 16974 case STATUS_QFULL: 16975 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 16976 "QFULL Status on REQUEST SENSE\n"); 16977 sd_retry_command(un, bp, SD_RETRIES_STANDARD, NULL, 16978 NULL, EIO, un->un_busy_timeout / 500, kstat_waitq_enter); 16979 return (SD_SENSE_DATA_IS_INVALID); 16980 16981 case STATUS_CHECK: 16982 case STATUS_TERMINATED: 16983 msgp = "Check Condition on REQUEST SENSE\n"; 16984 goto sense_failed; 16985 16986 default: 16987 msgp = "Not STATUS_GOOD on REQUEST_SENSE\n"; 16988 goto sense_failed; 16989 } 16990 16991 /* 16992 * See if we got the minimum required amount of sense data. 16993 * Note: We are assuming the returned sense data is SENSE_LENGTH bytes 16994 * or less. 
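	 *
	 * In outline, the validity tests applied from here on are
	 * (sketch):
	 *
	 *	1. STATE_XFERRED_DATA is set and actual_len != 0
	 *	2. actual_len >= SUN_MIN_SENSE_LENGTH
	 *	3. es_class == CLASS_EXTENDED_SENSE
	 *	4. es_code is one of the known CODE_FMT_* values
	 *
	 * Failing any of these either retries or fails the original
	 * command.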
16995 	 */
16996 	if (((xp->xb_sense_state & STATE_XFERRED_DATA) == 0) ||
16997 	    (actual_len == 0)) {
16998 		msgp = "Request Sense couldn't get sense data\n";
16999 		goto sense_failed;
17000 	}
17001 
17002 	if (actual_len < SUN_MIN_SENSE_LENGTH) {
17003 		msgp = "Not enough sense information\n";
17004 		/* Mark the ssc_flags for detecting invalid sense data */
17005 		if (!(xp->xb_pkt_flags & SD_XB_USCSICMD)) {
17006 			sd_ssc_set_info(sscp, SSC_FLAGS_INVALID_SENSE, 0,
17007 			    "sense-data");
17008 		}
17009 		goto sense_failed;
17010 	}
17011 
17012 	/*
17013 	 * We require the extended sense data
17014 	 */
17015 	esp = (struct scsi_extended_sense *)xp->xb_sense_data;
17016 	if (esp->es_class != CLASS_EXTENDED_SENSE) {
17017 		if ((pktp->pkt_flags & FLAG_SILENT) == 0) {
17018 			static char tmp[8];
17019 			static char buf[148];
17020 			char *p = (char *)(xp->xb_sense_data);
17021 			int i;
17022 
17023 			mutex_enter(&sd_sense_mutex);
17024 			(void) strcpy(buf, "undecodable sense information:");
17025 			for (i = 0; i < actual_len; i++) {
17026 				(void) sprintf(tmp, " 0x%x", *(p++)&0xff);
17027 				(void) strcpy(&buf[strlen(buf)], tmp);
17028 			}
17029 			i = strlen(buf);
17030 			(void) strcpy(&buf[i], "-(assumed fatal)\n");
17031 
17032 			if (SD_FM_LOG(un) == SD_FM_LOG_NSUP) {
17033 				scsi_log(SD_DEVINFO(un), sd_label,
17034 				    CE_WARN, buf);
17035 			}
17036 			mutex_exit(&sd_sense_mutex);
17037 		}
17038 
17039 		/* Mark the ssc_flags for detecting invalid sense data */
17040 		if (!(xp->xb_pkt_flags & SD_XB_USCSICMD)) {
17041 			sd_ssc_set_info(sscp, SSC_FLAGS_INVALID_SENSE, 0,
17042 			    "sense-data");
17043 		}
17044 
17045 		/* Note: Legacy behavior, fail the command with no retry */
17046 		sd_return_failed_command(un, bp, EIO);
17047 		return (SD_SENSE_DATA_IS_INVALID);
17048 	}
17049 
17050 	/*
17051 	 * Check that es_code is valid (es_class concatenated with es_code
17052 	 * makes up the "response code" field). es_class will always be 7, so
17053 	 * make sure es_code is 0, 1, 2, 3 or 0xf. es_code will indicate the
17054 	 * format.
17055 	 */
17056 	if ((esp->es_code != CODE_FMT_FIXED_CURRENT) &&
17057 	    (esp->es_code != CODE_FMT_FIXED_DEFERRED) &&
17058 	    (esp->es_code != CODE_FMT_DESCR_CURRENT) &&
17059 	    (esp->es_code != CODE_FMT_DESCR_DEFERRED) &&
17060 	    (esp->es_code != CODE_FMT_VENDOR_SPECIFIC)) {
17061 		/* Mark the ssc_flags for detecting invalid sense data */
17062 		if (!(xp->xb_pkt_flags & SD_XB_USCSICMD)) {
17063 			sd_ssc_set_info(sscp, SSC_FLAGS_INVALID_SENSE, 0,
17064 			    "sense-data");
17065 		}
17066 		goto sense_failed;
17067 	}
17068 
17069 	return (SD_SENSE_DATA_IS_VALID);
17070 
17071 sense_failed:
17072 	/*
17073 	 * If the request sense failed (for whatever reason), attempt
17074 	 * to retry the original command.
17075 	 */
17076 #if defined(__i386) || defined(__amd64)
17077 	/*
17078 	 * SD_RETRY_DELAY is conditionally compiled (#if fibre) in
17079 	 * sddef.h for the Sparc platform, while x86 uses one binary
17080 	 * for both SCSI and FC.
17081 	 * The SD_RETRY_DELAY value needs to be adjusted here
17082 	 * whenever SD_RETRY_DELAY changes in sddef.h.
17083 	 */
17084 	sd_retry_command(un, bp, SD_RETRIES_STANDARD,
17085 	    sd_print_sense_failed_msg, msgp, EIO,
17086 	    un->un_f_is_fibre ? drv_usectohz(100000) : (clock_t)0, NULL);
17087 #else
17088 	sd_retry_command(un, bp, SD_RETRIES_STANDARD,
17089 	    sd_print_sense_failed_msg, msgp, EIO, SD_RETRY_DELAY, NULL);
17090 #endif
17091 
17092 	return (SD_SENSE_DATA_IS_INVALID);
17093 }
17094 
17095 /*
17096  * Function: sd_decode_sense
17097  *
17098  * Description: Take recovery action(s) when SCSI Sense Data is received.
17099  *
17100  * Context: Interrupt context.
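 *
 *		For example (a sketch based on the fixed-format sense
 *		data layout), a sense buffer whose sense-key field
 *		carries 0x6 decodes as KEY_UNIT_ATTENTION via
 *		scsi_sense_key() and is routed to
 *		sd_sense_key_unit_attention() below.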
17101  */
17102 
17103 static void
17104 sd_decode_sense(struct sd_lun *un, struct buf *bp, struct sd_xbuf *xp,
17105     struct scsi_pkt *pktp)
17106 {
17107 	uint8_t sense_key;
17108 
17109 	ASSERT(un != NULL);
17110 	ASSERT(mutex_owned(SD_MUTEX(un)));
17111 	ASSERT(bp != NULL);
17112 	ASSERT(bp != un->un_rqs_bp);
17113 	ASSERT(xp != NULL);
17114 	ASSERT(pktp != NULL);
17115 
17116 	sense_key = scsi_sense_key(xp->xb_sense_data);
17117 
17118 	switch (sense_key) {
17119 	case KEY_NO_SENSE:
17120 		sd_sense_key_no_sense(un, bp, xp, pktp);
17121 		break;
17122 	case KEY_RECOVERABLE_ERROR:
17123 		sd_sense_key_recoverable_error(un, xp->xb_sense_data,
17124 		    bp, xp, pktp);
17125 		break;
17126 	case KEY_NOT_READY:
17127 		sd_sense_key_not_ready(un, xp->xb_sense_data,
17128 		    bp, xp, pktp);
17129 		break;
17130 	case KEY_MEDIUM_ERROR:
17131 	case KEY_HARDWARE_ERROR:
17132 		sd_sense_key_medium_or_hardware_error(un,
17133 		    xp->xb_sense_data, bp, xp, pktp);
17134 		break;
17135 	case KEY_ILLEGAL_REQUEST:
17136 		sd_sense_key_illegal_request(un, bp, xp, pktp);
17137 		break;
17138 	case KEY_UNIT_ATTENTION:
17139 		sd_sense_key_unit_attention(un, xp->xb_sense_data,
17140 		    bp, xp, pktp);
17141 		break;
17142 	case KEY_WRITE_PROTECT:
17143 	case KEY_VOLUME_OVERFLOW:
17144 	case KEY_MISCOMPARE:
17145 		sd_sense_key_fail_command(un, bp, xp, pktp);
17146 		break;
17147 	case KEY_BLANK_CHECK:
17148 		sd_sense_key_blank_check(un, bp, xp, pktp);
17149 		break;
17150 	case KEY_ABORTED_COMMAND:
17151 		sd_sense_key_aborted_command(un, bp, xp, pktp);
17152 		break;
17153 	case KEY_VENDOR_UNIQUE:
17154 	case KEY_COPY_ABORTED:
17155 	case KEY_EQUAL:
17156 	case KEY_RESERVED:
17157 	default:
17158 		sd_sense_key_default(un, xp->xb_sense_data,
17159 		    bp, xp, pktp);
17160 		break;
17161 	}
17162 }
17163 
17164 
17165 /*
17166  * Function: sd_dump_memory
17167  *
17168  * Description: Debug logging routine to print the contents of a user-provided
17169  *		buffer. The output of the buffer is broken up into 256-byte
17170  *		segments due to a size constraint of the scsi_log
17171  *		implementation.
17172  *
17173  * Arguments: un - ptr to softstate
17174  *		comp - component mask
17175  *		title - "title" string to precede data when printed
17176  *		data - ptr to data block to be printed
17177  *		len - size of data block to be printed
17178  *		fmt - SD_LOG_HEX (use 0x%02x format) or SD_LOG_CHAR (use %c)
17179  *
17180  * Context: May be called from interrupt context
17181  */
17182 
17183 #define	SD_DUMP_MEMORY_BUF_SIZE	256
17184 
17185 static char *sd_dump_format_string[] = {
17186 		" 0x%02x",
17187 		" %c"
17188 };
17189 
17190 static void
17191 sd_dump_memory(struct sd_lun *un, uint_t comp, char *title, uchar_t *data,
17192     int len, int fmt)
17193 {
17194 	int i, j;
17195 	int avail_count;
17196 	int start_offset;
17197 	int end_offset;
17198 	size_t entry_len;
17199 	char *bufp;
17200 	char *local_buf;
17201 	char *format_string;
17202 
17203 	ASSERT((fmt == SD_LOG_HEX) || (fmt == SD_LOG_CHAR));
17204 
17205 	/*
17206 	 * In the debug version of the driver, this function is called from a
17207 	 * number of places which are NOPs in the release driver.
17208 	 * The debug driver therefore has additional methods of filtering
17209 	 * debug output.
17210 	 */
17211 #ifdef SDDEBUG
17212 	/*
17213 	 * In the debug version of the driver we can reduce the amount of debug
17214 	 * messages by setting sd_error_level to something other than
17215 	 * SCSI_ERR_ALL and clearing bits in sd_level_mask and
17216 	 * sd_component_mask.
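	 *
	 * As an illustrative example (the CDB bytes shown are
	 * hypothetical), with sd_error_level == SCSI_ERR_ALL a caller
	 * such as
	 *
	 *	sd_dump_memory(un, SD_LOG_IO, "Failed CDB",
	 *	    (uchar_t *)pktp->pkt_cdbp, CDB_SIZE, SD_LOG_HEX);
	 *
	 * produces log lines of the form:
	 *
	 *	Failed CDB: 0x28 0x00 0x00 0x12 0x34 0x00 ...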
17217 	 */
17218 	if (((sd_level_mask & (SD_LOGMASK_DUMP_MEM | SD_LOGMASK_DIAG)) == 0) ||
17219 	    (sd_error_level != SCSI_ERR_ALL)) {
17220 		return;
17221 	}
17222 	if (((sd_component_mask & comp) == 0) ||
17223 	    (sd_error_level != SCSI_ERR_ALL)) {
17224 		return;
17225 	}
17226 #else
17227 	if (sd_error_level != SCSI_ERR_ALL) {
17228 		return;
17229 	}
17230 #endif
17231 
17232 	local_buf = kmem_zalloc(SD_DUMP_MEMORY_BUF_SIZE, KM_SLEEP);
17233 	bufp = local_buf;
17234 	/*
17235 	 * Available length is the length of local_buf[], minus the
17236 	 * length of the title string, minus one for the ":", minus
17237 	 * one for the newline, minus one for the NULL terminator.
17238 	 * This gives the #bytes available for holding the printed
17239 	 * values from the given data buffer.
17240 	 */
17241 	if (fmt == SD_LOG_HEX) {
17242 		format_string = sd_dump_format_string[0];
17243 	} else /* SD_LOG_CHAR */ {
17244 		format_string = sd_dump_format_string[1];
17245 	}
17246 	/*
17247 	 * Available count is the number of elements from the given
17248 	 * data buffer that we can fit into the available length.
17249 	 * This is based upon the size of the format string used.
17250 	 * Make one entry and find its size.
17251 	 */
17252 	(void) sprintf(bufp, format_string, data[0]);
17253 	entry_len = strlen(bufp);
17254 	avail_count = (SD_DUMP_MEMORY_BUF_SIZE - strlen(title) - 3) / entry_len;
17255 
17256 	j = 0;
17257 	while (j < len) {
17258 		bufp = local_buf;
17259 		bzero(bufp, SD_DUMP_MEMORY_BUF_SIZE);
17260 		start_offset = j;
17261 
17262 		end_offset = start_offset + avail_count;
17263 
17264 		(void) sprintf(bufp, "%s:", title);
17265 		bufp += strlen(bufp);
17266 		for (i = start_offset; ((i < end_offset) && (j < len));
17267 		    i++, j++) {
17268 			(void) sprintf(bufp, format_string, data[i]);
17269 			bufp += entry_len;
17270 		}
17271 		(void) sprintf(bufp, "\n");
17272 
17273 		scsi_log(SD_DEVINFO(un), sd_label, CE_NOTE, "%s", local_buf);
17274 	}
17275 	kmem_free(local_buf, SD_DUMP_MEMORY_BUF_SIZE);
17276 }
17277 
17278 /*
17279  * Function: sd_print_sense_msg
17280  *
17281  * Description: Log a message based upon the given sense data.
17282  *
17283  * Arguments: un - ptr to associated softstate
17284  *		bp - ptr to buf(9S) for the command
17285  *		arg - ptr to associated sd_sense_info struct
17286  *		code - SD_IMMEDIATE_RETRY_ISSUED, SD_DELAYED_RETRY_ISSUED,
17287  *		or SD_NO_RETRY_ISSUED
17288  *
17289  * Context: May be called from interrupt context
17290  */
17291 
17292 static void
17293 sd_print_sense_msg(struct sd_lun *un, struct buf *bp, void *arg, int code)
17294 {
17295 	struct sd_xbuf *xp;
17296 	struct scsi_pkt *pktp;
17297 	uint8_t *sensep;
17298 	daddr_t request_blkno;
17299 	diskaddr_t err_blkno;
17300 	int severity;
17301 	int pfa_flag;
17302 	extern struct scsi_key_strings scsi_cmds[];
17303 
17304 	ASSERT(un != NULL);
17305 	ASSERT(mutex_owned(SD_MUTEX(un)));
17306 	ASSERT(bp != NULL);
17307 	xp = SD_GET_XBUF(bp);
17308 	ASSERT(xp != NULL);
17309 	pktp = SD_GET_PKTP(bp);
17310 	ASSERT(pktp != NULL);
17311 	ASSERT(arg != NULL);
17312 
17313 	severity = ((struct sd_sense_info *)(arg))->ssi_severity;
17314 	pfa_flag = ((struct sd_sense_info *)(arg))->ssi_pfa_flag;
17315 
17316 	if ((code == SD_DELAYED_RETRY_ISSUED) ||
17317 	    (code == SD_IMMEDIATE_RETRY_ISSUED)) {
17318 		severity = SCSI_ERR_RETRYABLE;
17319 	}
17320 
17321 	/* Use absolute block number for the request block number */
17322 	request_blkno = xp->xb_blkno;
17323 
17324 	/*
17325 	 * Now try to get the error block number from the sense data
17326 	 */
17327 	sensep = xp->xb_sense_data;
17328 
17329 	if (scsi_sense_info_uint64(sensep, SENSE_LENGTH,
17330 	    (uint64_t *)&err_blkno)) {
17331 		/*
17332 		 * We retrieved the error block number from the information
17333 		 * portion of the sense data.
17334 		 *
17335 		 * For USCSI commands we are better off using the error
17336 		 * block no. as the requested block no. (This is the best
17337 		 * we can estimate.)
17338 		 */
17339 		if ((SD_IS_BUFIO(xp) == FALSE) &&
17340 		    ((pktp->pkt_flags & FLAG_SILENT) == 0)) {
17341 			request_blkno = err_blkno;
17342 		}
17343 	} else {
17344 		/*
17345 		 * Without the es_valid bit set (for fixed format) or an
17346 		 * information descriptor (for descriptor format) we cannot
17347 		 * be certain of the error blkno, so just use the
17348 		 * request_blkno.
17349 		 */
17350 		err_blkno = (diskaddr_t)request_blkno;
17351 	}
17352 
17353 	/*
17354 	 * The following will log the buffer contents for the release driver
17355 	 * if the SD_LOGMASK_DIAG bit of sd_level_mask is set, or the error
17356 	 * level is set to verbose.
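	 *
	 * As a recap, a sketch of the block-number selection above
	 * ("info" stands in for the uint64_t that the actual code
	 * decodes directly into err_blkno):
	 *
	 *	if (scsi_sense_info_uint64(sensep, SENSE_LENGTH, &info))
	 *		err_blkno = (diskaddr_t)info;  -- sense info field
	 *	else
	 *		err_blkno = request_blkno;     -- fall back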
17357 */ 17358 sd_dump_memory(un, SD_LOG_IO, "Failed CDB", 17359 (uchar_t *)pktp->pkt_cdbp, CDB_SIZE, SD_LOG_HEX); 17360 sd_dump_memory(un, SD_LOG_IO, "Sense Data", 17361 (uchar_t *)sensep, SENSE_LENGTH, SD_LOG_HEX); 17362 17363 if (pfa_flag == FALSE) { 17364 /* This is normally only set for USCSI */ 17365 if ((pktp->pkt_flags & FLAG_SILENT) != 0) { 17366 return; 17367 } 17368 17369 if ((SD_IS_BUFIO(xp) == TRUE) && 17370 (((sd_level_mask & SD_LOGMASK_DIAG) == 0) && 17371 (severity < sd_error_level))) { 17372 return; 17373 } 17374 } 17375 /* 17376 * Check for Sonoma Failover and keep a count of how many failed I/O's 17377 */ 17378 if ((SD_IS_LSI(un)) && 17379 (scsi_sense_key(sensep) == KEY_ILLEGAL_REQUEST) && 17380 (scsi_sense_asc(sensep) == 0x94) && 17381 (scsi_sense_ascq(sensep) == 0x01)) { 17382 un->un_sonoma_failure_count++; 17383 if (un->un_sonoma_failure_count > 1) { 17384 return; 17385 } 17386 } 17387 17388 if (SD_FM_LOG(un) == SD_FM_LOG_NSUP || 17389 ((scsi_sense_key(sensep) == KEY_RECOVERABLE_ERROR) && 17390 (pktp->pkt_resid == 0))) { 17391 scsi_vu_errmsg(SD_SCSI_DEVP(un), pktp, sd_label, severity, 17392 request_blkno, err_blkno, scsi_cmds, 17393 (struct scsi_extended_sense *)sensep, 17394 un->un_additional_codes, NULL); 17395 } 17396 } 17397 17398 /* 17399 * Function: sd_sense_key_no_sense 17400 * 17401 * Description: Recovery action when sense data was not received. 17402 * 17403 * Context: May be called from interrupt context 17404 */ 17405 17406 static void 17407 sd_sense_key_no_sense(struct sd_lun *un, struct buf *bp, 17408 struct sd_xbuf *xp, struct scsi_pkt *pktp) 17409 { 17410 struct sd_sense_info si; 17411 17412 ASSERT(un != NULL); 17413 ASSERT(mutex_owned(SD_MUTEX(un))); 17414 ASSERT(bp != NULL); 17415 ASSERT(xp != NULL); 17416 ASSERT(pktp != NULL); 17417 17418 si.ssi_severity = SCSI_ERR_FATAL; 17419 si.ssi_pfa_flag = FALSE; 17420 17421 SD_UPDATE_ERRSTATS(un, sd_softerrs); 17422 17423 sd_retry_command(un, bp, SD_RETRIES_STANDARD, sd_print_sense_msg, 17424 &si, EIO, (clock_t)0, NULL); 17425 } 17426 17427 17428 /* 17429 * Function: sd_sense_key_recoverable_error 17430 * 17431 * Description: Recovery actions for a SCSI "Recovered Error" sense key. 17432 * 17433 * Context: May be called from interrupt context 17434 */ 17435 17436 static void 17437 sd_sense_key_recoverable_error(struct sd_lun *un, 17438 uint8_t *sense_datap, 17439 struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp) 17440 { 17441 struct sd_sense_info si; 17442 uint8_t asc = scsi_sense_asc(sense_datap); 17443 17444 ASSERT(un != NULL); 17445 ASSERT(mutex_owned(SD_MUTEX(un))); 17446 ASSERT(bp != NULL); 17447 ASSERT(xp != NULL); 17448 ASSERT(pktp != NULL); 17449 17450 /* 17451 * 0x5D: FAILURE PREDICTION THRESHOLD EXCEEDED 17452 */ 17453 if ((asc == 0x5D) && (sd_report_pfa != 0)) { 17454 SD_UPDATE_ERRSTATS(un, sd_rq_pfa_err); 17455 si.ssi_severity = SCSI_ERR_INFO; 17456 si.ssi_pfa_flag = TRUE; 17457 } else { 17458 SD_UPDATE_ERRSTATS(un, sd_softerrs); 17459 SD_UPDATE_ERRSTATS(un, sd_rq_recov_err); 17460 si.ssi_severity = SCSI_ERR_RECOVERED; 17461 si.ssi_pfa_flag = FALSE; 17462 } 17463 17464 if (pktp->pkt_resid == 0) { 17465 sd_print_sense_msg(un, bp, &si, SD_NO_RETRY_ISSUED); 17466 sd_return_command(un, bp); 17467 return; 17468 } 17469 17470 sd_retry_command(un, bp, SD_RETRIES_STANDARD, sd_print_sense_msg, 17471 &si, EIO, (clock_t)0, NULL); 17472 } 17473 17474 17475 17476 17477 /* 17478 * Function: sd_sense_key_not_ready 17479 * 17480 * Description: Recovery actions for a SCSI "Not Ready" sense key. 
17481  *
17482  * Context: May be called from interrupt context
17483  */
17484 
17485 static void
17486 sd_sense_key_not_ready(struct sd_lun *un,
17487     uint8_t *sense_datap,
17488     struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp)
17489 {
17490 	struct sd_sense_info si;
17491 	uint8_t asc = scsi_sense_asc(sense_datap);
17492 	uint8_t ascq = scsi_sense_ascq(sense_datap);
17493 
17494 	ASSERT(un != NULL);
17495 	ASSERT(mutex_owned(SD_MUTEX(un)));
17496 	ASSERT(bp != NULL);
17497 	ASSERT(xp != NULL);
17498 	ASSERT(pktp != NULL);
17499 
17500 	si.ssi_severity = SCSI_ERR_FATAL;
17501 	si.ssi_pfa_flag = FALSE;
17502 
17503 	/*
17504 	 * Update error stats after first NOT READY error. Disks may have
17505 	 * been powered down and may need to be restarted.  For CDROMs,
17506 	 * report NOT READY errors only if media is present.
17507 	 */
17508 	if ((ISCD(un) && (asc == 0x3A)) ||
17509 	    (xp->xb_nr_retry_count > 0)) {
17510 		SD_UPDATE_ERRSTATS(un, sd_harderrs);
17511 		SD_UPDATE_ERRSTATS(un, sd_rq_ntrdy_err);
17512 	}
17513 
17514 	/*
17515 	 * Just fail if the "not ready" retry limit has been reached.
17516 	 */
17517 	if (xp->xb_nr_retry_count >= un->un_notready_retry_count) {
17518 		/* Special check for error message printing for removables. */
17519 		if (un->un_f_has_removable_media && (asc == 0x04) &&
17520 		    (ascq >= 0x04)) {
17521 			si.ssi_severity = SCSI_ERR_ALL;
17522 		}
17523 		goto fail_command;
17524 	}
17525 
17526 	/*
17527 	 * Check the ASC and ASCQ in the sense data as needed, to determine
17528 	 * what to do.
17529 	 */
17530 	switch (asc) {
17531 	case 0x04:	/* LOGICAL UNIT NOT READY */
17532 		/*
17533 		 * Disk drives that don't spin up result in a very long delay
17534 		 * in format without warning messages. We will log a message
17535 		 * if the error level is set to verbose.
17536 		 */
17537 		if (sd_error_level < SCSI_ERR_RETRYABLE) {
17538 			scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
17539 			    "logical unit not ready, resetting disk\n");
17540 		}
17541 
17542 		/*
17543 		 * There are different requirements for CDROMs and disks for
17544 		 * the number of retries.  If a CD-ROM is giving this, it is
17545 		 * probably reading TOC and is in the process of getting
17546 		 * ready, so we should keep on trying for a long time to make
17547 		 * sure that all types of media are taken into account (for
17548 		 * some media the drive takes a long time to read TOC).  For
17549 		 * disks we do not want to retry this too many times as this
17550 		 * can cause a long hang in format when the drive refuses to
17551 		 * spin up (a very common failure).
17552 		 */
17553 		switch (ascq) {
17554 		case 0x00:  /* LUN NOT READY, CAUSE NOT REPORTABLE */
17555 			/*
17556 			 * Disk drives frequently refuse to spin up which
17557 			 * results in a very long hang in format without
17558 			 * warning messages.
17559 			 *
17560 			 * Note: This code preserves the legacy behavior of
17561 			 * comparing xb_nr_retry_count against zero for fibre
17562 			 * channel targets instead of comparing against the
17563 			 * un_reset_retry_count value.  The reason for this
17564 			 * discrepancy has been so utterly lost beneath the
17565 			 * Sands of Time that even Indiana Jones could not
17566 			 * find it.
17567 			 */
17568 			if (un->un_f_is_fibre == TRUE) {
17569 				if (((sd_level_mask & SD_LOGMASK_DIAG) ||
17570 				    (xp->xb_nr_retry_count > 0)) &&
17571 				    (un->un_startstop_timeid == NULL)) {
17572 					scsi_log(SD_DEVINFO(un), sd_label,
17573 					    CE_WARN, "logical unit not ready, "
17574 					    "resetting disk\n");
17575 					sd_reset_target(un, pktp);
17576 				}
17577 			} else {
17578 				if (((sd_level_mask & SD_LOGMASK_DIAG) ||
17579 				    (xp->xb_nr_retry_count >
17580 				    un->un_reset_retry_count)) &&
17581 				    (un->un_startstop_timeid == NULL)) {
17582 					scsi_log(SD_DEVINFO(un), sd_label,
17583 					    CE_WARN, "logical unit not ready, "
17584 					    "resetting disk\n");
17585 					sd_reset_target(un, pktp);
17586 				}
17587 			}
17588 			break;
17589 
17590 		case 0x01:  /* LUN IS IN PROCESS OF BECOMING READY */
17591 			/*
17592 			 * If the target is in the process of becoming
17593 			 * ready, just proceed with the retry. This can
17594 			 * happen with CD-ROMs that take a long time to
17595 			 * read TOC after a power cycle or reset.
17596 			 */
17597 			goto do_retry;
17598 
17599 		case 0x02:  /* LUN NOT READY, INITIALIZING CMD REQUIRED */
17600 			break;
17601 
17602 		case 0x03:  /* LUN NOT READY, MANUAL INTERVENTION REQUIRED */
17603 			/*
17604 			 * Retries cannot help here so just fail right away.
17605 			 */
17606 			goto fail_command;
17607 
17608 		case 0x88:
17609 			/*
17610 			 * Vendor-unique code for T3/T4: it indicates a
17611 			 * path problem in a multipathed config, but as far as
17612 			 * the target driver is concerned it equates to a fatal
17613 			 * error, so we should just fail the command right away
17614 			 * (without printing anything to the console). If this
17615 			 * is not a T3/T4, fall thru to the default recovery
17616 			 * action.
17617 			 * T3/T4 is FC only, don't need to check is_fibre
17618 			 */
17619 			if (SD_IS_T3(un) || SD_IS_T4(un)) {
17620 				sd_return_failed_command(un, bp, EIO);
17621 				return;
17622 			}
17623 			/* FALLTHRU */
17624 
17625 		case 0x04:  /* LUN NOT READY, FORMAT IN PROGRESS */
17626 		case 0x05:  /* LUN NOT READY, REBUILD IN PROGRESS */
17627 		case 0x06:  /* LUN NOT READY, RECALCULATION IN PROGRESS */
17628 		case 0x07:  /* LUN NOT READY, OPERATION IN PROGRESS */
17629 		case 0x08:  /* LUN NOT READY, LONG WRITE IN PROGRESS */
17630 		default:    /* Possible future codes in SCSI spec? */
17631 			/*
17632 			 * For removable-media devices, do not retry if
17633 			 * ASCQ > 2 as these result mostly from USCSI commands
17634 			 * on MMC devices issued to check status of an
17635 			 * operation initiated in immediate mode.  Also for
17636 			 * ASCQ >= 4 do not print console messages as these
17637 			 * mainly represent a user-initiated operation
17638 			 * instead of a system failure.
17639 			 */
17640 			if (un->un_f_has_removable_media) {
17641 				si.ssi_severity = SCSI_ERR_ALL;
17642 				goto fail_command;
17643 			}
17644 			break;
17645 		}
17646 
17647 		/*
17648 		 * As part of our recovery attempt for the NOT READY
17649 		 * condition, we issue a START STOP UNIT command. However
17650 		 * we want to wait for a short delay before attempting this
17651 		 * as there may still be more commands coming back from the
17652 		 * target with the check condition. To do this we use
17653 		 * timeout(9F) to call sd_start_stop_unit_callback() after
17654 		 * the delay interval expires. (sd_start_stop_unit_callback()
17655 		 * dispatches sd_start_stop_unit_task(), which will issue
17656 		 * the actual START STOP UNIT command.) The delay interval
17657 		 * is one-half of the delay that we will use to retry the
17658 		 * command that generated the NOT READY condition.
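		 *
		 * Schematically (a condensed sketch of the code below):
		 *
		 *	un->un_startstop_timeid =
		 *	    timeout(sd_start_stop_unit_callback, un,
		 *	    un->un_busy_timeout / 2);
		 *	xp->xb_nr_retry_count++;
		 *	sd_set_retry_bp(un, bp, 0, kstat_waitq_enter);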
17659 		 *
17660 		 * Note that we could just dispatch sd_start_stop_unit_task()
17661 		 * from here and allow it to sleep for the delay interval,
17662 		 * but then we would be tying up the taskq thread
17663 		 * unnecessarily for the duration of the delay.
17664 		 *
17665 		 * Do not issue the START STOP UNIT if the current command
17666 		 * is already a START STOP UNIT.
17667 		 */
17668 		if (pktp->pkt_cdbp[0] == SCMD_START_STOP) {
17669 			break;
17670 		}
17671 
17672 		/*
17673 		 * Do not schedule the timeout if one is already pending.
17674 		 */
17675 		if (un->un_startstop_timeid != NULL) {
17676 			SD_INFO(SD_LOG_ERROR, un,
17677 			    "sd_sense_key_not_ready: restart already issued to"
17678 			    " %s%d\n", ddi_driver_name(SD_DEVINFO(un)),
17679 			    ddi_get_instance(SD_DEVINFO(un)));
17680 			break;
17681 		}
17682 
17683 		/*
17684 		 * Schedule the START STOP UNIT command, then queue the command
17685 		 * for a retry.
17686 		 *
17687 		 * Note: A timeout is not scheduled for this retry because we
17688 		 * want the retry to be serial with the START_STOP_UNIT. The
17689 		 * retry will be started when the START_STOP_UNIT is completed
17690 		 * in sd_start_stop_unit_task.
17691 		 */
17692 		un->un_startstop_timeid = timeout(sd_start_stop_unit_callback,
17693 		    un, un->un_busy_timeout / 2);
17694 		xp->xb_nr_retry_count++;
17695 		sd_set_retry_bp(un, bp, 0, kstat_waitq_enter);
17696 		return;
17697 
17698 	case 0x05:  /* LOGICAL UNIT DOES NOT RESPOND TO SELECTION */
17699 		if (sd_error_level < SCSI_ERR_RETRYABLE) {
17700 			scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
17701 			    "unit does not respond to selection\n");
17702 		}
17703 		break;
17704 
17705 	case 0x3A:  /* MEDIUM NOT PRESENT */
17706 		if (sd_error_level >= SCSI_ERR_FATAL) {
17707 			scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
17708 			    "Caddy not inserted in drive\n");
17709 		}
17710 
17711 		sr_ejected(un);
17712 		un->un_mediastate = DKIO_EJECTED;
17713 		/* The state has changed, inform the media watch routines */
17714 		cv_broadcast(&un->un_state_cv);
17715 		/* Just fail if no media is present in the drive. */
17716 		goto fail_command;
17717 
17718 	default:
17719 		if (sd_error_level < SCSI_ERR_RETRYABLE) {
17720 			scsi_log(SD_DEVINFO(un), sd_label, CE_NOTE,
17721 			    "Unit not Ready. Additional sense code 0x%x\n",
17722 			    asc);
17723 		}
17724 		break;
17725 	}
17726 
17727 do_retry:
17728 
17729 	/*
17730 	 * Retry the command, as some targets may report NOT READY for
17731 	 * several seconds after being reset.
17732 	 */
17733 	xp->xb_nr_retry_count++;
17734 	si.ssi_severity = SCSI_ERR_RETRYABLE;
17735 	sd_retry_command(un, bp, SD_RETRIES_NOCHECK, sd_print_sense_msg,
17736 	    &si, EIO, un->un_busy_timeout, NULL);
17737 
17738 	return;
17739 
17740 fail_command:
17741 	sd_print_sense_msg(un, bp, &si, SD_NO_RETRY_ISSUED);
17742 	sd_return_failed_command(un, bp, EIO);
17743 }
17744 
17745 
17746 
17747 /*
17748  * Function: sd_sense_key_medium_or_hardware_error
17749  *
17750  * Description: Recovery actions for a SCSI "Medium Error" or "Hardware Error"
17751  *		sense key.
17752 * 17753 * Context: May be called from interrupt context 17754 */ 17755 17756 static void 17757 sd_sense_key_medium_or_hardware_error(struct sd_lun *un, 17758 uint8_t *sense_datap, 17759 struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp) 17760 { 17761 struct sd_sense_info si; 17762 uint8_t sense_key = scsi_sense_key(sense_datap); 17763 uint8_t asc = scsi_sense_asc(sense_datap); 17764 17765 ASSERT(un != NULL); 17766 ASSERT(mutex_owned(SD_MUTEX(un))); 17767 ASSERT(bp != NULL); 17768 ASSERT(xp != NULL); 17769 ASSERT(pktp != NULL); 17770 17771 si.ssi_severity = SCSI_ERR_FATAL; 17772 si.ssi_pfa_flag = FALSE; 17773 17774 if (sense_key == KEY_MEDIUM_ERROR) { 17775 SD_UPDATE_ERRSTATS(un, sd_rq_media_err); 17776 } 17777 17778 SD_UPDATE_ERRSTATS(un, sd_harderrs); 17779 17780 if ((un->un_reset_retry_count != 0) && 17781 (xp->xb_retry_count == un->un_reset_retry_count)) { 17782 mutex_exit(SD_MUTEX(un)); 17783 /* Do NOT do a RESET_ALL here: too intrusive. (4112858) */ 17784 if (un->un_f_allow_bus_device_reset == TRUE) { 17785 17786 boolean_t try_resetting_target = B_TRUE; 17787 17788 /* 17789 * We need to be able to handle specific ASC when we are 17790 * handling a KEY_HARDWARE_ERROR. In particular 17791 * taking the default action of resetting the target may 17792 * not be the appropriate way to attempt recovery. 17793 * Resetting a target because of a single LUN failure 17794 * victimizes all LUNs on that target. 17795 * 17796 * This is true for the LSI arrays, if an LSI 17797 * array controller returns an ASC of 0x84 (LUN Dead) we 17798 * should trust it. 17799 */ 17800 17801 if (sense_key == KEY_HARDWARE_ERROR) { 17802 switch (asc) { 17803 case 0x84: 17804 if (SD_IS_LSI(un)) { 17805 try_resetting_target = B_FALSE; 17806 } 17807 break; 17808 default: 17809 break; 17810 } 17811 } 17812 17813 if (try_resetting_target == B_TRUE) { 17814 int reset_retval = 0; 17815 if (un->un_f_lun_reset_enabled == TRUE) { 17816 SD_TRACE(SD_LOG_IO_CORE, un, 17817 "sd_sense_key_medium_or_hardware_" 17818 "error: issuing RESET_LUN\n"); 17819 reset_retval = 17820 scsi_reset(SD_ADDRESS(un), 17821 RESET_LUN); 17822 } 17823 if (reset_retval == 0) { 17824 SD_TRACE(SD_LOG_IO_CORE, un, 17825 "sd_sense_key_medium_or_hardware_" 17826 "error: issuing RESET_TARGET\n"); 17827 (void) scsi_reset(SD_ADDRESS(un), 17828 RESET_TARGET); 17829 } 17830 } 17831 } 17832 mutex_enter(SD_MUTEX(un)); 17833 } 17834 17835 /* 17836 * This really ought to be a fatal error, but we will retry anyway 17837 * as some drives report this as a spurious error. 17838 */ 17839 sd_retry_command(un, bp, SD_RETRIES_STANDARD, sd_print_sense_msg, 17840 &si, EIO, (clock_t)0, NULL); 17841 } 17842 17843 17844 17845 /* 17846 * Function: sd_sense_key_illegal_request 17847 * 17848 * Description: Recovery actions for a SCSI "Illegal Request" sense key. 
17849 * 17850 * Context: May be called from interrupt context 17851 */ 17852 17853 static void 17854 sd_sense_key_illegal_request(struct sd_lun *un, struct buf *bp, 17855 struct sd_xbuf *xp, struct scsi_pkt *pktp) 17856 { 17857 struct sd_sense_info si; 17858 17859 ASSERT(un != NULL); 17860 ASSERT(mutex_owned(SD_MUTEX(un))); 17861 ASSERT(bp != NULL); 17862 ASSERT(xp != NULL); 17863 ASSERT(pktp != NULL); 17864 17865 SD_UPDATE_ERRSTATS(un, sd_rq_illrq_err); 17866 17867 si.ssi_severity = SCSI_ERR_INFO; 17868 si.ssi_pfa_flag = FALSE; 17869 17870 /* Pointless to retry if the target thinks it's an illegal request */ 17871 sd_print_sense_msg(un, bp, &si, SD_NO_RETRY_ISSUED); 17872 sd_return_failed_command(un, bp, EIO); 17873 } 17874 17875 17876 17877 17878 /* 17879 * Function: sd_sense_key_unit_attention 17880 * 17881 * Description: Recovery actions for a SCSI "Unit Attention" sense key. 17882 * 17883 * Context: May be called from interrupt context 17884 */ 17885 17886 static void 17887 sd_sense_key_unit_attention(struct sd_lun *un, 17888 uint8_t *sense_datap, 17889 struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp) 17890 { 17891 /* 17892 * For UNIT ATTENTION we allow retries for one minute. Devices 17893 * like Sonoma can return UNIT ATTENTION close to a minute 17894 * under certain conditions. 17895 */ 17896 int retry_check_flag = SD_RETRIES_UA; 17897 boolean_t kstat_updated = B_FALSE; 17898 struct sd_sense_info si; 17899 uint8_t asc = scsi_sense_asc(sense_datap); 17900 uint8_t ascq = scsi_sense_ascq(sense_datap); 17901 17902 ASSERT(un != NULL); 17903 ASSERT(mutex_owned(SD_MUTEX(un))); 17904 ASSERT(bp != NULL); 17905 ASSERT(xp != NULL); 17906 ASSERT(pktp != NULL); 17907 17908 si.ssi_severity = SCSI_ERR_INFO; 17909 si.ssi_pfa_flag = FALSE; 17910 17911 17912 switch (asc) { 17913 case 0x5D: /* FAILURE PREDICTION THRESHOLD EXCEEDED */ 17914 if (sd_report_pfa != 0) { 17915 SD_UPDATE_ERRSTATS(un, sd_rq_pfa_err); 17916 si.ssi_pfa_flag = TRUE; 17917 retry_check_flag = SD_RETRIES_STANDARD; 17918 goto do_retry; 17919 } 17920 17921 break; 17922 17923 case 0x29: /* POWER ON, RESET, OR BUS DEVICE RESET OCCURRED */ 17924 if ((un->un_resvd_status & SD_RESERVE) == SD_RESERVE) { 17925 un->un_resvd_status |= 17926 (SD_LOST_RESERVE | SD_WANT_RESERVE); 17927 } 17928 #ifdef _LP64 17929 if (un->un_blockcount + 1 > SD_GROUP1_MAX_ADDRESS) { 17930 if (taskq_dispatch(sd_tq, sd_reenable_dsense_task, 17931 un, KM_NOSLEEP) == 0) { 17932 /* 17933 * If we can't dispatch the task we'll just 17934 * live without descriptor sense. We can 17935 * try again on the next "unit attention" 17936 */ 17937 SD_ERROR(SD_LOG_ERROR, un, 17938 "sd_sense_key_unit_attention: " 17939 "Could not dispatch " 17940 "sd_reenable_dsense_task\n"); 17941 } 17942 } 17943 #endif /* _LP64 */ 17944 /* FALLTHRU */ 17945 17946 case 0x28: /* NOT READY TO READY CHANGE, MEDIUM MAY HAVE CHANGED */ 17947 if (!un->un_f_has_removable_media) { 17948 break; 17949 } 17950 17951 /* 17952 * When we get a unit attention from a removable-media device, 17953 * it may be in a state that will take a long time to recover 17954 * (e.g., from a reset). Since we are executing in interrupt 17955 * context here, we cannot wait around for the device to come 17956 * back. So hand this command off to sd_media_change_task() 17957 * for deferred processing under taskq thread context. (Note 17958 * that the command still may be failed if a problem is 17959 * encountered at a later time.) 
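		 *
		 * The hand-off uses the usual taskq pattern (a sketch of
		 * the dispatch below; a zero return means the dispatch
		 * failed and the command must be failed here):
		 *
		 *	if (taskq_dispatch(sd_tq, sd_media_change_task,
		 *	    pktp, KM_NOSLEEP) == 0)
		 *		-- fail the command with EIO --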
17960 		 */
17961 		if (taskq_dispatch(sd_tq, sd_media_change_task, pktp,
17962 		    KM_NOSLEEP) == 0) {
17963 			/*
17964 			 * Cannot dispatch the request so fail the command.
17965 			 */
17966 			SD_UPDATE_ERRSTATS(un, sd_harderrs);
17967 			SD_UPDATE_ERRSTATS(un, sd_rq_nodev_err);
17968 			si.ssi_severity = SCSI_ERR_FATAL;
17969 			sd_print_sense_msg(un, bp, &si, SD_NO_RETRY_ISSUED);
17970 			sd_return_failed_command(un, bp, EIO);
17971 		}
17972 
17973 		/*
17974 		 * If we failed to dispatch sd_media_change_task(), the kstats
17975 		 * have already been updated above. If the dispatch succeeded,
17976 		 * the kstats should be updated later if an error is
17977 		 * encountered. So set the kstat_updated flag here.
17978 		 */
17979 		kstat_updated = B_TRUE;
17980 
17981 		/*
17982 		 * Either the command has been successfully dispatched to a
17983 		 * task Q for retrying, or the dispatch failed. In either case
17984 		 * do NOT retry again by calling sd_retry_command. This sets up
17985 		 * two retries of the same command and when one completes and
17986 		 * frees the resources the other will access freed memory,
17987 		 * a bad thing.
17988 		 */
17989 		return;
17990 
17991 	default:
17992 		break;
17993 	}
17994 
17995 	/*
17996 	 *	ASC  ASCQ
17997 	 *	 2A   09	Capacity data has changed
17998 	 *	 2A   01	Mode parameters changed
17999 	 *	 3F   0E	Reported luns data has changed
18000 	 * Arrays that support logical unit expansion should report
18001 	 * capacity changes (2Ah/09). Mode parameters changed and
18002 	 * reported luns data has changed are approximations.
18003 	 */
18004 	if (((asc == 0x2a) && (ascq == 0x09)) ||
18005 	    ((asc == 0x2a) && (ascq == 0x01)) ||
18006 	    ((asc == 0x3f) && (ascq == 0x0e))) {
18007 		if (taskq_dispatch(sd_tq, sd_target_change_task, un,
18008 		    KM_NOSLEEP) == 0) {
18009 			SD_ERROR(SD_LOG_ERROR, un,
18010 			    "sd_sense_key_unit_attention: "
18011 			    "Could not dispatch sd_target_change_task\n");
18012 		}
18013 	}
18014 
18015 	/*
18016 	 * Update kstat if we haven't done that.
18017 	 */
18018 	if (!kstat_updated) {
18019 		SD_UPDATE_ERRSTATS(un, sd_harderrs);
18020 		SD_UPDATE_ERRSTATS(un, sd_rq_nodev_err);
18021 	}
18022 
18023 do_retry:
18024 	sd_retry_command(un, bp, retry_check_flag, sd_print_sense_msg, &si,
18025 	    EIO, SD_UA_RETRY_DELAY, NULL);
18026 }
18027 
18028 
18029 
18030 /*
18031  * Function: sd_sense_key_fail_command
18032  *
18033  * Description: Used to fail a command when we don't like the sense key that
18034  *		was returned.
18035  *
18036  * Context: May be called from interrupt context
18037  */
18038 
18039 static void
18040 sd_sense_key_fail_command(struct sd_lun *un, struct buf *bp,
18041     struct sd_xbuf *xp, struct scsi_pkt *pktp)
18042 {
18043 	struct sd_sense_info si;
18044 
18045 	ASSERT(un != NULL);
18046 	ASSERT(mutex_owned(SD_MUTEX(un)));
18047 	ASSERT(bp != NULL);
18048 	ASSERT(xp != NULL);
18049 	ASSERT(pktp != NULL);
18050 
18051 	si.ssi_severity = SCSI_ERR_FATAL;
18052 	si.ssi_pfa_flag = FALSE;
18053 
18054 	sd_print_sense_msg(un, bp, &si, SD_NO_RETRY_ISSUED);
18055 	sd_return_failed_command(un, bp, EIO);
18056 }
18057 
18058 
18059 
18060 /*
18061  * Function: sd_sense_key_blank_check
18062  *
18063  * Description: Recovery actions for a SCSI "Blank Check" sense key.
18064  *		Has no monetary connotation.
18065 * 18066 * Context: May be called from interrupt context 18067 */ 18068 18069 static void 18070 sd_sense_key_blank_check(struct sd_lun *un, struct buf *bp, 18071 struct sd_xbuf *xp, struct scsi_pkt *pktp) 18072 { 18073 struct sd_sense_info si; 18074 18075 ASSERT(un != NULL); 18076 ASSERT(mutex_owned(SD_MUTEX(un))); 18077 ASSERT(bp != NULL); 18078 ASSERT(xp != NULL); 18079 ASSERT(pktp != NULL); 18080 18081 /* 18082 * Blank check is not fatal for removable devices, therefore 18083 * it does not require a console message. 18084 */ 18085 si.ssi_severity = (un->un_f_has_removable_media) ? SCSI_ERR_ALL : 18086 SCSI_ERR_FATAL; 18087 si.ssi_pfa_flag = FALSE; 18088 18089 sd_print_sense_msg(un, bp, &si, SD_NO_RETRY_ISSUED); 18090 sd_return_failed_command(un, bp, EIO); 18091 } 18092 18093 18094 18095 18096 /* 18097 * Function: sd_sense_key_aborted_command 18098 * 18099 * Description: Recovery actions for a SCSI "Aborted Command" sense key. 18100 * 18101 * Context: May be called from interrupt context 18102 */ 18103 18104 static void 18105 sd_sense_key_aborted_command(struct sd_lun *un, struct buf *bp, 18106 struct sd_xbuf *xp, struct scsi_pkt *pktp) 18107 { 18108 struct sd_sense_info si; 18109 18110 ASSERT(un != NULL); 18111 ASSERT(mutex_owned(SD_MUTEX(un))); 18112 ASSERT(bp != NULL); 18113 ASSERT(xp != NULL); 18114 ASSERT(pktp != NULL); 18115 18116 si.ssi_severity = SCSI_ERR_FATAL; 18117 si.ssi_pfa_flag = FALSE; 18118 18119 SD_UPDATE_ERRSTATS(un, sd_harderrs); 18120 18121 /* 18122 * This really ought to be a fatal error, but we will retry anyway 18123 * as some drives report this as a spurious error. 18124 */ 18125 sd_retry_command(un, bp, SD_RETRIES_STANDARD, sd_print_sense_msg, 18126 &si, EIO, drv_usectohz(100000), NULL); 18127 } 18128 18129 18130 18131 /* 18132 * Function: sd_sense_key_default 18133 * 18134 * Description: Default recovery action for several SCSI sense keys (basically 18135 * attempts a retry). 18136 * 18137 * Context: May be called from interrupt context 18138 */ 18139 18140 static void 18141 sd_sense_key_default(struct sd_lun *un, 18142 uint8_t *sense_datap, 18143 struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp) 18144 { 18145 struct sd_sense_info si; 18146 uint8_t sense_key = scsi_sense_key(sense_datap); 18147 18148 ASSERT(un != NULL); 18149 ASSERT(mutex_owned(SD_MUTEX(un))); 18150 ASSERT(bp != NULL); 18151 ASSERT(xp != NULL); 18152 ASSERT(pktp != NULL); 18153 18154 SD_UPDATE_ERRSTATS(un, sd_harderrs); 18155 18156 /* 18157 * Undecoded sense key. Attempt retries and hope that will fix 18158 * the problem. Otherwise, we're dead. 18159 */ 18160 if ((pktp->pkt_flags & FLAG_SILENT) == 0) { 18161 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 18162 "Unhandled Sense Key '%s'\n", sense_keys[sense_key]); 18163 } 18164 18165 si.ssi_severity = SCSI_ERR_FATAL; 18166 si.ssi_pfa_flag = FALSE; 18167 18168 sd_retry_command(un, bp, SD_RETRIES_STANDARD, sd_print_sense_msg, 18169 &si, EIO, (clock_t)0, NULL); 18170 } 18171 18172 18173 18174 /* 18175 * Function: sd_print_retry_msg 18176 * 18177 * Description: Print a message indicating the retry action being taken. 18178 * 18179 * Arguments: un - ptr to associated softstate 18180 * bp - ptr to buf(9S) for the command 18181 * arg - not used. 
18182 * flag - SD_IMMEDIATE_RETRY_ISSUED, SD_DELAYED_RETRY_ISSUED, 18183 * or SD_NO_RETRY_ISSUED 18184 * 18185 * Context: May be called from interrupt context 18186 */ 18187 /* ARGSUSED */ 18188 static void 18189 sd_print_retry_msg(struct sd_lun *un, struct buf *bp, void *arg, int flag) 18190 { 18191 struct sd_xbuf *xp; 18192 struct scsi_pkt *pktp; 18193 char *reasonp; 18194 char *msgp; 18195 18196 ASSERT(un != NULL); 18197 ASSERT(mutex_owned(SD_MUTEX(un))); 18198 ASSERT(bp != NULL); 18199 pktp = SD_GET_PKTP(bp); 18200 ASSERT(pktp != NULL); 18201 xp = SD_GET_XBUF(bp); 18202 ASSERT(xp != NULL); 18203 18204 ASSERT(!mutex_owned(&un->un_pm_mutex)); 18205 mutex_enter(&un->un_pm_mutex); 18206 if ((un->un_state == SD_STATE_SUSPENDED) || 18207 (SD_DEVICE_IS_IN_LOW_POWER(un)) || 18208 (pktp->pkt_flags & FLAG_SILENT)) { 18209 mutex_exit(&un->un_pm_mutex); 18210 goto update_pkt_reason; 18211 } 18212 mutex_exit(&un->un_pm_mutex); 18213 18214 /* 18215 * Suppress messages if they are all the same pkt_reason; with 18216 * TQ, many (up to 256) are returned with the same pkt_reason. 18217 * If we are in panic, then suppress the retry messages. 18218 */ 18219 switch (flag) { 18220 case SD_NO_RETRY_ISSUED: 18221 msgp = "giving up"; 18222 break; 18223 case SD_IMMEDIATE_RETRY_ISSUED: 18224 case SD_DELAYED_RETRY_ISSUED: 18225 if (ddi_in_panic() || (un->un_state == SD_STATE_OFFLINE) || 18226 ((pktp->pkt_reason == un->un_last_pkt_reason) && 18227 (sd_error_level != SCSI_ERR_ALL))) { 18228 return; 18229 } 18230 msgp = "retrying command"; 18231 break; 18232 default: 18233 goto update_pkt_reason; 18234 } 18235 18236 reasonp = (((pktp->pkt_statistics & STAT_PERR) != 0) ? "parity error" : 18237 scsi_rname(pktp->pkt_reason)); 18238 18239 if (SD_FM_LOG(un) == SD_FM_LOG_NSUP) { 18240 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 18241 "SCSI transport failed: reason '%s': %s\n", reasonp, msgp); 18242 } 18243 18244 update_pkt_reason: 18245 /* 18246 * Update un->un_last_pkt_reason with the value in pktp->pkt_reason. 18247 * This is to prevent multiple console messages for the same failure 18248 * condition. Note that un->un_last_pkt_reason is NOT restored if & 18249 * when the command is retried successfully because there still may be 18250 * more commands coming back with the same value of pktp->pkt_reason. 18251 */ 18252 if ((pktp->pkt_reason != CMD_CMPLT) || (xp->xb_retry_count == 0)) { 18253 un->un_last_pkt_reason = pktp->pkt_reason; 18254 } 18255 } 18256 18257 18258 /* 18259 * Function: sd_print_cmd_incomplete_msg 18260 * 18261 * Description: Message logging fn. for a SCSA "CMD_INCOMPLETE" pkt_reason. 18262 * 18263 * Arguments: un - ptr to associated softstate 18264 * bp - ptr to buf(9S) for the command 18265 * arg - passed to sd_print_retry_msg() 18266 * code - SD_IMMEDIATE_RETRY_ISSUED, SD_DELAYED_RETRY_ISSUED, 18267 * or SD_NO_RETRY_ISSUED 18268 * 18269 * Context: May be called from interrupt context 18270 */ 18271 18272 static void 18273 sd_print_cmd_incomplete_msg(struct sd_lun *un, struct buf *bp, void *arg, 18274 int code) 18275 { 18276 dev_info_t *dip; 18277 18278 ASSERT(un != NULL); 18279 ASSERT(mutex_owned(SD_MUTEX(un))); 18280 ASSERT(bp != NULL); 18281 18282 switch (code) { 18283 case SD_NO_RETRY_ISSUED: 18284 /* Command was failed. Someone turned off this target? 
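 *
 * (Aside: sd_print_retry_msg() above suppresses duplicate console
 * messages by remembering un_last_pkt_reason. A standalone sketch of
 * that dedup pattern, with hypothetical names, illustrative only:)
 */
#if 0
#include <stdio.h>

static int last_reason = -1;	/* stand-in for un->un_last_pkt_reason */

static void
log_retry(int reason, const char *name)
{
	if (reason == last_reason)
		return;		/* same reason as last time: stay quiet */
	printf("SCSI transport failed: reason '%s': retrying command\n",
	    name);
	last_reason = reason;
}

int
main(void)
{
	log_retry(3, "timeout");	/* printed */
	log_retry(3, "timeout");	/* suppressed as a duplicate */
	log_retry(5, "reset");		/* printed: reason changed */
	return (0);
}
#endif
/*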
*/ 18285 if (un->un_state != SD_STATE_OFFLINE) { 18286 /* 18287 * Suppress message if we are detaching and 18288 * device has been disconnected 18289 * Note that DEVI_IS_DEVICE_REMOVED is a consolidation 18290 * private interface and not part of the DDI 18291 */ 18292 dip = un->un_sd->sd_dev; 18293 if (!(DEVI_IS_DETACHING(dip) && 18294 DEVI_IS_DEVICE_REMOVED(dip))) { 18295 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 18296 "disk not responding to selection\n"); 18297 } 18298 New_state(un, SD_STATE_OFFLINE); 18299 } 18300 break; 18301 18302 case SD_DELAYED_RETRY_ISSUED: 18303 case SD_IMMEDIATE_RETRY_ISSUED: 18304 default: 18305 /* Command was successfully queued for retry */ 18306 sd_print_retry_msg(un, bp, arg, code); 18307 break; 18308 } 18309 } 18310 18311 18312 /* 18313 * Function: sd_pkt_reason_cmd_incomplete 18314 * 18315 * Description: Recovery actions for a SCSA "CMD_INCOMPLETE" pkt_reason. 18316 * 18317 * Context: May be called from interrupt context 18318 */ 18319 18320 static void 18321 sd_pkt_reason_cmd_incomplete(struct sd_lun *un, struct buf *bp, 18322 struct sd_xbuf *xp, struct scsi_pkt *pktp) 18323 { 18324 int flag = SD_RETRIES_STANDARD | SD_RETRIES_ISOLATE; 18325 18326 ASSERT(un != NULL); 18327 ASSERT(mutex_owned(SD_MUTEX(un))); 18328 ASSERT(bp != NULL); 18329 ASSERT(xp != NULL); 18330 ASSERT(pktp != NULL); 18331 18332 /* Do not do a reset if selection did not complete */ 18333 /* Note: Should this not just check the bit? */ 18334 if (pktp->pkt_state != STATE_GOT_BUS) { 18335 SD_UPDATE_ERRSTATS(un, sd_transerrs); 18336 sd_reset_target(un, pktp); 18337 } 18338 18339 /* 18340 * If the target was not successfully selected, then set 18341 * SD_RETRIES_FAILFAST to indicate that we lost communication 18342 * with the target, and further retries and/or commands are 18343 * likely to take a long time. 18344 */ 18345 if ((pktp->pkt_state & STATE_GOT_TARGET) == 0) { 18346 flag |= SD_RETRIES_FAILFAST; 18347 } 18348 18349 SD_UPDATE_RESERVATION_STATUS(un, pktp); 18350 18351 sd_retry_command(un, bp, flag, 18352 sd_print_cmd_incomplete_msg, NULL, EIO, SD_RESTART_TIMEOUT, NULL); 18353 } 18354 18355 18356 18357 /* 18358 * Function: sd_pkt_reason_cmd_tran_err 18359 * 18360 * Description: Recovery actions for a SCSA "CMD_TRAN_ERR" pkt_reason. 18361 * 18362 * Context: May be called from interrupt context 18363 */ 18364 18365 static void 18366 sd_pkt_reason_cmd_tran_err(struct sd_lun *un, struct buf *bp, 18367 struct sd_xbuf *xp, struct scsi_pkt *pktp) 18368 { 18369 ASSERT(un != NULL); 18370 ASSERT(mutex_owned(SD_MUTEX(un))); 18371 ASSERT(bp != NULL); 18372 ASSERT(xp != NULL); 18373 ASSERT(pktp != NULL); 18374 18375 /* 18376 * Do not reset if we got a parity error, or if 18377 * selection did not complete. 18378 */ 18379 SD_UPDATE_ERRSTATS(un, sd_harderrs); 18380 /* Note: Should this not just check the bit for pkt_state? */ 18381 if (((pktp->pkt_statistics & STAT_PERR) == 0) && 18382 (pktp->pkt_state != STATE_GOT_BUS)) { 18383 SD_UPDATE_ERRSTATS(un, sd_transerrs); 18384 sd_reset_target(un, pktp); 18385 } 18386 18387 SD_UPDATE_RESERVATION_STATUS(un, pktp); 18388 18389 sd_retry_command(un, bp, (SD_RETRIES_STANDARD | SD_RETRIES_ISOLATE), 18390 sd_print_retry_msg, NULL, EIO, SD_RESTART_TIMEOUT, NULL); 18391 } 18392 18393 18394 18395 /* 18396 * Function: sd_pkt_reason_cmd_reset 18397 * 18398 * Description: Recovery actions for a SCSA "CMD_RESET" pkt_reason. 
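 *
 * (Aside: the pkt_reason handlers in this section reduce to a small
 * policy table. The sketch below is illustrative only, with made-up
 * flag values and the reset-gating conditions elided:)
 */
#if 0
#include <stdio.h>

#define	RETRY_STANDARD	0x01	/* normal retry-count checking */
#define	RETRY_VICTIM	0x02	/* pkt was collateral damage */
#define	RETRY_ISOLATE	0x04	/* candidate for fault isolation */
#define	RETRY_FAILFAST	0x08	/* lost contact with the target */

static const struct {
	const char	*reason;
	int		flags;
	int		resets_target;
} policies[] = {
	{ "CMD_INCOMPLETE",   RETRY_STANDARD | RETRY_ISOLATE, 1 },
	{ "CMD_TRAN_ERR",     RETRY_STANDARD | RETRY_ISOLATE, 1 },
	{ "CMD_RESET",        RETRY_VICTIM | RETRY_ISOLATE,   1 },
	{ "CMD_ABORTED",      RETRY_VICTIM | RETRY_ISOLATE,   1 },
	{ "CMD_TIMEOUT",      RETRY_STANDARD | RETRY_ISOLATE |
			      RETRY_FAILFAST,                 1 },
	{ "CMD_UNX_BUS_FREE", RETRY_STANDARD | RETRY_ISOLATE, 0 },
};

int
main(void)
{
	size_t i;

	for (i = 0; i < sizeof (policies) / sizeof (policies[0]); i++)
		printf("%-16s flags=0x%x reset=%d\n", policies[i].reason,
		    policies[i].flags, policies[i].resets_target);
	return (0);
}
#endif
/*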
18399 * 18400 * Context: May be called from interrupt context 18401 */ 18402 18403 static void 18404 sd_pkt_reason_cmd_reset(struct sd_lun *un, struct buf *bp, 18405 struct sd_xbuf *xp, struct scsi_pkt *pktp) 18406 { 18407 ASSERT(un != NULL); 18408 ASSERT(mutex_owned(SD_MUTEX(un))); 18409 ASSERT(bp != NULL); 18410 ASSERT(xp != NULL); 18411 ASSERT(pktp != NULL); 18412 18413 /* The target may still be running the command, so try to reset. */ 18414 SD_UPDATE_ERRSTATS(un, sd_transerrs); 18415 sd_reset_target(un, pktp); 18416 18417 SD_UPDATE_RESERVATION_STATUS(un, pktp); 18418 18419 /* 18420 * If pkt_reason is CMD_RESET chances are that this pkt got 18421 * reset because another target on this bus caused it. The target 18422 * that caused it should get CMD_TIMEOUT with pkt_statistics 18423 * of STAT_TIMEOUT/STAT_DEV_RESET. 18424 */ 18425 18426 sd_retry_command(un, bp, (SD_RETRIES_VICTIM | SD_RETRIES_ISOLATE), 18427 sd_print_retry_msg, NULL, EIO, SD_RESTART_TIMEOUT, NULL); 18428 } 18429 18430 18431 18432 18433 /* 18434 * Function: sd_pkt_reason_cmd_aborted 18435 * 18436 * Description: Recovery actions for a SCSA "CMD_ABORTED" pkt_reason. 18437 * 18438 * Context: May be called from interrupt context 18439 */ 18440 18441 static void 18442 sd_pkt_reason_cmd_aborted(struct sd_lun *un, struct buf *bp, 18443 struct sd_xbuf *xp, struct scsi_pkt *pktp) 18444 { 18445 ASSERT(un != NULL); 18446 ASSERT(mutex_owned(SD_MUTEX(un))); 18447 ASSERT(bp != NULL); 18448 ASSERT(xp != NULL); 18449 ASSERT(pktp != NULL); 18450 18451 /* The target may still be running the command, so try to reset. */ 18452 SD_UPDATE_ERRSTATS(un, sd_transerrs); 18453 sd_reset_target(un, pktp); 18454 18455 SD_UPDATE_RESERVATION_STATUS(un, pktp); 18456 18457 /* 18458 * If pkt_reason is CMD_ABORTED chances are that this pkt got 18459 * aborted because another target on this bus caused it. The target 18460 * that caused it should get CMD_TIMEOUT with pkt_statistics 18461 * of STAT_TIMEOUT/STAT_DEV_RESET. 18462 */ 18463 18464 sd_retry_command(un, bp, (SD_RETRIES_VICTIM | SD_RETRIES_ISOLATE), 18465 sd_print_retry_msg, NULL, EIO, SD_RESTART_TIMEOUT, NULL); 18466 } 18467 18468 18469 18470 /* 18471 * Function: sd_pkt_reason_cmd_timeout 18472 * 18473 * Description: Recovery actions for a SCSA "CMD_TIMEOUT" pkt_reason. 18474 * 18475 * Context: May be called from interrupt context 18476 */ 18477 18478 static void 18479 sd_pkt_reason_cmd_timeout(struct sd_lun *un, struct buf *bp, 18480 struct sd_xbuf *xp, struct scsi_pkt *pktp) 18481 { 18482 ASSERT(un != NULL); 18483 ASSERT(mutex_owned(SD_MUTEX(un))); 18484 ASSERT(bp != NULL); 18485 ASSERT(xp != NULL); 18486 ASSERT(pktp != NULL); 18487 18488 18489 SD_UPDATE_ERRSTATS(un, sd_transerrs); 18490 sd_reset_target(un, pktp); 18491 18492 SD_UPDATE_RESERVATION_STATUS(un, pktp); 18493 18494 /* 18495 * A command timeout indicates that we could not establish 18496 * communication with the target, so set SD_RETRIES_FAILFAST 18497 * as further retries/commands are likely to take a long time. 18498 */ 18499 sd_retry_command(un, bp, 18500 (SD_RETRIES_STANDARD | SD_RETRIES_ISOLATE | SD_RETRIES_FAILFAST), 18501 sd_print_retry_msg, NULL, EIO, SD_RESTART_TIMEOUT, NULL); 18502 } 18503 18504 18505 18506 /* 18507 * Function: sd_pkt_reason_cmd_unx_bus_free 18508 * 18509 * Description: Recovery actions for a SCSA "CMD_UNX_BUS_FREE" pkt_reason. 
18510 * 18511 * Context: May be called from interrupt context 18512 */ 18513 18514 static void 18515 sd_pkt_reason_cmd_unx_bus_free(struct sd_lun *un, struct buf *bp, 18516 struct sd_xbuf *xp, struct scsi_pkt *pktp) 18517 { 18518 void (*funcp)(struct sd_lun *un, struct buf *bp, void *arg, int code); 18519 18520 ASSERT(un != NULL); 18521 ASSERT(mutex_owned(SD_MUTEX(un))); 18522 ASSERT(bp != NULL); 18523 ASSERT(xp != NULL); 18524 ASSERT(pktp != NULL); 18525 18526 SD_UPDATE_ERRSTATS(un, sd_harderrs); 18527 SD_UPDATE_RESERVATION_STATUS(un, pktp); 18528 18529 funcp = ((pktp->pkt_statistics & STAT_PERR) == 0) ? 18530 sd_print_retry_msg : NULL; 18531 18532 sd_retry_command(un, bp, (SD_RETRIES_STANDARD | SD_RETRIES_ISOLATE), 18533 funcp, NULL, EIO, SD_RESTART_TIMEOUT, NULL); 18534 } 18535 18536 18537 /* 18538 * Function: sd_pkt_reason_cmd_tag_reject 18539 * 18540 * Description: Recovery actions for a SCSA "CMD_TAG_REJECT" pkt_reason. 18541 * 18542 * Context: May be called from interrupt context 18543 */ 18544 18545 static void 18546 sd_pkt_reason_cmd_tag_reject(struct sd_lun *un, struct buf *bp, 18547 struct sd_xbuf *xp, struct scsi_pkt *pktp) 18548 { 18549 ASSERT(un != NULL); 18550 ASSERT(mutex_owned(SD_MUTEX(un))); 18551 ASSERT(bp != NULL); 18552 ASSERT(xp != NULL); 18553 ASSERT(pktp != NULL); 18554 18555 SD_UPDATE_ERRSTATS(un, sd_harderrs); 18556 pktp->pkt_flags = 0; 18557 un->un_tagflags = 0; 18558 if (un->un_f_opt_queueing == TRUE) { 18559 un->un_throttle = min(un->un_throttle, 3); 18560 } else { 18561 un->un_throttle = 1; 18562 } 18563 mutex_exit(SD_MUTEX(un)); 18564 (void) scsi_ifsetcap(SD_ADDRESS(un), "tagged-qing", 0, 1); 18565 mutex_enter(SD_MUTEX(un)); 18566 18567 SD_UPDATE_RESERVATION_STATUS(un, pktp); 18568 18569 /* Legacy behavior not to check retry counts here. */ 18570 sd_retry_command(un, bp, (SD_RETRIES_NOCHECK | SD_RETRIES_ISOLATE), 18571 sd_print_retry_msg, NULL, EIO, SD_RESTART_TIMEOUT, NULL); 18572 } 18573 18574 18575 /* 18576 * Function: sd_pkt_reason_default 18577 * 18578 * Description: Default recovery actions for SCSA pkt_reason values that 18579 * do not have more explicit recovery actions. 18580 * 18581 * Context: May be called from interrupt context 18582 */ 18583 18584 static void 18585 sd_pkt_reason_default(struct sd_lun *un, struct buf *bp, 18586 struct sd_xbuf *xp, struct scsi_pkt *pktp) 18587 { 18588 ASSERT(un != NULL); 18589 ASSERT(mutex_owned(SD_MUTEX(un))); 18590 ASSERT(bp != NULL); 18591 ASSERT(xp != NULL); 18592 ASSERT(pktp != NULL); 18593 18594 SD_UPDATE_ERRSTATS(un, sd_transerrs); 18595 sd_reset_target(un, pktp); 18596 18597 SD_UPDATE_RESERVATION_STATUS(un, pktp); 18598 18599 sd_retry_command(un, bp, (SD_RETRIES_STANDARD | SD_RETRIES_ISOLATE), 18600 sd_print_retry_msg, NULL, EIO, SD_RESTART_TIMEOUT, NULL); 18601 } 18602 18603 18604 18605 /* 18606 * Function: sd_pkt_status_check_condition 18607 * 18608 * Description: Recovery actions for a "STATUS_CHECK" SCSI command status. 
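 *
 * (Aside: the sd_pkt_status_* handlers that follow are selected by the
 * SCSI status byte. A sketch of that mapping, using the standard
 * status values from SAM/SPC, illustrative only:)
 */
#if 0
#include <stdio.h>
#include <stdint.h>

static const char *
status_handler(uint8_t status)
{
	switch (status) {
	case 0x02:	/* CHECK CONDITION */
		return ("sd_pkt_status_check_condition");
	case 0x08:	/* BUSY */
		return ("sd_pkt_status_busy");
	case 0x18:	/* RESERVATION CONFLICT */
		return ("sd_pkt_status_reservation_conflict");
	case 0x28:	/* TASK SET FULL (queue full) */
		return ("sd_pkt_status_qfull");
	default:
		return ("no dedicated handler");
	}
}

int
main(void)
{
	printf("status 0x18 -> %s\n", status_handler(0x18));
	printf("status 0x28 -> %s\n", status_handler(0x28));
	return (0);
}
#endif
/*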
18609 *
18610 * Context: May be called from interrupt context
18611 */
18612
18613 static void
18614 sd_pkt_status_check_condition(struct sd_lun *un, struct buf *bp,
18615 struct sd_xbuf *xp, struct scsi_pkt *pktp)
18616 {
18617 ASSERT(un != NULL);
18618 ASSERT(mutex_owned(SD_MUTEX(un)));
18619 ASSERT(bp != NULL);
18620 ASSERT(xp != NULL);
18621 ASSERT(pktp != NULL);
18622
18623 SD_TRACE(SD_LOG_IO, un, "sd_pkt_status_check_condition: "
18624 "entry: buf:0x%p xp:0x%p\n", bp, xp);
18625
18626 /*
18627 * If ARQ is NOT enabled, then issue a REQUEST SENSE command (the
18628 * command will be retried after the request sense). Otherwise, retry
18629 * the command. Note: we are issuing the request sense even though the
18630 * retry limit may have been reached for the failed command.
18631 */
18632 if (un->un_f_arq_enabled == FALSE) {
18633 SD_INFO(SD_LOG_IO_CORE, un, "sd_pkt_status_check_condition: "
18634 "no ARQ, sending request sense command\n");
18635 sd_send_request_sense_command(un, bp, pktp);
18636 } else {
18637 SD_INFO(SD_LOG_IO_CORE, un, "sd_pkt_status_check_condition: "
18638 "ARQ, retrying request sense command\n");
18639 #if defined(__i386) || defined(__amd64)
18640 /*
18641 * The SD_RETRY_DELAY value needs to be adjusted here
18642 * whenever SD_RETRY_DELAY changes in sddef.h.
18643 */
18644 sd_retry_command(un, bp, SD_RETRIES_STANDARD, NULL, NULL, EIO,
18645 un->un_f_is_fibre ? drv_usectohz(100000) : (clock_t)0,
18646 NULL);
18647 #else
18648 sd_retry_command(un, bp, SD_RETRIES_STANDARD, NULL, NULL,
18649 EIO, SD_RETRY_DELAY, NULL);
18650 #endif
18651 }
18652
18653 SD_TRACE(SD_LOG_IO_CORE, un, "sd_pkt_status_check_condition: exit\n");
18654 }
18655
18656
18657 /*
18658 * Function: sd_pkt_status_busy
18659 *
18660 * Description: Recovery actions for a "STATUS_BUSY" SCSI command status.
18661 *
18662 * Context: May be called from interrupt context
18663 */
18664
18665 static void
18666 sd_pkt_status_busy(struct sd_lun *un, struct buf *bp, struct sd_xbuf *xp,
18667 struct scsi_pkt *pktp)
18668 {
18669 ASSERT(un != NULL);
18670 ASSERT(mutex_owned(SD_MUTEX(un)));
18671 ASSERT(bp != NULL);
18672 ASSERT(xp != NULL);
18673 ASSERT(pktp != NULL);
18674
18675 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
18676 "sd_pkt_status_busy: entry\n");
18677
18678 /* If retries are exhausted, just fail the command. */
18679 if (xp->xb_retry_count >= un->un_busy_retry_count) {
18680 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
18681 "device busy too long\n");
18682 sd_return_failed_command(un, bp, EIO);
18683 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
18684 "sd_pkt_status_busy: exit\n");
18685 return;
18686 }
18687 xp->xb_retry_count++;
18688
18689 /*
18690 * Try to reset the target. However, we do not want to perform
18691 * more than one reset if the device continues to fail. The reset
18692 * will be performed when the retry count reaches the reset
18693 * threshold. This threshold should be set such that at least
18694 * one retry is issued before the reset is performed.
18695 */
18696 if (xp->xb_retry_count ==
18697 ((un->un_reset_retry_count < 2) ? 2 : un->un_reset_retry_count)) {
18698 int rval = 0;
18699 mutex_exit(SD_MUTEX(un));
18700 if (un->un_f_allow_bus_device_reset == TRUE) {
18701 /*
18702 * First try to reset the LUN; if we cannot, then
18703 * try to reset the target.
18704 */ 18705 if (un->un_f_lun_reset_enabled == TRUE) { 18706 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 18707 "sd_pkt_status_busy: RESET_LUN\n"); 18708 rval = scsi_reset(SD_ADDRESS(un), RESET_LUN); 18709 } 18710 if (rval == 0) { 18711 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 18712 "sd_pkt_status_busy: RESET_TARGET\n"); 18713 rval = scsi_reset(SD_ADDRESS(un), RESET_TARGET); 18714 } 18715 } 18716 if (rval == 0) { 18717 /* 18718 * If the RESET_LUN and/or RESET_TARGET failed, 18719 * try RESET_ALL 18720 */ 18721 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 18722 "sd_pkt_status_busy: RESET_ALL\n"); 18723 rval = scsi_reset(SD_ADDRESS(un), RESET_ALL); 18724 } 18725 mutex_enter(SD_MUTEX(un)); 18726 if (rval == 0) { 18727 /* 18728 * The RESET_LUN, RESET_TARGET, and/or RESET_ALL failed. 18729 * At this point we give up & fail the command. 18730 */ 18731 sd_return_failed_command(un, bp, EIO); 18732 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 18733 "sd_pkt_status_busy: exit (failed cmd)\n"); 18734 return; 18735 } 18736 } 18737 18738 /* 18739 * Retry the command. Be sure to specify SD_RETRIES_NOCHECK as 18740 * we have already checked the retry counts above. 18741 */ 18742 sd_retry_command(un, bp, SD_RETRIES_NOCHECK, NULL, NULL, 18743 EIO, un->un_busy_timeout, NULL); 18744 18745 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 18746 "sd_pkt_status_busy: exit\n"); 18747 } 18748 18749 18750 /* 18751 * Function: sd_pkt_status_reservation_conflict 18752 * 18753 * Description: Recovery actions for a "STATUS_RESERVATION_CONFLICT" SCSI 18754 * command status. 18755 * 18756 * Context: May be called from interrupt context 18757 */ 18758 18759 static void 18760 sd_pkt_status_reservation_conflict(struct sd_lun *un, struct buf *bp, 18761 struct sd_xbuf *xp, struct scsi_pkt *pktp) 18762 { 18763 ASSERT(un != NULL); 18764 ASSERT(mutex_owned(SD_MUTEX(un))); 18765 ASSERT(bp != NULL); 18766 ASSERT(xp != NULL); 18767 ASSERT(pktp != NULL); 18768 18769 /* 18770 * If the command was PERSISTENT_RESERVATION_[IN|OUT] then reservation 18771 * conflict could be due to various reasons like incorrect keys, not 18772 * registered or not reserved etc. So, we return EACCES to the caller. 18773 */ 18774 if (un->un_reservation_type == SD_SCSI3_RESERVATION) { 18775 int cmd = SD_GET_PKT_OPCODE(pktp); 18776 if ((cmd == SCMD_PERSISTENT_RESERVE_IN) || 18777 (cmd == SCMD_PERSISTENT_RESERVE_OUT)) { 18778 sd_return_failed_command(un, bp, EACCES); 18779 return; 18780 } 18781 } 18782 18783 un->un_resvd_status |= SD_RESERVATION_CONFLICT; 18784 18785 if ((un->un_resvd_status & SD_FAILFAST) != 0) { 18786 if (sd_failfast_enable != 0) { 18787 /* By definition, we must panic here.... */ 18788 sd_panic_for_res_conflict(un); 18789 /*NOTREACHED*/ 18790 } 18791 SD_ERROR(SD_LOG_IO, un, 18792 "sd_handle_resv_conflict: Disk Reserved\n"); 18793 sd_return_failed_command(un, bp, EACCES); 18794 return; 18795 } 18796 18797 /* 18798 * 1147670: retry only if sd_retry_on_reservation_conflict 18799 * property is set (default is 1). Retries will not succeed 18800 * on a disk reserved by another initiator. HA systems 18801 * may reset this via sd.conf to avoid these retries. 18802 * 18803 * Note: The legacy return code for this failure is EIO, however EACCES 18804 * seems more appropriate for a reservation conflict. 
18805 */ 18806 if (sd_retry_on_reservation_conflict == 0) { 18807 SD_ERROR(SD_LOG_IO, un, 18808 "sd_handle_resv_conflict: Device Reserved\n"); 18809 sd_return_failed_command(un, bp, EIO); 18810 return; 18811 } 18812 18813 /* 18814 * Retry the command if we can. 18815 * 18816 * Note: The legacy return code for this failure is EIO, however EACCES 18817 * seems more appropriate for a reservation conflict. 18818 */ 18819 sd_retry_command(un, bp, SD_RETRIES_STANDARD, NULL, NULL, EIO, 18820 (clock_t)2, NULL); 18821 } 18822 18823 18824 18825 /* 18826 * Function: sd_pkt_status_qfull 18827 * 18828 * Description: Handle a QUEUE FULL condition from the target. This can 18829 * occur if the HBA does not handle the queue full condition. 18830 * (Basically this means third-party HBAs as Sun HBAs will 18831 * handle the queue full condition.) Note that if there are 18832 * some commands already in the transport, then the queue full 18833 * has occurred because the queue for this nexus is actually 18834 * full. If there are no commands in the transport, then the 18835 * queue full is resulting from some other initiator or lun 18836 * consuming all the resources at the target. 18837 * 18838 * Context: May be called from interrupt context 18839 */ 18840 18841 static void 18842 sd_pkt_status_qfull(struct sd_lun *un, struct buf *bp, 18843 struct sd_xbuf *xp, struct scsi_pkt *pktp) 18844 { 18845 ASSERT(un != NULL); 18846 ASSERT(mutex_owned(SD_MUTEX(un))); 18847 ASSERT(bp != NULL); 18848 ASSERT(xp != NULL); 18849 ASSERT(pktp != NULL); 18850 18851 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 18852 "sd_pkt_status_qfull: entry\n"); 18853 18854 /* 18855 * Just lower the QFULL throttle and retry the command. Note that 18856 * we do not limit the number of retries here. 18857 */ 18858 sd_reduce_throttle(un, SD_THROTTLE_QFULL); 18859 sd_retry_command(un, bp, SD_RETRIES_NOCHECK, NULL, NULL, 0, 18860 SD_RESTART_TIMEOUT, NULL); 18861 18862 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 18863 "sd_pkt_status_qfull: exit\n"); 18864 } 18865 18866 18867 /* 18868 * Function: sd_reset_target 18869 * 18870 * Description: Issue a scsi_reset(9F), with either RESET_LUN, 18871 * RESET_TARGET, or RESET_ALL. 18872 * 18873 * Context: May be called under interrupt context. 18874 */ 18875 18876 static void 18877 sd_reset_target(struct sd_lun *un, struct scsi_pkt *pktp) 18878 { 18879 int rval = 0; 18880 18881 ASSERT(un != NULL); 18882 ASSERT(mutex_owned(SD_MUTEX(un))); 18883 ASSERT(pktp != NULL); 18884 18885 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sd_reset_target: entry\n"); 18886 18887 /* 18888 * No need to reset if the transport layer has already done so. 
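 *
 * (Aside: the escalation below tries the narrowest reset first and
 * widens only on failure: LUN, then target, then the whole bus. A
 * userland sketch with a stubbed scsi_reset(9F), gating flags such as
 * un_f_lun_reset_enabled elided, illustrative only:)
 */
#if 0
#include <stdio.h>

/* Stub for scsi_reset(9F): returns nonzero on success. */
static int
fake_reset(const char *level, int success)
{
	printf("attempting %s reset\n", level);
	return (success);
}

static void
escalate_reset(int lun_ok, int target_ok)
{
	int rval;

	if ((rval = fake_reset("LUN", lun_ok)) == 0)
		rval = fake_reset("TARGET", target_ok);
	if (rval == 0)
		(void) fake_reset("ALL (bus)", 1);
}

int
main(void)
{
	escalate_reset(0, 1);	/* LUN reset fails, target reset works */
	return (0);
}
#endif
/*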
18889 */ 18890 if ((pktp->pkt_statistics & 18891 (STAT_BUS_RESET | STAT_DEV_RESET | STAT_ABORTED)) != 0) { 18892 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 18893 "sd_reset_target: no reset\n"); 18894 return; 18895 } 18896 18897 mutex_exit(SD_MUTEX(un)); 18898 18899 if (un->un_f_allow_bus_device_reset == TRUE) { 18900 if (un->un_f_lun_reset_enabled == TRUE) { 18901 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 18902 "sd_reset_target: RESET_LUN\n"); 18903 rval = scsi_reset(SD_ADDRESS(un), RESET_LUN); 18904 } 18905 if (rval == 0) { 18906 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 18907 "sd_reset_target: RESET_TARGET\n"); 18908 rval = scsi_reset(SD_ADDRESS(un), RESET_TARGET); 18909 } 18910 } 18911 18912 if (rval == 0) { 18913 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 18914 "sd_reset_target: RESET_ALL\n"); 18915 (void) scsi_reset(SD_ADDRESS(un), RESET_ALL); 18916 } 18917 18918 mutex_enter(SD_MUTEX(un)); 18919 18920 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sd_reset_target: exit\n"); 18921 } 18922 18923 /* 18924 * Function: sd_target_change_task 18925 * 18926 * Description: Handle dynamic target change 18927 * 18928 * Context: Executes in a taskq() thread context 18929 */ 18930 static void 18931 sd_target_change_task(void *arg) 18932 { 18933 struct sd_lun *un = arg; 18934 uint64_t capacity; 18935 diskaddr_t label_cap; 18936 uint_t lbasize; 18937 sd_ssc_t *ssc; 18938 18939 ASSERT(un != NULL); 18940 ASSERT(!mutex_owned(SD_MUTEX(un))); 18941 18942 if ((un->un_f_blockcount_is_valid == FALSE) || 18943 (un->un_f_tgt_blocksize_is_valid == FALSE)) { 18944 return; 18945 } 18946 18947 ssc = sd_ssc_init(un); 18948 18949 if (sd_send_scsi_READ_CAPACITY(ssc, &capacity, 18950 &lbasize, SD_PATH_DIRECT) != 0) { 18951 SD_ERROR(SD_LOG_ERROR, un, 18952 "sd_target_change_task: fail to read capacity\n"); 18953 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 18954 goto task_exit; 18955 } 18956 18957 mutex_enter(SD_MUTEX(un)); 18958 if (capacity <= un->un_blockcount) { 18959 mutex_exit(SD_MUTEX(un)); 18960 goto task_exit; 18961 } 18962 18963 sd_update_block_info(un, lbasize, capacity); 18964 mutex_exit(SD_MUTEX(un)); 18965 18966 /* 18967 * If lun is EFI labeled and lun capacity is greater than the 18968 * capacity contained in the label, log a sys event. 
18969 */ 18970 if (cmlb_efi_label_capacity(un->un_cmlbhandle, &label_cap, 18971 (void*)SD_PATH_DIRECT) == 0) { 18972 mutex_enter(SD_MUTEX(un)); 18973 if (un->un_f_blockcount_is_valid && 18974 un->un_blockcount > label_cap) { 18975 mutex_exit(SD_MUTEX(un)); 18976 sd_log_lun_expansion_event(un, KM_SLEEP); 18977 } else { 18978 mutex_exit(SD_MUTEX(un)); 18979 } 18980 } 18981 18982 task_exit: 18983 sd_ssc_fini(ssc); 18984 } 18985 18986 /* 18987 * Function: sd_log_lun_expansion_event 18988 * 18989 * Description: Log lun expansion sys event 18990 * 18991 * Context: Never called from interrupt context 18992 */ 18993 static void 18994 sd_log_lun_expansion_event(struct sd_lun *un, int km_flag) 18995 { 18996 int err; 18997 char *path; 18998 nvlist_t *dle_attr_list; 18999 19000 /* Allocate and build sysevent attribute list */ 19001 err = nvlist_alloc(&dle_attr_list, NV_UNIQUE_NAME_TYPE, km_flag); 19002 if (err != 0) { 19003 SD_ERROR(SD_LOG_ERROR, un, 19004 "sd_log_lun_expansion_event: fail to allocate space\n"); 19005 return; 19006 } 19007 19008 path = kmem_alloc(MAXPATHLEN, km_flag); 19009 if (path == NULL) { 19010 nvlist_free(dle_attr_list); 19011 SD_ERROR(SD_LOG_ERROR, un, 19012 "sd_log_lun_expansion_event: fail to allocate space\n"); 19013 return; 19014 } 19015 /* 19016 * Add path attribute to identify the lun. 19017 * We are using minor node 'a' as the sysevent attribute. 19018 */ 19019 (void) snprintf(path, MAXPATHLEN, "/devices"); 19020 (void) ddi_pathname(SD_DEVINFO(un), path + strlen(path)); 19021 (void) snprintf(path + strlen(path), MAXPATHLEN - strlen(path), 19022 ":a"); 19023 19024 err = nvlist_add_string(dle_attr_list, DEV_PHYS_PATH, path); 19025 if (err != 0) { 19026 nvlist_free(dle_attr_list); 19027 kmem_free(path, MAXPATHLEN); 19028 SD_ERROR(SD_LOG_ERROR, un, 19029 "sd_log_lun_expansion_event: fail to add attribute\n"); 19030 return; 19031 } 19032 19033 /* Log dynamic lun expansion sysevent */ 19034 err = ddi_log_sysevent(SD_DEVINFO(un), SUNW_VENDOR, EC_DEV_STATUS, 19035 ESC_DEV_DLE, dle_attr_list, NULL, km_flag); 19036 if (err != DDI_SUCCESS) { 19037 SD_ERROR(SD_LOG_ERROR, un, 19038 "sd_log_lun_expansion_event: fail to log sysevent\n"); 19039 } 19040 19041 nvlist_free(dle_attr_list); 19042 kmem_free(path, MAXPATHLEN); 19043 } 19044 19045 /* 19046 * Function: sd_media_change_task 19047 * 19048 * Description: Recovery action for CDROM to become available. 19049 * 19050 * Context: Executes in a taskq() thread context 19051 */ 19052 19053 static void 19054 sd_media_change_task(void *arg) 19055 { 19056 struct scsi_pkt *pktp = arg; 19057 struct sd_lun *un; 19058 struct buf *bp; 19059 struct sd_xbuf *xp; 19060 int err = 0; 19061 int retry_count = 0; 19062 int retry_limit = SD_UNIT_ATTENTION_RETRY/10; 19063 struct sd_sense_info si; 19064 19065 ASSERT(pktp != NULL); 19066 bp = (struct buf *)pktp->pkt_private; 19067 ASSERT(bp != NULL); 19068 xp = SD_GET_XBUF(bp); 19069 ASSERT(xp != NULL); 19070 un = SD_GET_UN(bp); 19071 ASSERT(un != NULL); 19072 ASSERT(!mutex_owned(SD_MUTEX(un))); 19073 ASSERT(un->un_f_monitor_media_state); 19074 19075 si.ssi_severity = SCSI_ERR_INFO; 19076 si.ssi_pfa_flag = FALSE; 19077 19078 /* 19079 * When a reset is issued on a CDROM, it takes a long time to 19080 * recover. First few attempts to read capacity and other things 19081 * related to handling unit attention fail (with a ASC 0x4 and 19082 * ASCQ 0x1). In that case we want to do enough retries and we want 19083 * to limit the retries in other cases of genuine failures like 19084 * no media in drive. 
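 *
 * (Aside: the loop below starts with a small retry budget and raises
 * it only when the drive reports EAGAIN, i.e. it is genuinely coming
 * ready. A standalone sketch of that adaptive-limit pattern, with a
 * stubbed sd_handle_mchange() and made-up limits, illustrative only:)
 */
#if 0
#include <stdio.h>
#include <unistd.h>
#include <errno.h>

/* Stub: pretend the drive needs a few tries before it is ready. */
static int
fake_mchange(void)
{
	static int calls;

	return (++calls < 4 ? EAGAIN : 0);
}

int
main(void)
{
	int retry_count = 0;
	int retry_limit = 3;		/* small budget for hard failures */
	int err = -1;

	while (retry_count++ < retry_limit) {
		if ((err = fake_mchange()) == 0)
			break;
		if (err == EAGAIN)
			retry_limit = 30;	/* device is coming ready */
		(void) usleep(500000);		/* 0.5 sec between tries */
	}
	printf("done after %d tries, err=%d\n", retry_count, err);
	return (0);
}
#endif
/*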
19085 */ 19086 while (retry_count++ < retry_limit) { 19087 if ((err = sd_handle_mchange(un)) == 0) { 19088 break; 19089 } 19090 if (err == EAGAIN) { 19091 retry_limit = SD_UNIT_ATTENTION_RETRY; 19092 } 19093 /* Sleep for 0.5 sec. & try again */ 19094 delay(drv_usectohz(500000)); 19095 } 19096 19097 /* 19098 * Dispatch (retry or fail) the original command here, 19099 * along with appropriate console messages.... 19100 * 19101 * Must grab the mutex before calling sd_retry_command, 19102 * sd_print_sense_msg and sd_return_failed_command. 19103 */ 19104 mutex_enter(SD_MUTEX(un)); 19105 if (err != SD_CMD_SUCCESS) { 19106 SD_UPDATE_ERRSTATS(un, sd_harderrs); 19107 SD_UPDATE_ERRSTATS(un, sd_rq_nodev_err); 19108 si.ssi_severity = SCSI_ERR_FATAL; 19109 sd_print_sense_msg(un, bp, &si, SD_NO_RETRY_ISSUED); 19110 sd_return_failed_command(un, bp, EIO); 19111 } else { 19112 sd_retry_command(un, bp, SD_RETRIES_NOCHECK, sd_print_sense_msg, 19113 &si, EIO, (clock_t)0, NULL); 19114 } 19115 mutex_exit(SD_MUTEX(un)); 19116 } 19117 19118 19119 19120 /* 19121 * Function: sd_handle_mchange 19122 * 19123 * Description: Perform geometry validation & other recovery when CDROM 19124 * has been removed from drive. 19125 * 19126 * Return Code: 0 for success 19127 * errno-type return code of either sd_send_scsi_DOORLOCK() or 19128 * sd_send_scsi_READ_CAPACITY() 19129 * 19130 * Context: Executes in a taskq() thread context 19131 */ 19132 19133 static int 19134 sd_handle_mchange(struct sd_lun *un) 19135 { 19136 uint64_t capacity; 19137 uint32_t lbasize; 19138 int rval; 19139 sd_ssc_t *ssc; 19140 19141 ASSERT(!mutex_owned(SD_MUTEX(un))); 19142 ASSERT(un->un_f_monitor_media_state); 19143 19144 ssc = sd_ssc_init(un); 19145 rval = sd_send_scsi_READ_CAPACITY(ssc, &capacity, &lbasize, 19146 SD_PATH_DIRECT_PRIORITY); 19147 19148 if (rval != 0) 19149 goto failed; 19150 19151 mutex_enter(SD_MUTEX(un)); 19152 sd_update_block_info(un, lbasize, capacity); 19153 19154 if (un->un_errstats != NULL) { 19155 struct sd_errstats *stp = 19156 (struct sd_errstats *)un->un_errstats->ks_data; 19157 stp->sd_capacity.value.ui64 = (uint64_t) 19158 ((uint64_t)un->un_blockcount * 19159 (uint64_t)un->un_tgt_blocksize); 19160 } 19161 19162 /* 19163 * Check if the media in the device is writable or not 19164 */ 19165 if (ISCD(un)) { 19166 sd_check_for_writable_cd(ssc, SD_PATH_DIRECT_PRIORITY); 19167 } 19168 19169 /* 19170 * Note: Maybe let the strategy/partitioning chain worry about getting 19171 * valid geometry. 19172 */ 19173 mutex_exit(SD_MUTEX(un)); 19174 cmlb_invalidate(un->un_cmlbhandle, (void *)SD_PATH_DIRECT_PRIORITY); 19175 19176 19177 if (cmlb_validate(un->un_cmlbhandle, 0, 19178 (void *)SD_PATH_DIRECT_PRIORITY) != 0) { 19179 sd_ssc_fini(ssc); 19180 return (EIO); 19181 } else { 19182 if (un->un_f_pkstats_enabled) { 19183 sd_set_pstats(un); 19184 SD_TRACE(SD_LOG_IO_PARTITION, un, 19185 "sd_handle_mchange: un:0x%p pstats created and " 19186 "set\n", un); 19187 } 19188 } 19189 19190 /* 19191 * Try to lock the door 19192 */ 19193 rval = sd_send_scsi_DOORLOCK(ssc, SD_REMOVAL_PREVENT, 19194 SD_PATH_DIRECT_PRIORITY); 19195 failed: 19196 if (rval != 0) 19197 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 19198 sd_ssc_fini(ssc); 19199 return (rval); 19200 } 19201 19202 19203 /* 19204 * Function: sd_send_scsi_DOORLOCK 19205 * 19206 * Description: Issue the scsi DOOR LOCK command 19207 * 19208 * Arguments: ssc - ssc contains pointer to driver soft state (unit) 19209 * structure for this target. 
19210 * flag - SD_REMOVAL_ALLOW 19211 * SD_REMOVAL_PREVENT 19212 * path_flag - SD_PATH_DIRECT to use the USCSI "direct" chain and 19213 * the normal command waitq, or SD_PATH_DIRECT_PRIORITY 19214 * to use the USCSI "direct" chain and bypass the normal 19215 * command waitq. SD_PATH_DIRECT_PRIORITY is used when this 19216 * command is issued as part of an error recovery action. 19217 * 19218 * Return Code: 0 - Success 19219 * errno return code from sd_ssc_send() 19220 * 19221 * Context: Can sleep. 19222 */ 19223 19224 static int 19225 sd_send_scsi_DOORLOCK(sd_ssc_t *ssc, int flag, int path_flag) 19226 { 19227 struct scsi_extended_sense sense_buf; 19228 union scsi_cdb cdb; 19229 struct uscsi_cmd ucmd_buf; 19230 int status; 19231 struct sd_lun *un; 19232 19233 ASSERT(ssc != NULL); 19234 un = ssc->ssc_un; 19235 ASSERT(un != NULL); 19236 ASSERT(!mutex_owned(SD_MUTEX(un))); 19237 19238 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_DOORLOCK: entry: un:0x%p\n", un); 19239 19240 /* already determined doorlock is not supported, fake success */ 19241 if (un->un_f_doorlock_supported == FALSE) { 19242 return (0); 19243 } 19244 19245 /* 19246 * If we are ejecting and see an SD_REMOVAL_PREVENT 19247 * ignore the command so we can complete the eject 19248 * operation. 19249 */ 19250 if (flag == SD_REMOVAL_PREVENT) { 19251 mutex_enter(SD_MUTEX(un)); 19252 if (un->un_f_ejecting == TRUE) { 19253 mutex_exit(SD_MUTEX(un)); 19254 return (EAGAIN); 19255 } 19256 mutex_exit(SD_MUTEX(un)); 19257 } 19258 19259 bzero(&cdb, sizeof (cdb)); 19260 bzero(&ucmd_buf, sizeof (ucmd_buf)); 19261 19262 cdb.scc_cmd = SCMD_DOORLOCK; 19263 cdb.cdb_opaque[4] = (uchar_t)flag; 19264 19265 ucmd_buf.uscsi_cdb = (char *)&cdb; 19266 ucmd_buf.uscsi_cdblen = CDB_GROUP0; 19267 ucmd_buf.uscsi_bufaddr = NULL; 19268 ucmd_buf.uscsi_buflen = 0; 19269 ucmd_buf.uscsi_rqbuf = (caddr_t)&sense_buf; 19270 ucmd_buf.uscsi_rqlen = sizeof (sense_buf); 19271 ucmd_buf.uscsi_flags = USCSI_RQENABLE | USCSI_SILENT; 19272 ucmd_buf.uscsi_timeout = 15; 19273 19274 SD_TRACE(SD_LOG_IO, un, 19275 "sd_send_scsi_DOORLOCK: returning sd_ssc_send\n"); 19276 19277 status = sd_ssc_send(ssc, &ucmd_buf, FKIOCTL, 19278 UIO_SYSSPACE, path_flag); 19279 19280 if (status == 0) 19281 sd_ssc_assessment(ssc, SD_FMT_STANDARD); 19282 19283 if ((status == EIO) && (ucmd_buf.uscsi_status == STATUS_CHECK) && 19284 (ucmd_buf.uscsi_rqstatus == STATUS_GOOD) && 19285 (scsi_sense_key((uint8_t *)&sense_buf) == KEY_ILLEGAL_REQUEST)) { 19286 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 19287 19288 /* fake success and skip subsequent doorlock commands */ 19289 un->un_f_doorlock_supported = FALSE; 19290 return (0); 19291 } 19292 19293 return (status); 19294 } 19295 19296 /* 19297 * Function: sd_send_scsi_READ_CAPACITY 19298 * 19299 * Description: This routine uses the scsi READ CAPACITY command to determine 19300 * the device capacity in number of blocks and the device native 19301 * block size. If this function returns a failure, then the 19302 * values in *capp and *lbap are undefined. If the capacity 19303 * returned is 0xffffffff then the lun is too large for a 19304 * normal READ CAPACITY command and the results of a 19305 * READ CAPACITY 16 will be used instead. 19306 * 19307 * Arguments: ssc - ssc contains ptr to soft state struct for the target 19308 * capp - ptr to unsigned 64-bit variable to receive the 19309 * capacity value from the command. 
19310 * lbap - ptr to unsigned 32-bit variable to receive the
19311 * block size value from the command
19312 * path_flag - SD_PATH_DIRECT to use the USCSI "direct" chain and
19313 * the normal command waitq, or SD_PATH_DIRECT_PRIORITY
19314 * to use the USCSI "direct" chain and bypass the normal
19315 * command waitq. SD_PATH_DIRECT_PRIORITY is used when this
19316 * command is issued as part of an error recovery action.
19317 *
19318 * Return Code: 0 - Success
19319 * EIO - IO error
19320 * EACCES - Reservation conflict detected
19321 * EAGAIN - Device is becoming ready
19322 * errno return code from sd_ssc_send()
19323 *
19324 * Context: Can sleep. Blocks until command completes.
19325 */
19326
19327 #define SD_CAPACITY_SIZE sizeof (struct scsi_capacity)
19328
19329 static int
19330 sd_send_scsi_READ_CAPACITY(sd_ssc_t *ssc, uint64_t *capp, uint32_t *lbap,
19331 int path_flag)
19332 {
19333 struct scsi_extended_sense sense_buf;
19334 struct uscsi_cmd ucmd_buf;
19335 union scsi_cdb cdb;
19336 uint32_t *capacity_buf;
19337 uint64_t capacity;
19338 uint32_t lbasize;
19339 int status;
19340 struct sd_lun *un;
19341
19342 ASSERT(ssc != NULL);
19343
19344 un = ssc->ssc_un;
19345 ASSERT(un != NULL);
19346 ASSERT(!mutex_owned(SD_MUTEX(un)));
19347 ASSERT(capp != NULL);
19348 ASSERT(lbap != NULL);
19349
19350 SD_TRACE(SD_LOG_IO, un,
19351 "sd_send_scsi_READ_CAPACITY: entry: un:0x%p\n", un);
19352
19353 /*
19354 * First send a READ_CAPACITY command to the target.
19355 * (This command is mandatory under SCSI-2.)
19356 *
19357 * Set up the CDB for the READ_CAPACITY command. The Partial
19358 * Medium Indicator bit is cleared. The address field must be
19359 * zero if the PMI bit is zero.
19360 */
19361 bzero(&cdb, sizeof (cdb));
19362 bzero(&ucmd_buf, sizeof (ucmd_buf));
19363
19364 capacity_buf = kmem_zalloc(SD_CAPACITY_SIZE, KM_SLEEP);
19365
19366 cdb.scc_cmd = SCMD_READ_CAPACITY;
19367
19368 ucmd_buf.uscsi_cdb = (char *)&cdb;
19369 ucmd_buf.uscsi_cdblen = CDB_GROUP1;
19370 ucmd_buf.uscsi_bufaddr = (caddr_t)capacity_buf;
19371 ucmd_buf.uscsi_buflen = SD_CAPACITY_SIZE;
19372 ucmd_buf.uscsi_rqbuf = (caddr_t)&sense_buf;
19373 ucmd_buf.uscsi_rqlen = sizeof (sense_buf);
19374 ucmd_buf.uscsi_flags = USCSI_RQENABLE | USCSI_READ | USCSI_SILENT;
19375 ucmd_buf.uscsi_timeout = 60;
19376
19377 status = sd_ssc_send(ssc, &ucmd_buf, FKIOCTL,
19378 UIO_SYSSPACE, path_flag);
19379
19380 switch (status) {
19381 case 0:
19382 /* Return failure if we did not get valid capacity data. */
19383 if (ucmd_buf.uscsi_resid != 0) {
19384 sd_ssc_set_info(ssc, SSC_FLAGS_INVALID_DATA, -1,
19385 "sd_send_scsi_READ_CAPACITY received invalid "
19386 "capacity data");
19387 kmem_free(capacity_buf, SD_CAPACITY_SIZE);
19388 return (EIO);
19389 }
19390 /*
19391 * Read capacity and block size from the READ CAPACITY 10 data.
19392 * This data may be adjusted later due to device specific
19393 * issues.
19394 *
19395 * According to the SCSI spec, the READ CAPACITY 10
19396 * command returns the following:
19397 *
19398 * bytes 0-3: Maximum logical block address available.
19399 * (MSB in byte:0 & LSB in byte:3)
19400 *
19401 * bytes 4-7: Block length in bytes
19402 * (MSB in byte:4 & LSB in byte:7)
19403 *
19404 */
19405 capacity = BE_32(capacity_buf[0]);
19406 lbasize = BE_32(capacity_buf[1]);
19407
19408 /*
19409 * Done with capacity_buf
19410 */
19411 kmem_free(capacity_buf, SD_CAPACITY_SIZE);
19412
19413 /*
19414 * if the reported capacity is set to all 0xf's, then
19415 * this disk is too large and requires SBC-2 commands.
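 *
 * (Aside: the BE_32() decoding above and the 0xFFFFFFFF sentinel
 * check that follows, recast as a self-contained userland sketch,
 * illustrative only:)
 */
#if 0
#include <stdio.h>
#include <stdint.h>

/* Big-endian 32-bit load, the userland analogue of BE_32(). */
static uint32_t
get_be32(const uint8_t *p)
{
	return ((uint32_t)p[0] << 24 | (uint32_t)p[1] << 16 |
	    (uint32_t)p[2] << 8 | (uint32_t)p[3]);
}

int
main(void)
{
	/* READ CAPACITY (10) data: last LBA 0x3FFFFF, block length 512 */
	uint8_t data[8] = { 0x00, 0x3f, 0xff, 0xff, 0x00, 0x00, 0x02, 0x00 };
	uint32_t last_lba = get_be32(&data[0]);
	uint32_t lbasize = get_be32(&data[4]);

	if (last_lba == 0xffffffffU) {
		printf("too large; use READ CAPACITY (16)\n");
		return (0);
	}
	/* the device reports the LAST block, so the count is one more */
	printf("capacity = %llu blocks of %u bytes\n",
	    (unsigned long long)last_lba + 1, lbasize);
	return (0);
}
#endif
/*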
19416 * Reissue the request using READ CAPACITY 16. 19417 */ 19418 if (capacity == 0xffffffff) { 19419 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 19420 status = sd_send_scsi_READ_CAPACITY_16(ssc, &capacity, 19421 &lbasize, path_flag); 19422 if (status != 0) { 19423 return (status); 19424 } 19425 } 19426 break; /* Success! */ 19427 case EIO: 19428 switch (ucmd_buf.uscsi_status) { 19429 case STATUS_RESERVATION_CONFLICT: 19430 status = EACCES; 19431 break; 19432 case STATUS_CHECK: 19433 /* 19434 * Check condition; look for ASC/ASCQ of 0x04/0x01 19435 * (LOGICAL UNIT IS IN PROCESS OF BECOMING READY) 19436 */ 19437 if ((ucmd_buf.uscsi_rqstatus == STATUS_GOOD) && 19438 (scsi_sense_asc((uint8_t *)&sense_buf) == 0x04) && 19439 (scsi_sense_ascq((uint8_t *)&sense_buf) == 0x01)) { 19440 kmem_free(capacity_buf, SD_CAPACITY_SIZE); 19441 return (EAGAIN); 19442 } 19443 break; 19444 default: 19445 break; 19446 } 19447 /* FALLTHRU */ 19448 default: 19449 kmem_free(capacity_buf, SD_CAPACITY_SIZE); 19450 return (status); 19451 } 19452 19453 /* 19454 * Some ATAPI CD-ROM drives report inaccurate LBA size values 19455 * (2352 and 0 are common) so for these devices always force the value 19456 * to 2048 as required by the ATAPI specs. 19457 */ 19458 if ((un->un_f_cfg_is_atapi == TRUE) && (ISCD(un))) { 19459 lbasize = 2048; 19460 } 19461 19462 /* 19463 * Get the maximum LBA value from the READ CAPACITY data. 19464 * Here we assume that the Partial Medium Indicator (PMI) bit 19465 * was cleared when issuing the command. This means that the LBA 19466 * returned from the device is the LBA of the last logical block 19467 * on the logical unit. The actual logical block count will be 19468 * this value plus one. 19469 * 19470 * Currently the capacity is saved in terms of un->un_sys_blocksize, 19471 * so scale the capacity value to reflect this. 19472 */ 19473 capacity = (capacity + 1) * (lbasize / un->un_sys_blocksize); 19474 19475 /* 19476 * Copy the values from the READ CAPACITY command into the space 19477 * provided by the caller. 19478 */ 19479 *capp = capacity; 19480 *lbap = lbasize; 19481 19482 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_READ_CAPACITY: " 19483 "capacity:0x%llx lbasize:0x%x\n", capacity, lbasize); 19484 19485 /* 19486 * Both the lbasize and capacity from the device must be nonzero, 19487 * otherwise we assume that the values are not valid and return 19488 * failure to the caller. (4203735) 19489 */ 19490 if ((capacity == 0) || (lbasize == 0)) { 19491 sd_ssc_set_info(ssc, SSC_FLAGS_INVALID_DATA, -1, 19492 "sd_send_scsi_READ_CAPACITY received invalid value " 19493 "capacity %llu lbasize %d", capacity, lbasize); 19494 return (EIO); 19495 } 19496 sd_ssc_assessment(ssc, SD_FMT_STANDARD); 19497 return (0); 19498 } 19499 19500 /* 19501 * Function: sd_send_scsi_READ_CAPACITY_16 19502 * 19503 * Description: This routine uses the scsi READ CAPACITY 16 command to 19504 * determine the device capacity in number of blocks and the 19505 * device native block size. If this function returns a failure, 19506 * then the values in *capp and *lbap are undefined. 19507 * This routine should always be called by 19508 * sd_send_scsi_READ_CAPACITY which will appy any device 19509 * specific adjustments to capacity and lbasize. 19510 * 19511 * Arguments: ssc - ssc contains ptr to soft state struct for the target 19512 * capp - ptr to unsigned 64-bit variable to receive the 19513 * capacity value from the command. 
19514 * lbap - ptr to unsigned 32-bit variable to receive the
19515 * block size value from the command
19516 * path_flag - SD_PATH_DIRECT to use the USCSI "direct" chain and
19517 * the normal command waitq, or SD_PATH_DIRECT_PRIORITY
19518 * to use the USCSI "direct" chain and bypass the normal
19519 * command waitq. SD_PATH_DIRECT_PRIORITY is used when
19520 * this command is issued as part of an error recovery
19521 * action.
19522 *
19523 * Return Code: 0 - Success
19524 * EIO - IO error
19525 * EACCES - Reservation conflict detected
19526 * EAGAIN - Device is becoming ready
19527 * errno return code from sd_ssc_send()
19528 *
19529 * Context: Can sleep. Blocks until command completes.
19530 */
19531
19532 #define SD_CAPACITY_16_SIZE sizeof (struct scsi_capacity_16)
19533
19534 static int
19535 sd_send_scsi_READ_CAPACITY_16(sd_ssc_t *ssc, uint64_t *capp,
19536 uint32_t *lbap, int path_flag)
19537 {
19538 struct scsi_extended_sense sense_buf;
19539 struct uscsi_cmd ucmd_buf;
19540 union scsi_cdb cdb;
19541 uint64_t *capacity16_buf;
19542 uint64_t capacity;
19543 uint32_t lbasize;
19544 int status;
19545 struct sd_lun *un;
19546
19547 ASSERT(ssc != NULL);
19548
19549 un = ssc->ssc_un;
19550 ASSERT(un != NULL);
19551 ASSERT(!mutex_owned(SD_MUTEX(un)));
19552 ASSERT(capp != NULL);
19553 ASSERT(lbap != NULL);
19554
19555 SD_TRACE(SD_LOG_IO, un,
19556 "sd_send_scsi_READ_CAPACITY_16: entry: un:0x%p\n", un);
19557
19558 /*
19559 * First send a READ_CAPACITY_16 command to the target.
19560 *
19561 * Set up the CDB for the READ_CAPACITY_16 command. The Partial
19562 * Medium Indicator bit is cleared. The address field must be
19563 * zero if the PMI bit is zero.
19564 */
19565 bzero(&cdb, sizeof (cdb));
19566 bzero(&ucmd_buf, sizeof (ucmd_buf));
19567
19568 capacity16_buf = kmem_zalloc(SD_CAPACITY_16_SIZE, KM_SLEEP);
19569
19570 ucmd_buf.uscsi_cdb = (char *)&cdb;
19571 ucmd_buf.uscsi_cdblen = CDB_GROUP4;
19572 ucmd_buf.uscsi_bufaddr = (caddr_t)capacity16_buf;
19573 ucmd_buf.uscsi_buflen = SD_CAPACITY_16_SIZE;
19574 ucmd_buf.uscsi_rqbuf = (caddr_t)&sense_buf;
19575 ucmd_buf.uscsi_rqlen = sizeof (sense_buf);
19576 ucmd_buf.uscsi_flags = USCSI_RQENABLE | USCSI_READ | USCSI_SILENT;
19577 ucmd_buf.uscsi_timeout = 60;
19578
19579 /*
19580 * Read Capacity (16) is a Service Action In command. One
19581 * command byte (0x9E) is overloaded for multiple operations,
19582 * with the second CDB byte specifying the desired operation.
19583 */
19584 cdb.scc_cmd = SCMD_SVC_ACTION_IN_G4;
19585 cdb.cdb_opaque[1] = SSVC_ACTION_READ_CAPACITY_G4;
19586
19587 /*
19588 * Fill in allocation length field
19589 */
19590 FORMG4COUNT(&cdb, ucmd_buf.uscsi_buflen);
19591
19592 status = sd_ssc_send(ssc, &ucmd_buf, FKIOCTL,
19593 UIO_SYSSPACE, path_flag);
19594
19595 switch (status) {
19596 case 0:
19597 /* Return failure if we did not get valid capacity data. */
19598 if (ucmd_buf.uscsi_resid > 20) {
19599 sd_ssc_set_info(ssc, SSC_FLAGS_INVALID_DATA, -1,
19600 "sd_send_scsi_READ_CAPACITY_16 received invalid "
19601 "capacity data");
19602 kmem_free(capacity16_buf, SD_CAPACITY_16_SIZE);
19603 return (EIO);
19604 }
19605
19606 /*
19607 * Read capacity and block size from the READ CAPACITY 16 data.
19608 * This data may be adjusted later due to device specific
19609 * issues.
19610 *
19611 * According to the SCSI spec, the READ CAPACITY 16
19612 * command returns the following:
19613 *
19614 * bytes 0-7: Maximum logical block address available.
19615 * (MSB in byte:0 & LSB in byte:7)
19616 *
19617 * bytes 8-11: Block length in bytes
19618 * (MSB in byte:8 & LSB in byte:11)
19619 *
19620 */
19621 capacity = BE_64(capacity16_buf[0]);
19622 lbasize = BE_32(*(uint32_t *)&capacity16_buf[1]);
19623
19624 /*
19625 * Done with capacity16_buf
19626 */
19627 kmem_free(capacity16_buf, SD_CAPACITY_16_SIZE);
19628
19629 /*
19630 * if the reported capacity is set to all 0xf's, then
19631 * this disk is too large. This could only happen with
19632 * a device that supports LBAs larger than 64 bits which
19633 * are not defined by any current T10 standards.
19634 */
19635 if (capacity == 0xffffffffffffffff) {
19636 sd_ssc_set_info(ssc, SSC_FLAGS_INVALID_DATA, -1,
19637 "disk is too large");
19638 return (EIO);
19639 }
19640 break; /* Success! */
19641 case EIO:
19642 switch (ucmd_buf.uscsi_status) {
19643 case STATUS_RESERVATION_CONFLICT:
19644 status = EACCES;
19645 break;
19646 case STATUS_CHECK:
19647 /*
19648 * Check condition; look for ASC/ASCQ of 0x04/0x01
19649 * (LOGICAL UNIT IS IN PROCESS OF BECOMING READY)
19650 */
19651 if ((ucmd_buf.uscsi_rqstatus == STATUS_GOOD) &&
19652 (scsi_sense_asc((uint8_t *)&sense_buf) == 0x04) &&
19653 (scsi_sense_ascq((uint8_t *)&sense_buf) == 0x01)) {
19654 kmem_free(capacity16_buf, SD_CAPACITY_16_SIZE);
19655 return (EAGAIN);
19656 }
19657 break;
19658 default:
19659 break;
19660 }
19661 /* FALLTHRU */
19662 default:
19663 kmem_free(capacity16_buf, SD_CAPACITY_16_SIZE);
19664 return (status);
19665 }
19666
19667 *capp = capacity;
19668 *lbap = lbasize;
19669
19670 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_READ_CAPACITY_16: "
19671 "capacity:0x%llx lbasize:0x%x\n", capacity, lbasize);
19672
19673 return (0);
19674 }
19675
19676
19677 /*
19678 * Function: sd_send_scsi_START_STOP_UNIT
19679 *
19680 * Description: Issue a scsi START STOP UNIT command to the target.
19681 *
19682 * Arguments: ssc - ssc contains pointer to driver soft state (unit)
19683 * structure for this target.
19684 * flag - SD_TARGET_START
19685 * SD_TARGET_STOP
19686 * SD_TARGET_EJECT
19687 * path_flag - SD_PATH_DIRECT to use the USCSI "direct" chain and
19688 * the normal command waitq, or SD_PATH_DIRECT_PRIORITY
19689 * to use the USCSI "direct" chain and bypass the normal
19690 * command waitq. SD_PATH_DIRECT_PRIORITY is used when this
19691 * command is issued as part of an error recovery action.
19692 *
19693 * Return Code: 0 - Success
19694 * EIO - IO error
19695 * EACCES - Reservation conflict detected
19696 * ENXIO - Not Ready, medium not present
19697 * errno return code from sd_ssc_send()
19698 *
19699 * Context: Can sleep.
19700 */
19701
19702 static int
19703 sd_send_scsi_START_STOP_UNIT(sd_ssc_t *ssc, int flag, int path_flag)
19704 {
19705 struct scsi_extended_sense sense_buf;
19706 union scsi_cdb cdb;
19707 struct uscsi_cmd ucmd_buf;
19708 int status;
19709 struct sd_lun *un;
19710
19711 ASSERT(ssc != NULL);
19712 un = ssc->ssc_un;
19713 ASSERT(un != NULL);
19714 ASSERT(!mutex_owned(SD_MUTEX(un)));
19715
19716 SD_TRACE(SD_LOG_IO, un,
19717 "sd_send_scsi_START_STOP_UNIT: entry: un:0x%p\n", un);
19718
19719 if (un->un_f_check_start_stop &&
19720 ((flag == SD_TARGET_START) || (flag == SD_TARGET_STOP)) &&
19721 (un->un_f_start_stop_supported != TRUE)) {
19722 return (0);
19723 }
19724
19725 /*
19726 * If we are performing an eject operation and
19727 * we receive any command other than SD_TARGET_EJECT,
19728 * we should immediately return.
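 *
 * (Aside: the routine below drops its SD_TARGET_* flag straight into
 * CDB byte 4 via cdb.cdb_opaque[4]. In the standard START STOP UNIT
 * CDB (opcode 1Bh) that byte carries the START bit (0x01) and the
 * LOEJ load/eject bit (0x02). A userland sketch, with hypothetical
 * macro names, illustrative only:)
 */
#if 0
#include <stdio.h>
#include <stdint.h>

#define	SSU_START	0x01	/* spin the unit up */
#define	SSU_LOEJ	0x02	/* with START: load; alone: eject */

static void
build_ssu_cdb(uint8_t cdb[6], uint8_t flag)
{
	cdb[0] = 0x1b;		/* START STOP UNIT */
	cdb[1] = cdb[2] = cdb[3] = cdb[5] = 0;
	cdb[4] = flag;		/* as cdb.cdb_opaque[4] = (uchar_t)flag */
}

int
main(void)
{
	uint8_t cdb[6];
	int i;

	build_ssu_cdb(cdb, SSU_LOEJ);	/* stop and eject the medium */
	for (i = 0; i < 6; i++)
		printf("%02x ", cdb[i]);
	printf("\n");
	return (0);
}
#endif
/*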
19729 */ 19730 if (flag != SD_TARGET_EJECT) { 19731 mutex_enter(SD_MUTEX(un)); 19732 if (un->un_f_ejecting == TRUE) { 19733 mutex_exit(SD_MUTEX(un)); 19734 return (EAGAIN); 19735 } 19736 mutex_exit(SD_MUTEX(un)); 19737 } 19738 19739 bzero(&cdb, sizeof (cdb)); 19740 bzero(&ucmd_buf, sizeof (ucmd_buf)); 19741 bzero(&sense_buf, sizeof (struct scsi_extended_sense)); 19742 19743 cdb.scc_cmd = SCMD_START_STOP; 19744 cdb.cdb_opaque[4] = (uchar_t)flag; 19745 19746 ucmd_buf.uscsi_cdb = (char *)&cdb; 19747 ucmd_buf.uscsi_cdblen = CDB_GROUP0; 19748 ucmd_buf.uscsi_bufaddr = NULL; 19749 ucmd_buf.uscsi_buflen = 0; 19750 ucmd_buf.uscsi_rqbuf = (caddr_t)&sense_buf; 19751 ucmd_buf.uscsi_rqlen = sizeof (struct scsi_extended_sense); 19752 ucmd_buf.uscsi_flags = USCSI_RQENABLE | USCSI_SILENT; 19753 ucmd_buf.uscsi_timeout = 200; 19754 19755 status = sd_ssc_send(ssc, &ucmd_buf, FKIOCTL, 19756 UIO_SYSSPACE, path_flag); 19757 19758 switch (status) { 19759 case 0: 19760 sd_ssc_assessment(ssc, SD_FMT_STANDARD); 19761 break; /* Success! */ 19762 case EIO: 19763 switch (ucmd_buf.uscsi_status) { 19764 case STATUS_RESERVATION_CONFLICT: 19765 status = EACCES; 19766 break; 19767 case STATUS_CHECK: 19768 if (ucmd_buf.uscsi_rqstatus == STATUS_GOOD) { 19769 switch (scsi_sense_key( 19770 (uint8_t *)&sense_buf)) { 19771 case KEY_ILLEGAL_REQUEST: 19772 status = ENOTSUP; 19773 break; 19774 case KEY_NOT_READY: 19775 if (scsi_sense_asc( 19776 (uint8_t *)&sense_buf) 19777 == 0x3A) { 19778 status = ENXIO; 19779 } 19780 break; 19781 default: 19782 break; 19783 } 19784 } 19785 break; 19786 default: 19787 break; 19788 } 19789 break; 19790 default: 19791 break; 19792 } 19793 19794 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_START_STOP_UNIT: exit\n"); 19795 19796 return (status); 19797 } 19798 19799 19800 /* 19801 * Function: sd_start_stop_unit_callback 19802 * 19803 * Description: timeout(9F) callback to begin recovery process for a 19804 * device that has spun down. 19805 * 19806 * Arguments: arg - pointer to associated softstate struct. 19807 * 19808 * Context: Executes in a timeout(9F) thread context 19809 */ 19810 19811 static void 19812 sd_start_stop_unit_callback(void *arg) 19813 { 19814 struct sd_lun *un = arg; 19815 ASSERT(un != NULL); 19816 ASSERT(!mutex_owned(SD_MUTEX(un))); 19817 19818 SD_TRACE(SD_LOG_IO, un, "sd_start_stop_unit_callback: entry\n"); 19819 19820 (void) taskq_dispatch(sd_tq, sd_start_stop_unit_task, un, KM_NOSLEEP); 19821 } 19822 19823 19824 /* 19825 * Function: sd_start_stop_unit_task 19826 * 19827 * Description: Recovery procedure when a drive is spun down. 19828 * 19829 * Arguments: arg - pointer to associated softstate struct. 19830 * 19831 * Context: Executes in a taskq() thread context 19832 */ 19833 19834 static void 19835 sd_start_stop_unit_task(void *arg) 19836 { 19837 struct sd_lun *un = arg; 19838 sd_ssc_t *ssc; 19839 int rval; 19840 19841 ASSERT(un != NULL); 19842 ASSERT(!mutex_owned(SD_MUTEX(un))); 19843 19844 SD_TRACE(SD_LOG_IO, un, "sd_start_stop_unit_task: entry\n"); 19845 19846 /* 19847 * Some unformatted drives report not ready error, no need to 19848 * restart if format has been initiated. 19849 */ 19850 mutex_enter(SD_MUTEX(un)); 19851 if (un->un_f_format_in_progress == TRUE) { 19852 mutex_exit(SD_MUTEX(un)); 19853 return; 19854 } 19855 mutex_exit(SD_MUTEX(un)); 19856 19857 /* 19858 * When a START STOP command is issued from here, it is part of a 19859 * failure recovery operation and must be issued before any other 19860 * commands, including any pending retries. 
Thus it must be sent 19861 * using SD_PATH_DIRECT_PRIORITY. It doesn't matter if the spin up 19862 * succeeds or not, we will start I/O after the attempt. 19863 */ 19864 ssc = sd_ssc_init(un); 19865 rval = sd_send_scsi_START_STOP_UNIT(ssc, SD_TARGET_START, 19866 SD_PATH_DIRECT_PRIORITY); 19867 if (rval != 0) 19868 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 19869 sd_ssc_fini(ssc); 19870 /* 19871 * The above call blocks until the START_STOP_UNIT command completes. 19872 * Now that it has completed, we must re-try the original IO that 19873 * received the NOT READY condition in the first place. There are 19874 * three possible conditions here: 19875 * 19876 * (1) The original IO is on un_retry_bp. 19877 * (2) The original IO is on the regular wait queue, and un_retry_bp 19878 * is NULL. 19879 * (3) The original IO is on the regular wait queue, and un_retry_bp 19880 * points to some other, unrelated bp. 19881 * 19882 * For each case, we must call sd_start_cmds() with un_retry_bp 19883 * as the argument. If un_retry_bp is NULL, this will initiate 19884 * processing of the regular wait queue. If un_retry_bp is not NULL, 19885 * then this will process the bp on un_retry_bp. That may or may not 19886 * be the original IO, but that does not matter: the important thing 19887 * is to keep the IO processing going at this point. 19888 * 19889 * Note: This is a very specific error recovery sequence associated 19890 * with a drive that is not spun up. We attempt a START_STOP_UNIT and 19891 * serialize the I/O with completion of the spin-up. 19892 */ 19893 mutex_enter(SD_MUTEX(un)); 19894 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 19895 "sd_start_stop_unit_task: un:0x%p starting bp:0x%p\n", 19896 un, un->un_retry_bp); 19897 un->un_startstop_timeid = NULL; /* Timeout is no longer pending */ 19898 sd_start_cmds(un, un->un_retry_bp); 19899 mutex_exit(SD_MUTEX(un)); 19900 19901 SD_TRACE(SD_LOG_IO, un, "sd_start_stop_unit_task: exit\n"); 19902 } 19903 19904 19905 /* 19906 * Function: sd_send_scsi_INQUIRY 19907 * 19908 * Description: Issue the scsi INQUIRY command. 19909 * 19910 * Arguments: ssc - ssc contains pointer to driver soft state (unit) 19911 * structure for this target. 19912 * bufaddr 19913 * buflen 19914 * evpd 19915 * page_code 19916 * page_length 19917 * 19918 * Return Code: 0 - Success 19919 * errno return code from sd_ssc_send() 19920 * 19921 * Context: Can sleep. Does not return until command is completed. 
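 *
 * (Aside: the CDB built below is a plain Group 0 INQUIRY, opcode 12h:
 * byte 1 carries the EVPD bit, byte 2 the VPD page code, and byte 4
 * the allocation length that FORMG0COUNT() fills in. A userland
 * sketch, illustrative only:)
 */
#if 0
#include <stdio.h>
#include <stdint.h>

static void
build_inquiry_cdb(uint8_t cdb[6], uint8_t evpd, uint8_t page_code,
    uint8_t buflen)
{
	cdb[0] = 0x12;		/* INQUIRY */
	cdb[1] = evpd;		/* EVPD bit selects VPD pages */
	cdb[2] = page_code;	/* meaningful only when EVPD is set */
	cdb[3] = 0;
	cdb[4] = buflen;	/* allocation length */
	cdb[5] = 0;
}

int
main(void)
{
	uint8_t cdb[6];
	int i;

	build_inquiry_cdb(cdb, 0, 0, 36);	/* standard 36-byte INQUIRY */
	for (i = 0; i < 6; i++)
		printf("%02x ", cdb[i]);
	printf("\n");
	return (0);
}
#endif
/*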
19922 */ 19923 19924 static int 19925 sd_send_scsi_INQUIRY(sd_ssc_t *ssc, uchar_t *bufaddr, size_t buflen, 19926 uchar_t evpd, uchar_t page_code, size_t *residp) 19927 { 19928 union scsi_cdb cdb; 19929 struct uscsi_cmd ucmd_buf; 19930 int status; 19931 struct sd_lun *un; 19932 19933 ASSERT(ssc != NULL); 19934 un = ssc->ssc_un; 19935 ASSERT(un != NULL); 19936 ASSERT(!mutex_owned(SD_MUTEX(un))); 19937 ASSERT(bufaddr != NULL); 19938 19939 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_INQUIRY: entry: un:0x%p\n", un); 19940 19941 bzero(&cdb, sizeof (cdb)); 19942 bzero(&ucmd_buf, sizeof (ucmd_buf)); 19943 bzero(bufaddr, buflen); 19944 19945 cdb.scc_cmd = SCMD_INQUIRY; 19946 cdb.cdb_opaque[1] = evpd; 19947 cdb.cdb_opaque[2] = page_code; 19948 FORMG0COUNT(&cdb, buflen); 19949 19950 ucmd_buf.uscsi_cdb = (char *)&cdb; 19951 ucmd_buf.uscsi_cdblen = CDB_GROUP0; 19952 ucmd_buf.uscsi_bufaddr = (caddr_t)bufaddr; 19953 ucmd_buf.uscsi_buflen = buflen; 19954 ucmd_buf.uscsi_rqbuf = NULL; 19955 ucmd_buf.uscsi_rqlen = 0; 19956 ucmd_buf.uscsi_flags = USCSI_READ | USCSI_SILENT; 19957 ucmd_buf.uscsi_timeout = 200; /* Excessive legacy value */ 19958 19959 status = sd_ssc_send(ssc, &ucmd_buf, FKIOCTL, 19960 UIO_SYSSPACE, SD_PATH_DIRECT); 19961 19962 /* 19963 * Only handle status == 0, the upper-level caller 19964 * will put different assessment based on the context. 19965 */ 19966 if (status == 0) 19967 sd_ssc_assessment(ssc, SD_FMT_STANDARD); 19968 19969 if ((status == 0) && (residp != NULL)) { 19970 *residp = ucmd_buf.uscsi_resid; 19971 } 19972 19973 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_INQUIRY: exit\n"); 19974 19975 return (status); 19976 } 19977 19978 19979 /* 19980 * Function: sd_send_scsi_TEST_UNIT_READY 19981 * 19982 * Description: Issue the scsi TEST UNIT READY command. 19983 * This routine can be told to set the flag USCSI_DIAGNOSE to 19984 * prevent retrying failed commands. Use this when the intent 19985 * is either to check for device readiness, to clear a Unit 19986 * Attention, or to clear any outstanding sense data. 19987 * However under specific conditions the expected behavior 19988 * is for retries to bring a device ready, so use the flag 19989 * with caution. 19990 * 19991 * Arguments: ssc - ssc contains pointer to driver soft state (unit) 19992 * structure for this target. 19993 * flag: SD_CHECK_FOR_MEDIA: return ENXIO if no media present 19994 * SD_DONT_RETRY_TUR: include uscsi flag USCSI_DIAGNOSE. 19995 * 0: dont check for media present, do retries on cmd. 19996 * 19997 * Return Code: 0 - Success 19998 * EIO - IO error 19999 * EACCES - Reservation conflict detected 20000 * ENXIO - Not Ready, medium not present 20001 * errno return code from sd_ssc_send() 20002 * 20003 * Context: Can sleep. Does not return until command is completed. 20004 */ 20005 20006 static int 20007 sd_send_scsi_TEST_UNIT_READY(sd_ssc_t *ssc, int flag) 20008 { 20009 struct scsi_extended_sense sense_buf; 20010 union scsi_cdb cdb; 20011 struct uscsi_cmd ucmd_buf; 20012 int status; 20013 struct sd_lun *un; 20014 20015 ASSERT(ssc != NULL); 20016 un = ssc->ssc_un; 20017 ASSERT(un != NULL); 20018 ASSERT(!mutex_owned(SD_MUTEX(un))); 20019 20020 SD_TRACE(SD_LOG_IO, un, 20021 "sd_send_scsi_TEST_UNIT_READY: entry: un:0x%p\n", un); 20022 20023 /* 20024 * Some Seagate elite1 TQ devices get hung with disconnect/reconnect 20025 * timeouts when they receive a TUR and the queue is not empty. 
20026 	 * the configuration flag set during attach (indicating the drive has
20027 	 * this firmware bug) and un_ncmds_in_transport before issuing the
20028 	 * TUR. If there are
20029 	 * pending commands, return success; this is somewhat arbitrary,
20030 	 * but it is acceptable for non-removables (i.e. the eliteI disks)
20031 	 * and non-clustering configurations.
20032 	 */
20033 	if (un->un_f_cfg_tur_check == TRUE) {
20034 		mutex_enter(SD_MUTEX(un));
20035 		if (un->un_ncmds_in_transport != 0) {
20036 			mutex_exit(SD_MUTEX(un));
20037 			return (0);
20038 		}
20039 		mutex_exit(SD_MUTEX(un));
20040 	}
20041 
20042 	bzero(&cdb, sizeof (cdb));
20043 	bzero(&ucmd_buf, sizeof (ucmd_buf));
20044 	bzero(&sense_buf, sizeof (struct scsi_extended_sense));
20045 
20046 	cdb.scc_cmd = SCMD_TEST_UNIT_READY;
20047 
20048 	ucmd_buf.uscsi_cdb = (char *)&cdb;
20049 	ucmd_buf.uscsi_cdblen = CDB_GROUP0;
20050 	ucmd_buf.uscsi_bufaddr = NULL;
20051 	ucmd_buf.uscsi_buflen = 0;
20052 	ucmd_buf.uscsi_rqbuf = (caddr_t)&sense_buf;
20053 	ucmd_buf.uscsi_rqlen = sizeof (struct scsi_extended_sense);
20054 	ucmd_buf.uscsi_flags = USCSI_RQENABLE | USCSI_SILENT;
20055 
20056 	/* Use flag USCSI_DIAGNOSE to prevent retries if it fails. */
20057 	if ((flag & SD_DONT_RETRY_TUR) != 0) {
20058 		ucmd_buf.uscsi_flags |= USCSI_DIAGNOSE;
20059 	}
20060 	ucmd_buf.uscsi_timeout = 60;
20061 
20062 	status = sd_ssc_send(ssc, &ucmd_buf, FKIOCTL,
20063 	    UIO_SYSSPACE, ((flag & SD_BYPASS_PM) ? SD_PATH_DIRECT :
20064 	    SD_PATH_STANDARD));
20065 
20066 	switch (status) {
20067 	case 0:
20068 		sd_ssc_assessment(ssc, SD_FMT_STANDARD);
20069 		break;	/* Success! */
20070 	case EIO:
20071 		switch (ucmd_buf.uscsi_status) {
20072 		case STATUS_RESERVATION_CONFLICT:
20073 			status = EACCES;
20074 			break;
20075 		case STATUS_CHECK:
20076 			if ((flag & SD_CHECK_FOR_MEDIA) == 0) {
20077 				break;
20078 			}
20079 			if ((ucmd_buf.uscsi_rqstatus == STATUS_GOOD) &&
20080 			    (scsi_sense_key((uint8_t *)&sense_buf) ==
20081 			    KEY_NOT_READY) &&
20082 			    (scsi_sense_asc((uint8_t *)&sense_buf) == 0x3A)) {
20083 				status = ENXIO;
20084 			}
20085 			break;
20086 		default:
20087 			break;
20088 		}
20089 		break;
20090 	default:
20091 		break;
20092 	}
20093 
20094 	SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_TEST_UNIT_READY: exit\n");
20095 
20096 	return (status);
20097 }
20098 
20099 /*
20100  * Function: sd_send_scsi_PERSISTENT_RESERVE_IN
20101  *
20102  * Description: Issue the scsi PERSISTENT RESERVE IN command.
20103  *
20104  * Arguments: ssc - ssc contains pointer to driver soft state (unit)
20105  *		structure for this target.
20106  *
20107  * Return Code: 0 - Success
20108  *		EACCES
20109  *		ENOTSUP
20110  *		errno return code from sd_ssc_send()
20111  *
20112  * Context: Can sleep. Does not return until command is completed.
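 *
 * Note: usr_cmd must be SD_READ_KEYS or SD_READ_RESV (asserted below),
 * data_len/data_bufp describe the caller's return buffer, and if
 * data_bufp is NULL a MHIOC_RESV_KEY_SIZE scratch buffer is allocated
 * and freed internally.
 *
 * Caller sketch (illustration only; the local buffer name is an
 * assumption, not code from this driver):
 *
 *	uchar_t	keybuf[MHIOC_RESV_KEY_SIZE];
 *
 *	if (sd_send_scsi_PERSISTENT_RESERVE_IN(ssc, SD_READ_KEYS,
 *	    sizeof (keybuf), keybuf) == 0) {
 *		(keybuf holds the PRIN READ KEYS parameter data)
 *	}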
20113 */ 20114 20115 static int 20116 sd_send_scsi_PERSISTENT_RESERVE_IN(sd_ssc_t *ssc, uchar_t usr_cmd, 20117 uint16_t data_len, uchar_t *data_bufp) 20118 { 20119 struct scsi_extended_sense sense_buf; 20120 union scsi_cdb cdb; 20121 struct uscsi_cmd ucmd_buf; 20122 int status; 20123 int no_caller_buf = FALSE; 20124 struct sd_lun *un; 20125 20126 ASSERT(ssc != NULL); 20127 un = ssc->ssc_un; 20128 ASSERT(un != NULL); 20129 ASSERT(!mutex_owned(SD_MUTEX(un))); 20130 ASSERT((usr_cmd == SD_READ_KEYS) || (usr_cmd == SD_READ_RESV)); 20131 20132 SD_TRACE(SD_LOG_IO, un, 20133 "sd_send_scsi_PERSISTENT_RESERVE_IN: entry: un:0x%p\n", un); 20134 20135 bzero(&cdb, sizeof (cdb)); 20136 bzero(&ucmd_buf, sizeof (ucmd_buf)); 20137 bzero(&sense_buf, sizeof (struct scsi_extended_sense)); 20138 if (data_bufp == NULL) { 20139 /* Allocate a default buf if the caller did not give one */ 20140 ASSERT(data_len == 0); 20141 data_len = MHIOC_RESV_KEY_SIZE; 20142 data_bufp = kmem_zalloc(MHIOC_RESV_KEY_SIZE, KM_SLEEP); 20143 no_caller_buf = TRUE; 20144 } 20145 20146 cdb.scc_cmd = SCMD_PERSISTENT_RESERVE_IN; 20147 cdb.cdb_opaque[1] = usr_cmd; 20148 FORMG1COUNT(&cdb, data_len); 20149 20150 ucmd_buf.uscsi_cdb = (char *)&cdb; 20151 ucmd_buf.uscsi_cdblen = CDB_GROUP1; 20152 ucmd_buf.uscsi_bufaddr = (caddr_t)data_bufp; 20153 ucmd_buf.uscsi_buflen = data_len; 20154 ucmd_buf.uscsi_rqbuf = (caddr_t)&sense_buf; 20155 ucmd_buf.uscsi_rqlen = sizeof (struct scsi_extended_sense); 20156 ucmd_buf.uscsi_flags = USCSI_RQENABLE | USCSI_READ | USCSI_SILENT; 20157 ucmd_buf.uscsi_timeout = 60; 20158 20159 status = sd_ssc_send(ssc, &ucmd_buf, FKIOCTL, 20160 UIO_SYSSPACE, SD_PATH_STANDARD); 20161 20162 switch (status) { 20163 case 0: 20164 sd_ssc_assessment(ssc, SD_FMT_STANDARD); 20165 20166 break; /* Success! */ 20167 case EIO: 20168 switch (ucmd_buf.uscsi_status) { 20169 case STATUS_RESERVATION_CONFLICT: 20170 status = EACCES; 20171 break; 20172 case STATUS_CHECK: 20173 if ((ucmd_buf.uscsi_rqstatus == STATUS_GOOD) && 20174 (scsi_sense_key((uint8_t *)&sense_buf) == 20175 KEY_ILLEGAL_REQUEST)) { 20176 status = ENOTSUP; 20177 } 20178 break; 20179 default: 20180 break; 20181 } 20182 break; 20183 default: 20184 break; 20185 } 20186 20187 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_PERSISTENT_RESERVE_IN: exit\n"); 20188 20189 if (no_caller_buf == TRUE) { 20190 kmem_free(data_bufp, data_len); 20191 } 20192 20193 return (status); 20194 } 20195 20196 20197 /* 20198 * Function: sd_send_scsi_PERSISTENT_RESERVE_OUT 20199 * 20200 * Description: This routine is the driver entry point for handling CD-ROM 20201 * multi-host persistent reservation requests (MHIOCGRP_INKEYS, 20202 * MHIOCGRP_INRESV) by sending the SCSI-3 PROUT commands to the 20203 * device. 20204 * 20205 * Arguments: ssc - ssc contains un - pointer to soft state struct 20206 * for the target. 20207 * usr_cmd SCSI-3 reservation facility command (one of 20208 * SD_SCSI3_REGISTER, SD_SCSI3_RESERVE, SD_SCSI3_RELEASE, 20209 * SD_SCSI3_PREEMPTANDABORT) 20210 * usr_bufp - user provided pointer register, reserve descriptor or 20211 * preempt and abort structure (mhioc_register_t, 20212 * mhioc_resv_desc_t, mhioc_preemptandabort_t) 20213 * 20214 * Return Code: 0 - Success 20215 * EACCES 20216 * ENOTSUP 20217 * errno return code from sd_ssc_send() 20218 * 20219 * Context: Can sleep. Does not return until command is completed. 
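 *
 * Caller sketch for a SCSI-3 registration (illustration only; the
 * mhioc_register_t setup shown, including the hypothetical mykey
 * source, is an assumption about the caller, not code from this
 * driver). A zeroed oldkey registers from the unregistered state:
 *
 *	mhioc_register_t reg;
 *
 *	bzero(&reg, sizeof (reg));
 *	bcopy(mykey, reg.newkey.key, MHIOC_RESV_KEY_SIZE);
 *	(void) sd_send_scsi_PERSISTENT_RESERVE_OUT(ssc,
 *	    SD_SCSI3_REGISTER, (uchar_t *)&reg);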
20220 */ 20221 20222 static int 20223 sd_send_scsi_PERSISTENT_RESERVE_OUT(sd_ssc_t *ssc, uchar_t usr_cmd, 20224 uchar_t *usr_bufp) 20225 { 20226 struct scsi_extended_sense sense_buf; 20227 union scsi_cdb cdb; 20228 struct uscsi_cmd ucmd_buf; 20229 int status; 20230 uchar_t data_len = sizeof (sd_prout_t); 20231 sd_prout_t *prp; 20232 struct sd_lun *un; 20233 20234 ASSERT(ssc != NULL); 20235 un = ssc->ssc_un; 20236 ASSERT(un != NULL); 20237 ASSERT(!mutex_owned(SD_MUTEX(un))); 20238 ASSERT(data_len == 24); /* required by scsi spec */ 20239 20240 SD_TRACE(SD_LOG_IO, un, 20241 "sd_send_scsi_PERSISTENT_RESERVE_OUT: entry: un:0x%p\n", un); 20242 20243 if (usr_bufp == NULL) { 20244 return (EINVAL); 20245 } 20246 20247 bzero(&cdb, sizeof (cdb)); 20248 bzero(&ucmd_buf, sizeof (ucmd_buf)); 20249 bzero(&sense_buf, sizeof (struct scsi_extended_sense)); 20250 prp = kmem_zalloc(data_len, KM_SLEEP); 20251 20252 cdb.scc_cmd = SCMD_PERSISTENT_RESERVE_OUT; 20253 cdb.cdb_opaque[1] = usr_cmd; 20254 FORMG1COUNT(&cdb, data_len); 20255 20256 ucmd_buf.uscsi_cdb = (char *)&cdb; 20257 ucmd_buf.uscsi_cdblen = CDB_GROUP1; 20258 ucmd_buf.uscsi_bufaddr = (caddr_t)prp; 20259 ucmd_buf.uscsi_buflen = data_len; 20260 ucmd_buf.uscsi_rqbuf = (caddr_t)&sense_buf; 20261 ucmd_buf.uscsi_rqlen = sizeof (struct scsi_extended_sense); 20262 ucmd_buf.uscsi_flags = USCSI_RQENABLE | USCSI_WRITE | USCSI_SILENT; 20263 ucmd_buf.uscsi_timeout = 60; 20264 20265 switch (usr_cmd) { 20266 case SD_SCSI3_REGISTER: { 20267 mhioc_register_t *ptr = (mhioc_register_t *)usr_bufp; 20268 20269 bcopy(ptr->oldkey.key, prp->res_key, MHIOC_RESV_KEY_SIZE); 20270 bcopy(ptr->newkey.key, prp->service_key, 20271 MHIOC_RESV_KEY_SIZE); 20272 prp->aptpl = ptr->aptpl; 20273 break; 20274 } 20275 case SD_SCSI3_RESERVE: 20276 case SD_SCSI3_RELEASE: { 20277 mhioc_resv_desc_t *ptr = (mhioc_resv_desc_t *)usr_bufp; 20278 20279 bcopy(ptr->key.key, prp->res_key, MHIOC_RESV_KEY_SIZE); 20280 prp->scope_address = BE_32(ptr->scope_specific_addr); 20281 cdb.cdb_opaque[2] = ptr->type; 20282 break; 20283 } 20284 case SD_SCSI3_PREEMPTANDABORT: { 20285 mhioc_preemptandabort_t *ptr = 20286 (mhioc_preemptandabort_t *)usr_bufp; 20287 20288 bcopy(ptr->resvdesc.key.key, prp->res_key, MHIOC_RESV_KEY_SIZE); 20289 bcopy(ptr->victim_key.key, prp->service_key, 20290 MHIOC_RESV_KEY_SIZE); 20291 prp->scope_address = BE_32(ptr->resvdesc.scope_specific_addr); 20292 cdb.cdb_opaque[2] = ptr->resvdesc.type; 20293 ucmd_buf.uscsi_flags |= USCSI_HEAD; 20294 break; 20295 } 20296 case SD_SCSI3_REGISTERANDIGNOREKEY: 20297 { 20298 mhioc_registerandignorekey_t *ptr; 20299 ptr = (mhioc_registerandignorekey_t *)usr_bufp; 20300 bcopy(ptr->newkey.key, 20301 prp->service_key, MHIOC_RESV_KEY_SIZE); 20302 prp->aptpl = ptr->aptpl; 20303 break; 20304 } 20305 default: 20306 ASSERT(FALSE); 20307 break; 20308 } 20309 20310 status = sd_ssc_send(ssc, &ucmd_buf, FKIOCTL, 20311 UIO_SYSSPACE, SD_PATH_STANDARD); 20312 20313 switch (status) { 20314 case 0: 20315 sd_ssc_assessment(ssc, SD_FMT_STANDARD); 20316 break; /* Success! 
*/ 20317 case EIO: 20318 switch (ucmd_buf.uscsi_status) { 20319 case STATUS_RESERVATION_CONFLICT: 20320 status = EACCES; 20321 break; 20322 case STATUS_CHECK: 20323 if ((ucmd_buf.uscsi_rqstatus == STATUS_GOOD) && 20324 (scsi_sense_key((uint8_t *)&sense_buf) == 20325 KEY_ILLEGAL_REQUEST)) { 20326 status = ENOTSUP; 20327 } 20328 break; 20329 default: 20330 break; 20331 } 20332 break; 20333 default: 20334 break; 20335 } 20336 20337 kmem_free(prp, data_len); 20338 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_PERSISTENT_RESERVE_OUT: exit\n"); 20339 return (status); 20340 } 20341 20342 20343 /* 20344 * Function: sd_send_scsi_SYNCHRONIZE_CACHE 20345 * 20346 * Description: Issues a scsi SYNCHRONIZE CACHE command to the target 20347 * 20348 * Arguments: un - pointer to the target's soft state struct 20349 * dkc - pointer to the callback structure 20350 * 20351 * Return Code: 0 - success 20352 * errno-type error code 20353 * 20354 * Context: kernel thread context only. 20355 * 20356 * _______________________________________________________________ 20357 * | dkc_flag & | dkc_callback | DKIOCFLUSHWRITECACHE | 20358 * |FLUSH_VOLATILE| | operation | 20359 * |______________|______________|_________________________________| 20360 * | 0 | NULL | Synchronous flush on both | 20361 * | | | volatile and non-volatile cache | 20362 * |______________|______________|_________________________________| 20363 * | 1 | NULL | Synchronous flush on volatile | 20364 * | | | cache; disk drivers may suppress| 20365 * | | | flush if disk table indicates | 20366 * | | | non-volatile cache | 20367 * |______________|______________|_________________________________| 20368 * | 0 | !NULL | Asynchronous flush on both | 20369 * | | | volatile and non-volatile cache;| 20370 * |______________|______________|_________________________________| 20371 * | 1 | !NULL | Asynchronous flush on volatile | 20372 * | | | cache; disk drivers may suppress| 20373 * | | | flush if disk table indicates | 20374 * | | | non-volatile cache | 20375 * |______________|______________|_________________________________| 20376 * 20377 */ 20378 20379 static int 20380 sd_send_scsi_SYNCHRONIZE_CACHE(struct sd_lun *un, struct dk_callback *dkc) 20381 { 20382 struct sd_uscsi_info *uip; 20383 struct uscsi_cmd *uscmd; 20384 union scsi_cdb *cdb; 20385 struct buf *bp; 20386 int rval = 0; 20387 int is_async; 20388 20389 SD_TRACE(SD_LOG_IO, un, 20390 "sd_send_scsi_SYNCHRONIZE_CACHE: entry: un:0x%p\n", un); 20391 20392 ASSERT(un != NULL); 20393 ASSERT(!mutex_owned(SD_MUTEX(un))); 20394 20395 if (dkc == NULL || dkc->dkc_callback == NULL) { 20396 is_async = FALSE; 20397 } else { 20398 is_async = TRUE; 20399 } 20400 20401 mutex_enter(SD_MUTEX(un)); 20402 /* check whether cache flush should be suppressed */ 20403 if (un->un_f_suppress_cache_flush == TRUE) { 20404 mutex_exit(SD_MUTEX(un)); 20405 /* 20406 * suppress the cache flush if the device is told to do 20407 * so by sd.conf or disk table 20408 */ 20409 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_SYNCHRONIZE_CACHE: \ 20410 skip the cache flush since suppress_cache_flush is %d!\n", 20411 un->un_f_suppress_cache_flush); 20412 20413 if (is_async == TRUE) { 20414 /* invoke callback for asynchronous flush */ 20415 (*dkc->dkc_callback)(dkc->dkc_cookie, 0); 20416 } 20417 return (rval); 20418 } 20419 mutex_exit(SD_MUTEX(un)); 20420 20421 /* 20422 * check dkc_flag & FLUSH_VOLATILE so SYNC_NV bit can be 20423 * set properly 20424 */ 20425 cdb = kmem_zalloc(CDB_GROUP1, KM_SLEEP); 20426 cdb->scc_cmd = SCMD_SYNCHRONIZE_CACHE; 20427 20428 
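	/*
	 * SD_MUTEX is taken around the check below because
	 * un_f_sync_nv_supported is a softstate flag that the completion
	 * handler (sd_send_scsi_SYNCHRONIZE_CACHE_biodone) clears when the
	 * target rejects the SYNC_NV bit; reading it unlocked could race
	 * with that update.
	 */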
mutex_enter(SD_MUTEX(un)); 20429 if (dkc != NULL && un->un_f_sync_nv_supported && 20430 (dkc->dkc_flag & FLUSH_VOLATILE)) { 20431 /* 20432 * if the device supports SYNC_NV bit, turn on 20433 * the SYNC_NV bit to only flush volatile cache 20434 */ 20435 cdb->cdb_un.tag |= SD_SYNC_NV_BIT; 20436 } 20437 mutex_exit(SD_MUTEX(un)); 20438 20439 /* 20440 * First get some memory for the uscsi_cmd struct and cdb 20441 * and initialize for SYNCHRONIZE_CACHE cmd. 20442 */ 20443 uscmd = kmem_zalloc(sizeof (struct uscsi_cmd), KM_SLEEP); 20444 uscmd->uscsi_cdblen = CDB_GROUP1; 20445 uscmd->uscsi_cdb = (caddr_t)cdb; 20446 uscmd->uscsi_bufaddr = NULL; 20447 uscmd->uscsi_buflen = 0; 20448 uscmd->uscsi_rqbuf = kmem_zalloc(SENSE_LENGTH, KM_SLEEP); 20449 uscmd->uscsi_rqlen = SENSE_LENGTH; 20450 uscmd->uscsi_rqresid = SENSE_LENGTH; 20451 uscmd->uscsi_flags = USCSI_RQENABLE | USCSI_SILENT; 20452 uscmd->uscsi_timeout = sd_io_time; 20453 20454 /* 20455 * Allocate an sd_uscsi_info struct and fill it with the info 20456 * needed by sd_initpkt_for_uscsi(). Then put the pointer into 20457 * b_private in the buf for sd_initpkt_for_uscsi(). Note that 20458 * since we allocate the buf here in this function, we do not 20459 * need to preserve the prior contents of b_private. 20460 * The sd_uscsi_info struct is also used by sd_uscsi_strategy() 20461 */ 20462 uip = kmem_zalloc(sizeof (struct sd_uscsi_info), KM_SLEEP); 20463 uip->ui_flags = SD_PATH_DIRECT; 20464 uip->ui_cmdp = uscmd; 20465 20466 bp = getrbuf(KM_SLEEP); 20467 bp->b_private = uip; 20468 20469 /* 20470 * Setup buffer to carry uscsi request. 20471 */ 20472 bp->b_flags = B_BUSY; 20473 bp->b_bcount = 0; 20474 bp->b_blkno = 0; 20475 20476 if (is_async == TRUE) { 20477 bp->b_iodone = sd_send_scsi_SYNCHRONIZE_CACHE_biodone; 20478 uip->ui_dkc = *dkc; 20479 } 20480 20481 bp->b_edev = SD_GET_DEV(un); 20482 bp->b_dev = cmpdev(bp->b_edev); /* maybe unnecessary? */ 20483 20484 /* 20485 * Unset un_f_sync_cache_required flag 20486 */ 20487 mutex_enter(SD_MUTEX(un)); 20488 un->un_f_sync_cache_required = FALSE; 20489 mutex_exit(SD_MUTEX(un)); 20490 20491 (void) sd_uscsi_strategy(bp); 20492 20493 /* 20494 * If synchronous request, wait for completion 20495 * If async just return and let b_iodone callback 20496 * cleanup. 20497 * NOTE: On return, u_ncmds_in_driver will be decremented, 20498 * but it was also incremented in sd_uscsi_strategy(), so 20499 * we should be ok. 20500 */ 20501 if (is_async == FALSE) { 20502 (void) biowait(bp); 20503 rval = sd_send_scsi_SYNCHRONIZE_CACHE_biodone(bp); 20504 } 20505 20506 return (rval); 20507 } 20508 20509 20510 static int 20511 sd_send_scsi_SYNCHRONIZE_CACHE_biodone(struct buf *bp) 20512 { 20513 struct sd_uscsi_info *uip; 20514 struct uscsi_cmd *uscmd; 20515 uint8_t *sense_buf; 20516 struct sd_lun *un; 20517 int status; 20518 union scsi_cdb *cdb; 20519 20520 uip = (struct sd_uscsi_info *)(bp->b_private); 20521 ASSERT(uip != NULL); 20522 20523 uscmd = uip->ui_cmdp; 20524 ASSERT(uscmd != NULL); 20525 20526 sense_buf = (uint8_t *)uscmd->uscsi_rqbuf; 20527 ASSERT(sense_buf != NULL); 20528 20529 un = ddi_get_soft_state(sd_state, SD_GET_INSTANCE_FROM_BUF(bp)); 20530 ASSERT(un != NULL); 20531 20532 cdb = (union scsi_cdb *)uscmd->uscsi_cdb; 20533 20534 status = geterror(bp); 20535 switch (status) { 20536 case 0: 20537 break; /* Success! 
*/ 20538 case EIO: 20539 switch (uscmd->uscsi_status) { 20540 case STATUS_RESERVATION_CONFLICT: 20541 /* Ignore reservation conflict */ 20542 status = 0; 20543 goto done; 20544 20545 case STATUS_CHECK: 20546 if ((uscmd->uscsi_rqstatus == STATUS_GOOD) && 20547 (scsi_sense_key(sense_buf) == 20548 KEY_ILLEGAL_REQUEST)) { 20549 /* Ignore Illegal Request error */ 20550 if (cdb->cdb_un.tag&SD_SYNC_NV_BIT) { 20551 mutex_enter(SD_MUTEX(un)); 20552 un->un_f_sync_nv_supported = FALSE; 20553 mutex_exit(SD_MUTEX(un)); 20554 status = 0; 20555 SD_TRACE(SD_LOG_IO, un, 20556 "un_f_sync_nv_supported \ 20557 is set to false.\n"); 20558 goto done; 20559 } 20560 20561 mutex_enter(SD_MUTEX(un)); 20562 un->un_f_sync_cache_supported = FALSE; 20563 mutex_exit(SD_MUTEX(un)); 20564 SD_TRACE(SD_LOG_IO, un, 20565 "sd_send_scsi_SYNCHRONIZE_CACHE_biodone: \ 20566 un_f_sync_cache_supported set to false \ 20567 with asc = %x, ascq = %x\n", 20568 scsi_sense_asc(sense_buf), 20569 scsi_sense_ascq(sense_buf)); 20570 status = ENOTSUP; 20571 goto done; 20572 } 20573 break; 20574 default: 20575 break; 20576 } 20577 /* FALLTHRU */ 20578 default: 20579 /* 20580 * Turn on the un_f_sync_cache_required flag 20581 * since the SYNC CACHE command failed 20582 */ 20583 mutex_enter(SD_MUTEX(un)); 20584 un->un_f_sync_cache_required = TRUE; 20585 mutex_exit(SD_MUTEX(un)); 20586 20587 /* 20588 * Don't log an error message if this device 20589 * has removable media. 20590 */ 20591 if (!un->un_f_has_removable_media) { 20592 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 20593 "SYNCHRONIZE CACHE command failed (%d)\n", status); 20594 } 20595 break; 20596 } 20597 20598 done: 20599 if (uip->ui_dkc.dkc_callback != NULL) { 20600 (*uip->ui_dkc.dkc_callback)(uip->ui_dkc.dkc_cookie, status); 20601 } 20602 20603 ASSERT((bp->b_flags & B_REMAPPED) == 0); 20604 freerbuf(bp); 20605 kmem_free(uip, sizeof (struct sd_uscsi_info)); 20606 kmem_free(uscmd->uscsi_rqbuf, SENSE_LENGTH); 20607 kmem_free(uscmd->uscsi_cdb, (size_t)uscmd->uscsi_cdblen); 20608 kmem_free(uscmd, sizeof (struct uscsi_cmd)); 20609 20610 return (status); 20611 } 20612 20613 20614 /* 20615 * Function: sd_send_scsi_GET_CONFIGURATION 20616 * 20617 * Description: Issues the get configuration command to the device. 20618 * Called from sd_check_for_writable_cd & sd_get_media_info 20619 * caller needs to ensure that buflen = SD_PROFILE_HEADER_LEN 20620 * Arguments: ssc 20621 * ucmdbuf 20622 * rqbuf 20623 * rqbuflen 20624 * bufaddr 20625 * buflen 20626 * path_flag 20627 * 20628 * Return Code: 0 - Success 20629 * errno return code from sd_ssc_send() 20630 * 20631 * Context: Can sleep. Does not return until command is completed. 20632 * 20633 */ 20634 20635 static int 20636 sd_send_scsi_GET_CONFIGURATION(sd_ssc_t *ssc, struct uscsi_cmd *ucmdbuf, 20637 uchar_t *rqbuf, uint_t rqbuflen, uchar_t *bufaddr, uint_t buflen, 20638 int path_flag) 20639 { 20640 char cdb[CDB_GROUP1]; 20641 int status; 20642 struct sd_lun *un; 20643 20644 ASSERT(ssc != NULL); 20645 un = ssc->ssc_un; 20646 ASSERT(un != NULL); 20647 ASSERT(!mutex_owned(SD_MUTEX(un))); 20648 ASSERT(bufaddr != NULL); 20649 ASSERT(ucmdbuf != NULL); 20650 ASSERT(rqbuf != NULL); 20651 20652 SD_TRACE(SD_LOG_IO, un, 20653 "sd_send_scsi_GET_CONFIGURATION: entry: un:0x%p\n", un); 20654 20655 bzero(cdb, sizeof (cdb)); 20656 bzero(ucmdbuf, sizeof (struct uscsi_cmd)); 20657 bzero(rqbuf, rqbuflen); 20658 bzero(bufaddr, buflen); 20659 20660 /* 20661 * Set up cdb field for the get configuration command. 
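 *
 * (Per the MMC spec: byte 1 of the 10-byte GET CONFIGURATION CDB is
 * the RT field -- 0x02 requests the feature header plus the single
 * feature descriptor named in bytes 2-3 -- and bytes 7-8 carry the
 * allocation length; only byte 8 is set here because
 * SD_PROFILE_HEADER_LEN fits in one byte.)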
20662 */ 20663 cdb[0] = SCMD_GET_CONFIGURATION; 20664 cdb[1] = 0x02; /* Requested Type */ 20665 cdb[8] = SD_PROFILE_HEADER_LEN; 20666 ucmdbuf->uscsi_cdb = cdb; 20667 ucmdbuf->uscsi_cdblen = CDB_GROUP1; 20668 ucmdbuf->uscsi_bufaddr = (caddr_t)bufaddr; 20669 ucmdbuf->uscsi_buflen = buflen; 20670 ucmdbuf->uscsi_timeout = sd_io_time; 20671 ucmdbuf->uscsi_rqbuf = (caddr_t)rqbuf; 20672 ucmdbuf->uscsi_rqlen = rqbuflen; 20673 ucmdbuf->uscsi_flags = USCSI_RQENABLE|USCSI_SILENT|USCSI_READ; 20674 20675 status = sd_ssc_send(ssc, ucmdbuf, FKIOCTL, 20676 UIO_SYSSPACE, path_flag); 20677 20678 switch (status) { 20679 case 0: 20680 sd_ssc_assessment(ssc, SD_FMT_STANDARD); 20681 break; /* Success! */ 20682 case EIO: 20683 switch (ucmdbuf->uscsi_status) { 20684 case STATUS_RESERVATION_CONFLICT: 20685 status = EACCES; 20686 break; 20687 default: 20688 break; 20689 } 20690 break; 20691 default: 20692 break; 20693 } 20694 20695 if (status == 0) { 20696 SD_DUMP_MEMORY(un, SD_LOG_IO, 20697 "sd_send_scsi_GET_CONFIGURATION: data", 20698 (uchar_t *)bufaddr, SD_PROFILE_HEADER_LEN, SD_LOG_HEX); 20699 } 20700 20701 SD_TRACE(SD_LOG_IO, un, 20702 "sd_send_scsi_GET_CONFIGURATION: exit\n"); 20703 20704 return (status); 20705 } 20706 20707 /* 20708 * Function: sd_send_scsi_feature_GET_CONFIGURATION 20709 * 20710 * Description: Issues the get configuration command to the device to 20711 * retrieve a specific feature. Called from 20712 * sd_check_for_writable_cd & sd_set_mmc_caps. 20713 * Arguments: ssc 20714 * ucmdbuf 20715 * rqbuf 20716 * rqbuflen 20717 * bufaddr 20718 * buflen 20719 * feature 20720 * 20721 * Return Code: 0 - Success 20722 * errno return code from sd_ssc_send() 20723 * 20724 * Context: Can sleep. Does not return until command is completed. 20725 * 20726 */ 20727 static int 20728 sd_send_scsi_feature_GET_CONFIGURATION(sd_ssc_t *ssc, 20729 struct uscsi_cmd *ucmdbuf, uchar_t *rqbuf, uint_t rqbuflen, 20730 uchar_t *bufaddr, uint_t buflen, char feature, int path_flag) 20731 { 20732 char cdb[CDB_GROUP1]; 20733 int status; 20734 struct sd_lun *un; 20735 20736 ASSERT(ssc != NULL); 20737 un = ssc->ssc_un; 20738 ASSERT(un != NULL); 20739 ASSERT(!mutex_owned(SD_MUTEX(un))); 20740 ASSERT(bufaddr != NULL); 20741 ASSERT(ucmdbuf != NULL); 20742 ASSERT(rqbuf != NULL); 20743 20744 SD_TRACE(SD_LOG_IO, un, 20745 "sd_send_scsi_feature_GET_CONFIGURATION: entry: un:0x%p\n", un); 20746 20747 bzero(cdb, sizeof (cdb)); 20748 bzero(ucmdbuf, sizeof (struct uscsi_cmd)); 20749 bzero(rqbuf, rqbuflen); 20750 bzero(bufaddr, buflen); 20751 20752 /* 20753 * Set up cdb field for the get configuration command. 20754 */ 20755 cdb[0] = SCMD_GET_CONFIGURATION; 20756 cdb[1] = 0x02; /* Requested Type */ 20757 cdb[3] = feature; 20758 cdb[8] = buflen; 20759 ucmdbuf->uscsi_cdb = cdb; 20760 ucmdbuf->uscsi_cdblen = CDB_GROUP1; 20761 ucmdbuf->uscsi_bufaddr = (caddr_t)bufaddr; 20762 ucmdbuf->uscsi_buflen = buflen; 20763 ucmdbuf->uscsi_timeout = sd_io_time; 20764 ucmdbuf->uscsi_rqbuf = (caddr_t)rqbuf; 20765 ucmdbuf->uscsi_rqlen = rqbuflen; 20766 ucmdbuf->uscsi_flags = USCSI_RQENABLE|USCSI_SILENT|USCSI_READ; 20767 20768 status = sd_ssc_send(ssc, ucmdbuf, FKIOCTL, 20769 UIO_SYSSPACE, path_flag); 20770 20771 switch (status) { 20772 case 0: 20773 20774 break; /* Success! 
*/ 20775 case EIO: 20776 switch (ucmdbuf->uscsi_status) { 20777 case STATUS_RESERVATION_CONFLICT: 20778 status = EACCES; 20779 break; 20780 default: 20781 break; 20782 } 20783 break; 20784 default: 20785 break; 20786 } 20787 20788 if (status == 0) { 20789 SD_DUMP_MEMORY(un, SD_LOG_IO, 20790 "sd_send_scsi_feature_GET_CONFIGURATION: data", 20791 (uchar_t *)bufaddr, SD_PROFILE_HEADER_LEN, SD_LOG_HEX); 20792 } 20793 20794 SD_TRACE(SD_LOG_IO, un, 20795 "sd_send_scsi_feature_GET_CONFIGURATION: exit\n"); 20796 20797 return (status); 20798 } 20799 20800 20801 /* 20802 * Function: sd_send_scsi_MODE_SENSE 20803 * 20804 * Description: Utility function for issuing a scsi MODE SENSE command. 20805 * Note: This routine uses a consistent implementation for Group0, 20806 * Group1, and Group2 commands across all platforms. ATAPI devices 20807 * use Group 1 Read/Write commands and Group 2 Mode Sense/Select 20808 * 20809 * Arguments: ssc - ssc contains pointer to driver soft state (unit) 20810 * structure for this target. 20811 * cdbsize - size CDB to be used (CDB_GROUP0 (6 byte), or 20812 * CDB_GROUP[1|2] (10 byte). 20813 * bufaddr - buffer for page data retrieved from the target. 20814 * buflen - size of page to be retrieved. 20815 * page_code - page code of data to be retrieved from the target. 20816 * path_flag - SD_PATH_DIRECT to use the USCSI "direct" chain and 20817 * the normal command waitq, or SD_PATH_DIRECT_PRIORITY 20818 * to use the USCSI "direct" chain and bypass the normal 20819 * command waitq. 20820 * 20821 * Return Code: 0 - Success 20822 * errno return code from sd_ssc_send() 20823 * 20824 * Context: Can sleep. Does not return until command is completed. 20825 */ 20826 20827 static int 20828 sd_send_scsi_MODE_SENSE(sd_ssc_t *ssc, int cdbsize, uchar_t *bufaddr, 20829 size_t buflen, uchar_t page_code, int path_flag) 20830 { 20831 struct scsi_extended_sense sense_buf; 20832 union scsi_cdb cdb; 20833 struct uscsi_cmd ucmd_buf; 20834 int status; 20835 int headlen; 20836 struct sd_lun *un; 20837 20838 ASSERT(ssc != NULL); 20839 un = ssc->ssc_un; 20840 ASSERT(un != NULL); 20841 ASSERT(!mutex_owned(SD_MUTEX(un))); 20842 ASSERT(bufaddr != NULL); 20843 ASSERT((cdbsize == CDB_GROUP0) || (cdbsize == CDB_GROUP1) || 20844 (cdbsize == CDB_GROUP2)); 20845 20846 SD_TRACE(SD_LOG_IO, un, 20847 "sd_send_scsi_MODE_SENSE: entry: un:0x%p\n", un); 20848 20849 bzero(&cdb, sizeof (cdb)); 20850 bzero(&ucmd_buf, sizeof (ucmd_buf)); 20851 bzero(&sense_buf, sizeof (struct scsi_extended_sense)); 20852 bzero(bufaddr, buflen); 20853 20854 if (cdbsize == CDB_GROUP0) { 20855 cdb.scc_cmd = SCMD_MODE_SENSE; 20856 cdb.cdb_opaque[2] = page_code; 20857 FORMG0COUNT(&cdb, buflen); 20858 headlen = MODE_HEADER_LENGTH; 20859 } else { 20860 cdb.scc_cmd = SCMD_MODE_SENSE_G1; 20861 cdb.cdb_opaque[2] = page_code; 20862 FORMG1COUNT(&cdb, buflen); 20863 headlen = MODE_HEADER_LENGTH_GRP2; 20864 } 20865 20866 ASSERT(headlen <= buflen); 20867 SD_FILL_SCSI1_LUN_CDB(un, &cdb); 20868 20869 ucmd_buf.uscsi_cdb = (char *)&cdb; 20870 ucmd_buf.uscsi_cdblen = (uchar_t)cdbsize; 20871 ucmd_buf.uscsi_bufaddr = (caddr_t)bufaddr; 20872 ucmd_buf.uscsi_buflen = buflen; 20873 ucmd_buf.uscsi_rqbuf = (caddr_t)&sense_buf; 20874 ucmd_buf.uscsi_rqlen = sizeof (struct scsi_extended_sense); 20875 ucmd_buf.uscsi_flags = USCSI_RQENABLE | USCSI_READ | USCSI_SILENT; 20876 ucmd_buf.uscsi_timeout = 60; 20877 20878 status = sd_ssc_send(ssc, &ucmd_buf, FKIOCTL, 20879 UIO_SYSSPACE, path_flag); 20880 20881 switch (status) { 20882 case 0: 20883 /* 20884 * sr_check_wp() uses 
the 0x3f page code and checks the header of the
20885 		 * mode page to determine if the target device is
20886 		 * write-protected. But some USB devices return 0 bytes
20887 		 * for the 0x3f page code. For this case, make sure that
20888 		 * at least the mode page header is returned.
20889 		 */
20890 		if (buflen - ucmd_buf.uscsi_resid < headlen) {
20891 			status = EIO;
20892 			sd_ssc_set_info(ssc, SSC_FLAGS_INVALID_DATA, -1,
20893 			    "mode page header is not returned");
20894 		}
20895 		break;	/* Success! */
20896 	case EIO:
20897 		switch (ucmd_buf.uscsi_status) {
20898 		case STATUS_RESERVATION_CONFLICT:
20899 			status = EACCES;
20900 			break;
20901 		default:
20902 			break;
20903 		}
20904 		break;
20905 	default:
20906 		break;
20907 	}
20908 
20909 	if (status == 0) {
20910 		SD_DUMP_MEMORY(un, SD_LOG_IO, "sd_send_scsi_MODE_SENSE: data",
20911 		    (uchar_t *)bufaddr, buflen, SD_LOG_HEX);
20912 	}
20913 	SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_MODE_SENSE: exit\n");
20914 
20915 	return (status);
20916 }
20917 
20918 
20919 /*
20920  * Function: sd_send_scsi_MODE_SELECT
20921  *
20922  * Description: Utility function for issuing a scsi MODE SELECT command.
20923  *		Note: This routine uses a consistent implementation for Group0,
20924  *		Group1, and Group2 commands across all platforms. ATAPI devices
20925  *		use Group 1 Read/Write commands and Group 2 Mode Sense/Select.
20926  *
20927  * Arguments: ssc - ssc contains pointer to driver soft state (unit)
20928  *		structure for this target.
20929  *		cdbsize - size of CDB to be used (CDB_GROUP0 (6 byte) or
20930  *		CDB_GROUP[1|2] (10 byte)).
20931  *		bufaddr - buffer holding the page data to send to the target.
20932  *		buflen - size of the page data to be sent.
20933  *		save_page - boolean to determine if the SP bit should be set.
20934  *		path_flag - SD_PATH_DIRECT to use the USCSI "direct" chain and
20935  *		the normal command waitq, or SD_PATH_DIRECT_PRIORITY
20936  *		to use the USCSI "direct" chain and bypass the normal
20937  *		command waitq.
20938  *
20939  * Return Code: 0 - Success
20940  *		errno return code from sd_ssc_send()
20941  *
20942  * Context: Can sleep. Does not return until command is completed.
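 *
 * Caller sketch (illustration only; the buffer layout shown, using
 * struct mode_caching and MODE_BLK_DESC_LENGTH, is an assumption
 * about the caller, not code from this driver):
 *
 *	uchar_t	select[MODE_HEADER_LENGTH + MODE_BLK_DESC_LENGTH +
 *	    sizeof (struct mode_caching)];
 *
 *	(fill in the mode parameter header, block descriptor and
 *	caching page, then send it down, asking the target to save it)
 *	(void) sd_send_scsi_MODE_SELECT(ssc, CDB_GROUP0, select,
 *	    sizeof (select), SD_SAVE_PAGE, SD_PATH_DIRECT);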
20943  */
20944 
20945 static int
20946 sd_send_scsi_MODE_SELECT(sd_ssc_t *ssc, int cdbsize, uchar_t *bufaddr,
20947     size_t buflen, uchar_t save_page, int path_flag)
20948 {
20949 	struct scsi_extended_sense	sense_buf;
20950 	union scsi_cdb			cdb;
20951 	struct uscsi_cmd		ucmd_buf;
20952 	int				status;
20953 	struct sd_lun			*un;
20954 
20955 	ASSERT(ssc != NULL);
20956 	un = ssc->ssc_un;
20957 	ASSERT(un != NULL);
20958 	ASSERT(!mutex_owned(SD_MUTEX(un)));
20959 	ASSERT(bufaddr != NULL);
20960 	ASSERT((cdbsize == CDB_GROUP0) || (cdbsize == CDB_GROUP1) ||
20961 	    (cdbsize == CDB_GROUP2));
20962 
20963 	SD_TRACE(SD_LOG_IO, un,
20964 	    "sd_send_scsi_MODE_SELECT: entry: un:0x%p\n", un);
20965 
20966 	bzero(&cdb, sizeof (cdb));
20967 	bzero(&ucmd_buf, sizeof (ucmd_buf));
20968 	bzero(&sense_buf, sizeof (struct scsi_extended_sense));
20969 
20970 	/* Set the PF bit for many third party drives */
20971 	cdb.cdb_opaque[1] = 0x10;
20972 
20973 	/* Set the savepage (SP) bit if given */
20974 	if (save_page == SD_SAVE_PAGE) {
20975 		cdb.cdb_opaque[1] |= 0x01;
20976 	}
20977 
20978 	if (cdbsize == CDB_GROUP0) {
20979 		cdb.scc_cmd = SCMD_MODE_SELECT;
20980 		FORMG0COUNT(&cdb, buflen);
20981 	} else {
20982 		cdb.scc_cmd = SCMD_MODE_SELECT_G1;
20983 		FORMG1COUNT(&cdb, buflen);
20984 	}
20985 
20986 	SD_FILL_SCSI1_LUN_CDB(un, &cdb);
20987 
20988 	ucmd_buf.uscsi_cdb = (char *)&cdb;
20989 	ucmd_buf.uscsi_cdblen = (uchar_t)cdbsize;
20990 	ucmd_buf.uscsi_bufaddr = (caddr_t)bufaddr;
20991 	ucmd_buf.uscsi_buflen = buflen;
20992 	ucmd_buf.uscsi_rqbuf = (caddr_t)&sense_buf;
20993 	ucmd_buf.uscsi_rqlen = sizeof (struct scsi_extended_sense);
20994 	ucmd_buf.uscsi_flags = USCSI_RQENABLE | USCSI_WRITE | USCSI_SILENT;
20995 	ucmd_buf.uscsi_timeout = 60;
20996 
20997 	status = sd_ssc_send(ssc, &ucmd_buf, FKIOCTL,
20998 	    UIO_SYSSPACE, path_flag);
20999 
21000 	switch (status) {
21001 	case 0:
21002 		sd_ssc_assessment(ssc, SD_FMT_STANDARD);
21003 		break;	/* Success! */
21004 	case EIO:
21005 		switch (ucmd_buf.uscsi_status) {
21006 		case STATUS_RESERVATION_CONFLICT:
21007 			status = EACCES;
21008 			break;
21009 		default:
21010 			break;
21011 		}
21012 		break;
21013 	default:
21014 		break;
21015 	}
21016 
21017 	if (status == 0) {
21018 		SD_DUMP_MEMORY(un, SD_LOG_IO, "sd_send_scsi_MODE_SELECT: data",
21019 		    (uchar_t *)bufaddr, buflen, SD_LOG_HEX);
21020 	}
21021 	SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_MODE_SELECT: exit\n");
21022 
21023 	return (status);
21024 }
21025 
21026 
21027 /*
21028  * Function: sd_send_scsi_RDWR
21029  *
21030  * Description: Issue a scsi READ or WRITE command with the given parameters.
21031  *
21032  * Arguments: ssc - ssc contains pointer to driver soft state (unit)
21033  *		structure for this target.
21034  *		cmd: SCMD_READ or SCMD_WRITE
21035  *		bufaddr: Address of caller's buffer for the RDWR data
21036  *		    (source for a WRITE, destination for a READ).
21037  *		buflen: Length of caller's buffer for the RDWR data.
21038  *		start_block: Block number for the start of the RDWR
21039  *		    operation. (Assumes target-native block size.)
21041  *		path_flag - SD_PATH_DIRECT to use the USCSI "direct" chain and
21042  *		the normal command waitq, or SD_PATH_DIRECT_PRIORITY
21043  *		to use the USCSI "direct" chain and bypass the normal
21044  *		command waitq.
21045  *
21046  * Return Code: 0 - Success
21047  *		errno return code from sd_ssc_send()
21048  *
21049  * Context: Can sleep. Does not return until command is completed.
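 *
 * Note: the routine below picks the smallest usable CDB group from
 * start_block: blocks above 0xffffffff require a 16-byte Group 4 CDB,
 * blocks that do not fit the 21-bit Group 0 address field (or any
 * ATAPI target) use a 10-byte Group 1 CDB, and everything else uses
 * a 6-byte Group 0 CDB.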
21050 */ 21051 21052 static int 21053 sd_send_scsi_RDWR(sd_ssc_t *ssc, uchar_t cmd, void *bufaddr, 21054 size_t buflen, daddr_t start_block, int path_flag) 21055 { 21056 struct scsi_extended_sense sense_buf; 21057 union scsi_cdb cdb; 21058 struct uscsi_cmd ucmd_buf; 21059 uint32_t block_count; 21060 int status; 21061 int cdbsize; 21062 uchar_t flag; 21063 struct sd_lun *un; 21064 21065 ASSERT(ssc != NULL); 21066 un = ssc->ssc_un; 21067 ASSERT(un != NULL); 21068 ASSERT(!mutex_owned(SD_MUTEX(un))); 21069 ASSERT(bufaddr != NULL); 21070 ASSERT((cmd == SCMD_READ) || (cmd == SCMD_WRITE)); 21071 21072 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_RDWR: entry: un:0x%p\n", un); 21073 21074 if (un->un_f_tgt_blocksize_is_valid != TRUE) { 21075 return (EINVAL); 21076 } 21077 21078 mutex_enter(SD_MUTEX(un)); 21079 block_count = SD_BYTES2TGTBLOCKS(un, buflen); 21080 mutex_exit(SD_MUTEX(un)); 21081 21082 flag = (cmd == SCMD_READ) ? USCSI_READ : USCSI_WRITE; 21083 21084 SD_INFO(SD_LOG_IO, un, "sd_send_scsi_RDWR: " 21085 "bufaddr:0x%p buflen:0x%x start_block:0x%p block_count:0x%x\n", 21086 bufaddr, buflen, start_block, block_count); 21087 21088 bzero(&cdb, sizeof (cdb)); 21089 bzero(&ucmd_buf, sizeof (ucmd_buf)); 21090 bzero(&sense_buf, sizeof (struct scsi_extended_sense)); 21091 21092 /* Compute CDB size to use */ 21093 if (start_block > 0xffffffff) 21094 cdbsize = CDB_GROUP4; 21095 else if ((start_block & 0xFFE00000) || 21096 (un->un_f_cfg_is_atapi == TRUE)) 21097 cdbsize = CDB_GROUP1; 21098 else 21099 cdbsize = CDB_GROUP0; 21100 21101 switch (cdbsize) { 21102 case CDB_GROUP0: /* 6-byte CDBs */ 21103 cdb.scc_cmd = cmd; 21104 FORMG0ADDR(&cdb, start_block); 21105 FORMG0COUNT(&cdb, block_count); 21106 break; 21107 case CDB_GROUP1: /* 10-byte CDBs */ 21108 cdb.scc_cmd = cmd | SCMD_GROUP1; 21109 FORMG1ADDR(&cdb, start_block); 21110 FORMG1COUNT(&cdb, block_count); 21111 break; 21112 case CDB_GROUP4: /* 16-byte CDBs */ 21113 cdb.scc_cmd = cmd | SCMD_GROUP4; 21114 FORMG4LONGADDR(&cdb, (uint64_t)start_block); 21115 FORMG4COUNT(&cdb, block_count); 21116 break; 21117 case CDB_GROUP5: /* 12-byte CDBs (currently unsupported) */ 21118 default: 21119 /* All others reserved */ 21120 return (EINVAL); 21121 } 21122 21123 /* Set LUN bit(s) in CDB if this is a SCSI-1 device */ 21124 SD_FILL_SCSI1_LUN_CDB(un, &cdb); 21125 21126 ucmd_buf.uscsi_cdb = (char *)&cdb; 21127 ucmd_buf.uscsi_cdblen = (uchar_t)cdbsize; 21128 ucmd_buf.uscsi_bufaddr = bufaddr; 21129 ucmd_buf.uscsi_buflen = buflen; 21130 ucmd_buf.uscsi_rqbuf = (caddr_t)&sense_buf; 21131 ucmd_buf.uscsi_rqlen = sizeof (struct scsi_extended_sense); 21132 ucmd_buf.uscsi_flags = flag | USCSI_RQENABLE | USCSI_SILENT; 21133 ucmd_buf.uscsi_timeout = 60; 21134 status = sd_ssc_send(ssc, &ucmd_buf, FKIOCTL, 21135 UIO_SYSSPACE, path_flag); 21136 21137 switch (status) { 21138 case 0: 21139 sd_ssc_assessment(ssc, SD_FMT_STANDARD); 21140 break; /* Success! */ 21141 case EIO: 21142 switch (ucmd_buf.uscsi_status) { 21143 case STATUS_RESERVATION_CONFLICT: 21144 status = EACCES; 21145 break; 21146 default: 21147 break; 21148 } 21149 break; 21150 default: 21151 break; 21152 } 21153 21154 if (status == 0) { 21155 SD_DUMP_MEMORY(un, SD_LOG_IO, "sd_send_scsi_RDWR: data", 21156 (uchar_t *)bufaddr, buflen, SD_LOG_HEX); 21157 } 21158 21159 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_RDWR: exit\n"); 21160 21161 return (status); 21162 } 21163 21164 21165 /* 21166 * Function: sd_send_scsi_LOG_SENSE 21167 * 21168 * Description: Issue a scsi LOG_SENSE command with the given parameters. 
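 *		The remaining parameters map directly onto the CDB built
 *		below: bufaddr/buflen describe the caller's receive buffer
 *		(buflen becomes the allocation length), page_code and
 *		page_control are packed into CDB byte 2, param_ptr is the
 *		big-endian parameter pointer in CDB bytes 5-6, and
 *		path_flag selects the command chain as in the other
 *		sd_send_scsi_* routines.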
21169 * 21170 * Arguments: ssc - ssc contains pointer to driver soft state (unit) 21171 * structure for this target. 21172 * 21173 * Return Code: 0 - Success 21174 * errno return code from sd_ssc_send() 21175 * 21176 * Context: Can sleep. Does not return until command is completed. 21177 */ 21178 21179 static int 21180 sd_send_scsi_LOG_SENSE(sd_ssc_t *ssc, uchar_t *bufaddr, uint16_t buflen, 21181 uchar_t page_code, uchar_t page_control, uint16_t param_ptr, 21182 int path_flag) 21183 21184 { 21185 struct scsi_extended_sense sense_buf; 21186 union scsi_cdb cdb; 21187 struct uscsi_cmd ucmd_buf; 21188 int status; 21189 struct sd_lun *un; 21190 21191 ASSERT(ssc != NULL); 21192 un = ssc->ssc_un; 21193 ASSERT(un != NULL); 21194 ASSERT(!mutex_owned(SD_MUTEX(un))); 21195 21196 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_LOG_SENSE: entry: un:0x%p\n", un); 21197 21198 bzero(&cdb, sizeof (cdb)); 21199 bzero(&ucmd_buf, sizeof (ucmd_buf)); 21200 bzero(&sense_buf, sizeof (struct scsi_extended_sense)); 21201 21202 cdb.scc_cmd = SCMD_LOG_SENSE_G1; 21203 cdb.cdb_opaque[2] = (page_control << 6) | page_code; 21204 cdb.cdb_opaque[5] = (uchar_t)((param_ptr & 0xFF00) >> 8); 21205 cdb.cdb_opaque[6] = (uchar_t)(param_ptr & 0x00FF); 21206 FORMG1COUNT(&cdb, buflen); 21207 21208 ucmd_buf.uscsi_cdb = (char *)&cdb; 21209 ucmd_buf.uscsi_cdblen = CDB_GROUP1; 21210 ucmd_buf.uscsi_bufaddr = (caddr_t)bufaddr; 21211 ucmd_buf.uscsi_buflen = buflen; 21212 ucmd_buf.uscsi_rqbuf = (caddr_t)&sense_buf; 21213 ucmd_buf.uscsi_rqlen = sizeof (struct scsi_extended_sense); 21214 ucmd_buf.uscsi_flags = USCSI_RQENABLE | USCSI_READ | USCSI_SILENT; 21215 ucmd_buf.uscsi_timeout = 60; 21216 21217 status = sd_ssc_send(ssc, &ucmd_buf, FKIOCTL, 21218 UIO_SYSSPACE, path_flag); 21219 21220 switch (status) { 21221 case 0: 21222 break; 21223 case EIO: 21224 switch (ucmd_buf.uscsi_status) { 21225 case STATUS_RESERVATION_CONFLICT: 21226 status = EACCES; 21227 break; 21228 case STATUS_CHECK: 21229 if ((ucmd_buf.uscsi_rqstatus == STATUS_GOOD) && 21230 (scsi_sense_key((uint8_t *)&sense_buf) == 21231 KEY_ILLEGAL_REQUEST) && 21232 (scsi_sense_asc((uint8_t *)&sense_buf) == 0x24)) { 21233 /* 21234 * ASC 0x24: INVALID FIELD IN CDB 21235 */ 21236 switch (page_code) { 21237 case START_STOP_CYCLE_PAGE: 21238 /* 21239 * The start stop cycle counter is 21240 * implemented as page 0x31 in earlier 21241 * generation disks. In new generation 21242 * disks the start stop cycle counter is 21243 * implemented as page 0xE. To properly 21244 * handle this case if an attempt for 21245 * log page 0xE is made and fails we 21246 * will try again using page 0x31. 21247 * 21248 * Network storage BU committed to 21249 * maintain the page 0x31 for this 21250 * purpose and will not have any other 21251 * page implemented with page code 0x31 21252 * until all disks transition to the 21253 * standard page. 
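 *
 * (The retry below simply updates un_start_stop_cycle_page,
 * rewrites CDB byte 2 with the vendor-unique page code, and
 * resends the same uscsi command, so later callers go straight
 * to the working page.)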
21254 */ 21255 mutex_enter(SD_MUTEX(un)); 21256 un->un_start_stop_cycle_page = 21257 START_STOP_CYCLE_VU_PAGE; 21258 cdb.cdb_opaque[2] = 21259 (char)(page_control << 6) | 21260 un->un_start_stop_cycle_page; 21261 mutex_exit(SD_MUTEX(un)); 21262 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 21263 status = sd_ssc_send( 21264 ssc, &ucmd_buf, FKIOCTL, 21265 UIO_SYSSPACE, path_flag); 21266 21267 break; 21268 case TEMPERATURE_PAGE: 21269 status = ENOTTY; 21270 break; 21271 default: 21272 break; 21273 } 21274 } 21275 break; 21276 default: 21277 break; 21278 } 21279 break; 21280 default: 21281 break; 21282 } 21283 21284 if (status == 0) { 21285 sd_ssc_assessment(ssc, SD_FMT_STANDARD); 21286 SD_DUMP_MEMORY(un, SD_LOG_IO, "sd_send_scsi_LOG_SENSE: data", 21287 (uchar_t *)bufaddr, buflen, SD_LOG_HEX); 21288 } 21289 21290 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_LOG_SENSE: exit\n"); 21291 21292 return (status); 21293 } 21294 21295 21296 /* 21297 * Function: sdioctl 21298 * 21299 * Description: Driver's ioctl(9e) entry point function. 21300 * 21301 * Arguments: dev - device number 21302 * cmd - ioctl operation to be performed 21303 * arg - user argument, contains data to be set or reference 21304 * parameter for get 21305 * flag - bit flag, indicating open settings, 32/64 bit type 21306 * cred_p - user credential pointer 21307 * rval_p - calling process return value (OPT) 21308 * 21309 * Return Code: EINVAL 21310 * ENOTTY 21311 * ENXIO 21312 * EIO 21313 * EFAULT 21314 * ENOTSUP 21315 * EPERM 21316 * 21317 * Context: Called from the device switch at normal priority. 21318 */ 21319 21320 static int 21321 sdioctl(dev_t dev, int cmd, intptr_t arg, int flag, cred_t *cred_p, int *rval_p) 21322 { 21323 struct sd_lun *un = NULL; 21324 int err = 0; 21325 int i = 0; 21326 cred_t *cr; 21327 int tmprval = EINVAL; 21328 boolean_t is_valid; 21329 sd_ssc_t *ssc; 21330 21331 /* 21332 * All device accesses go thru sdstrategy where we check on suspend 21333 * status 21334 */ 21335 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 21336 return (ENXIO); 21337 } 21338 21339 ASSERT(!mutex_owned(SD_MUTEX(un))); 21340 21341 /* Initialize sd_ssc_t for internal uscsi commands */ 21342 ssc = sd_ssc_init(un); 21343 21344 is_valid = SD_IS_VALID_LABEL(un); 21345 21346 /* 21347 * Moved this wait from sd_uscsi_strategy to here for 21348 * reasons of deadlock prevention. Internal driver commands, 21349 * specifically those to change a devices power level, result 21350 * in a call to sd_uscsi_strategy. 21351 */ 21352 mutex_enter(SD_MUTEX(un)); 21353 while ((un->un_state == SD_STATE_SUSPENDED) || 21354 (un->un_state == SD_STATE_PM_CHANGING)) { 21355 cv_wait(&un->un_suspend_cv, SD_MUTEX(un)); 21356 } 21357 /* 21358 * Twiddling the counter here protects commands from now 21359 * through to the top of sd_uscsi_strategy. Without the 21360 * counter inc. a power down, for example, could get in 21361 * after the above check for state is made and before 21362 * execution gets to the top of sd_uscsi_strategy. 21363 * That would cause problems. 
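 *
 * (Note the matching un_ncmds_in_driver-- on each early error
 * path below.)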
21364 */ 21365 un->un_ncmds_in_driver++; 21366 21367 if (!is_valid && 21368 (flag & (FNDELAY | FNONBLOCK))) { 21369 switch (cmd) { 21370 case DKIOCGGEOM: /* SD_PATH_DIRECT */ 21371 case DKIOCGVTOC: 21372 case DKIOCGEXTVTOC: 21373 case DKIOCGAPART: 21374 case DKIOCPARTINFO: 21375 case DKIOCEXTPARTINFO: 21376 case DKIOCSGEOM: 21377 case DKIOCSAPART: 21378 case DKIOCGETEFI: 21379 case DKIOCPARTITION: 21380 case DKIOCSVTOC: 21381 case DKIOCSEXTVTOC: 21382 case DKIOCSETEFI: 21383 case DKIOCGMBOOT: 21384 case DKIOCSMBOOT: 21385 case DKIOCG_PHYGEOM: 21386 case DKIOCG_VIRTGEOM: 21387 /* let cmlb handle it */ 21388 goto skip_ready_valid; 21389 21390 case CDROMPAUSE: 21391 case CDROMRESUME: 21392 case CDROMPLAYMSF: 21393 case CDROMPLAYTRKIND: 21394 case CDROMREADTOCHDR: 21395 case CDROMREADTOCENTRY: 21396 case CDROMSTOP: 21397 case CDROMSTART: 21398 case CDROMVOLCTRL: 21399 case CDROMSUBCHNL: 21400 case CDROMREADMODE2: 21401 case CDROMREADMODE1: 21402 case CDROMREADOFFSET: 21403 case CDROMSBLKMODE: 21404 case CDROMGBLKMODE: 21405 case CDROMGDRVSPEED: 21406 case CDROMSDRVSPEED: 21407 case CDROMCDDA: 21408 case CDROMCDXA: 21409 case CDROMSUBCODE: 21410 if (!ISCD(un)) { 21411 un->un_ncmds_in_driver--; 21412 ASSERT(un->un_ncmds_in_driver >= 0); 21413 mutex_exit(SD_MUTEX(un)); 21414 err = ENOTTY; 21415 goto done_without_assess; 21416 } 21417 break; 21418 case FDEJECT: 21419 case DKIOCEJECT: 21420 case CDROMEJECT: 21421 if (!un->un_f_eject_media_supported) { 21422 un->un_ncmds_in_driver--; 21423 ASSERT(un->un_ncmds_in_driver >= 0); 21424 mutex_exit(SD_MUTEX(un)); 21425 err = ENOTTY; 21426 goto done_without_assess; 21427 } 21428 break; 21429 case DKIOCFLUSHWRITECACHE: 21430 mutex_exit(SD_MUTEX(un)); 21431 err = sd_send_scsi_TEST_UNIT_READY(ssc, 0); 21432 if (err != 0) { 21433 mutex_enter(SD_MUTEX(un)); 21434 un->un_ncmds_in_driver--; 21435 ASSERT(un->un_ncmds_in_driver >= 0); 21436 mutex_exit(SD_MUTEX(un)); 21437 err = EIO; 21438 goto done_quick_assess; 21439 } 21440 mutex_enter(SD_MUTEX(un)); 21441 /* FALLTHROUGH */ 21442 case DKIOCREMOVABLE: 21443 case DKIOCHOTPLUGGABLE: 21444 case DKIOCINFO: 21445 case DKIOCGMEDIAINFO: 21446 case MHIOCENFAILFAST: 21447 case MHIOCSTATUS: 21448 case MHIOCTKOWN: 21449 case MHIOCRELEASE: 21450 case MHIOCGRP_INKEYS: 21451 case MHIOCGRP_INRESV: 21452 case MHIOCGRP_REGISTER: 21453 case MHIOCGRP_RESERVE: 21454 case MHIOCGRP_PREEMPTANDABORT: 21455 case MHIOCGRP_REGISTERANDIGNOREKEY: 21456 case CDROMCLOSETRAY: 21457 case USCSICMD: 21458 goto skip_ready_valid; 21459 default: 21460 break; 21461 } 21462 21463 mutex_exit(SD_MUTEX(un)); 21464 err = sd_ready_and_valid(ssc, SDPART(dev)); 21465 mutex_enter(SD_MUTEX(un)); 21466 21467 if (err != SD_READY_VALID) { 21468 switch (cmd) { 21469 case DKIOCSTATE: 21470 case CDROMGDRVSPEED: 21471 case CDROMSDRVSPEED: 21472 case FDEJECT: /* for eject command */ 21473 case DKIOCEJECT: 21474 case CDROMEJECT: 21475 case DKIOCREMOVABLE: 21476 case DKIOCHOTPLUGGABLE: 21477 break; 21478 default: 21479 if (un->un_f_has_removable_media) { 21480 err = ENXIO; 21481 } else { 21482 /* Do not map SD_RESERVED_BY_OTHERS to EIO */ 21483 if (err == SD_RESERVED_BY_OTHERS) { 21484 err = EACCES; 21485 } else { 21486 err = EIO; 21487 } 21488 } 21489 un->un_ncmds_in_driver--; 21490 ASSERT(un->un_ncmds_in_driver >= 0); 21491 mutex_exit(SD_MUTEX(un)); 21492 21493 goto done_without_assess; 21494 } 21495 } 21496 } 21497 21498 skip_ready_valid: 21499 mutex_exit(SD_MUTEX(un)); 21500 21501 switch (cmd) { 21502 case DKIOCINFO: 21503 SD_TRACE(SD_LOG_IOCTL, un, "DKIOCINFO\n"); 
21504 err = sd_dkio_ctrl_info(dev, (caddr_t)arg, flag); 21505 break; 21506 21507 case DKIOCGMEDIAINFO: 21508 SD_TRACE(SD_LOG_IOCTL, un, "DKIOCGMEDIAINFO\n"); 21509 err = sd_get_media_info(dev, (caddr_t)arg, flag); 21510 break; 21511 21512 case DKIOCGGEOM: 21513 case DKIOCGVTOC: 21514 case DKIOCGEXTVTOC: 21515 case DKIOCGAPART: 21516 case DKIOCPARTINFO: 21517 case DKIOCEXTPARTINFO: 21518 case DKIOCSGEOM: 21519 case DKIOCSAPART: 21520 case DKIOCGETEFI: 21521 case DKIOCPARTITION: 21522 case DKIOCSVTOC: 21523 case DKIOCSEXTVTOC: 21524 case DKIOCSETEFI: 21525 case DKIOCGMBOOT: 21526 case DKIOCSMBOOT: 21527 case DKIOCG_PHYGEOM: 21528 case DKIOCG_VIRTGEOM: 21529 SD_TRACE(SD_LOG_IOCTL, un, "DKIOC %d\n", cmd); 21530 21531 /* TUR should spin up */ 21532 21533 if (un->un_f_has_removable_media) 21534 err = sd_send_scsi_TEST_UNIT_READY(ssc, 21535 SD_CHECK_FOR_MEDIA); 21536 21537 else 21538 err = sd_send_scsi_TEST_UNIT_READY(ssc, 0); 21539 21540 if (err != 0) 21541 goto done_with_assess; 21542 21543 err = cmlb_ioctl(un->un_cmlbhandle, dev, 21544 cmd, arg, flag, cred_p, rval_p, (void *)SD_PATH_DIRECT); 21545 21546 if ((err == 0) && 21547 ((cmd == DKIOCSETEFI) || 21548 (un->un_f_pkstats_enabled) && 21549 (cmd == DKIOCSAPART || cmd == DKIOCSVTOC || 21550 cmd == DKIOCSEXTVTOC))) { 21551 21552 tmprval = cmlb_validate(un->un_cmlbhandle, CMLB_SILENT, 21553 (void *)SD_PATH_DIRECT); 21554 if ((tmprval == 0) && un->un_f_pkstats_enabled) { 21555 sd_set_pstats(un); 21556 SD_TRACE(SD_LOG_IO_PARTITION, un, 21557 "sd_ioctl: un:0x%p pstats created and " 21558 "set\n", un); 21559 } 21560 } 21561 21562 if ((cmd == DKIOCSVTOC || cmd == DKIOCSEXTVTOC) || 21563 ((cmd == DKIOCSETEFI) && (tmprval == 0))) { 21564 21565 mutex_enter(SD_MUTEX(un)); 21566 if (un->un_f_devid_supported && 21567 (un->un_f_opt_fab_devid == TRUE)) { 21568 if (un->un_devid == NULL) { 21569 sd_register_devid(ssc, SD_DEVINFO(un), 21570 SD_TARGET_IS_UNRESERVED); 21571 } else { 21572 /* 21573 * The device id for this disk 21574 * has been fabricated. The 21575 * device id must be preserved 21576 * by writing it back out to 21577 * disk. 21578 */ 21579 if (sd_write_deviceid(ssc) != 0) { 21580 ddi_devid_free(un->un_devid); 21581 un->un_devid = NULL; 21582 } 21583 } 21584 } 21585 mutex_exit(SD_MUTEX(un)); 21586 } 21587 21588 break; 21589 21590 case DKIOCLOCK: 21591 SD_TRACE(SD_LOG_IOCTL, un, "DKIOCLOCK\n"); 21592 err = sd_send_scsi_DOORLOCK(ssc, SD_REMOVAL_PREVENT, 21593 SD_PATH_STANDARD); 21594 goto done_with_assess; 21595 21596 case DKIOCUNLOCK: 21597 SD_TRACE(SD_LOG_IOCTL, un, "DKIOCUNLOCK\n"); 21598 err = sd_send_scsi_DOORLOCK(ssc, SD_REMOVAL_ALLOW, 21599 SD_PATH_STANDARD); 21600 goto done_with_assess; 21601 21602 case DKIOCSTATE: { 21603 enum dkio_state state; 21604 SD_TRACE(SD_LOG_IOCTL, un, "DKIOCSTATE\n"); 21605 21606 if (ddi_copyin((void *)arg, &state, sizeof (int), flag) != 0) { 21607 err = EFAULT; 21608 } else { 21609 err = sd_check_media(dev, state); 21610 if (err == 0) { 21611 if (ddi_copyout(&un->un_mediastate, (void *)arg, 21612 sizeof (int), flag) != 0) 21613 err = EFAULT; 21614 } 21615 } 21616 break; 21617 } 21618 21619 case DKIOCREMOVABLE: 21620 SD_TRACE(SD_LOG_IOCTL, un, "DKIOCREMOVABLE\n"); 21621 i = un->un_f_has_removable_media ? 1 : 0; 21622 if (ddi_copyout(&i, (void *)arg, sizeof (int), flag) != 0) { 21623 err = EFAULT; 21624 } else { 21625 err = 0; 21626 } 21627 break; 21628 21629 case DKIOCHOTPLUGGABLE: 21630 SD_TRACE(SD_LOG_IOCTL, un, "DKIOCHOTPLUGGABLE\n"); 21631 i = un->un_f_is_hotpluggable ? 
1 : 0; 21632 if (ddi_copyout(&i, (void *)arg, sizeof (int), flag) != 0) { 21633 err = EFAULT; 21634 } else { 21635 err = 0; 21636 } 21637 break; 21638 21639 case DKIOCGTEMPERATURE: 21640 SD_TRACE(SD_LOG_IOCTL, un, "DKIOCGTEMPERATURE\n"); 21641 err = sd_dkio_get_temp(dev, (caddr_t)arg, flag); 21642 break; 21643 21644 case MHIOCENFAILFAST: 21645 SD_TRACE(SD_LOG_IOCTL, un, "MHIOCENFAILFAST\n"); 21646 if ((err = drv_priv(cred_p)) == 0) { 21647 err = sd_mhdioc_failfast(dev, (caddr_t)arg, flag); 21648 } 21649 break; 21650 21651 case MHIOCTKOWN: 21652 SD_TRACE(SD_LOG_IOCTL, un, "MHIOCTKOWN\n"); 21653 if ((err = drv_priv(cred_p)) == 0) { 21654 err = sd_mhdioc_takeown(dev, (caddr_t)arg, flag); 21655 } 21656 break; 21657 21658 case MHIOCRELEASE: 21659 SD_TRACE(SD_LOG_IOCTL, un, "MHIOCRELEASE\n"); 21660 if ((err = drv_priv(cred_p)) == 0) { 21661 err = sd_mhdioc_release(dev); 21662 } 21663 break; 21664 21665 case MHIOCSTATUS: 21666 SD_TRACE(SD_LOG_IOCTL, un, "MHIOCSTATUS\n"); 21667 if ((err = drv_priv(cred_p)) == 0) { 21668 switch (sd_send_scsi_TEST_UNIT_READY(ssc, 0)) { 21669 case 0: 21670 err = 0; 21671 break; 21672 case EACCES: 21673 *rval_p = 1; 21674 err = 0; 21675 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 21676 break; 21677 default: 21678 err = EIO; 21679 goto done_with_assess; 21680 } 21681 } 21682 break; 21683 21684 case MHIOCQRESERVE: 21685 SD_TRACE(SD_LOG_IOCTL, un, "MHIOCQRESERVE\n"); 21686 if ((err = drv_priv(cred_p)) == 0) { 21687 err = sd_reserve_release(dev, SD_RESERVE); 21688 } 21689 break; 21690 21691 case MHIOCREREGISTERDEVID: 21692 SD_TRACE(SD_LOG_IOCTL, un, "MHIOCREREGISTERDEVID\n"); 21693 if (drv_priv(cred_p) == EPERM) { 21694 err = EPERM; 21695 } else if (!un->un_f_devid_supported) { 21696 err = ENOTTY; 21697 } else { 21698 err = sd_mhdioc_register_devid(dev); 21699 } 21700 break; 21701 21702 case MHIOCGRP_INKEYS: 21703 SD_TRACE(SD_LOG_IOCTL, un, "MHIOCGRP_INKEYS\n"); 21704 if (((err = drv_priv(cred_p)) != EPERM) && arg != NULL) { 21705 if (un->un_reservation_type == SD_SCSI2_RESERVATION) { 21706 err = ENOTSUP; 21707 } else { 21708 err = sd_mhdioc_inkeys(dev, (caddr_t)arg, 21709 flag); 21710 } 21711 } 21712 break; 21713 21714 case MHIOCGRP_INRESV: 21715 SD_TRACE(SD_LOG_IOCTL, un, "MHIOCGRP_INRESV\n"); 21716 if (((err = drv_priv(cred_p)) != EPERM) && arg != NULL) { 21717 if (un->un_reservation_type == SD_SCSI2_RESERVATION) { 21718 err = ENOTSUP; 21719 } else { 21720 err = sd_mhdioc_inresv(dev, (caddr_t)arg, flag); 21721 } 21722 } 21723 break; 21724 21725 case MHIOCGRP_REGISTER: 21726 SD_TRACE(SD_LOG_IOCTL, un, "MHIOCGRP_REGISTER\n"); 21727 if ((err = drv_priv(cred_p)) != EPERM) { 21728 if (un->un_reservation_type == SD_SCSI2_RESERVATION) { 21729 err = ENOTSUP; 21730 } else if (arg != NULL) { 21731 mhioc_register_t reg; 21732 if (ddi_copyin((void *)arg, ®, 21733 sizeof (mhioc_register_t), flag) != 0) { 21734 err = EFAULT; 21735 } else { 21736 err = 21737 sd_send_scsi_PERSISTENT_RESERVE_OUT( 21738 ssc, SD_SCSI3_REGISTER, 21739 (uchar_t *)®); 21740 if (err != 0) 21741 goto done_with_assess; 21742 } 21743 } 21744 } 21745 break; 21746 21747 case MHIOCGRP_RESERVE: 21748 SD_TRACE(SD_LOG_IOCTL, un, "MHIOCGRP_RESERVE\n"); 21749 if ((err = drv_priv(cred_p)) != EPERM) { 21750 if (un->un_reservation_type == SD_SCSI2_RESERVATION) { 21751 err = ENOTSUP; 21752 } else if (arg != NULL) { 21753 mhioc_resv_desc_t resv_desc; 21754 if (ddi_copyin((void *)arg, &resv_desc, 21755 sizeof (mhioc_resv_desc_t), flag) != 0) { 21756 err = EFAULT; 21757 } else { 21758 err = 21759 
sd_send_scsi_PERSISTENT_RESERVE_OUT( 21760 ssc, SD_SCSI3_RESERVE, 21761 (uchar_t *)&resv_desc); 21762 if (err != 0) 21763 goto done_with_assess; 21764 } 21765 } 21766 } 21767 break; 21768 21769 case MHIOCGRP_PREEMPTANDABORT: 21770 SD_TRACE(SD_LOG_IOCTL, un, "MHIOCGRP_PREEMPTANDABORT\n"); 21771 if ((err = drv_priv(cred_p)) != EPERM) { 21772 if (un->un_reservation_type == SD_SCSI2_RESERVATION) { 21773 err = ENOTSUP; 21774 } else if (arg != NULL) { 21775 mhioc_preemptandabort_t preempt_abort; 21776 if (ddi_copyin((void *)arg, &preempt_abort, 21777 sizeof (mhioc_preemptandabort_t), 21778 flag) != 0) { 21779 err = EFAULT; 21780 } else { 21781 err = 21782 sd_send_scsi_PERSISTENT_RESERVE_OUT( 21783 ssc, SD_SCSI3_PREEMPTANDABORT, 21784 (uchar_t *)&preempt_abort); 21785 if (err != 0) 21786 goto done_with_assess; 21787 } 21788 } 21789 } 21790 break; 21791 21792 case MHIOCGRP_REGISTERANDIGNOREKEY: 21793 SD_TRACE(SD_LOG_IOCTL, un, "MHIOCGRP_REGISTERANDIGNOREKEY\n"); 21794 if ((err = drv_priv(cred_p)) != EPERM) { 21795 if (un->un_reservation_type == SD_SCSI2_RESERVATION) { 21796 err = ENOTSUP; 21797 } else if (arg != NULL) { 21798 mhioc_registerandignorekey_t r_and_i; 21799 if (ddi_copyin((void *)arg, (void *)&r_and_i, 21800 sizeof (mhioc_registerandignorekey_t), 21801 flag) != 0) { 21802 err = EFAULT; 21803 } else { 21804 err = 21805 sd_send_scsi_PERSISTENT_RESERVE_OUT( 21806 ssc, SD_SCSI3_REGISTERANDIGNOREKEY, 21807 (uchar_t *)&r_and_i); 21808 if (err != 0) 21809 goto done_with_assess; 21810 } 21811 } 21812 } 21813 break; 21814 21815 case USCSICMD: 21816 SD_TRACE(SD_LOG_IOCTL, un, "USCSICMD\n"); 21817 cr = ddi_get_cred(); 21818 if ((drv_priv(cred_p) != 0) && (drv_priv(cr) != 0)) { 21819 err = EPERM; 21820 } else { 21821 enum uio_seg uioseg; 21822 21823 uioseg = (flag & FKIOCTL) ? 
UIO_SYSSPACE : 21824 UIO_USERSPACE; 21825 if (un->un_f_format_in_progress == TRUE) { 21826 err = EAGAIN; 21827 break; 21828 } 21829 21830 err = sd_ssc_send(ssc, 21831 (struct uscsi_cmd *)arg, 21832 flag, uioseg, SD_PATH_STANDARD); 21833 if (err != 0) 21834 goto done_with_assess; 21835 else 21836 sd_ssc_assessment(ssc, SD_FMT_STANDARD); 21837 } 21838 break; 21839 21840 case CDROMPAUSE: 21841 case CDROMRESUME: 21842 SD_TRACE(SD_LOG_IOCTL, un, "PAUSE-RESUME\n"); 21843 if (!ISCD(un)) { 21844 err = ENOTTY; 21845 } else { 21846 err = sr_pause_resume(dev, cmd); 21847 } 21848 break; 21849 21850 case CDROMPLAYMSF: 21851 SD_TRACE(SD_LOG_IOCTL, un, "CDROMPLAYMSF\n"); 21852 if (!ISCD(un)) { 21853 err = ENOTTY; 21854 } else { 21855 err = sr_play_msf(dev, (caddr_t)arg, flag); 21856 } 21857 break; 21858 21859 case CDROMPLAYTRKIND: 21860 SD_TRACE(SD_LOG_IOCTL, un, "CDROMPLAYTRKIND\n"); 21861 #if defined(__i386) || defined(__amd64) 21862 /* 21863 * not supported on ATAPI CD drives, use CDROMPLAYMSF instead 21864 */ 21865 if (!ISCD(un) || (un->un_f_cfg_is_atapi == TRUE)) { 21866 #else 21867 if (!ISCD(un)) { 21868 #endif 21869 err = ENOTTY; 21870 } else { 21871 err = sr_play_trkind(dev, (caddr_t)arg, flag); 21872 } 21873 break; 21874 21875 case CDROMREADTOCHDR: 21876 SD_TRACE(SD_LOG_IOCTL, un, "CDROMREADTOCHDR\n"); 21877 if (!ISCD(un)) { 21878 err = ENOTTY; 21879 } else { 21880 err = sr_read_tochdr(dev, (caddr_t)arg, flag); 21881 } 21882 break; 21883 21884 case CDROMREADTOCENTRY: 21885 SD_TRACE(SD_LOG_IOCTL, un, "CDROMREADTOCENTRY\n"); 21886 if (!ISCD(un)) { 21887 err = ENOTTY; 21888 } else { 21889 err = sr_read_tocentry(dev, (caddr_t)arg, flag); 21890 } 21891 break; 21892 21893 case CDROMSTOP: 21894 SD_TRACE(SD_LOG_IOCTL, un, "CDROMSTOP\n"); 21895 if (!ISCD(un)) { 21896 err = ENOTTY; 21897 } else { 21898 err = sd_send_scsi_START_STOP_UNIT(ssc, SD_TARGET_STOP, 21899 SD_PATH_STANDARD); 21900 goto done_with_assess; 21901 } 21902 break; 21903 21904 case CDROMSTART: 21905 SD_TRACE(SD_LOG_IOCTL, un, "CDROMSTART\n"); 21906 if (!ISCD(un)) { 21907 err = ENOTTY; 21908 } else { 21909 err = sd_send_scsi_START_STOP_UNIT(ssc, SD_TARGET_START, 21910 SD_PATH_STANDARD); 21911 goto done_with_assess; 21912 } 21913 break; 21914 21915 case CDROMCLOSETRAY: 21916 SD_TRACE(SD_LOG_IOCTL, un, "CDROMCLOSETRAY\n"); 21917 if (!ISCD(un)) { 21918 err = ENOTTY; 21919 } else { 21920 err = sd_send_scsi_START_STOP_UNIT(ssc, SD_TARGET_CLOSE, 21921 SD_PATH_STANDARD); 21922 goto done_with_assess; 21923 } 21924 break; 21925 21926 case FDEJECT: /* for eject command */ 21927 case DKIOCEJECT: 21928 case CDROMEJECT: 21929 SD_TRACE(SD_LOG_IOCTL, un, "EJECT\n"); 21930 if (!un->un_f_eject_media_supported) { 21931 err = ENOTTY; 21932 } else { 21933 err = sr_eject(dev); 21934 } 21935 break; 21936 21937 case CDROMVOLCTRL: 21938 SD_TRACE(SD_LOG_IOCTL, un, "CDROMVOLCTRL\n"); 21939 if (!ISCD(un)) { 21940 err = ENOTTY; 21941 } else { 21942 err = sr_volume_ctrl(dev, (caddr_t)arg, flag); 21943 } 21944 break; 21945 21946 case CDROMSUBCHNL: 21947 SD_TRACE(SD_LOG_IOCTL, un, "CDROMSUBCHNL\n"); 21948 if (!ISCD(un)) { 21949 err = ENOTTY; 21950 } else { 21951 err = sr_read_subchannel(dev, (caddr_t)arg, flag); 21952 } 21953 break; 21954 21955 case CDROMREADMODE2: 21956 SD_TRACE(SD_LOG_IOCTL, un, "CDROMREADMODE2\n"); 21957 if (!ISCD(un)) { 21958 err = ENOTTY; 21959 } else if (un->un_f_cfg_is_atapi == TRUE) { 21960 /* 21961 * If the drive supports READ CD, use that instead of 21962 * switching the LBA size via a MODE SELECT 21963 * Block Descriptor 21964 */ 21965 err = 
sr_read_cd_mode2(dev, (caddr_t)arg, flag); 21966 } else { 21967 err = sr_read_mode2(dev, (caddr_t)arg, flag); 21968 } 21969 break; 21970 21971 case CDROMREADMODE1: 21972 SD_TRACE(SD_LOG_IOCTL, un, "CDROMREADMODE1\n"); 21973 if (!ISCD(un)) { 21974 err = ENOTTY; 21975 } else { 21976 err = sr_read_mode1(dev, (caddr_t)arg, flag); 21977 } 21978 break; 21979 21980 case CDROMREADOFFSET: 21981 SD_TRACE(SD_LOG_IOCTL, un, "CDROMREADOFFSET\n"); 21982 if (!ISCD(un)) { 21983 err = ENOTTY; 21984 } else { 21985 err = sr_read_sony_session_offset(dev, (caddr_t)arg, 21986 flag); 21987 } 21988 break; 21989 21990 case CDROMSBLKMODE: 21991 SD_TRACE(SD_LOG_IOCTL, un, "CDROMSBLKMODE\n"); 21992 /* 21993 * There is no means of changing the block size of atapi 21994 * drives, so return ENOTTY if the drive type is atapi 21995 */ 21996 if (!ISCD(un) || (un->un_f_cfg_is_atapi == TRUE)) { 21997 err = ENOTTY; 21998 } else if (un->un_f_mmc_cap == TRUE) { 21999 22000 /* 22001 * MMC Devices do not support changing the 22002 * logical block size 22003 * 22004 * Note: EINVAL is being returned instead of ENOTTY to 22005 * maintain consistency with the original mmc 22006 * driver update. 22007 */ 22008 err = EINVAL; 22009 } else { 22010 mutex_enter(SD_MUTEX(un)); 22011 if ((!(un->un_exclopen & (1<<SDPART(dev)))) || 22012 (un->un_ncmds_in_transport > 0)) { 22013 mutex_exit(SD_MUTEX(un)); 22014 err = EINVAL; 22015 } else { 22016 mutex_exit(SD_MUTEX(un)); 22017 err = sr_change_blkmode(dev, cmd, arg, flag); 22018 } 22019 } 22020 break; 22021 22022 case CDROMGBLKMODE: 22023 SD_TRACE(SD_LOG_IOCTL, un, "CDROMGBLKMODE\n"); 22024 if (!ISCD(un)) { 22025 err = ENOTTY; 22026 } else if ((un->un_f_cfg_is_atapi != FALSE) && 22027 (un->un_f_blockcount_is_valid != FALSE)) { 22028 /* 22029 * Drive is an ATAPI drive so return target block 22030 * size for ATAPI drives since we cannot change the 22031 * blocksize on ATAPI drives. Used primarily to detect 22032 * if an ATAPI cdrom is present. 22033 */ 22034 if (ddi_copyout(&un->un_tgt_blocksize, (void *)arg, 22035 sizeof (int), flag) != 0) { 22036 err = EFAULT; 22037 } else { 22038 err = 0; 22039 } 22040 22041 } else { 22042 /* 22043 * Drive supports changing block sizes via a Mode 22044 * Select. 22045 */ 22046 err = sr_change_blkmode(dev, cmd, arg, flag); 22047 } 22048 break; 22049 22050 case CDROMGDRVSPEED: 22051 case CDROMSDRVSPEED: 22052 SD_TRACE(SD_LOG_IOCTL, un, "CDROMXDRVSPEED\n"); 22053 if (!ISCD(un)) { 22054 err = ENOTTY; 22055 } else if (un->un_f_mmc_cap == TRUE) { 22056 /* 22057 * Note: In the future the driver implementation 22058 * for getting and 22059 * setting cd speed should entail: 22060 * 1) If non-mmc try the Toshiba mode page 22061 * (sr_change_speed) 22062 * 2) If mmc but no support for Real Time Streaming try 22063 * the SET CD SPEED (0xBB) command 22064 * (sr_atapi_change_speed) 22065 * 3) If mmc and support for Real Time Streaming 22066 * try the GET PERFORMANCE and SET STREAMING 22067 * commands (not yet implemented, 4380808) 22068 */ 22069 /* 22070 * As per the recent MMC spec, CD-ROM speed is variable 22071 * and changes with LBA. Since there is no such 22072 * thing as a single drive speed now, fail this ioctl. 22073 * 22074 * Note: EINVAL is returned for consistency with the original 22075 * implementation, which included support for getting 22076 * the drive speed of mmc devices but not setting 22077 * the drive speed. Thus EINVAL would be returned 22078 * if a set request was made for an mmc device.
22079 * We no longer support get or set speed for 22080 * mmc but need to remain consistent with regard 22081 * to the error code returned. 22082 */ 22083 err = EINVAL; 22084 } else if (un->un_f_cfg_is_atapi == TRUE) { 22085 err = sr_atapi_change_speed(dev, cmd, arg, flag); 22086 } else { 22087 err = sr_change_speed(dev, cmd, arg, flag); 22088 } 22089 break; 22090 22091 case CDROMCDDA: 22092 SD_TRACE(SD_LOG_IOCTL, un, "CDROMCDDA\n"); 22093 if (!ISCD(un)) { 22094 err = ENOTTY; 22095 } else { 22096 err = sr_read_cdda(dev, (void *)arg, flag); 22097 } 22098 break; 22099 22100 case CDROMCDXA: 22101 SD_TRACE(SD_LOG_IOCTL, un, "CDROMCDXA\n"); 22102 if (!ISCD(un)) { 22103 err = ENOTTY; 22104 } else { 22105 err = sr_read_cdxa(dev, (caddr_t)arg, flag); 22106 } 22107 break; 22108 22109 case CDROMSUBCODE: 22110 SD_TRACE(SD_LOG_IOCTL, un, "CDROMSUBCODE\n"); 22111 if (!ISCD(un)) { 22112 err = ENOTTY; 22113 } else { 22114 err = sr_read_all_subcodes(dev, (caddr_t)arg, flag); 22115 } 22116 break; 22117 22118 22119 #ifdef SDDEBUG 22120 /* RESET/ABORTS testing ioctls */ 22121 case DKIOCRESET: { 22122 int reset_level; 22123 22124 if (ddi_copyin((void *)arg, &reset_level, sizeof (int), flag)) { 22125 err = EFAULT; 22126 } else { 22127 SD_INFO(SD_LOG_IOCTL, un, "sdioctl: DKIOCRESET: " 22128 "reset_level = 0x%lx\n", reset_level); 22129 if (scsi_reset(SD_ADDRESS(un), reset_level)) { 22130 err = 0; 22131 } else { 22132 err = EIO; 22133 } 22134 } 22135 break; 22136 } 22137 22138 case DKIOCABORT: 22139 SD_INFO(SD_LOG_IOCTL, un, "sdioctl: DKIOCABORT:\n"); 22140 if (scsi_abort(SD_ADDRESS(un), NULL)) { 22141 err = 0; 22142 } else { 22143 err = EIO; 22144 } 22145 break; 22146 #endif 22147 22148 #ifdef SD_FAULT_INJECTION 22149 /* SDIOC FaultInjection testing ioctls */ 22150 case SDIOCSTART: 22151 case SDIOCSTOP: 22152 case SDIOCINSERTPKT: 22153 case SDIOCINSERTXB: 22154 case SDIOCINSERTUN: 22155 case SDIOCINSERTARQ: 22156 case SDIOCPUSH: 22157 case SDIOCRETRIEVE: 22158 case SDIOCRUN: 22159 SD_INFO(SD_LOG_SDTEST, un, "sdioctl:" 22160 "SDIOC detected cmd:0x%X:\n", cmd); 22161 /* call error generator */ 22162 sd_faultinjection_ioctl(cmd, arg, un); 22163 err = 0; 22164 break; 22165 22166 #endif /* SD_FAULT_INJECTION */ 22167 22168 case DKIOCFLUSHWRITECACHE: 22169 { 22170 struct dk_callback *dkc = (struct dk_callback *)arg; 22171 22172 mutex_enter(SD_MUTEX(un)); 22173 if (!un->un_f_sync_cache_supported || 22174 !un->un_f_write_cache_enabled) { 22175 err = un->un_f_sync_cache_supported ? 22176 0 : ENOTSUP; 22177 mutex_exit(SD_MUTEX(un)); 22178 if ((flag & FKIOCTL) && dkc != NULL && 22179 dkc->dkc_callback != NULL) { 22180 (*dkc->dkc_callback)(dkc->dkc_cookie, 22181 err); 22182 /* 22183 * Did callback and reported error. 22184 * Since we did a callback, ioctl 22185 * should return 0. 
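 * From user level (where FKIOCTL is not set) the callback path
 * above is never taken and the flush is synchronous. A minimal
 * sketch, assuming fd is an open descriptor for the raw device
 * node (fd and the error handling are not part of this driver):
 *
 *	if (ioctl(fd, DKIOCFLUSHWRITECACHE, NULL) != 0)
 *		perror("DKIOCFLUSHWRITECACHE");
 *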
22186 */ 22187 err = 0; 22188 } 22189 break; 22190 } 22191 mutex_exit(SD_MUTEX(un)); 22192 22193 if ((flag & FKIOCTL) && dkc != NULL && 22194 dkc->dkc_callback != NULL) { 22195 /* async SYNC CACHE request */ 22196 err = sd_send_scsi_SYNCHRONIZE_CACHE(un, dkc); 22197 } else { 22198 /* synchronous SYNC CACHE request */ 22199 err = sd_send_scsi_SYNCHRONIZE_CACHE(un, NULL); 22200 } 22201 } 22202 break; 22203 22204 case DKIOCGETWCE: { 22205 22206 int wce; 22207 22208 if ((err = sd_get_write_cache_enabled(ssc, &wce)) != 0) { 22209 break; 22210 } 22211 22212 if (ddi_copyout(&wce, (void *)arg, sizeof (wce), flag)) { 22213 err = EFAULT; 22214 } 22215 break; 22216 } 22217 22218 case DKIOCSETWCE: { 22219 22220 int wce, sync_supported; 22221 22222 if (ddi_copyin((void *)arg, &wce, sizeof (wce), flag)) { 22223 err = EFAULT; 22224 break; 22225 } 22226 22227 /* 22228 * Synchronize multiple threads trying to enable 22229 * or disable the cache via the un_f_wcc_cv 22230 * condition variable. 22231 */ 22232 mutex_enter(SD_MUTEX(un)); 22233 22234 /* 22235 * Don't allow the cache to be enabled if the 22236 * config file has it disabled. 22237 */ 22238 if (un->un_f_opt_disable_cache && wce) { 22239 mutex_exit(SD_MUTEX(un)); 22240 err = EINVAL; 22241 break; 22242 } 22243 22244 /* 22245 * Wait for write cache change in progress 22246 * bit to be clear before proceeding. 22247 */ 22248 while (un->un_f_wcc_inprog) 22249 cv_wait(&un->un_wcc_cv, SD_MUTEX(un)); 22250 22251 un->un_f_wcc_inprog = 1; 22252 22253 if (un->un_f_write_cache_enabled && wce == 0) { 22254 /* 22255 * Disable the write cache. Don't clear 22256 * un_f_write_cache_enabled until after 22257 * the mode select and flush are complete. 22258 */ 22259 sync_supported = un->un_f_sync_cache_supported; 22260 22261 /* 22262 * If cache flush is suppressed, we assume that the 22263 * controller firmware will take care of managing the 22264 * write cache for us: no need to explicitly 22265 * disable it. 22266 */ 22267 if (!un->un_f_suppress_cache_flush) { 22268 mutex_exit(SD_MUTEX(un)); 22269 if ((err = sd_cache_control(ssc, 22270 SD_CACHE_NOCHANGE, 22271 SD_CACHE_DISABLE)) == 0 && 22272 sync_supported) { 22273 err = sd_send_scsi_SYNCHRONIZE_CACHE(un, 22274 NULL); 22275 } 22276 } else { 22277 mutex_exit(SD_MUTEX(un)); 22278 } 22279 22280 mutex_enter(SD_MUTEX(un)); 22281 if (err == 0) { 22282 un->un_f_write_cache_enabled = 0; 22283 } 22284 22285 } else if (!un->un_f_write_cache_enabled && wce != 0) { 22286 /* 22287 * Set un_f_write_cache_enabled first, so there is 22288 * no window where the cache is enabled, but the 22289 * bit says it isn't. 22290 */ 22291 un->un_f_write_cache_enabled = 1; 22292 22293 /* 22294 * If cache flush is suppressed, we assume that the 22295 * controller firmware will take care of managing the 22296 * write cache for us: no need to explicitly 22297 * enable it. 
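 * From user level the whole enable/disable sequence is driven by
 * this single ioctl. A minimal sketch, assuming fd is an open
 * descriptor for the device (not part of this driver):
 *
 *	int wce = 1;	set wce to 0 to disable the write cache
 *	if (ioctl(fd, DKIOCSETWCE, &wce) != 0)
 *		perror("DKIOCSETWCE");
 *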
22298 */ 22299 if (!un->un_f_suppress_cache_flush) { 22300 mutex_exit(SD_MUTEX(un)); 22301 err = sd_cache_control(ssc, SD_CACHE_NOCHANGE, 22302 SD_CACHE_ENABLE); 22303 } else { 22304 mutex_exit(SD_MUTEX(un)); 22305 } 22306 22307 mutex_enter(SD_MUTEX(un)); 22308 22309 if (err) { 22310 un->un_f_write_cache_enabled = 0; 22311 } 22312 } 22313 22314 un->un_f_wcc_inprog = 0; 22315 cv_broadcast(&un->un_wcc_cv); 22316 mutex_exit(SD_MUTEX(un)); 22317 break; 22318 } 22319 22320 default: 22321 err = ENOTTY; 22322 break; 22323 } 22324 mutex_enter(SD_MUTEX(un)); 22325 un->un_ncmds_in_driver--; 22326 ASSERT(un->un_ncmds_in_driver >= 0); 22327 mutex_exit(SD_MUTEX(un)); 22328 22329 22330 done_without_assess: 22331 sd_ssc_fini(ssc); 22332 22333 SD_TRACE(SD_LOG_IOCTL, un, "sdioctl: exit: %d\n", err); 22334 return (err); 22335 22336 done_with_assess: 22337 mutex_enter(SD_MUTEX(un)); 22338 un->un_ncmds_in_driver--; 22339 ASSERT(un->un_ncmds_in_driver >= 0); 22340 mutex_exit(SD_MUTEX(un)); 22341 22342 done_quick_assess: 22343 if (err != 0) 22344 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 22345 /* Uninitialize sd_ssc_t pointer */ 22346 sd_ssc_fini(ssc); 22347 22348 SD_TRACE(SD_LOG_IOCTL, un, "sdioctl: exit: %d\n", err); 22349 return (err); 22350 } 22351 22352 22353 /* 22354 * Function: sd_dkio_ctrl_info 22355 * 22356 * Description: This routine is the driver entry point for handling controller 22357 * information ioctl requests (DKIOCINFO). 22358 * 22359 * Arguments: dev - the device number 22360 * arg - pointer to user provided dk_cinfo structure 22361 * specifying the controller type and attributes. 22362 * flag - this argument is a pass through to ddi_copyxxx() 22363 * directly from the mode argument of ioctl(). 22364 * 22365 * Return Code: 0 22366 * EFAULT 22367 * ENXIO 22368 */ 22369 22370 static int 22371 sd_dkio_ctrl_info(dev_t dev, caddr_t arg, int flag) 22372 { 22373 struct sd_lun *un = NULL; 22374 struct dk_cinfo *info; 22375 dev_info_t *pdip; 22376 int lun, tgt; 22377 22378 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 22379 return (ENXIO); 22380 } 22381 22382 info = (struct dk_cinfo *) 22383 kmem_zalloc(sizeof (struct dk_cinfo), KM_SLEEP); 22384 22385 switch (un->un_ctype) { 22386 case CTYPE_CDROM: 22387 info->dki_ctype = DKC_CDROM; 22388 break; 22389 default: 22390 info->dki_ctype = DKC_SCSI_CCS; 22391 break; 22392 } 22393 pdip = ddi_get_parent(SD_DEVINFO(un)); 22394 info->dki_cnum = ddi_get_instance(pdip); 22395 if (strlen(ddi_get_name(pdip)) < DK_DEVLEN) { 22396 (void) strcpy(info->dki_cname, ddi_get_name(pdip)); 22397 } else { 22398 (void) strncpy(info->dki_cname, ddi_node_name(pdip), 22399 DK_DEVLEN - 1); 22400 } 22401 22402 lun = ddi_prop_get_int(DDI_DEV_T_ANY, SD_DEVINFO(un), 22403 DDI_PROP_DONTPASS, SCSI_ADDR_PROP_LUN, 0); 22404 tgt = ddi_prop_get_int(DDI_DEV_T_ANY, SD_DEVINFO(un), 22405 DDI_PROP_DONTPASS, SCSI_ADDR_PROP_TARGET, 0); 22406 22407 /* Unit Information */ 22408 info->dki_unit = ddi_get_instance(SD_DEVINFO(un)); 22409 info->dki_slave = ((tgt << 3) | lun); 22410 (void) strncpy(info->dki_dname, ddi_driver_name(SD_DEVINFO(un)), 22411 DK_DEVLEN - 1); 22412 info->dki_flags = DKI_FMTVOL; 22413 info->dki_partition = SDPART(dev); 22414 22415 /* Max Transfer size of this device in blocks */ 22416 info->dki_maxtransfer = un->un_max_xfer_size / un->un_sys_blocksize; 22417 info->dki_addr = 0; 22418 info->dki_space = 0; 22419 info->dki_prio = 0; 22420 info->dki_vec = 0; 22421 22422 if (ddi_copyout(info, arg, sizeof (struct dk_cinfo), flag) != 0) { 22423 kmem_free(info, sizeof 
(struct dk_cinfo)); 22424 return (EFAULT); 22425 } else { 22426 kmem_free(info, sizeof (struct dk_cinfo)); 22427 return (0); 22428 } 22429 } 22430 22431 22432 /* 22433 * Function: sd_get_media_info 22434 * 22435 * Description: This routine is the driver entry point for handling ioctl 22436 * requests for the media type or command set profile used by the 22437 * drive to operate on the media (DKIOCGMEDIAINFO). 22438 * 22439 * Arguments: dev - the device number 22440 * arg - pointer to user provided dk_minfo structure 22441 * specifying the media type, logical block size and 22442 * drive capacity. 22443 * flag - this argument is a pass through to ddi_copyxxx() 22444 * directly from the mode argument of ioctl(). 22445 * 22446 * Return Code: 0 22447 * EACCES 22448 * EFAULT 22449 * ENXIO 22450 * EIO 22451 */ 22452 22453 static int 22454 sd_get_media_info(dev_t dev, caddr_t arg, int flag) 22455 { 22456 struct sd_lun *un = NULL; 22457 struct uscsi_cmd com; 22458 struct scsi_inquiry *sinq; 22459 struct dk_minfo media_info; 22460 u_longlong_t media_capacity; 22461 uint64_t capacity; 22462 uint_t lbasize; 22463 uchar_t *out_data; 22464 uchar_t *rqbuf; 22465 int rval = 0; 22466 int rtn; 22467 sd_ssc_t *ssc; 22468 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL || 22469 (un->un_state == SD_STATE_OFFLINE)) { 22470 return (ENXIO); 22471 } 22472 22473 SD_TRACE(SD_LOG_IOCTL_DKIO, un, "sd_get_media_info: entry\n"); 22474 22475 out_data = kmem_zalloc(SD_PROFILE_HEADER_LEN, KM_SLEEP); 22476 rqbuf = kmem_zalloc(SENSE_LENGTH, KM_SLEEP); 22477 22478 /* Issue a TUR to determine if the drive is ready with media present */ 22479 ssc = sd_ssc_init(un); 22480 rval = sd_send_scsi_TEST_UNIT_READY(ssc, SD_CHECK_FOR_MEDIA); 22481 if (rval == ENXIO) { 22482 goto done; 22483 } else if (rval != 0) { 22484 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 22485 } 22486 22487 /* Now get configuration data */ 22488 if (ISCD(un)) { 22489 media_info.dki_media_type = DK_CDROM; 22490 22491 /* Allow SCMD_GET_CONFIGURATION to MMC devices only */ 22492 if (un->un_f_mmc_cap == TRUE) { 22493 rtn = sd_send_scsi_GET_CONFIGURATION(ssc, &com, rqbuf, 22494 SENSE_LENGTH, out_data, SD_PROFILE_HEADER_LEN, 22495 SD_PATH_STANDARD); 22496 22497 if (rtn) { 22498 /* 22499 * We ignore all failures for CD, but need to 22500 * record the assessment before the processing code 22501 * below so that no FMA assessment is missed.
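 * (In the check below, rqbuf holds the fixed-format sense data
 * from the attempt: byte 2 carries the sense key and byte 12 the
 * additional sense code, where 0x20 is "invalid command operation
 * code". Anything other than ILLEGAL REQUEST/0x20 with a good
 * REQUEST SENSE status is treated as a hard EIO.)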
22502 */ 22503 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 22504 /* 22505 * Failed for other than an illegal request 22506 * or command not supported 22507 */ 22508 if ((com.uscsi_status == STATUS_CHECK) && 22509 (com.uscsi_rqstatus == STATUS_GOOD)) { 22510 if ((rqbuf[2] != KEY_ILLEGAL_REQUEST) || 22511 (rqbuf[12] != 0x20)) { 22512 rval = EIO; 22513 goto no_assessment; 22514 } 22515 } 22516 } else { 22517 /* 22518 * The GET CONFIGURATION command succeeded 22519 * so set the media type according to the 22520 * returned data 22521 */ 22522 media_info.dki_media_type = out_data[6]; 22523 media_info.dki_media_type <<= 8; 22524 media_info.dki_media_type |= out_data[7]; 22525 } 22526 } 22527 } else { 22528 /* 22529 * The profile list is not available, so we attempt to identify 22530 * the media type based on the inquiry data 22531 */ 22532 sinq = un->un_sd->sd_inq; 22533 if ((sinq->inq_dtype == DTYPE_DIRECT) || 22534 (sinq->inq_dtype == DTYPE_OPTICAL)) { 22535 /* This is a direct access device or optical disk */ 22536 media_info.dki_media_type = DK_FIXED_DISK; 22537 22538 if ((bcmp(sinq->inq_vid, "IOMEGA", 6) == 0) || 22539 (bcmp(sinq->inq_vid, "iomega", 6) == 0)) { 22540 if ((bcmp(sinq->inq_pid, "ZIP", 3) == 0)) { 22541 media_info.dki_media_type = DK_ZIP; 22542 } else if ( 22543 (bcmp(sinq->inq_pid, "jaz", 3) == 0)) { 22544 media_info.dki_media_type = DK_JAZ; 22545 } 22546 } 22547 } else { 22548 /* 22549 * Not a CD, direct access or optical disk so return 22550 * unknown media 22551 */ 22552 media_info.dki_media_type = DK_UNKNOWN; 22553 } 22554 } 22555 22556 /* Now read the capacity so we can provide the lbasize and capacity */ 22557 rval = sd_send_scsi_READ_CAPACITY(ssc, &capacity, &lbasize, 22558 SD_PATH_DIRECT); 22559 switch (rval) { 22560 case 0: 22561 break; 22562 case EACCES: 22563 rval = EACCES; 22564 goto done; 22565 default: 22566 rval = EIO; 22567 goto done; 22568 } 22569 22570 /* 22571 * If the lun is expanded dynamically, update the un structure. 22572 */ 22573 mutex_enter(SD_MUTEX(un)); 22574 if ((un->un_f_blockcount_is_valid == TRUE) && 22575 (un->un_f_tgt_blocksize_is_valid == TRUE) && 22576 (capacity > un->un_blockcount)) { 22577 sd_update_block_info(un, lbasize, capacity); 22578 } 22579 mutex_exit(SD_MUTEX(un)); 22580 22581 media_info.dki_lbsize = lbasize; 22582 media_capacity = capacity; 22583 22584 /* 22585 * sd_send_scsi_READ_CAPACITY() reports capacity in 22586 * un->un_sys_blocksize chunks. So we need to convert it into 22587 * cap.lbasize chunks. 22588 */ 22589 media_capacity *= un->un_sys_blocksize; 22590 media_capacity /= lbasize; 22591 media_info.dki_capacity = media_capacity; 22592 22593 if (ddi_copyout(&media_info, arg, sizeof (struct dk_minfo), flag)) { 22594 rval = EFAULT; 22595 /* Keep the goto, in case more code is added below in the future. */ 22596 goto no_assessment; 22597 } 22598 done: 22599 if (rval != 0) { 22600 if (rval == EIO) 22601 sd_ssc_assessment(ssc, SD_FMT_STATUS_CHECK); 22602 else 22603 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 22604 } 22605 no_assessment: 22606 sd_ssc_fini(ssc); 22607 kmem_free(out_data, SD_PROFILE_HEADER_LEN); 22608 kmem_free(rqbuf, SENSE_LENGTH); 22609 return (rval); 22610 } 22611 22612 22613 /* 22614 * Function: sd_check_media 22615 * 22616 * Description: This utility routine implements the functionality for the 22617 * DKIOCSTATE ioctl. This ioctl blocks the user thread until the 22618 * driver state changes from that specified by the user 22619 * (inserted or ejected).
For example, if the user specifies 22620 * DKIO_EJECTED and the current media state is inserted this 22621 * routine will immediately return DKIO_INSERTED. However, if the 22622 * current media state is not inserted the user thread will be 22623 * blocked until the drive state changes. If DKIO_NONE is specified 22624 * the user thread will block until a drive state change occurs. 22625 * 22626 * Arguments: dev - the device number 22627 * state - user pointer to a dkio_state, updated with the current 22628 * drive state at return. 22629 * 22630 * Return Code: ENXIO 22631 * EIO 22632 * EAGAIN 22633 * EINTR 22634 */ 22635 22636 static int 22637 sd_check_media(dev_t dev, enum dkio_state state) 22638 { 22639 struct sd_lun *un = NULL; 22640 enum dkio_state prev_state; 22641 opaque_t token = NULL; 22642 int rval = 0; 22643 sd_ssc_t *ssc; 22644 dev_t sub_dev; 22645 22646 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 22647 return (ENXIO); 22648 } 22649 22650 /* 22651 * sub_dev is used when submitting request to scsi watch. 22652 * All submissions are unified to use same device number. 22653 */ 22654 sub_dev = sd_make_device(SD_DEVINFO(un)); 22655 22656 SD_TRACE(SD_LOG_COMMON, un, "sd_check_media: entry\n"); 22657 22658 ssc = sd_ssc_init(un); 22659 22660 mutex_enter(SD_MUTEX(un)); 22661 22662 SD_TRACE(SD_LOG_COMMON, un, "sd_check_media: " 22663 "state=%x, mediastate=%x\n", state, un->un_mediastate); 22664 22665 prev_state = un->un_mediastate; 22666 22667 /* is there anything to do? */ 22668 if (state == un->un_mediastate || un->un_mediastate == DKIO_NONE) { 22669 /* 22670 * submit the request to the scsi_watch service; 22671 * scsi_media_watch_cb() does the real work 22672 */ 22673 mutex_exit(SD_MUTEX(un)); 22674 22675 /* 22676 * This change handles the case where a scsi watch request is 22677 * added to a device that is powered down. To accomplish this 22678 * we power up the device before adding the scsi watch request, 22679 * since the scsi watch sends a TUR directly to the device 22680 * which the device cannot handle if it is powered down. 22681 */ 22682 if (sd_pm_entry(un) != DDI_SUCCESS) { 22683 mutex_enter(SD_MUTEX(un)); 22684 goto done; 22685 } 22686 22687 token = scsi_watch_request_submit(SD_SCSI_DEVP(un), 22688 sd_check_media_time, SENSE_LENGTH, sd_media_watch_cb, 22689 (caddr_t)sub_dev); 22690 22691 sd_pm_exit(un); 22692 22693 mutex_enter(SD_MUTEX(un)); 22694 if (token == NULL) { 22695 rval = EAGAIN; 22696 goto done; 22697 } 22698 22699 /* 22700 * This is a special case IOCTL that doesn't return 22701 * until the media state changes. Routine sdpower 22702 * knows about and handles this so don't count it 22703 * as an active cmd in the driver, which would 22704 * keep the device busy to the pm framework. 22705 * If the count isn't decremented the device can't 22706 * be powered down. 22707 */ 22708 un->un_ncmds_in_driver--; 22709 ASSERT(un->un_ncmds_in_driver >= 0); 22710 22711 /* 22712 * if a prior request had been made, this will be the same 22713 * token, as scsi_watch was designed that way. 
22714 */ 22715 un->un_swr_token = token; 22716 un->un_specified_mediastate = state; 22717 22718 /* 22719 * now wait for media change; 22720 * we will not be signalled unless mediastate == state, but it is 22721 * still better to test for this condition, since there is a 22722 * 2 sec cv_broadcast delay when mediastate == DKIO_INSERTED 22723 */ 22724 SD_TRACE(SD_LOG_COMMON, un, 22725 "sd_check_media: waiting for media state change\n"); 22726 while (un->un_mediastate == state) { 22727 if (cv_wait_sig(&un->un_state_cv, SD_MUTEX(un)) == 0) { 22728 SD_TRACE(SD_LOG_COMMON, un, 22729 "sd_check_media: waiting for media state " 22730 "was interrupted\n"); 22731 un->un_ncmds_in_driver++; 22732 rval = EINTR; 22733 goto done; 22734 } 22735 SD_TRACE(SD_LOG_COMMON, un, 22736 "sd_check_media: received signal, state=%x\n", 22737 un->un_mediastate); 22738 } 22739 /* 22740 * Inc the counter to indicate the device once again 22741 * has an active outstanding cmd. 22742 */ 22743 un->un_ncmds_in_driver++; 22744 } 22745 22746 /* invalidate geometry */ 22747 if (prev_state == DKIO_INSERTED && un->un_mediastate == DKIO_EJECTED) { 22748 sr_ejected(un); 22749 } 22750 22751 if (un->un_mediastate == DKIO_INSERTED && prev_state != DKIO_INSERTED) { 22752 uint64_t capacity; 22753 uint_t lbasize; 22754 22755 SD_TRACE(SD_LOG_COMMON, un, "sd_check_media: media inserted\n"); 22756 mutex_exit(SD_MUTEX(un)); 22757 /* 22758 * Since the following routines use SD_PATH_DIRECT, we must 22759 * call PM directly before the upcoming disk accesses. This 22760 * may cause the disk to be powered up and spun up. 22761 */ 22762 22763 if (sd_pm_entry(un) == DDI_SUCCESS) { 22764 rval = sd_send_scsi_READ_CAPACITY(ssc, 22765 &capacity, &lbasize, SD_PATH_DIRECT); 22766 if (rval != 0) { 22767 sd_pm_exit(un); 22768 if (rval == EIO) 22769 sd_ssc_assessment(ssc, 22770 SD_FMT_STATUS_CHECK); 22771 else 22772 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 22773 mutex_enter(SD_MUTEX(un)); 22774 goto done; 22775 } 22776 } else { 22777 rval = EIO; 22778 mutex_enter(SD_MUTEX(un)); 22779 goto done; 22780 } 22781 mutex_enter(SD_MUTEX(un)); 22782 22783 sd_update_block_info(un, lbasize, capacity); 22784 22785 /* 22786 * Check if the media in the device is writable or not 22787 */ 22788 if (ISCD(un)) { 22789 sd_check_for_writable_cd(ssc, SD_PATH_DIRECT); 22790 } 22791 22792 mutex_exit(SD_MUTEX(un)); 22793 cmlb_invalidate(un->un_cmlbhandle, (void *)SD_PATH_DIRECT); 22794 if ((cmlb_validate(un->un_cmlbhandle, 0, 22795 (void *)SD_PATH_DIRECT) == 0) && un->un_f_pkstats_enabled) { 22796 sd_set_pstats(un); 22797 SD_TRACE(SD_LOG_IO_PARTITION, un, 22798 "sd_check_media: un:0x%p pstats created and " 22799 "set\n", un); 22800 } 22801 22802 rval = sd_send_scsi_DOORLOCK(ssc, SD_REMOVAL_PREVENT, 22803 SD_PATH_DIRECT); 22804 22805 sd_pm_exit(un); 22806 22807 if (rval != 0) { 22808 if (rval == EIO) 22809 sd_ssc_assessment(ssc, SD_FMT_STATUS_CHECK); 22810 else 22811 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 22812 } 22813 22814 mutex_enter(SD_MUTEX(un)); 22815 } 22816 done: 22817 sd_ssc_fini(ssc); 22818 un->un_f_watcht_stopped = FALSE; 22819 if (token != NULL && un->un_swr_token != NULL) { 22820 /* 22821 * Use of this local token and the mutex ensures that we avoid 22822 * some race conditions associated with terminating the 22823 * scsi watch.
22824 */ 22825 token = un->un_swr_token; 22826 mutex_exit(SD_MUTEX(un)); 22827 (void) scsi_watch_request_terminate(token, 22828 SCSI_WATCH_TERMINATE_WAIT); 22829 if (scsi_watch_get_ref_count(token) == 0) { 22830 mutex_enter(SD_MUTEX(un)); 22831 un->un_swr_token = (opaque_t)NULL; 22832 } else { 22833 mutex_enter(SD_MUTEX(un)); 22834 } 22835 } 22836 22837 /* 22838 * Update the capacity kstat value, if no media previously 22839 * (capacity kstat is 0) and a media has been inserted 22840 * (un_f_blockcount_is_valid == TRUE) 22841 */ 22842 if (un->un_errstats) { 22843 struct sd_errstats *stp = NULL; 22844 22845 stp = (struct sd_errstats *)un->un_errstats->ks_data; 22846 if ((stp->sd_capacity.value.ui64 == 0) && 22847 (un->un_f_blockcount_is_valid == TRUE)) { 22848 stp->sd_capacity.value.ui64 = 22849 (uint64_t)((uint64_t)un->un_blockcount * 22850 un->un_sys_blocksize); 22851 } 22852 } 22853 mutex_exit(SD_MUTEX(un)); 22854 SD_TRACE(SD_LOG_COMMON, un, "sd_check_media: done\n"); 22855 return (rval); 22856 } 22857 22858 22859 /* 22860 * Function: sd_delayed_cv_broadcast 22861 * 22862 * Description: Delayed cv_broadcast to allow for target to recover from media 22863 * insertion. 22864 * 22865 * Arguments: arg - driver soft state (unit) structure 22866 */ 22867 22868 static void 22869 sd_delayed_cv_broadcast(void *arg) 22870 { 22871 struct sd_lun *un = arg; 22872 22873 SD_TRACE(SD_LOG_COMMON, un, "sd_delayed_cv_broadcast\n"); 22874 22875 mutex_enter(SD_MUTEX(un)); 22876 un->un_dcvb_timeid = NULL; 22877 cv_broadcast(&un->un_state_cv); 22878 mutex_exit(SD_MUTEX(un)); 22879 } 22880 22881 22882 /* 22883 * Function: sd_media_watch_cb 22884 * 22885 * Description: Callback routine used for support of the DKIOCSTATE ioctl. This 22886 * routine processes the TUR sense data and updates the driver 22887 * state if a transition has occurred. The user thread 22888 * (sd_check_media) is then signalled. 
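 *
 * A typical user-level consumer drives this mechanism through the
 * DKIOCSTATE ioctl; a minimal polling-loop sketch (the fd, device
 * path and error handling are assumptions, not part of this
 * driver):
 *
 *	enum dkio_state state = DKIO_NONE;
 *	int fd = open("/dev/rdsk/...", O_RDONLY | O_NONBLOCK);
 *	while (fd >= 0 && ioctl(fd, DKIOCSTATE, &state) == 0) {
 *		if (state == DKIO_INSERTED)
 *			break;	media has arrived
 *	}
 *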
22889 * 22890 * Arguments: arg - the device 'dev_t' is used for context to discriminate 22891 * among multiple watches that share this callback function 22892 * resultp - scsi watch facility result packet containing scsi 22893 * packet, status byte and sense data 22894 * 22895 * Return Code: 0 for success, -1 for failure 22896 */ 22897 22898 static int 22899 sd_media_watch_cb(caddr_t arg, struct scsi_watch_result *resultp) 22900 { 22901 struct sd_lun *un; 22902 struct scsi_status *statusp = resultp->statusp; 22903 uint8_t *sensep = (uint8_t *)resultp->sensep; 22904 enum dkio_state state = DKIO_NONE; 22905 dev_t dev = (dev_t)arg; 22906 uchar_t actual_sense_length; 22907 uint8_t skey, asc, ascq; 22908 22909 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 22910 return (-1); 22911 } 22912 actual_sense_length = resultp->actual_sense_length; 22913 22914 mutex_enter(SD_MUTEX(un)); 22915 SD_TRACE(SD_LOG_COMMON, un, 22916 "sd_media_watch_cb: status=%x, sensep=%p, len=%x\n", 22917 *((char *)statusp), (void *)sensep, actual_sense_length); 22918 22919 if (resultp->pkt->pkt_reason == CMD_DEV_GONE) { 22920 un->un_mediastate = DKIO_DEV_GONE; 22921 cv_broadcast(&un->un_state_cv); 22922 mutex_exit(SD_MUTEX(un)); 22923 22924 return (0); 22925 } 22926 22927 /* 22928 * If there was a check condition then sensep points to valid sense data. 22929 * If the status was not a check condition but a reservation or busy status 22930 * then the new state is DKIO_NONE. 22931 */ 22932 if (sensep != NULL) { 22933 skey = scsi_sense_key(sensep); 22934 asc = scsi_sense_asc(sensep); 22935 ascq = scsi_sense_ascq(sensep); 22936 22937 SD_INFO(SD_LOG_COMMON, un, 22938 "sd_media_watch_cb: sense KEY=%x, ASC=%x, ASCQ=%x\n", 22939 skey, asc, ascq); 22940 /* This routine only uses up to 13 bytes of sense data. */ 22941 if (actual_sense_length >= 13) { 22942 if (skey == KEY_UNIT_ATTENTION) { 22943 if (asc == 0x28) { 22944 state = DKIO_INSERTED; 22945 } 22946 } else if (skey == KEY_NOT_READY) { 22947 /* 22948 * Sense data of 02/06/00 means that the 22949 * drive could not read the media (No 22950 * reference position found). In this case 22951 * to prevent a hang on the DKIOCSTATE IOCTL 22952 * we set the media state to DKIO_INSERTED. 22953 */ 22954 if (asc == 0x06 && ascq == 0x00) 22955 state = DKIO_INSERTED; 22956 22957 /* 22958 * Sense data of 02/04/02 means that the host 22959 * should send a start command. Explicitly 22960 * leave the media state as is 22961 * (inserted), since the media is present 22962 * and the host has stopped the device for PM 22963 * reasons. The next real read/write 22964 * to this media will bring the 22965 * device back to the proper state for 22966 * media access. 22967 */ 22968 if (asc == 0x3a) { 22969 state = DKIO_EJECTED; 22970 } else { 22971 /* 22972 * If the drive is busy with an 22973 * operation or long write, keep the 22974 * media in an inserted state. 22975 */ 22976 22977 if ((asc == 0x04) && 22978 ((ascq == 0x02) || 22979 (ascq == 0x07) || 22980 (ascq == 0x08))) { 22981 state = DKIO_INSERTED; 22982 } 22983 } 22984 } else if (skey == KEY_NO_SENSE) { 22985 if ((asc == 0x00) && (ascq == 0x00)) { 22986 /* 22987 * Sense Data 00/00/00 does not provide 22988 * any information about the state of 22989 * the media. Ignore it.
22990 */ 22991 mutex_exit(SD_MUTEX(un)); 22992 return (0); 22993 } 22994 } 22995 } 22996 } else if ((*((char *)statusp) == STATUS_GOOD) && 22997 (resultp->pkt->pkt_reason == CMD_CMPLT)) { 22998 state = DKIO_INSERTED; 22999 } 23000 23001 SD_TRACE(SD_LOG_COMMON, un, 23002 "sd_media_watch_cb: state=%x, specified=%x\n", 23003 state, un->un_specified_mediastate); 23004 23005 /* 23006 * now signal the waiting thread if this is *not* the specified state; 23007 * delay the signal if the state is DKIO_INSERTED to allow the target 23008 * to recover 23009 */ 23010 if (state != un->un_specified_mediastate) { 23011 un->un_mediastate = state; 23012 if (state == DKIO_INSERTED) { 23013 /* 23014 * delay the signal to give the drive a chance 23015 * to do what it apparently needs to do 23016 */ 23017 SD_TRACE(SD_LOG_COMMON, un, 23018 "sd_media_watch_cb: delayed cv_broadcast\n"); 23019 if (un->un_dcvb_timeid == NULL) { 23020 un->un_dcvb_timeid = 23021 timeout(sd_delayed_cv_broadcast, un, 23022 drv_usectohz((clock_t)MEDIA_ACCESS_DELAY)); 23023 } 23024 } else { 23025 SD_TRACE(SD_LOG_COMMON, un, 23026 "sd_media_watch_cb: immediate cv_broadcast\n"); 23027 cv_broadcast(&un->un_state_cv); 23028 } 23029 } 23030 mutex_exit(SD_MUTEX(un)); 23031 return (0); 23032 } 23033 23034 23035 /* 23036 * Function: sd_dkio_get_temp 23037 * 23038 * Description: This routine is the driver entry point for handling ioctl 23039 * requests to get the disk temperature. 23040 * 23041 * Arguments: dev - the device number 23042 * arg - pointer to user provided dk_temperature structure. 23043 * flag - this argument is a pass through to ddi_copyxxx() 23044 * directly from the mode argument of ioctl(). 23045 * 23046 * Return Code: 0 23047 * EFAULT 23048 * ENXIO 23049 * EAGAIN 23050 */ 23051 23052 static int 23053 sd_dkio_get_temp(dev_t dev, caddr_t arg, int flag) 23054 { 23055 struct sd_lun *un = NULL; 23056 struct dk_temperature *dktemp = NULL; 23057 uchar_t *temperature_page; 23058 int rval = 0; 23059 int path_flag = SD_PATH_STANDARD; 23060 sd_ssc_t *ssc; 23061 23062 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 23063 return (ENXIO); 23064 } 23065 23066 ssc = sd_ssc_init(un); 23067 dktemp = kmem_zalloc(sizeof (struct dk_temperature), KM_SLEEP); 23068 23069 /* copyin the disk temp argument to get the user flags */ 23070 if (ddi_copyin((void *)arg, dktemp, 23071 sizeof (struct dk_temperature), flag) != 0) { 23072 rval = EFAULT; 23073 goto done; 23074 } 23075 23076 /* Initialize the temperature to invalid. */ 23077 dktemp->dkt_cur_temp = (short)DKT_INVALID_TEMP; 23078 dktemp->dkt_ref_temp = (short)DKT_INVALID_TEMP; 23079 23080 /* 23081 * Note: Investigate removing the "bypass pm" semantic. 23082 * Can we just bypass PM always? 23083 */ 23084 if (dktemp->dkt_flags & DKT_BYPASS_PM) { 23085 path_flag = SD_PATH_DIRECT; 23086 ASSERT(!mutex_owned(&un->un_pm_mutex)); 23087 mutex_enter(&un->un_pm_mutex); 23088 if (SD_DEVICE_IS_IN_LOW_POWER(un)) { 23089 /* 23090 * If DKT_BYPASS_PM is set and the drive happens to be 23091 * in low power mode, we cannot wake it up; we need to 23092 * return EAGAIN. 23093 */ 23094 mutex_exit(&un->un_pm_mutex); 23095 rval = EAGAIN; 23096 goto done; 23097 } else { 23098 /* 23099 * Indicate to PM the device is busy. This is required 23100 * to avoid a race - i.e. the ioctl is issuing a 23101 * command and the pm framework brings down the device 23102 * to low power mode (possible power cut-off on some 23103 * platforms).
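 *
 * A user-level caller that must not spin up a sleeping drive can
 * set DKT_BYPASS_PM and accept EAGAIN. A minimal sketch (fd and
 * the error handling are assumptions, not part of this driver):
 *
 *	struct dk_temperature dkt;
 *	bzero(&dkt, sizeof (struct dk_temperature));
 *	dkt.dkt_flags = DKT_BYPASS_PM;
 *	if (ioctl(fd, DKIOCGTEMPERATURE, &dkt) == 0 &&
 *	    dkt.dkt_cur_temp != DKT_INVALID_TEMP)
 *		(void) printf("%d C\n", dkt.dkt_cur_temp);
 *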
23104 */ 23105 mutex_exit(&un->un_pm_mutex); 23106 if (sd_pm_entry(un) != DDI_SUCCESS) { 23107 rval = EAGAIN; 23108 goto done; 23109 } 23110 } 23111 } 23112 23113 temperature_page = kmem_zalloc(TEMPERATURE_PAGE_SIZE, KM_SLEEP); 23114 23115 rval = sd_send_scsi_LOG_SENSE(ssc, temperature_page, 23116 TEMPERATURE_PAGE_SIZE, TEMPERATURE_PAGE, 1, 0, path_flag); 23117 if (rval != 0) 23118 goto done2; 23119 23120 /* 23121 * For the current temperature verify that the parameter length is 0x02 23122 * and the parameter code is 0x00 23123 */ 23124 if ((temperature_page[7] == 0x02) && (temperature_page[4] == 0x00) && 23125 (temperature_page[5] == 0x00)) { 23126 if (temperature_page[9] == 0xFF) { 23127 dktemp->dkt_cur_temp = (short)DKT_INVALID_TEMP; 23128 } else { 23129 dktemp->dkt_cur_temp = (short)(temperature_page[9]); 23130 } 23131 } 23132 23133 /* 23134 * For the reference temperature verify that the parameter 23135 * length is 0x02 and the parameter code is 0x01 23136 */ 23137 if ((temperature_page[13] == 0x02) && (temperature_page[10] == 0x00) && 23138 (temperature_page[11] == 0x01)) { 23139 if (temperature_page[15] == 0xFF) { 23140 dktemp->dkt_ref_temp = (short)DKT_INVALID_TEMP; 23141 } else { 23142 dktemp->dkt_ref_temp = (short)(temperature_page[15]); 23143 } 23144 } 23145 23146 /* Do the copyout regardless of the temperature command's status. */ 23147 if (ddi_copyout(dktemp, (void *)arg, sizeof (struct dk_temperature), 23148 flag) != 0) { 23149 rval = EFAULT; 23150 goto done1; 23151 } 23152 23153 done2: 23154 if (rval != 0) { 23155 if (rval == EIO) 23156 sd_ssc_assessment(ssc, SD_FMT_STATUS_CHECK); 23157 else 23158 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 23159 } 23160 done1: 23161 if (path_flag == SD_PATH_DIRECT) { 23162 sd_pm_exit(un); 23163 } 23164 23165 kmem_free(temperature_page, TEMPERATURE_PAGE_SIZE); 23166 done: 23167 sd_ssc_fini(ssc); 23168 if (dktemp != NULL) { 23169 kmem_free(dktemp, sizeof (struct dk_temperature)); 23170 } 23171 23172 return (rval); 23173 } 23174 23175 23176 /* 23177 * Function: sd_log_page_supported 23178 * 23179 * Description: This routine uses sd_send_scsi_LOG_SENSE to find the list of 23180 * supported log pages. 23181 * 23182 * Arguments: ssc - ssc contains pointer to driver soft state (unit) 23183 * structure for this target. 23184 * log_page - the log page code to search for. 23185 * 23186 * Return Code: -1 - on error (log sense is optional and may not be supported). 23187 * 0 - log page not found. 23188 * 1 - log page found. 23189 */ 23190 23191 static int 23192 sd_log_page_supported(sd_ssc_t *ssc, int log_page) 23193 { 23194 uchar_t *log_page_data; 23195 int i; 23196 int match = 0; 23197 int log_size; 23198 int status = 0; 23199 struct sd_lun *un; 23200 23201 ASSERT(ssc != NULL); 23202 un = ssc->ssc_un; 23203 ASSERT(un != NULL); 23204 23205 log_page_data = kmem_zalloc(0xFF, KM_SLEEP); 23206 23207 status = sd_send_scsi_LOG_SENSE(ssc, log_page_data, 0xFF, 0, 0x01, 0, 23208 SD_PATH_DIRECT); 23209 23210 if (status != 0) { 23211 if (status == EIO) { 23212 /* 23213 * Some disks do not support log sense; we 23214 * should ignore this kind of error (sense key 23215 * 0x5 - illegal request).
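 * (The LOG SENSE above requests page 0x00 with page control 01b;
 * page 0x00 is the "supported log pages" page, so an ILLEGAL
 * REQUEST here simply means the device implements no log pages
 * at all.)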
23216 */ 23217 uint8_t *sensep; 23218 int senlen; 23219 23220 sensep = (uint8_t *)ssc->ssc_uscsi_cmd->uscsi_rqbuf; 23221 senlen = (int)(ssc->ssc_uscsi_cmd->uscsi_rqlen - 23222 ssc->ssc_uscsi_cmd->uscsi_rqresid); 23223 23224 if (senlen > 0 && 23225 scsi_sense_key(sensep) == KEY_ILLEGAL_REQUEST) { 23226 sd_ssc_assessment(ssc, 23227 SD_FMT_IGNORE_COMPROMISE); 23228 } else { 23229 sd_ssc_assessment(ssc, SD_FMT_STATUS_CHECK); 23230 } 23231 } else { 23232 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 23233 } 23234 23235 SD_ERROR(SD_LOG_COMMON, un, 23236 "sd_log_page_supported: failed log page retrieval\n"); 23237 kmem_free(log_page_data, 0xFF); 23238 return (-1); 23239 } 23240 23241 log_size = log_page_data[3]; 23242 23243 /* 23244 * The list of supported log pages starts at the fourth byte. Check 23245 * until we run out of log pages or a match is found. 23246 */ 23247 for (i = 4; (i < (log_size + 4)) && !match; i++) { 23248 if (log_page_data[i] == log_page) { 23249 match++; 23250 } 23251 } 23252 kmem_free(log_page_data, 0xFF); 23253 return (match); 23254 } 23255 23256 23257 /* 23258 * Function: sd_mhdioc_failfast 23259 * 23260 * Description: This routine is the driver entry point for handling ioctl 23261 * requests to enable/disable the multihost failfast option. 23262 * (MHIOCENFAILFAST) 23263 * 23264 * Arguments: dev - the device number 23265 * arg - user specified probing interval. 23266 * flag - this argument is a pass through to ddi_copyxxx() 23267 * directly from the mode argument of ioctl(). 23268 * 23269 * Return Code: 0 23270 * EFAULT 23271 * ENXIO 23272 */ 23273 23274 static int 23275 sd_mhdioc_failfast(dev_t dev, caddr_t arg, int flag) 23276 { 23277 struct sd_lun *un = NULL; 23278 int mh_time; 23279 int rval = 0; 23280 23281 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 23282 return (ENXIO); 23283 } 23284 23285 if (ddi_copyin((void *)arg, &mh_time, sizeof (int), flag)) 23286 return (EFAULT); 23287 23288 if (mh_time) { 23289 mutex_enter(SD_MUTEX(un)); 23290 un->un_resvd_status |= SD_FAILFAST; 23291 mutex_exit(SD_MUTEX(un)); 23292 /* 23293 * If mh_time is INT_MAX, then this ioctl is being used for 23294 * SCSI-3 PGR purposes, and we don't need to spawn a watch thread. 23295 */ 23296 if (mh_time != INT_MAX) { 23297 rval = sd_check_mhd(dev, mh_time); 23298 } 23299 } else { 23300 (void) sd_check_mhd(dev, 0); 23301 mutex_enter(SD_MUTEX(un)); 23302 un->un_resvd_status &= ~SD_FAILFAST; 23303 mutex_exit(SD_MUTEX(un)); 23304 } 23305 return (rval); 23306 } 23307 23308 23309 /* 23310 * Function: sd_mhdioc_takeown 23311 * 23312 * Description: This routine is the driver entry point for handling ioctl 23313 * requests to forcefully acquire exclusive access rights to the 23314 * multihost disk (MHIOCTKOWN). 23315 * 23316 * Arguments: dev - the device number 23317 * arg - user provided structure specifying the delay 23318 * parameters in milliseconds 23319 * flag - this argument is a pass through to ddi_copyxxx() 23320 * directly from the mode argument of ioctl().
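 *
 * A minimal user-level sketch, assuming fd is an open descriptor
 * for the shared disk (fd and the error handling are not part of
 * this driver; the delay is in milliseconds):
 *
 *	struct mhioctkown tkown;
 *	bzero(&tkown, sizeof (struct mhioctkown));
 *	tkown.reinstate_resv_delay = 1000;
 *	if (ioctl(fd, MHIOCTKOWN, &tkown) != 0)
 *		perror("MHIOCTKOWN");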
23321 * 23322 * Return Code: 0 23323 * EFAULT 23324 * ENXIO 23325 */ 23326 23327 static int 23328 sd_mhdioc_takeown(dev_t dev, caddr_t arg, int flag) 23329 { 23330 struct sd_lun *un = NULL; 23331 struct mhioctkown *tkown = NULL; 23332 int rval = 0; 23333 23334 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 23335 return (ENXIO); 23336 } 23337 23338 if (arg != NULL) { 23339 tkown = (struct mhioctkown *) 23340 kmem_zalloc(sizeof (struct mhioctkown), KM_SLEEP); 23341 rval = ddi_copyin(arg, tkown, sizeof (struct mhioctkown), flag); 23342 if (rval != 0) { 23343 rval = EFAULT; 23344 goto error; 23345 } 23346 } 23347 23348 rval = sd_take_ownership(dev, tkown); 23349 mutex_enter(SD_MUTEX(un)); 23350 if (rval == 0) { 23351 un->un_resvd_status |= SD_RESERVE; 23352 if (tkown != NULL && tkown->reinstate_resv_delay != 0) { 23353 sd_reinstate_resv_delay = 23354 tkown->reinstate_resv_delay * 1000; 23355 } else { 23356 sd_reinstate_resv_delay = SD_REINSTATE_RESV_DELAY; 23357 } 23358 /* 23359 * Give the scsi_watch routine interval set by 23360 * the MHIOCENFAILFAST ioctl precedence here. 23361 */ 23362 if ((un->un_resvd_status & SD_FAILFAST) == 0) { 23363 mutex_exit(SD_MUTEX(un)); 23364 (void) sd_check_mhd(dev, sd_reinstate_resv_delay/1000); 23365 SD_TRACE(SD_LOG_IOCTL_MHD, un, 23366 "sd_mhdioc_takeown : %d\n", 23367 sd_reinstate_resv_delay); 23368 } else { 23369 mutex_exit(SD_MUTEX(un)); 23370 } 23371 (void) scsi_reset_notify(SD_ADDRESS(un), SCSI_RESET_NOTIFY, 23372 sd_mhd_reset_notify_cb, (caddr_t)un); 23373 } else { 23374 un->un_resvd_status &= ~SD_RESERVE; 23375 mutex_exit(SD_MUTEX(un)); 23376 } 23377 23378 error: 23379 if (tkown != NULL) { 23380 kmem_free(tkown, sizeof (struct mhioctkown)); 23381 } 23382 return (rval); 23383 } 23384 23385 23386 /* 23387 * Function: sd_mhdioc_release 23388 * 23389 * Description: This routine is the driver entry point for handling ioctl 23390 * requests to release exclusive access rights to the multihost 23391 * disk (MHIOCRELEASE). 23392 * 23393 * Arguments: dev - the device number 23394 * 23395 * Return Code: 0 23396 * ENXIO 23397 */ 23398 23399 static int 23400 sd_mhdioc_release(dev_t dev) 23401 { 23402 struct sd_lun *un = NULL; 23403 timeout_id_t resvd_timeid_save; 23404 int resvd_status_save; 23405 int rval = 0; 23406 23407 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 23408 return (ENXIO); 23409 } 23410 23411 mutex_enter(SD_MUTEX(un)); 23412 resvd_status_save = un->un_resvd_status; 23413 un->un_resvd_status &= 23414 ~(SD_RESERVE | SD_LOST_RESERVE | SD_WANT_RESERVE); 23415 if (un->un_resvd_timeid) { 23416 resvd_timeid_save = un->un_resvd_timeid; 23417 un->un_resvd_timeid = NULL; 23418 mutex_exit(SD_MUTEX(un)); 23419 (void) untimeout(resvd_timeid_save); 23420 } else { 23421 mutex_exit(SD_MUTEX(un)); 23422 } 23423 23424 /* 23425 * destroy any pending timeout thread that may be attempting to 23426 * reinstate reservation on this device. 
23427 */ 23428 sd_rmv_resv_reclaim_req(dev); 23429 23430 if ((rval = sd_reserve_release(dev, SD_RELEASE)) == 0) { 23431 mutex_enter(SD_MUTEX(un)); 23432 if ((un->un_mhd_token) && 23433 ((un->un_resvd_status & SD_FAILFAST) == 0)) { 23434 mutex_exit(SD_MUTEX(un)); 23435 (void) sd_check_mhd(dev, 0); 23436 } else { 23437 mutex_exit(SD_MUTEX(un)); 23438 } 23439 (void) scsi_reset_notify(SD_ADDRESS(un), SCSI_RESET_CANCEL, 23440 sd_mhd_reset_notify_cb, (caddr_t)un); 23441 } else { 23442 /* 23443 * sd_mhd_watch_cb will restart the resvd recover timeout thread 23444 */ 23445 mutex_enter(SD_MUTEX(un)); 23446 un->un_resvd_status = resvd_status_save; 23447 mutex_exit(SD_MUTEX(un)); 23448 } 23449 return (rval); 23450 } 23451 23452 23453 /* 23454 * Function: sd_mhdioc_register_devid 23455 * 23456 * Description: This routine is the driver entry point for handling ioctl 23457 * requests to register the device id (MHIOCREREGISTERDEVID). 23458 * 23459 * Note: The implementation for this ioctl has been updated to 23460 * be consistent with the original PSARC case (1999/357) 23461 * (4375899, 4241671, 4220005) 23462 * 23463 * Arguments: dev - the device number 23464 * 23465 * Return Code: 0 23466 * ENXIO 23467 */ 23468 23469 static int 23470 sd_mhdioc_register_devid(dev_t dev) 23471 { 23472 struct sd_lun *un = NULL; 23473 int rval = 0; 23474 sd_ssc_t *ssc; 23475 23476 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 23477 return (ENXIO); 23478 } 23479 23480 ASSERT(!mutex_owned(SD_MUTEX(un))); 23481 23482 mutex_enter(SD_MUTEX(un)); 23483 23484 /* If a devid already exists, de-register it */ 23485 if (un->un_devid != NULL) { 23486 ddi_devid_unregister(SD_DEVINFO(un)); 23487 /* 23488 * After unregistering the devid, we need to free the devid memory 23489 */ 23490 ddi_devid_free(un->un_devid); 23491 un->un_devid = NULL; 23492 } 23493 23494 /* Check for reservation conflict */ 23495 mutex_exit(SD_MUTEX(un)); 23496 ssc = sd_ssc_init(un); 23497 rval = sd_send_scsi_TEST_UNIT_READY(ssc, 0); 23498 mutex_enter(SD_MUTEX(un)); 23499 23500 switch (rval) { 23501 case 0: 23502 sd_register_devid(ssc, SD_DEVINFO(un), SD_TARGET_IS_UNRESERVED); 23503 break; 23504 case EACCES: 23505 break; 23506 default: 23507 rval = EIO; 23508 } 23509 23510 mutex_exit(SD_MUTEX(un)); 23511 if (rval != 0) { 23512 if (rval == EIO) 23513 sd_ssc_assessment(ssc, SD_FMT_STATUS_CHECK); 23514 else 23515 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 23516 } 23517 sd_ssc_fini(ssc); 23518 return (rval); 23519 } 23520 23521 23522 /* 23523 * Function: sd_mhdioc_inkeys 23524 * 23525 * Description: This routine is the driver entry point for handling ioctl 23526 * requests to issue the SCSI-3 Persistent In Read Keys command 23527 * to the device (MHIOCGRP_INKEYS). 23528 * 23529 * Arguments: dev - the device number 23530 * arg - user provided in_keys structure 23531 * flag - this argument is a pass through to ddi_copyxxx() 23532 * directly from the mode argument of ioctl().
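 *
 * Callers typically issue this ioctl twice: once with listsize
 * set to 0 to discover listlen, then again with a list buffer of
 * that size. A hedged sketch, assuming fd and the mhioc_key_list_t
 * layout from <sys/mhd.h>:
 *
 *	mhioc_inkeys_t ik;
 *	mhioc_key_list_t kl;
 *	bzero(&kl, sizeof (mhioc_key_list_t));
 *	ik.li = &kl;
 *	if (ioctl(fd, MHIOCGRP_INKEYS, &ik) == 0 && kl.listlen > 0) {
 *		kl.listsize = kl.listlen;
 *		kl.list = calloc(kl.listlen, sizeof (mhioc_resv_key_t));
 *		(void) ioctl(fd, MHIOCGRP_INKEYS, &ik);
 *	}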
23533 * 23534 * Return Code: code returned by sd_persistent_reservation_in_read_keys() 23535 * ENXIO 23536 * EFAULT 23537 */ 23538 23539 static int 23540 sd_mhdioc_inkeys(dev_t dev, caddr_t arg, int flag) 23541 { 23542 struct sd_lun *un; 23543 mhioc_inkeys_t inkeys; 23544 int rval = 0; 23545 23546 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 23547 return (ENXIO); 23548 } 23549 23550 #ifdef _MULTI_DATAMODEL 23551 switch (ddi_model_convert_from(flag & FMODELS)) { 23552 case DDI_MODEL_ILP32: { 23553 struct mhioc_inkeys32 inkeys32; 23554 23555 if (ddi_copyin(arg, &inkeys32, 23556 sizeof (struct mhioc_inkeys32), flag) != 0) { 23557 return (EFAULT); 23558 } 23559 inkeys.li = (mhioc_key_list_t *)(uintptr_t)inkeys32.li; 23560 if ((rval = sd_persistent_reservation_in_read_keys(un, 23561 &inkeys, flag)) != 0) { 23562 return (rval); 23563 } 23564 inkeys32.generation = inkeys.generation; 23565 if (ddi_copyout(&inkeys32, arg, sizeof (struct mhioc_inkeys32), 23566 flag) != 0) { 23567 return (EFAULT); 23568 } 23569 break; 23570 } 23571 case DDI_MODEL_NONE: 23572 if (ddi_copyin(arg, &inkeys, sizeof (mhioc_inkeys_t), 23573 flag) != 0) { 23574 return (EFAULT); 23575 } 23576 if ((rval = sd_persistent_reservation_in_read_keys(un, 23577 &inkeys, flag)) != 0) { 23578 return (rval); 23579 } 23580 if (ddi_copyout(&inkeys, arg, sizeof (mhioc_inkeys_t), 23581 flag) != 0) { 23582 return (EFAULT); 23583 } 23584 break; 23585 } 23586 23587 #else /* ! _MULTI_DATAMODEL */ 23588 23589 if (ddi_copyin(arg, &inkeys, sizeof (mhioc_inkeys_t), flag) != 0) { 23590 return (EFAULT); 23591 } 23592 rval = sd_persistent_reservation_in_read_keys(un, &inkeys, flag); 23593 if (rval != 0) { 23594 return (rval); 23595 } 23596 if (ddi_copyout(&inkeys, arg, sizeof (mhioc_inkeys_t), flag) != 0) { 23597 return (EFAULT); 23598 } 23599 23600 #endif /* _MULTI_DATAMODEL */ 23601 23602 return (rval); 23603 } 23604 23605 23606 /* 23607 * Function: sd_mhdioc_inresv 23608 * 23609 * Description: This routine is the driver entry point for handling ioctl 23610 * requests to issue the SCSI-3 Persistent In Read Reservations 23611 * command to the device (MHIOCGRP_INRESV). 23612 * 23613 * Arguments: dev - the device number 23614 * arg - user provided in_resv structure 23615 * flag - this argument is a pass through to ddi_copyxxx() 23616 * directly from the mode argument of ioctl().
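 *
 * Note: as with MHIOCGRP_INKEYS above, only the width of the
 * embedded li list pointer differs between the ILP32 and LP64
 * data models, so the _MULTI_DATAMODEL code below converts
 * mhioc_inresvs32.li with a uintptr_t cast on the way in and
 * copies only the generation number back out.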
23617 * 23618 * Return Code: code returned by sd_persistent_reservation_in_read_resv() 23619 * ENXIO 23620 * EFAULT 23621 */ 23622 23623 static int 23624 sd_mhdioc_inresv(dev_t dev, caddr_t arg, int flag) 23625 { 23626 struct sd_lun *un; 23627 mhioc_inresvs_t inresvs; 23628 int rval = 0; 23629 23630 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 23631 return (ENXIO); 23632 } 23633 23634 #ifdef _MULTI_DATAMODEL 23635 23636 switch (ddi_model_convert_from(flag & FMODELS)) { 23637 case DDI_MODEL_ILP32: { 23638 struct mhioc_inresvs32 inresvs32; 23639 23640 if (ddi_copyin(arg, &inresvs32, 23641 sizeof (struct mhioc_inresvs32), flag) != 0) { 23642 return (EFAULT); 23643 } 23644 inresvs.li = (mhioc_resv_desc_list_t *)(uintptr_t)inresvs32.li; 23645 if ((rval = sd_persistent_reservation_in_read_resv(un, 23646 &inresvs, flag)) != 0) { 23647 return (rval); 23648 } 23649 inresvs32.generation = inresvs.generation; 23650 if (ddi_copyout(&inresvs32, arg, 23651 sizeof (struct mhioc_inresvs32), flag) != 0) { 23652 return (EFAULT); 23653 } 23654 break; 23655 } 23656 case DDI_MODEL_NONE: 23657 if (ddi_copyin(arg, &inresvs, 23658 sizeof (mhioc_inresvs_t), flag) != 0) { 23659 return (EFAULT); 23660 } 23661 if ((rval = sd_persistent_reservation_in_read_resv(un, 23662 &inresvs, flag)) != 0) { 23663 return (rval); 23664 } 23665 if (ddi_copyout(&inresvs, arg, 23666 sizeof (mhioc_inresvs_t), flag) != 0) { 23667 return (EFAULT); 23668 } 23669 break; 23670 } 23671 23672 #else /* ! _MULTI_DATAMODEL */ 23673 23674 if (ddi_copyin(arg, &inresvs, sizeof (mhioc_inresvs_t), flag) != 0) { 23675 return (EFAULT); 23676 } 23677 rval = sd_persistent_reservation_in_read_resv(un, &inresvs, flag); 23678 if (rval != 0) { 23679 return (rval); 23680 } 23681 if (ddi_copyout(&inresvs, arg, sizeof (mhioc_inresvs_t), flag)) { 23682 return (EFAULT); 23683 } 23684 23685 #endif /* ! _MULTI_DATAMODEL */ 23686 23687 return (rval); 23688 } 23689 23690 23691 /* 23692 * The following routines support the clustering functionality described below 23693 * and implement lost reservation reclaim functionality. 23694 * 23695 * Clustering 23696 * ---------- 23697 * The clustering code uses two different, independent forms of SCSI 23698 * reservation. Traditional SCSI-2 Reserve/Release and the newer SCSI-3 23699 * Persistent Group Reservations. For any particular disk, it will use either 23700 * SCSI-2 or SCSI-3 PGR but never both at the same time for the same disk. 23701 * 23702 * SCSI-2 23703 * The cluster software takes ownership of a multi-hosted disk by issuing the 23704 * MHIOCTKOWN ioctl to the disk driver. It releases ownership by issuing the 23705 * MHIOCRELEASE ioctl. Closely related is the MHIOCENFAILFAST ioctl -- a 23706 * cluster, just after taking ownership of the disk with the MHIOCTKOWN ioctl 23707 * then issues the MHIOCENFAILFAST ioctl. This ioctl "enables failfast" in the 23708 * driver. The meaning of failfast is that if the driver (on this host) ever 23709 * encounters the scsi error return code RESERVATION_CONFLICT from the device, 23710 * it should immediately panic the host. The motivation for this ioctl is that 23711 * if this host does encounter reservation conflict, the underlying cause is 23712 * that some other host of the cluster has decided that this host is no longer 23713 * in the cluster and has seized control of the disks for itself. Since this 23714 * host is no longer in the cluster, it ought to panic itself. 
The 23715 * MHIOCENFAILFAST ioctl does two things: 23716 * (a) it sets a flag that will cause any returned RESERVATION_CONFLICT 23717 * error to panic the host 23718 * (b) it sets up a periodic timer to test whether this host still has 23719 * "access" (in that no other host has reserved the device): if the 23720 * periodic timer gets RESERVATION_CONFLICT, the host is panicked. The 23721 * purpose of that periodic timer is to handle scenarios where the host is 23722 * otherwise temporarily quiescent, temporarily doing no real i/o. 23723 * The MHIOCTKOWN ioctl will "break" a reservation that is held by another host, 23724 * by issuing a SCSI Bus Device Reset. It will then issue a SCSI Reserve for 23725 * the device itself. 23726 * 23727 * SCSI-3 PGR 23728 * A direct semantic implementation of the SCSI-3 Persistent Reservation 23729 * facility is supported through the shared multihost disk ioctls 23730 * (MHIOCGRP_INKEYS, MHIOCGRP_INRESV, MHIOCGRP_REGISTER, MHIOCGRP_RESERVE, 23731 * MHIOCGRP_PREEMPTANDABORT) 23732 * 23733 * Reservation Reclaim: 23734 * -------------------- 23735 * To support the lost reservation reclaim operations this driver creates a 23736 * single thread to handle reinstating reservations on all devices that have 23737 * lost reservations. sd_resv_reclaim_requests are logged for all devices that 23738 * have LOST RESERVATIONS when the scsi watch facility calls back sd_mhd_watch_cb, 23739 * and the reservation reclaim thread loops through the requests to regain the 23740 * lost reservations. 23741 */ 23742 23743 /* 23744 * Function: sd_check_mhd() 23745 * 23746 * Description: This function sets up and submits a scsi watch request or 23747 * terminates an existing watch request. This routine is used in 23748 * support of reservation reclaim. 23749 * 23750 * Arguments: dev - the device 'dev_t' is used for context to discriminate 23751 * among multiple watches that share the callback function 23752 * interval - the number of milliseconds specifying the watch 23753 * interval for issuing TEST UNIT READY commands. If 23754 * set to 0 the watch should be terminated. If the 23755 * interval is set to 0 and if the device is required 23756 * to hold reservation while disabling failfast, the 23757 * watch is restarted with an interval of 23758 * reinstate_resv_delay. 23759 * 23760 * Return Code: 0 - Successful submit/terminate of scsi watch request 23761 * ENXIO - Indicates an invalid device was specified 23762 * EAGAIN - Unable to submit the scsi watch request 23763 */ 23764 23765 static int 23766 sd_check_mhd(dev_t dev, int interval) 23767 { 23768 struct sd_lun *un; 23769 opaque_t token; 23770 23771 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 23772 return (ENXIO); 23773 } 23774 23775 /* is this a watch termination request? */ 23776 if (interval == 0) { 23777 mutex_enter(SD_MUTEX(un)); 23778 /* if there is an existing watch task then terminate it */ 23779 if (un->un_mhd_token) { 23780 token = un->un_mhd_token; 23781 un->un_mhd_token = NULL; 23782 mutex_exit(SD_MUTEX(un)); 23783 (void) scsi_watch_request_terminate(token, 23784 SCSI_WATCH_TERMINATE_ALL_WAIT); 23785 mutex_enter(SD_MUTEX(un)); 23786 } else { 23787 mutex_exit(SD_MUTEX(un)); 23788 /* 23789 * Note: If we return here we don't check for the 23790 * failfast case. This is the original legacy 23791 * implementation but perhaps we should be checking 23792 * the failfast case.
23793 */ 23794 return (0); 23795 } 23796 /* 23797 * If the device is required to hold reservation while 23798 * disabling failfast, we need to restart the scsi_watch 23799 * routine with an interval of reinstate_resv_delay. 23800 */ 23801 if (un->un_resvd_status & SD_RESERVE) { 23802 interval = sd_reinstate_resv_delay/1000; 23803 } else { 23804 /* no failfast so bail */ 23805 mutex_exit(SD_MUTEX(un)); 23806 return (0); 23807 } 23808 mutex_exit(SD_MUTEX(un)); 23809 } 23810 23811 /* 23812 * adjust minimum time interval to 1 second, 23813 * and convert from msecs to usecs 23814 */ 23815 if (interval > 0 && interval < 1000) { 23816 interval = 1000; 23817 } 23818 interval *= 1000; 23819 23820 /* 23821 * submit the request to the scsi_watch service 23822 */ 23823 token = scsi_watch_request_submit(SD_SCSI_DEVP(un), interval, 23824 SENSE_LENGTH, sd_mhd_watch_cb, (caddr_t)dev); 23825 if (token == NULL) { 23826 return (EAGAIN); 23827 } 23828 23829 /* 23830 * save token for termination later on 23831 */ 23832 mutex_enter(SD_MUTEX(un)); 23833 un->un_mhd_token = token; 23834 mutex_exit(SD_MUTEX(un)); 23835 return (0); 23836 } 23837 23838 23839 /* 23840 * Function: sd_mhd_watch_cb() 23841 * 23842 * Description: This function is the call back function used by the scsi watch 23843 * facility. The scsi watch facility sends the "Test Unit Ready" 23844 * and processes the status. If applicable (i.e. a "Unit Attention" 23845 * status and automatic "Request Sense" not used) the scsi watch 23846 * facility will send a "Request Sense" and retrieve the sense data 23847 * to be passed to this callback function. In either case, whether 23848 * via the automatic "Request Sense" or the facility submitting one, this 23849 * callback is passed the status and sense data. 23850 * 23851 * Arguments: arg - the device 'dev_t' is used for context to discriminate 23852 * among multiple watches that share this callback function 23853 * resultp - scsi watch facility result packet containing scsi 23854 * packet, status byte and sense data 23855 * 23856 * Return Code: 0 - continue the watch task 23857 * non-zero - terminate the watch task 23858 */ 23859 23860 static int 23861 sd_mhd_watch_cb(caddr_t arg, struct scsi_watch_result *resultp) 23862 { 23863 struct sd_lun *un; 23864 struct scsi_status *statusp; 23865 uint8_t *sensep; 23866 struct scsi_pkt *pkt; 23867 uchar_t actual_sense_length; 23868 dev_t dev = (dev_t)arg; 23869 23870 ASSERT(resultp != NULL); 23871 statusp = resultp->statusp; 23872 sensep = (uint8_t *)resultp->sensep; 23873 pkt = resultp->pkt; 23874 actual_sense_length = resultp->actual_sense_length; 23875 23876 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 23877 return (ENXIO); 23878 } 23879 23880 SD_TRACE(SD_LOG_IOCTL_MHD, un, 23881 "sd_mhd_watch_cb: reason '%s', status '%s'\n", 23882 scsi_rname(pkt->pkt_reason), sd_sname(*((unsigned char *)statusp))); 23883 23884 /* Begin processing of the status and/or sense data */ 23885 if (pkt->pkt_reason != CMD_CMPLT) { 23886 /* Handle the incomplete packet */ 23887 sd_mhd_watch_incomplete(un, pkt); 23888 return (0); 23889 } else if (*((unsigned char *)statusp) != STATUS_GOOD) { 23890 if (*((unsigned char *)statusp) 23891 == STATUS_RESERVATION_CONFLICT) { 23892 /* 23893 * Handle a reservation conflict by panicking if 23894 * configured for failfast or by logging the conflict 23895 * and updating the reservation status 23896 */ 23897 mutex_enter(SD_MUTEX(un)); 23898 if ((un->un_resvd_status & SD_FAILFAST) && 23899 (sd_failfast_enable)) {
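/*
 * Failfast is armed and globally enabled: a reservation
 * conflict means another cluster node has seized this disk
 * (see the block comment on clustering above), so panic this
 * host rather than let it run without its reservation.
 */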
sd_panic_for_res_conflict(un); 23901 /*NOTREACHED*/ 23902 } 23903 SD_INFO(SD_LOG_IOCTL_MHD, un, 23904 "sd_mhd_watch_cb: Reservation Conflict\n"); 23905 un->un_resvd_status |= SD_RESERVATION_CONFLICT; 23906 mutex_exit(SD_MUTEX(un)); 23907 } 23908 } 23909 23910 if (sensep != NULL) { 23911 if (actual_sense_length >= (SENSE_LENGTH - 2)) { 23912 mutex_enter(SD_MUTEX(un)); 23913 if ((scsi_sense_asc(sensep) == 23914 SD_SCSI_RESET_SENSE_CODE) && 23915 (un->un_resvd_status & SD_RESERVE)) { 23916 /* 23917 * The additional sense code indicates a power 23918 * on or bus device reset has occurred; update 23919 * the reservation status. 23920 */ 23921 un->un_resvd_status |= 23922 (SD_LOST_RESERVE | SD_WANT_RESERVE); 23923 SD_INFO(SD_LOG_IOCTL_MHD, un, 23924 "sd_mhd_watch_cb: Lost Reservation\n"); 23925 } 23926 } else { 23927 return (0); 23928 } 23929 } else { 23930 mutex_enter(SD_MUTEX(un)); 23931 } 23932 23933 if ((un->un_resvd_status & SD_RESERVE) && 23934 (un->un_resvd_status & SD_LOST_RESERVE)) { 23935 if (un->un_resvd_status & SD_WANT_RESERVE) { 23936 /* 23937 * A reset occurred in between the last probe and this 23938 * one, so if a timeout is pending, cancel it. 23939 */ 23940 if (un->un_resvd_timeid) { 23941 timeout_id_t temp_id = un->un_resvd_timeid; 23942 un->un_resvd_timeid = NULL; 23943 mutex_exit(SD_MUTEX(un)); 23944 (void) untimeout(temp_id); 23945 mutex_enter(SD_MUTEX(un)); 23946 } 23947 un->un_resvd_status &= ~SD_WANT_RESERVE; 23948 } 23949 if (un->un_resvd_timeid == 0) { 23950 /* Schedule a timeout to handle the lost reservation */ 23951 un->un_resvd_timeid = timeout(sd_mhd_resvd_recover, 23952 (void *)dev, 23953 drv_usectohz(sd_reinstate_resv_delay)); 23954 } 23955 } 23956 mutex_exit(SD_MUTEX(un)); 23957 return (0); 23958 } 23959 23960 23961 /* 23962 * Function: sd_mhd_watch_incomplete() 23963 * 23964 * Description: This function is used to find out why a scsi pkt sent by the 23965 * scsi watch facility was not completed. Under some scenarios this 23966 * routine will return. Otherwise it will send a bus reset to see 23967 * if the drive is still online. 23968 * 23969 * Arguments: un - driver soft state (unit) structure 23970 * pkt - incomplete scsi pkt 23971 */ 23972 23973 static void 23974 sd_mhd_watch_incomplete(struct sd_lun *un, struct scsi_pkt *pkt) 23975 { 23976 int be_chatty; 23977 int perr; 23978 23979 ASSERT(pkt != NULL); 23980 ASSERT(un != NULL); 23981 be_chatty = (!(pkt->pkt_flags & FLAG_SILENT)); 23982 perr = (pkt->pkt_statistics & STAT_PERR); 23983 23984 mutex_enter(SD_MUTEX(un)); 23985 if (un->un_state == SD_STATE_DUMPING) { 23986 mutex_exit(SD_MUTEX(un)); 23987 return; 23988 } 23989 23990 switch (pkt->pkt_reason) { 23991 case CMD_UNX_BUS_FREE: 23992 /* 23993 * If we had a parity error that caused the target to drop BSY*, 23994 * don't be chatty about it. 23995 */ 23996 if (perr && be_chatty) { 23997 be_chatty = 0; 23998 } 23999 break; 24000 case CMD_TAG_REJECT: 24001 /* 24002 * The SCSI-2 spec states that a tag reject will be sent by the 24003 * target if tagged queuing is not supported. A tag reject may 24004 * also be sent during certain initialization periods or to 24005 * control internal resources. For the latter case the target 24006 * may also return Queue Full. 24007 * 24008 * If this driver receives a tag reject from a target that is 24009 * going through an init period or controlling internal 24010 * resources, tagged queuing will be disabled. This is a less 24011 * than optimal behavior, but the driver is unable to determine 24012 * the target state and assumes tagged queuing is not supported. 24013 */ 24014 pkt->pkt_flags = 0; 24015 un->un_tagflags = 0; 24016 24017 if (un->un_f_opt_queueing == TRUE) { 24018 un->un_throttle = min(un->un_throttle, 3); 24019 } else { 24020 un->un_throttle = 1; 24021 } 24022 mutex_exit(SD_MUTEX(un)); 24023 (void) scsi_ifsetcap(SD_ADDRESS(un), "tagged-qing", 0, 1); 24024 mutex_enter(SD_MUTEX(un)); 24025 break; 24026 case CMD_INCOMPLETE: 24027 /* 24028 * The transport stopped with an abnormal state; fall through and 24029 * reset the target and/or bus, unless selection did not complete 24030 * (indicated by STATE_GOT_BUS), in which case we don't want to 24031 * go through a target/bus reset. 24032 */ 24033 if (pkt->pkt_state == STATE_GOT_BUS) { 24034 break; 24035 } 24036 /*FALLTHROUGH*/ 24037 24038 case CMD_TIMEOUT: 24039 default: 24040 /* 24041 * The lun may still be running the command, so a lun reset 24042 * should be attempted. If the lun reset fails or cannot be 24043 * issued, then try a target reset. Lastly try a bus reset. 24044 */ 24045 if ((pkt->pkt_statistics & 24046 (STAT_BUS_RESET|STAT_DEV_RESET|STAT_ABORTED)) == 0) { 24047 int reset_retval = 0; 24048 mutex_exit(SD_MUTEX(un)); 24049 if (un->un_f_allow_bus_device_reset == TRUE) { 24050 if (un->un_f_lun_reset_enabled == TRUE) { 24051 reset_retval = 24052 scsi_reset(SD_ADDRESS(un), 24053 RESET_LUN); 24054 } 24055 if (reset_retval == 0) { 24056 reset_retval = 24057 scsi_reset(SD_ADDRESS(un), 24058 RESET_TARGET); 24059 } 24060 } 24061 if (reset_retval == 0) { 24062 (void) scsi_reset(SD_ADDRESS(un), RESET_ALL); 24063 } 24064 mutex_enter(SD_MUTEX(un)); 24065 } 24066 break; 24067 } 24068 24069 /* A device/bus reset has occurred; update the reservation status. */ 24070 if ((pkt->pkt_reason == CMD_RESET) || (pkt->pkt_statistics & 24071 (STAT_BUS_RESET | STAT_DEV_RESET))) { 24072 if ((un->un_resvd_status & SD_RESERVE) == SD_RESERVE) { 24073 un->un_resvd_status |= 24074 (SD_LOST_RESERVE | SD_WANT_RESERVE); 24075 SD_INFO(SD_LOG_IOCTL_MHD, un, 24076 "sd_mhd_watch_incomplete: Lost Reservation\n"); 24077 } 24078 } 24079 24080 /* 24081 * The disk has been turned off; update the device state. 24082 * 24083 * Note: Should we be offlining the disk here? 24084 */ 24085 if (pkt->pkt_state == STATE_GOT_BUS) { 24086 SD_INFO(SD_LOG_IOCTL_MHD, un, "sd_mhd_watch_incomplete: " 24087 "Disk not responding to selection\n"); 24088 if (un->un_state != SD_STATE_OFFLINE) { 24089 New_state(un, SD_STATE_OFFLINE); 24090 } 24091 } else if (be_chatty) { 24092 /* 24093 * Suppress messages if they are all the same pkt reason; 24094 * with TQ, many (up to 256) are returned with the same 24095 * pkt_reason. 24096 */ 24097 if (pkt->pkt_reason != un->un_last_pkt_reason) { 24098 SD_ERROR(SD_LOG_IOCTL_MHD, un, 24099 "sd_mhd_watch_incomplete: " 24100 "SCSI transport failed: reason '%s'\n", 24101 scsi_rname(pkt->pkt_reason)); 24102 } 24103 } 24104 un->un_last_pkt_reason = pkt->pkt_reason; 24105 mutex_exit(SD_MUTEX(un)); 24106 } 24107 24108 24109 /* 24110 * Function: sd_sname() 24111 * 24112 * Description: This is a simple little routine to return a string containing 24113 * a printable description of the command status byte, for use in 24114 * logging. 24115 * 24116 * Arguments: status - the command status byte 24117 * 24118 * Return Code: char * - string containing status description.
24119 */ 24120 24121 static char * 24122 sd_sname(uchar_t status) 24123 { 24124 switch (status & STATUS_MASK) { 24125 case STATUS_GOOD: 24126 return ("good status"); 24127 case STATUS_CHECK: 24128 return ("check condition"); 24129 case STATUS_MET: 24130 return ("condition met"); 24131 case STATUS_BUSY: 24132 return ("busy"); 24133 case STATUS_INTERMEDIATE: 24134 return ("intermediate"); 24135 case STATUS_INTERMEDIATE_MET: 24136 return ("intermediate - condition met"); 24137 case STATUS_RESERVATION_CONFLICT: 24138 return ("reservation_conflict"); 24139 case STATUS_TERMINATED: 24140 return ("command terminated"); 24141 case STATUS_QFULL: 24142 return ("queue full"); 24143 default: 24144 return ("<unknown status>"); 24145 } 24146 } 24147 24148 24149 /* 24150 * Function: sd_mhd_resvd_recover() 24151 * 24152 * Description: This function adds a reservation entry to the 24153 * sd_resv_reclaim_request list and signals the reservation 24154 * reclaim thread that there is work pending. If the reservation 24155 * reclaim thread has not been previously created this function 24156 * will kick it off. 24157 * 24158 * Arguments: arg - the device 'dev_t' is used for context to discriminate 24159 * among multiple watches that share this callback function 24160 * 24161 * Context: This routine is called by timeout() and is run in interrupt 24162 * context. It must not sleep or call other functions which may 24163 * sleep. 24164 */ 24165 24166 static void 24167 sd_mhd_resvd_recover(void *arg) 24168 { 24169 dev_t dev = (dev_t)arg; 24170 struct sd_lun *un; 24171 struct sd_thr_request *sd_treq = NULL; 24172 struct sd_thr_request *sd_cur = NULL; 24173 struct sd_thr_request *sd_prev = NULL; 24174 int already_there = 0; 24175 24176 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 24177 return; 24178 } 24179 24180 mutex_enter(SD_MUTEX(un)); 24181 un->un_resvd_timeid = NULL; 24182 if (un->un_resvd_status & SD_WANT_RESERVE) { 24183 /* 24184 * There was a reset so don't issue the reserve, allow the 24185 * sd_mhd_watch_cb callback function to notice this and 24186 * reschedule the timeout for reservation. 24187 */ 24188 mutex_exit(SD_MUTEX(un)); 24189 return; 24190 } 24191 mutex_exit(SD_MUTEX(un)); 24192 24193 /* 24194 * Add this device to the sd_resv_reclaim_request list and the 24195 * sd_resv_reclaim_thread should take care of the rest. 24196 * 24197 * Note: We can't sleep in this context so if the memory allocation 24198 * fails allow the sd_mhd_watch_cb callback function to notice this and 24199 * reschedule the timeout for reservation. 
(4378460) 24200 */ 24201 sd_treq = (struct sd_thr_request *) 24202 kmem_zalloc(sizeof (struct sd_thr_request), KM_NOSLEEP); 24203 if (sd_treq == NULL) { 24204 return; 24205 } 24206 24207 sd_treq->sd_thr_req_next = NULL; 24208 sd_treq->dev = dev; 24209 mutex_enter(&sd_tr.srq_resv_reclaim_mutex); 24210 if (sd_tr.srq_thr_req_head == NULL) { 24211 sd_tr.srq_thr_req_head = sd_treq; 24212 } else { 24213 sd_cur = sd_prev = sd_tr.srq_thr_req_head; 24214 for (; sd_cur != NULL; sd_cur = sd_cur->sd_thr_req_next) { 24215 if (sd_cur->dev == dev) { 24216 /* 24217 * already in the queue, so don't log 24218 * another request for the device 24219 */ 24220 already_there = 1; 24221 break; 24222 } 24223 sd_prev = sd_cur; 24224 } 24225 if (!already_there) { 24226 SD_INFO(SD_LOG_IOCTL_MHD, un, "sd_mhd_resvd_recover: " 24227 "logging request for %lx\n", dev); 24228 sd_prev->sd_thr_req_next = sd_treq; 24229 } else { 24230 kmem_free(sd_treq, sizeof (struct sd_thr_request)); 24231 } 24232 } 24233 24234 /* 24235 * Create a kernel thread to do the reservation reclaim and free up this 24236 * thread; we cannot block this thread while the reservation reclaim is 24237 * carried out. 24238 */ 24239 if (sd_tr.srq_resv_reclaim_thread == NULL) 24240 sd_tr.srq_resv_reclaim_thread = thread_create(NULL, 0, 24241 sd_resv_reclaim_thread, NULL, 24242 0, &p0, TS_RUN, v.v_maxsyspri - 2); 24243 24244 /* Tell the reservation reclaim thread that it has work to do */ 24245 cv_signal(&sd_tr.srq_resv_reclaim_cv); 24246 mutex_exit(&sd_tr.srq_resv_reclaim_mutex); 24247 } 24248 24249 /* 24250 * Function: sd_resv_reclaim_thread() 24251 * 24252 * Description: This function implements the reservation reclaim operations. 24253 * 24254 * Arguments: none; the thread services the shared sd_resv_reclaim_request 24255 * list for all devices. 24256 */ 24257 24258 static void 24259 sd_resv_reclaim_thread() 24260 { 24261 struct sd_lun *un; 24262 struct sd_thr_request *sd_mhreq; 24263 24264 /* Wait for work */ 24265 mutex_enter(&sd_tr.srq_resv_reclaim_mutex); 24266 if (sd_tr.srq_thr_req_head == NULL) { 24267 cv_wait(&sd_tr.srq_resv_reclaim_cv, 24268 &sd_tr.srq_resv_reclaim_mutex); 24269 } 24270 24271 /* Loop while we have work */ 24272 while ((sd_tr.srq_thr_cur_req = sd_tr.srq_thr_req_head) != NULL) { 24273 un = ddi_get_soft_state(sd_state, 24274 SDUNIT(sd_tr.srq_thr_cur_req->dev)); 24275 if (un == NULL) { 24276 /* 24277 * softstate structure is NULL so just 24278 * dequeue the request and continue 24279 */ 24280 sd_tr.srq_thr_req_head = 24281 sd_tr.srq_thr_cur_req->sd_thr_req_next; 24282 kmem_free(sd_tr.srq_thr_cur_req, 24283 sizeof (struct sd_thr_request)); 24284 continue; 24285 } 24286 24287 /* dequeue the request */ 24288 sd_mhreq = sd_tr.srq_thr_cur_req; 24289 sd_tr.srq_thr_req_head = 24290 sd_tr.srq_thr_cur_req->sd_thr_req_next; 24291 mutex_exit(&sd_tr.srq_resv_reclaim_mutex); 24292 24293 /* 24294 * Reclaim reservation only if SD_RESERVE is still set. There 24295 * may have been a call to MHIOCRELEASE before we got here. 24296 */ 24297 mutex_enter(SD_MUTEX(un)); 24298 if ((un->un_resvd_status & SD_RESERVE) == SD_RESERVE) { 24299 /* 24300 * Note: The SD_LOST_RESERVE flag is cleared before 24301 * reclaiming the reservation.
If this is done after the 24302 * call to sd_reserve_release, a reservation loss in the 24303 * window between pkt completion of the reserve cmd and 24304 * the mutex_enter below may not be recognized. 24305 */ 24306 un->un_resvd_status &= ~SD_LOST_RESERVE; 24307 mutex_exit(SD_MUTEX(un)); 24308 24309 if (sd_reserve_release(sd_mhreq->dev, 24310 SD_RESERVE) == 0) { 24311 mutex_enter(SD_MUTEX(un)); 24312 un->un_resvd_status |= SD_RESERVE; 24313 mutex_exit(SD_MUTEX(un)); 24314 SD_INFO(SD_LOG_IOCTL_MHD, un, 24315 "sd_resv_reclaim_thread: " 24316 "Reservation Recovered\n"); 24317 } else { 24318 mutex_enter(SD_MUTEX(un)); 24319 un->un_resvd_status |= SD_LOST_RESERVE; 24320 mutex_exit(SD_MUTEX(un)); 24321 SD_INFO(SD_LOG_IOCTL_MHD, un, 24322 "sd_resv_reclaim_thread: Failed " 24323 "Reservation Recovery\n"); 24324 } 24325 } else { 24326 mutex_exit(SD_MUTEX(un)); 24327 } 24328 mutex_enter(&sd_tr.srq_resv_reclaim_mutex); 24329 ASSERT(sd_mhreq == sd_tr.srq_thr_cur_req); 24330 kmem_free(sd_mhreq, sizeof (struct sd_thr_request)); 24331 sd_mhreq = sd_tr.srq_thr_cur_req = NULL; 24332 /* 24333 * wake up the destroy thread if anyone is waiting on 24334 * us to complete. 24335 */ 24336 cv_signal(&sd_tr.srq_inprocess_cv); 24337 SD_TRACE(SD_LOG_IOCTL_MHD, un, 24338 "sd_resv_reclaim_thread: cv_signalling current request \n"); 24339 } 24340 24341 /* 24342 * cleanup the sd_tr structure now that this thread will not exist 24343 */ 24344 ASSERT(sd_tr.srq_thr_req_head == NULL); 24345 ASSERT(sd_tr.srq_thr_cur_req == NULL); 24346 sd_tr.srq_resv_reclaim_thread = NULL; 24347 mutex_exit(&sd_tr.srq_resv_reclaim_mutex); 24348 thread_exit(); 24349 } 24350 24351 24352 /* 24353 * Function: sd_rmv_resv_reclaim_req() 24354 * 24355 * Description: This function removes any pending reservation reclaim requests 24356 * for the specified device. 24357 * 24358 * Arguments: dev - the device 'dev_t' 24359 */ 24360 24361 static void 24362 sd_rmv_resv_reclaim_req(dev_t dev) 24363 { 24364 struct sd_thr_request *sd_mhreq; 24365 struct sd_thr_request *sd_prev; 24366 24367 /* Remove a reservation reclaim request from the list */ 24368 mutex_enter(&sd_tr.srq_resv_reclaim_mutex); 24369 if (sd_tr.srq_thr_cur_req && sd_tr.srq_thr_cur_req->dev == dev) { 24370 /* 24371 * We are attempting to reinstate reservation for 24372 * this device. We wait for sd_reserve_release() 24373 * to return before we return. 24374 */ 24375 cv_wait(&sd_tr.srq_inprocess_cv, 24376 &sd_tr.srq_resv_reclaim_mutex); 24377 } else { 24378 sd_prev = sd_mhreq = sd_tr.srq_thr_req_head; 24379 if (sd_mhreq && sd_mhreq->dev == dev) { 24380 sd_tr.srq_thr_req_head = sd_mhreq->sd_thr_req_next; 24381 kmem_free(sd_mhreq, sizeof (struct sd_thr_request)); 24382 mutex_exit(&sd_tr.srq_resv_reclaim_mutex); 24383 return; 24384 } 24385 for (; sd_mhreq != NULL; sd_mhreq = sd_mhreq->sd_thr_req_next) { 24386 if (sd_mhreq && sd_mhreq->dev == dev) { 24387 break; 24388 } 24389 sd_prev = sd_mhreq; 24390 } 24391 if (sd_mhreq != NULL) { 24392 sd_prev->sd_thr_req_next = sd_mhreq->sd_thr_req_next; 24393 kmem_free(sd_mhreq, sizeof (struct sd_thr_request)); 24394 } 24395 } 24396 mutex_exit(&sd_tr.srq_resv_reclaim_mutex); 24397 } 24398 24399 24400 /* 24401 * Function: sd_mhd_reset_notify_cb() 24402 * 24403 * Description: This is a call back function for scsi_reset_notify. This 24404 * function updates the softstate reserved status and logs the 24405 * reset. The driver scsi watch facility callback function 24406 * (sd_mhd_watch_cb) and reservation reclaim thread functionality 24407 * will reclaim the reservation.
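 *
 * For reference, a registration sketch (illustrative only; the actual
 * call site in this driver may differ): a callback of this form is
 * hooked up through the SCSA reset notification service, e.g.
 *
 *	(void) scsi_reset_notify(SD_ADDRESS(un), SCSI_RESET_NOTIFY,
 *	    sd_mhd_reset_notify_cb, (caddr_t)un);
 *
 * and is cancelled with the SCSI_RESET_CANCEL flag during teardown.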
24408 * 24409 * Arguments: arg - driver soft state (unit) structure 24410 */ 24411 24412 static void 24413 sd_mhd_reset_notify_cb(caddr_t arg) 24414 { 24415 struct sd_lun *un = (struct sd_lun *)arg; 24416 24417 mutex_enter(SD_MUTEX(un)); 24418 if ((un->un_resvd_status & SD_RESERVE) == SD_RESERVE) { 24419 un->un_resvd_status |= (SD_LOST_RESERVE | SD_WANT_RESERVE); 24420 SD_INFO(SD_LOG_IOCTL_MHD, un, 24421 "sd_mhd_reset_notify_cb: Lost Reservation\n"); 24422 } 24423 mutex_exit(SD_MUTEX(un)); 24424 } 24425 24426 24427 /* 24428 * Function: sd_take_ownership() 24429 * 24430 * Description: This routine implements an algorithm to achieve a stable 24431 * reservation on disks which don't implement priority reserve, 24432 * and makes sure that other hosts' re-reservation attempts fail. 24433 * This algorithm consists of a loop that keeps issuing the RESERVE 24434 * for some period of time (min_ownership_delay, default 6 seconds). 24435 * During that loop, it looks to see if there has been a bus device 24436 * reset or bus reset (both of which cause an existing reservation 24437 * to be lost). If the reservation is lost, issue RESERVE until a 24438 * period of min_ownership_delay with no resets has gone by, or 24439 * until max_ownership_delay has expired. This loop ensures that 24440 * the host really did manage to reserve the device, in spite of 24441 * resets. The looping for min_ownership_delay (default six 24442 * seconds) is important to early generation clustering products, 24443 * Solstice HA 1.x and Sun Cluster 2.x. Those products use an 24444 * MHIOCENFAILFAST periodic timer of two seconds. By having 24445 * MHIOCTKOWN issue Reserves in a loop for six seconds, and having 24446 * MHIOCENFAILFAST poll every two seconds, the idea is that by the 24447 * time the MHIOCTKOWN ioctl returns, the other host (if any) will 24448 * have already noticed, via the MHIOCENFAILFAST polling, that it 24449 * no longer "owns" the disk and will have panicked itself. Thus, 24450 * the host issuing the MHIOCTKOWN is assured (with timing 24451 * dependencies) that by the time it actually starts to use the 24452 * disk for real work, the old owner is no longer accessing it. 24453 * 24454 * min_ownership_delay is the minimum amount of time for which the 24455 * disk must be reserved continuously devoid of resets before the 24456 * MHIOCTKOWN ioctl will return success. 24457 * 24458 * max_ownership_delay indicates the amount of time by which the 24459 * take ownership should succeed or time out with an error. 24460 * 24461 * Arguments: dev - the device 'dev_t' 24462 * *p - struct containing timing info. 24463 * 24464 * Return Code: 0 for success or error code 24465 */ 24466 24467 static int 24468 sd_take_ownership(dev_t dev, struct mhioctkown *p) 24469 { 24470 struct sd_lun *un; 24471 int rval; 24472 int err; 24473 int reservation_count = 0; 24474 int min_ownership_delay = 6000000; /* in usec */ 24475 int max_ownership_delay = 30000000; /* in usec */ 24476 clock_t start_time; /* starting time of this algorithm */ 24477 clock_t end_time; /* time limit for giving up */ 24478 clock_t ownership_time; /* time limit for stable ownership */ 24479 clock_t current_time; 24480 clock_t previous_current_time; 24481 24482 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 24483 return (ENXIO); 24484 } 24485 24486 /* 24487 * Attempt a device reservation. A priority reservation is requested.
24488 */ 24489 if ((rval = sd_reserve_release(dev, SD_PRIORITY_RESERVE)) 24490 != SD_SUCCESS) { 24491 SD_ERROR(SD_LOG_IOCTL_MHD, un, 24492 "sd_take_ownership: return(1)=%d\n", rval); 24493 return (rval); 24494 } 24495 24496 /* Update the softstate reserved status to indicate the reservation */ 24497 mutex_enter(SD_MUTEX(un)); 24498 un->un_resvd_status |= SD_RESERVE; 24499 un->un_resvd_status &= 24500 ~(SD_LOST_RESERVE | SD_WANT_RESERVE | SD_RESERVATION_CONFLICT); 24501 mutex_exit(SD_MUTEX(un)); 24502 24503 if (p != NULL) { 24504 if (p->min_ownership_delay != 0) { 24505 min_ownership_delay = p->min_ownership_delay * 1000; 24506 } 24507 if (p->max_ownership_delay != 0) { 24508 max_ownership_delay = p->max_ownership_delay * 1000; 24509 } 24510 } 24511 SD_INFO(SD_LOG_IOCTL_MHD, un, 24512 "sd_take_ownership: min, max delays: %d, %d\n", 24513 min_ownership_delay, max_ownership_delay); 24514 24515 start_time = ddi_get_lbolt(); 24516 current_time = start_time; 24517 ownership_time = current_time + drv_usectohz(min_ownership_delay); 24518 end_time = start_time + drv_usectohz(max_ownership_delay); 24519 24520 while (current_time - end_time < 0) { 24521 delay(drv_usectohz(500000)); 24522 24523 if ((err = sd_reserve_release(dev, SD_RESERVE)) != 0) { 24524 if ((sd_reserve_release(dev, SD_RESERVE)) != 0) { 24525 mutex_enter(SD_MUTEX(un)); 24526 rval = (un->un_resvd_status & 24527 SD_RESERVATION_CONFLICT) ? EACCES : EIO; 24528 mutex_exit(SD_MUTEX(un)); 24529 break; 24530 } 24531 } 24532 previous_current_time = current_time; 24533 current_time = ddi_get_lbolt(); 24534 mutex_enter(SD_MUTEX(un)); 24535 if (err || (un->un_resvd_status & SD_LOST_RESERVE)) { 24536 ownership_time = ddi_get_lbolt() + 24537 drv_usectohz(min_ownership_delay); 24538 reservation_count = 0; 24539 } else { 24540 reservation_count++; 24541 } 24542 un->un_resvd_status |= SD_RESERVE; 24543 un->un_resvd_status &= ~(SD_LOST_RESERVE | SD_WANT_RESERVE); 24544 mutex_exit(SD_MUTEX(un)); 24545 24546 SD_INFO(SD_LOG_IOCTL_MHD, un, 24547 "sd_take_ownership: ticks for loop iteration=%ld, " 24548 "reservation=%s\n", (current_time - previous_current_time), 24549 reservation_count ? 
"ok" : "reclaimed"); 24550 24551 if (current_time - ownership_time >= 0 && 24552 reservation_count >= 4) { 24553 rval = 0; /* Achieved a stable ownership */ 24554 break; 24555 } 24556 if (current_time - end_time >= 0) { 24557 rval = EACCES; /* No ownership in max possible time */ 24558 break; 24559 } 24560 } 24561 SD_TRACE(SD_LOG_IOCTL_MHD, un, 24562 "sd_take_ownership: return(2)=%d\n", rval); 24563 return (rval); 24564 } 24565 24566 24567 /* 24568 * Function: sd_reserve_release() 24569 * 24570 * Description: This function builds and sends scsi RESERVE, RELEASE, and 24571 * PRIORITY RESERVE commands based on a user specified command type 24572 * 24573 * Arguments: dev - the device 'dev_t' 24574 * cmd - user specified command type; one of SD_PRIORITY_RESERVE, 24575 * SD_RESERVE, SD_RELEASE 24576 * 24577 * Return Code: 0 or Error Code 24578 */ 24579 24580 static int 24581 sd_reserve_release(dev_t dev, int cmd) 24582 { 24583 struct uscsi_cmd *com = NULL; 24584 struct sd_lun *un = NULL; 24585 char cdb[CDB_GROUP0]; 24586 int rval; 24587 24588 ASSERT((cmd == SD_RELEASE) || (cmd == SD_RESERVE) || 24589 (cmd == SD_PRIORITY_RESERVE)); 24590 24591 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 24592 return (ENXIO); 24593 } 24594 24595 /* instantiate and initialize the command and cdb */ 24596 com = kmem_zalloc(sizeof (*com), KM_SLEEP); 24597 bzero(cdb, CDB_GROUP0); 24598 com->uscsi_flags = USCSI_SILENT; 24599 com->uscsi_timeout = un->un_reserve_release_time; 24600 com->uscsi_cdblen = CDB_GROUP0; 24601 com->uscsi_cdb = cdb; 24602 if (cmd == SD_RELEASE) { 24603 cdb[0] = SCMD_RELEASE; 24604 } else { 24605 cdb[0] = SCMD_RESERVE; 24606 } 24607 24608 /* Send the command. */ 24609 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_SYSSPACE, 24610 SD_PATH_STANDARD); 24611 24612 /* 24613 * "break" a reservation that is held by another host, by issuing a 24614 * reset if priority reserve is desired, and we could not get the 24615 * device. 24616 */ 24617 if ((cmd == SD_PRIORITY_RESERVE) && 24618 (rval != 0) && (com->uscsi_status == STATUS_RESERVATION_CONFLICT)) { 24619 /* 24620 * First try to reset the LUN. If we cannot, then try a target 24621 * reset, followed by a bus reset if the target reset fails. 24622 */ 24623 int reset_retval = 0; 24624 if (un->un_f_lun_reset_enabled == TRUE) { 24625 reset_retval = scsi_reset(SD_ADDRESS(un), RESET_LUN); 24626 } 24627 if (reset_retval == 0) { 24628 /* The LUN reset either failed or was not issued */ 24629 reset_retval = scsi_reset(SD_ADDRESS(un), RESET_TARGET); 24630 } 24631 if ((reset_retval == 0) && 24632 (scsi_reset(SD_ADDRESS(un), RESET_ALL) == 0)) { 24633 rval = EIO; 24634 kmem_free(com, sizeof (*com)); 24635 return (rval); 24636 } 24637 24638 bzero(com, sizeof (struct uscsi_cmd)); 24639 com->uscsi_flags = USCSI_SILENT; 24640 com->uscsi_cdb = cdb; 24641 com->uscsi_cdblen = CDB_GROUP0; 24642 com->uscsi_timeout = 5; 24643 24644 /* 24645 * Reissue the last reserve command, this time without request 24646 * sense. Assume that it is just a regular reserve command. 24647 */ 24648 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_SYSSPACE, 24649 SD_PATH_STANDARD); 24650 } 24651 24652 /* Return an error if still getting a reservation conflict. 
*/ 24653 if ((rval != 0) && (com->uscsi_status == STATUS_RESERVATION_CONFLICT)) { 24654 rval = EACCES; 24655 } 24656 24657 kmem_free(com, sizeof (*com)); 24658 return (rval); 24659 } 24660 24661 24662 #define SD_NDUMP_RETRIES 12 24663 /* 24664 * System Crash Dump routine 24665 */ 24666 24667 static int 24668 sddump(dev_t dev, caddr_t addr, daddr_t blkno, int nblk) 24669 { 24670 int instance; 24671 int partition; 24672 int i; 24673 int err; 24674 struct sd_lun *un; 24675 struct scsi_pkt *wr_pktp; 24676 struct buf *wr_bp; 24677 struct buf wr_buf; 24678 daddr_t tgt_byte_offset; /* rmw - byte offset for target */ 24679 daddr_t tgt_blkno; /* rmw - blkno for target */ 24680 size_t tgt_byte_count; /* rmw - # of bytes to xfer */ 24681 size_t tgt_nblk; /* rmw - # of tgt blks to xfer */ 24682 size_t io_start_offset; 24683 int doing_rmw = FALSE; 24684 int rval; 24685 ssize_t dma_resid; 24686 daddr_t oblkno; 24687 diskaddr_t nblks = 0; 24688 diskaddr_t start_block; 24689 24690 instance = SDUNIT(dev); 24691 if (((un = ddi_get_soft_state(sd_state, instance)) == NULL) || 24692 !SD_IS_VALID_LABEL(un) || ISCD(un)) { 24693 return (ENXIO); 24694 } 24695 24696 _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*un)) 24697 24698 SD_TRACE(SD_LOG_DUMP, un, "sddump: entry\n"); 24699 24700 partition = SDPART(dev); 24701 SD_INFO(SD_LOG_DUMP, un, "sddump: partition = %d\n", partition); 24702 24703 /* Validate the blocks to dump against the partition size. */ 24704 24705 (void) cmlb_partinfo(un->un_cmlbhandle, partition, 24706 &nblks, &start_block, NULL, NULL, (void *)SD_PATH_DIRECT); 24707 24708 if ((blkno + nblk) > nblks) { 24709 SD_TRACE(SD_LOG_DUMP, un, 24710 "sddump: dump range larger than partition: " 24711 "blkno = 0x%x, nblk = 0x%x, dkl_nblk = 0x%x\n", 24712 blkno, nblk, nblks); 24713 return (EINVAL); 24714 } 24715 24716 mutex_enter(&un->un_pm_mutex); 24717 if (SD_DEVICE_IS_IN_LOW_POWER(un)) { 24718 struct scsi_pkt *start_pktp; 24719 24720 mutex_exit(&un->un_pm_mutex); 24721 24722 /* 24723 * use the pm framework to power on the HBA first 24724 */ 24725 (void) pm_raise_power(SD_DEVINFO(un), 0, SD_SPINDLE_ON); 24726 24727 /* 24728 * Dump no longer uses sdpower to power on a device; it is 24729 * done in-line here so it can be done in polled mode. 24730 */ 24731 24732 SD_INFO(SD_LOG_DUMP, un, "sddump: starting device\n"); 24733 24734 start_pktp = scsi_init_pkt(SD_ADDRESS(un), NULL, NULL, 24735 CDB_GROUP0, un->un_status_len, 0, 0, NULL_FUNC, NULL); 24736 24737 if (start_pktp == NULL) { 24738 /* We were not given a SCSI packet, fail. */ 24739 return (EIO); 24740 } 24741 bzero(start_pktp->pkt_cdbp, CDB_GROUP0); 24742 start_pktp->pkt_cdbp[0] = SCMD_START_STOP; 24743 start_pktp->pkt_cdbp[4] = SD_TARGET_START; 24744 start_pktp->pkt_flags = FLAG_NOINTR; 24745 24746 mutex_enter(SD_MUTEX(un)); 24747 SD_FILL_SCSI1_LUN(un, start_pktp); 24748 mutex_exit(SD_MUTEX(un)); 24749 /* 24750 * Scsi_poll returns 0 (success) if the command completes and 24751 * the status block is STATUS_GOOD. 24752 */ 24753 if (sd_scsi_poll(un, start_pktp) != 0) { 24754 scsi_destroy_pkt(start_pktp); 24755 return (EIO); 24756 } 24757 scsi_destroy_pkt(start_pktp); 24758 (void) sd_ddi_pm_resume(un); 24759 } else { 24760 mutex_exit(&un->un_pm_mutex); 24761 } 24762 24763 mutex_enter(SD_MUTEX(un)); 24764 un->un_throttle = 0; 24765 24766 /* 24767 * The first time through, reset the specific target device. 24768 * However, when cpr calls sddump we know that sd is in 24769 * a good state, so no bus reset is required. 24770 * Clear sense data via Request Sense cmd.
24771 * In sddump we don't care about allow_bus_device_reset anymore. 24772 */ 24773 24774 if ((un->un_state != SD_STATE_SUSPENDED) && 24775 (un->un_state != SD_STATE_DUMPING)) { 24776 24777 New_state(un, SD_STATE_DUMPING); 24778 24779 if (un->un_f_is_fibre == FALSE) { 24780 mutex_exit(SD_MUTEX(un)); 24781 /* 24782 * Attempt a bus reset for parallel scsi. 24783 * 24784 * Note: A bus reset is required because on some host 24785 * systems (i.e. E420R) a bus device reset is 24786 * insufficient to reset the state of the target. 24787 * 24788 * Note: Don't issue the reset for fibre-channel, 24789 * because this tends to hang the bus (loop) for 24790 * too long while everyone is logging out and in 24791 * and the deadman timer for dumping will fire 24792 * before the dump is complete. 24793 */ 24794 if (scsi_reset(SD_ADDRESS(un), RESET_ALL) == 0) { 24795 mutex_enter(SD_MUTEX(un)); 24796 Restore_state(un); 24797 mutex_exit(SD_MUTEX(un)); 24798 return (EIO); 24799 } 24800 24801 /* Delay to give the device some recovery time. */ 24802 drv_usecwait(10000); 24803 24804 if (sd_send_polled_RQS(un) == SD_FAILURE) { 24805 SD_INFO(SD_LOG_DUMP, un, 24806 "sddump: sd_send_polled_RQS failed\n"); 24807 } 24808 mutex_enter(SD_MUTEX(un)); 24809 } 24810 } 24811 24812 /* 24813 * Convert the partition-relative block number to a 24814 * disk physical block number. 24815 */ 24816 blkno += start_block; 24817 24818 SD_INFO(SD_LOG_DUMP, un, "sddump: disk blkno = 0x%x\n", blkno); 24819 24820 24821 /* 24822 * Check if the device has a non-512 block size. 24823 */ 24824 wr_bp = NULL; 24825 if (NOT_DEVBSIZE(un)) { 24826 tgt_byte_offset = blkno * un->un_sys_blocksize; 24827 tgt_byte_count = nblk * un->un_sys_blocksize; 24828 if ((tgt_byte_offset % un->un_tgt_blocksize) || 24829 (tgt_byte_count % un->un_tgt_blocksize)) { 24830 doing_rmw = TRUE; 24831 /* 24832 * Calculate the block number and number of blocks 24833 * in terms of the media block size. 24834 */ 24835 tgt_blkno = tgt_byte_offset / un->un_tgt_blocksize; 24836 tgt_nblk = 24837 ((tgt_byte_offset + tgt_byte_count + 24838 (un->un_tgt_blocksize - 1)) / 24839 un->un_tgt_blocksize) - tgt_blkno; 24840 24841 /* 24842 * Invoke the routine which is going to do the read part 24843 * of the read-modify-write. 24844 * Note that this routine returns a pointer to 24845 * a valid bp in wr_bp. 24846 */ 24847 err = sddump_do_read_of_rmw(un, tgt_blkno, tgt_nblk, 24848 &wr_bp); 24849 if (err) { 24850 mutex_exit(SD_MUTEX(un)); 24851 return (err); 24852 } 24853 /* 24854 * The offset is calculated as: 24855 * (original block # * system block size) - 24856 * (new block # * target block size) 24857 */ 24858 io_start_offset = 24859 ((uint64_t)(blkno * un->un_sys_blocksize)) - 24860 ((uint64_t)(tgt_blkno * un->un_tgt_blocksize)); 24861 24862 ASSERT((io_start_offset >= 0) && 24863 (io_start_offset < un->un_tgt_blocksize)); 24864 /* 24865 * Do the modify portion of the read-modify-write.
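 *
 * Worked example with assumed sizes (un_sys_blocksize = 512,
 * un_tgt_blocksize = 2048): dumping nblk = 1 at system block 5 gives
 * tgt_byte_offset = 2560, so tgt_blkno = 1, tgt_nblk = 1, and
 * io_start_offset = (5 * 512) - (1 * 2048) = 512; the 512 bytes at
 * addr are copied into the read-back target block at that offset
 * before the whole block is written out.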
24866 */ 24867 bcopy(addr, &wr_bp->b_un.b_addr[io_start_offset], 24868 (size_t)nblk * un->un_sys_blocksize); 24869 } else { 24870 doing_rmw = FALSE; 24871 tgt_blkno = tgt_byte_offset / un->un_tgt_blocksize; 24872 tgt_nblk = tgt_byte_count / un->un_tgt_blocksize; 24873 } 24874 24875 /* Convert blkno and nblk to target blocks */ 24876 blkno = tgt_blkno; 24877 nblk = tgt_nblk; 24878 } else { 24879 wr_bp = &wr_buf; 24880 bzero(wr_bp, sizeof (struct buf)); 24881 wr_bp->b_flags = B_BUSY; 24882 wr_bp->b_un.b_addr = addr; 24883 wr_bp->b_bcount = nblk << DEV_BSHIFT; 24884 wr_bp->b_resid = 0; 24885 } 24886 24887 mutex_exit(SD_MUTEX(un)); 24888 24889 /* 24890 * Obtain a SCSI packet for the write command. 24891 * It should be safe to call the allocator here without 24892 * worrying about being locked for DVMA mapping because 24893 * the address we're passed is already a DVMA mapping 24894 * 24895 * We are also not going to worry about semaphore ownership 24896 * in the dump buffer. Dumping is single threaded at present. 24897 */ 24898 24899 wr_pktp = NULL; 24900 24901 dma_resid = wr_bp->b_bcount; 24902 oblkno = blkno; 24903 24904 while (dma_resid != 0) { 24905 24906 for (i = 0; i < SD_NDUMP_RETRIES; i++) { 24907 wr_bp->b_flags &= ~B_ERROR; 24908 24909 if (un->un_partial_dma_supported == 1) { 24910 blkno = oblkno + 24911 ((wr_bp->b_bcount - dma_resid) / 24912 un->un_tgt_blocksize); 24913 nblk = dma_resid / un->un_tgt_blocksize; 24914 24915 if (wr_pktp) { 24916 /* 24917 * Partial DMA transfers after initial transfer 24918 */ 24919 rval = sd_setup_next_rw_pkt(un, wr_pktp, wr_bp, 24920 blkno, nblk); 24921 } else { 24922 /* Initial transfer */ 24923 rval = sd_setup_rw_pkt(un, &wr_pktp, wr_bp, 24924 un->un_pkt_flags, NULL_FUNC, NULL, 24925 blkno, nblk); 24926 } 24927 } else { 24928 rval = sd_setup_rw_pkt(un, &wr_pktp, wr_bp, 24929 0, NULL_FUNC, NULL, blkno, nblk); 24930 } 24931 24932 if (rval == 0) { 24933 /* We were given a SCSI packet, continue. 
*/ 24934 break; 24935 } 24936 24937 if (i == 0) { 24938 if (wr_bp->b_flags & B_ERROR) { 24939 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 24940 "no resources for dumping; " 24941 "error code: 0x%x, retrying", 24942 geterror(wr_bp)); 24943 } else { 24944 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 24945 "no resources for dumping; retrying"); 24946 } 24947 } else if (i != (SD_NDUMP_RETRIES - 1)) { 24948 if (wr_bp->b_flags & B_ERROR) { 24949 scsi_log(SD_DEVINFO(un), sd_label, CE_CONT, 24950 "no resources for dumping; error code: " 24951 "0x%x, retrying\n", geterror(wr_bp)); 24952 } 24953 } else { 24954 if (wr_bp->b_flags & B_ERROR) { 24955 scsi_log(SD_DEVINFO(un), sd_label, CE_CONT, 24956 "no resources for dumping; " 24957 "error code: 0x%x, retries failed, " 24958 "giving up.\n", geterror(wr_bp)); 24959 } else { 24960 scsi_log(SD_DEVINFO(un), sd_label, CE_CONT, 24961 "no resources for dumping; " 24962 "retries failed, giving up.\n"); 24963 } 24964 mutex_enter(SD_MUTEX(un)); 24965 Restore_state(un); 24966 if (NOT_DEVBSIZE(un) && (doing_rmw == TRUE)) { 24967 mutex_exit(SD_MUTEX(un)); 24968 scsi_free_consistent_buf(wr_bp); 24969 } else { 24970 mutex_exit(SD_MUTEX(un)); 24971 } 24972 return (EIO); 24973 } 24974 drv_usecwait(10000); 24975 } 24976 24977 if (un->un_partial_dma_supported == 1) { 24978 /* 24979 * save the resid from PARTIAL_DMA 24980 */ 24981 dma_resid = wr_pktp->pkt_resid; 24982 if (dma_resid != 0) 24983 nblk -= SD_BYTES2TGTBLOCKS(un, dma_resid); 24984 wr_pktp->pkt_resid = 0; 24985 } else { 24986 dma_resid = 0; 24987 } 24988 24989 /* SunBug 1222170 */ 24990 wr_pktp->pkt_flags = FLAG_NOINTR; 24991 24992 err = EIO; 24993 for (i = 0; i < SD_NDUMP_RETRIES; i++) { 24994 24995 /* 24996 * Scsi_poll returns 0 (success) if the command completes and 24997 * the status block is STATUS_GOOD. We should only check 24998 * errors if this condition is not true. Even then we should 24999 * send our own request sense packet only if we have a check 25000 * condition and auto request sense has not been performed by 25001 * the hba. 25002 */ 25003 SD_TRACE(SD_LOG_DUMP, un, "sddump: sending write\n"); 25004 25005 if ((sd_scsi_poll(un, wr_pktp) == 0) && 25006 (wr_pktp->pkt_resid == 0)) { 25007 err = SD_SUCCESS; 25008 break; 25009 } 25010 25011 /* 25012 * Check CMD_DEV_GONE 1st, give up if device is gone. 
25013 */ 25014 if (wr_pktp->pkt_reason == CMD_DEV_GONE) { 25015 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 25016 "Error while dumping state...Device is gone\n"); 25017 break; 25018 } 25019 25020 if (SD_GET_PKT_STATUS(wr_pktp) == STATUS_CHECK) { 25021 SD_INFO(SD_LOG_DUMP, un, 25022 "sddump: write failed with CHECK, try # %d\n", i); 25023 if (((wr_pktp->pkt_state & STATE_ARQ_DONE) == 0)) { 25024 (void) sd_send_polled_RQS(un); 25025 } 25026 25027 continue; 25028 } 25029 25030 if (SD_GET_PKT_STATUS(wr_pktp) == STATUS_BUSY) { 25031 int reset_retval = 0; 25032 25033 SD_INFO(SD_LOG_DUMP, un, 25034 "sddump: write failed with BUSY, try # %d\n", i); 25035 25036 if (un->un_f_lun_reset_enabled == TRUE) { 25037 reset_retval = scsi_reset(SD_ADDRESS(un), 25038 RESET_LUN); 25039 } 25040 if (reset_retval == 0) { 25041 (void) scsi_reset(SD_ADDRESS(un), RESET_TARGET); 25042 } 25043 (void) sd_send_polled_RQS(un); 25044 25045 } else { 25046 SD_INFO(SD_LOG_DUMP, un, 25047 "sddump: write failed with 0x%x, try # %d\n", 25048 SD_GET_PKT_STATUS(wr_pktp), i); 25049 mutex_enter(SD_MUTEX(un)); 25050 sd_reset_target(un, wr_pktp); 25051 mutex_exit(SD_MUTEX(un)); 25052 } 25053 25054 /* 25055 * If we are not getting anywhere with lun/target resets, 25056 * let's reset the bus. 25057 */ 25058 if (i == SD_NDUMP_RETRIES/2) { 25059 (void) scsi_reset(SD_ADDRESS(un), RESET_ALL); 25060 (void) sd_send_polled_RQS(un); 25061 } 25062 } 25063 } 25064 25065 scsi_destroy_pkt(wr_pktp); 25066 mutex_enter(SD_MUTEX(un)); 25067 if ((NOT_DEVBSIZE(un)) && (doing_rmw == TRUE)) { 25068 mutex_exit(SD_MUTEX(un)); 25069 scsi_free_consistent_buf(wr_bp); 25070 } else { 25071 mutex_exit(SD_MUTEX(un)); 25072 } 25073 SD_TRACE(SD_LOG_DUMP, un, "sddump: exit: err = %d\n", err); 25074 return (err); 25075 } 25076 25077 /* 25078 * Function: sd_scsi_poll() 25079 * 25080 * Description: This is a wrapper for the scsi_poll call. 25081 * 25082 * Arguments: sd_lun - The unit structure 25083 * scsi_pkt - The scsi packet being sent to the device. 25084 * 25085 * Return Code: 0 - Command completed successfully with good status 25086 * -1 - Command failed. This could indicate a check condition 25087 * or other status value requiring recovery action. 25088 * 25089 * NOTE: This code is only called off sddump(). 25090 */ 25091 25092 static int 25093 sd_scsi_poll(struct sd_lun *un, struct scsi_pkt *pktp) 25094 { 25095 int status; 25096 25097 ASSERT(un != NULL); 25098 ASSERT(!mutex_owned(SD_MUTEX(un))); 25099 ASSERT(pktp != NULL); 25100 25101 status = SD_SUCCESS; 25102 25103 if (scsi_ifgetcap(&pktp->pkt_address, "tagged-qing", 1) == 1) { 25104 pktp->pkt_flags |= un->un_tagflags; 25105 pktp->pkt_flags &= ~FLAG_NODISCON; 25106 } 25107 25108 status = sd_ddi_scsi_poll(pktp); 25109 /* 25110 * Scsi_poll returns 0 (success) if the command completes and the 25111 * status block is STATUS_GOOD. We should only check errors if this 25112 * condition is not true. Even then we should send our own request 25113 * sense packet only if we have a check condition and auto 25114 * request sense has not been performed by the hba. 25115 * Don't get RQS data if pkt_reason is CMD_DEV_GONE. 25116 */ 25117 if ((status != SD_SUCCESS) && 25118 (SD_GET_PKT_STATUS(pktp) == STATUS_CHECK) && 25119 (pktp->pkt_state & STATE_ARQ_DONE) == 0 && 25120 (pktp->pkt_reason != CMD_DEV_GONE)) 25121 (void) sd_send_polled_RQS(un); 25122 25123 return (status); 25124 } 25125 25126 /* 25127 * Function: sd_send_polled_RQS() 25128 * 25129 * Description: This sends the request sense command to a device. 
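 *
 * Note that this path reuses the sense packet and buffer hung off
 * the soft state (un_rqs_pktp and un_rqs_bp, set up ahead of time
 * at attach) so that no memory allocation is needed while the
 * system is dumping.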
25130 * 25131 * Arguments: sd_lun - The unit structure 25132 * 25133 * Return Code: 0 - Command completed successfully with good status 25134 * -1 - Command failed. 25135 * 25136 */ 25137 25138 static int 25139 sd_send_polled_RQS(struct sd_lun *un) 25140 { 25141 int ret_val; 25142 struct scsi_pkt *rqs_pktp; 25143 struct buf *rqs_bp; 25144 25145 ASSERT(un != NULL); 25146 ASSERT(!mutex_owned(SD_MUTEX(un))); 25147 25148 ret_val = SD_SUCCESS; 25149 25150 rqs_pktp = un->un_rqs_pktp; 25151 rqs_bp = un->un_rqs_bp; 25152 25153 mutex_enter(SD_MUTEX(un)); 25154 25155 if (un->un_sense_isbusy) { 25156 ret_val = SD_FAILURE; 25157 mutex_exit(SD_MUTEX(un)); 25158 return (ret_val); 25159 } 25160 25161 /* 25162 * If the request sense buffer (and packet) is not in use, 25163 * set un_sense_isbusy and send our packet. 25164 */ 25165 un->un_sense_isbusy = 1; 25166 rqs_pktp->pkt_resid = 0; 25167 rqs_pktp->pkt_reason = 0; 25168 rqs_pktp->pkt_flags |= FLAG_NOINTR; 25169 bzero(rqs_bp->b_un.b_addr, SENSE_LENGTH); 25170 25171 mutex_exit(SD_MUTEX(un)); 25172 25173 SD_INFO(SD_LOG_COMMON, un, "sd_send_polled_RQS: req sense buf at" 25174 " 0x%p\n", rqs_bp->b_un.b_addr); 25175 25176 /* 25177 * Can't send this to sd_scsi_poll; we'd wrap ourselves around the 25178 * axle - it has a call into us! 25179 */ 25180 if ((ret_val = sd_ddi_scsi_poll(rqs_pktp)) != 0) { 25181 SD_INFO(SD_LOG_COMMON, un, 25182 "sd_send_polled_RQS: RQS failed\n"); 25183 } 25184 25185 SD_DUMP_MEMORY(un, SD_LOG_COMMON, "sd_send_polled_RQS:", 25186 (uchar_t *)rqs_bp->b_un.b_addr, SENSE_LENGTH, SD_LOG_HEX); 25187 25188 mutex_enter(SD_MUTEX(un)); 25189 un->un_sense_isbusy = 0; 25190 mutex_exit(SD_MUTEX(un)); 25191 25192 return (ret_val); 25193 } 25194 25195 /* 25196 * Defines needed for localized version of the scsi_poll routine. 25197 */ 25198 #define CSEC 10000 /* usecs */ 25199 #define SEC_TO_CSEC (1000000/CSEC) 25200 25201 /* 25202 * Function: sd_ddi_scsi_poll() 25203 * 25204 * Description: Localized version of the scsi_poll routine. The purpose is to 25205 * send a scsi_pkt to a device as a polled command. This version 25206 * is to ensure more robust handling of transport errors. 25207 * Specifically, this routine handles the not-ready to ready 25208 * transition for power-up and reset of Sonoma devices. This can 25209 * take up to 45 seconds for power-on and 20 seconds for reset of 25210 * a Sonoma lun. 25211 * 25212 * Arguments: scsi_pkt - The scsi_pkt being sent to a device 25213 * 25214 * Return Code: 0 - Command completed successfully with good status 25215 * -1 - Command failed. 25216 * 25217 * NOTE: This code is almost identical to scsi_poll; however, before 6668774 can 25218 * be fixed (removing this code), we need to determine how to handle the 25219 * KEY_UNIT_ATTENTION condition below in conditions not as limited as sddump(). 25220 * 25221 * NOTE: This code is only called off sddump(). 25222 */ 25223 static int 25224 sd_ddi_scsi_poll(struct scsi_pkt *pkt) 25225 { 25226 int rval = -1; 25227 int savef; 25228 long savet; 25229 void (*savec)(); 25230 int timeout; 25231 int busy_count; 25232 int poll_delay; 25233 int rc; 25234 uint8_t *sensep; 25235 struct scsi_arq_status *arqstat; 25236 extern int do_polled_io; 25237 25238 ASSERT(pkt->pkt_scbp); 25239 25240 /* 25241 * save old flags.
25242 */ 25243 savef = pkt->pkt_flags; 25244 savec = pkt->pkt_comp; 25245 savet = pkt->pkt_time; 25246 25247 pkt->pkt_flags |= FLAG_NOINTR; 25248 25249 /* 25250 * XXX there is nothing in the SCSA spec that states that we should not 25251 * do a callback for polled cmds; however, removing this will break sd 25252 * and probably other target drivers 25253 */ 25254 pkt->pkt_comp = NULL; 25255 25256 /* 25257 * we don't like a polled command without timeout. 25258 * 60 seconds seems long enough. 25259 */ 25260 if (pkt->pkt_time == 0) 25261 pkt->pkt_time = SCSI_POLL_TIMEOUT; 25262 25263 /* 25264 * Send polled cmd. 25265 * 25266 * We do some error recovery for various errors. Tran_busy, 25267 * queue full, and non-dispatched commands are retried every 10 msec. 25268 * as they are typically transient failures. Busy status and Not 25269 * Ready are retried every second as this status takes a while to 25270 * change. 25271 */ 25272 timeout = pkt->pkt_time * SEC_TO_CSEC; 25273 25274 for (busy_count = 0; busy_count < timeout; busy_count++) { 25275 /* 25276 * Initialize pkt status variables. 25277 */ 25278 *pkt->pkt_scbp = pkt->pkt_reason = pkt->pkt_state = 0; 25279 25280 if ((rc = scsi_transport(pkt)) != TRAN_ACCEPT) { 25281 if (rc != TRAN_BUSY) { 25282 /* Transport failed - give up. */ 25283 break; 25284 } else { 25285 /* Transport busy - try again. */ 25286 poll_delay = 1 * CSEC; /* 10 msec. */ 25287 } 25288 } else { 25289 /* 25290 * Transport accepted - check pkt status. 25291 */ 25292 rc = (*pkt->pkt_scbp) & STATUS_MASK; 25293 if ((pkt->pkt_reason == CMD_CMPLT) && 25294 (rc == STATUS_CHECK) && 25295 (pkt->pkt_state & STATE_ARQ_DONE)) { 25296 arqstat = 25297 (struct scsi_arq_status *)(pkt->pkt_scbp); 25298 sensep = (uint8_t *)&arqstat->sts_sensedata; 25299 } else { 25300 sensep = NULL; 25301 } 25302 25303 if ((pkt->pkt_reason == CMD_CMPLT) && 25304 (rc == STATUS_GOOD)) { 25305 /* No error - we're done */ 25306 rval = 0; 25307 break; 25308 25309 } else if (pkt->pkt_reason == CMD_DEV_GONE) { 25310 /* Lost connection - give up */ 25311 break; 25312 25313 } else if ((pkt->pkt_reason == CMD_INCOMPLETE) && 25314 (pkt->pkt_state == 0)) { 25315 /* Pkt not dispatched - try again. */ 25316 poll_delay = 1 * CSEC; /* 10 msec. */ 25317 25318 } else if ((pkt->pkt_reason == CMD_CMPLT) && 25319 (rc == STATUS_QFULL)) { 25320 /* Queue full - try again. */ 25321 poll_delay = 1 * CSEC; /* 10 msec. */ 25322 25323 } else if ((pkt->pkt_reason == CMD_CMPLT) && 25324 (rc == STATUS_BUSY)) { 25325 /* Busy - try again. */ 25326 poll_delay = 100 * CSEC; /* 1 sec. */ 25327 busy_count += (SEC_TO_CSEC - 1); 25328 25329 } else if ((sensep != NULL) && 25330 (scsi_sense_key(sensep) == KEY_UNIT_ATTENTION)) { 25331 /* 25332 * Unit Attention - try again. 25333 * Pretend it took 1 sec. 25334 * NOTE: 'continue' avoids poll_delay 25335 */ 25336 busy_count += (SEC_TO_CSEC - 1); 25337 continue; 25338 25339 } else if ((sensep != NULL) && 25340 (scsi_sense_key(sensep) == KEY_NOT_READY) && 25341 (scsi_sense_asc(sensep) == 0x04) && 25342 (scsi_sense_ascq(sensep) == 0x01)) { 25343 /* 25344 * Not ready -> ready - try again. 25345 * 04h/01h: LUN IS IN PROCESS OF BECOMING READY 25346 * ...same as STATUS_BUSY 25347 */ 25348 poll_delay = 100 * CSEC; /* 1 sec. */ 25349 busy_count += (SEC_TO_CSEC - 1); 25350 25351 } else { 25352 /* BAD status - give up. 
*/ 25353 break; 25354 } 25355 } 25356 25357 if (((curthread->t_flag & T_INTR_THREAD) == 0) && 25358 !do_polled_io) { 25359 delay(drv_usectohz(poll_delay)); 25360 } else { 25361 /* we busy wait during cpr_dump or interrupt threads */ 25362 drv_usecwait(poll_delay); 25363 } 25364 } 25365 25366 pkt->pkt_flags = savef; 25367 pkt->pkt_comp = savec; 25368 pkt->pkt_time = savet; 25369 25370 /* return on error */ 25371 if (rval) 25372 return (rval); 25373 25374 /* 25375 * This is not a performance critical code path. 25376 * 25377 * As an accommodation for scsi_poll callers, to avoid ddi_dma_sync() 25378 * issues associated with looking at DMA memory prior to 25379 * scsi_pkt_destroy(), we scsi_sync_pkt() prior to return. 25380 */ 25381 scsi_sync_pkt(pkt); 25382 return (0); 25383 } 25384 25385 25386 25387 /* 25388 * Function: sd_persistent_reservation_in_read_keys 25389 * 25390 * Description: This routine is the driver entry point for handling CD-ROM 25391 * multi-host persistent reservation requests (MHIOCGRP_INKEYS) 25392 * by sending the SCSI-3 PRIN commands to the device. 25393 * Processes the read keys command response by copying the 25394 * reservation key information into the user provided buffer. 25395 * Support for the 32/64 bit _MULTI_DATAMODEL is implemented. 25396 * 25397 * Arguments: un - Pointer to soft state struct for the target. 25398 * usrp - user provided pointer to multihost Persistent In Read 25399 * Keys structure (mhioc_inkeys_t) 25400 * flag - this argument is a pass through to ddi_copyxxx() 25401 * directly from the mode argument of ioctl(). 25402 * 25403 * Return Code: 0 - Success 25404 * EACCES 25405 * ENOTSUP 25406 * errno return code from sd_send_scsi_cmd() 25407 * 25408 * Context: Can sleep. Does not return until command is completed. 25409 */ 25410 25411 static int 25412 sd_persistent_reservation_in_read_keys(struct sd_lun *un, 25413 mhioc_inkeys_t *usrp, int flag) 25414 { 25415 #ifdef _MULTI_DATAMODEL 25416 struct mhioc_key_list32 li32; 25417 #endif 25418 sd_prin_readkeys_t *in; 25419 mhioc_inkeys_t *ptr; 25420 mhioc_key_list_t li; 25421 uchar_t *data_bufp; 25422 int data_len; 25423 int rval = 0; 25424 size_t copysz; 25425 sd_ssc_t *ssc; 25426 25427 if ((ptr = (mhioc_inkeys_t *)usrp) == NULL) { 25428 return (EINVAL); 25429 } 25430 bzero(&li, sizeof (mhioc_key_list_t)); 25431 25432 ssc = sd_ssc_init(un); 25433 25434 /* 25435 * Get the listsize from user 25436 */ 25437 #ifdef _MULTI_DATAMODEL 25438 25439 switch (ddi_model_convert_from(flag & FMODELS)) { 25440 case DDI_MODEL_ILP32: 25441 copysz = sizeof (struct mhioc_key_list32); 25442 if (ddi_copyin(ptr->li, &li32, copysz, flag)) { 25443 SD_ERROR(SD_LOG_IOCTL_MHD, un, 25444 "sd_persistent_reservation_in_read_keys: " 25445 "failed ddi_copyin: mhioc_key_list32_t\n"); 25446 rval = EFAULT; 25447 goto done; 25448 } 25449 li.listsize = li32.listsize; 25450 li.list = (mhioc_resv_key_t *)(uintptr_t)li32.list; 25451 break; 25452 25453 case DDI_MODEL_NONE: 25454 copysz = sizeof (mhioc_key_list_t); 25455 if (ddi_copyin(ptr->li, &li, copysz, flag)) { 25456 SD_ERROR(SD_LOG_IOCTL_MHD, un, 25457 "sd_persistent_reservation_in_read_keys: " 25458 "failed ddi_copyin: mhioc_key_list_t\n"); 25459 rval = EFAULT; 25460 goto done; 25461 } 25462 break; 25463 } 25464 25465 #else /* ! 
_MULTI_DATAMODEL */ 25466 copysz = sizeof (mhioc_key_list_t); 25467 if (ddi_copyin(ptr->li, &li, copysz, flag)) { 25468 SD_ERROR(SD_LOG_IOCTL_MHD, un, 25469 "sd_persistent_reservation_in_read_keys: " 25470 "failed ddi_copyin: mhioc_key_list_t\n"); 25471 rval = EFAULT; 25472 goto done; 25473 } 25474 #endif 25475 25476 data_len = li.listsize * MHIOC_RESV_KEY_SIZE; 25477 data_len += (sizeof (sd_prin_readkeys_t) - sizeof (caddr_t)); 25478 data_bufp = kmem_zalloc(data_len, KM_SLEEP); 25479 25480 rval = sd_send_scsi_PERSISTENT_RESERVE_IN(ssc, SD_READ_KEYS, 25481 data_len, data_bufp); 25482 if (rval != 0) { 25483 if (rval == EIO) 25484 sd_ssc_assessment(ssc, SD_FMT_IGNORE_COMPROMISE); 25485 else 25486 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 25487 goto done; 25488 } 25489 in = (sd_prin_readkeys_t *)data_bufp; 25490 ptr->generation = BE_32(in->generation); 25491 li.listlen = BE_32(in->len) / MHIOC_RESV_KEY_SIZE; 25492 25493 /* 25494 * Return the min(listsize, listlen) keys 25495 */ 25496 #ifdef _MULTI_DATAMODEL 25497 25498 switch (ddi_model_convert_from(flag & FMODELS)) { 25499 case DDI_MODEL_ILP32: 25500 li32.listlen = li.listlen; 25501 if (ddi_copyout(&li32, ptr->li, copysz, flag)) { 25502 SD_ERROR(SD_LOG_IOCTL_MHD, un, 25503 "sd_persistent_reservation_in_read_keys: " 25504 "failed ddi_copyout: mhioc_key_list32_t\n"); 25505 rval = EFAULT; 25506 goto done; 25507 } 25508 break; 25509 25510 case DDI_MODEL_NONE: 25511 if (ddi_copyout(&li, ptr->li, copysz, flag)) { 25512 SD_ERROR(SD_LOG_IOCTL_MHD, un, 25513 "sd_persistent_reservation_in_read_keys: " 25514 "failed ddi_copyout: mhioc_key_list_t\n"); 25515 rval = EFAULT; 25516 goto done; 25517 } 25518 break; 25519 } 25520 25521 #else /* ! _MULTI_DATAMODEL */ 25522 25523 if (ddi_copyout(&li, ptr->li, copysz, flag)) { 25524 SD_ERROR(SD_LOG_IOCTL_MHD, un, 25525 "sd_persistent_reservation_in_read_keys: " 25526 "failed ddi_copyout: mhioc_key_list_t\n"); 25527 rval = EFAULT; 25528 goto done; 25529 } 25530 25531 #endif /* _MULTI_DATAMODEL */ 25532 25533 copysz = min(li.listlen * MHIOC_RESV_KEY_SIZE, 25534 li.listsize * MHIOC_RESV_KEY_SIZE); 25535 if (ddi_copyout(&in->keylist, li.list, copysz, flag)) { 25536 SD_ERROR(SD_LOG_IOCTL_MHD, un, 25537 "sd_persistent_reservation_in_read_keys: " 25538 "failed ddi_copyout: keylist\n"); 25539 rval = EFAULT; 25540 } 25541 done: 25542 sd_ssc_fini(ssc); 25543 kmem_free(data_bufp, data_len); 25544 return (rval); 25545 } 25546 25547 25548 /* 25549 * Function: sd_persistent_reservation_in_read_resv 25550 * 25551 * Description: This routine is the driver entry point for handling CD-ROM 25552 * multi-host persistent reservation requests (MHIOCGRP_INRESV) 25553 * by sending the SCSI-3 PRIN commands to the device. 25554 * Process the read persistent reservations command response by 25555 * copying the reservation information into the user provided 25556 * buffer. Support for the 32/64 bit _MULTI_DATAMODEL is implemented. 25557 * 25558 * Arguments: un - Pointer to soft state struct for the target. 25559 * usrp - user provided pointer to multihost Persistent In Read 25560 * Reservations structure (mhioc_inresvs_t) 25561 * flag - this argument is a pass through to ddi_copyxxx() 25562 * directly from the mode argument of ioctl(). 25563 * 25564 * Return Code: 0 - Success 25565 * EACCES 25566 * ENOTSUP 25567 * errno return code from sd_send_scsi_cmd() 25568 * 25569 * Context: Can sleep. Does not return until command is completed.
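 *
 * A minimal userland sketch of driving this ioctl (hypothetical fd
 * and descriptor count; error handling omitted):
 *
 *	mhioc_resv_desc_t	descs[4];
 *	mhioc_resv_desc_list_t	dl;
 *	mhioc_inresvs_t		arg;
 *
 *	dl.listsize = 4;
 *	dl.list = descs;
 *	arg.li = &dl;
 *	if (ioctl(fd, MHIOCGRP_INRESV, &arg) == 0) {
 *		... arg.generation holds the PGR generation, and the
 *		first min(dl.listlen, dl.listsize) entries of descs
 *		hold the reservation descriptors ...
 *	}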
25570 */ 25571 25572 static int 25573 sd_persistent_reservation_in_read_resv(struct sd_lun *un, 25574 mhioc_inresvs_t *usrp, int flag) 25575 { 25576 #ifdef _MULTI_DATAMODEL 25577 struct mhioc_resv_desc_list32 resvlist32; 25578 #endif 25579 sd_prin_readresv_t *in; 25580 mhioc_inresvs_t *ptr; 25581 sd_readresv_desc_t *readresv_ptr; 25582 mhioc_resv_desc_list_t resvlist; 25583 mhioc_resv_desc_t resvdesc; 25584 uchar_t *data_bufp = NULL; 25585 int data_len; 25586 int rval = 0; 25587 int i; 25588 size_t copysz; 25589 mhioc_resv_desc_t *bufp; 25590 sd_ssc_t *ssc; 25591 25592 if ((ptr = usrp) == NULL) { 25593 return (EINVAL); 25594 } 25595 25596 ssc = sd_ssc_init(un); 25597 25598 /* 25599 * Get the listsize from user 25600 */ 25601 #ifdef _MULTI_DATAMODEL 25602 switch (ddi_model_convert_from(flag & FMODELS)) { 25603 case DDI_MODEL_ILP32: 25604 copysz = sizeof (struct mhioc_resv_desc_list32); 25605 if (ddi_copyin(ptr->li, &resvlist32, copysz, flag)) { 25606 SD_ERROR(SD_LOG_IOCTL_MHD, un, 25607 "sd_persistent_reservation_in_read_resv: " 25608 "failed ddi_copyin: mhioc_resv_desc_list_t\n"); 25609 rval = EFAULT; 25610 goto done; 25611 } 25612 resvlist.listsize = resvlist32.listsize; 25613 resvlist.list = (mhioc_resv_desc_t *)(uintptr_t)resvlist32.list; 25614 break; 25615 25616 case DDI_MODEL_NONE: 25617 copysz = sizeof (mhioc_resv_desc_list_t); 25618 if (ddi_copyin(ptr->li, &resvlist, copysz, flag)) { 25619 SD_ERROR(SD_LOG_IOCTL_MHD, un, 25620 "sd_persistent_reservation_in_read_resv: " 25621 "failed ddi_copyin: mhioc_resv_desc_list_t\n"); 25622 rval = EFAULT; 25623 goto done; 25624 } 25625 break; 25626 } 25627 #else /* ! _MULTI_DATAMODEL */ 25628 copysz = sizeof (mhioc_resv_desc_list_t); 25629 if (ddi_copyin(ptr->li, &resvlist, copysz, flag)) { 25630 SD_ERROR(SD_LOG_IOCTL_MHD, un, 25631 "sd_persistent_reservation_in_read_resv: " 25632 "failed ddi_copyin: mhioc_resv_desc_list_t\n"); 25633 rval = EFAULT; 25634 goto done; 25635 } 25636 #endif /* ! _MULTI_DATAMODEL */ 25637 25638 data_len = resvlist.listsize * SCSI3_RESV_DESC_LEN; 25639 data_len += (sizeof (sd_prin_readresv_t) - sizeof (caddr_t)); 25640 data_bufp = kmem_zalloc(data_len, KM_SLEEP); 25641 25642 rval = sd_send_scsi_PERSISTENT_RESERVE_IN(ssc, SD_READ_RESV, 25643 data_len, data_bufp); 25644 if (rval != 0) { 25645 if (rval == EIO) 25646 sd_ssc_assessment(ssc, SD_FMT_IGNORE_COMPROMISE); 25647 else 25648 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 25649 goto done; 25650 } 25651 in = (sd_prin_readresv_t *)data_bufp; 25652 ptr->generation = BE_32(in->generation); 25653 resvlist.listlen = BE_32(in->len) / SCSI3_RESV_DESC_LEN; 25654 25655 /* 25656 * Return the min(listsize, listlen) reservation descriptors 25657 */ 25658 #ifdef _MULTI_DATAMODEL 25659 25660 switch (ddi_model_convert_from(flag & FMODELS)) { 25661 case DDI_MODEL_ILP32: 25662 resvlist32.listlen = resvlist.listlen; 25663 if (ddi_copyout(&resvlist32, ptr->li, copysz, flag)) { 25664 SD_ERROR(SD_LOG_IOCTL_MHD, un, 25665 "sd_persistent_reservation_in_read_resv: " 25666 "failed ddi_copyout: mhioc_resv_desc_list_t\n"); 25667 rval = EFAULT; 25668 goto done; 25669 } 25670 break; 25671 25672 case DDI_MODEL_NONE: 25673 if (ddi_copyout(&resvlist, ptr->li, copysz, flag)) { 25674 SD_ERROR(SD_LOG_IOCTL_MHD, un, 25675 "sd_persistent_reservation_in_read_resv: " 25676 "failed ddi_copyout: mhioc_resv_desc_list_t\n"); 25677 rval = EFAULT; 25678 goto done; 25679 } 25680 break; 25681 } 25682 25683 #else /* !
_MULTI_DATAMODEL */ 25684 25685 if (ddi_copyout(&resvlist, ptr->li, copysz, flag)) { 25686 SD_ERROR(SD_LOG_IOCTL_MHD, un, 25687 "sd_persistent_reservation_in_read_resv: " 25688 "failed ddi_copyout: mhioc_resv_desc_list_t\n"); 25689 rval = EFAULT; 25690 goto done; 25691 } 25692 25693 #endif /* ! _MULTI_DATAMODEL */ 25694 25695 readresv_ptr = (sd_readresv_desc_t *)&in->readresv_desc; 25696 bufp = resvlist.list; 25697 copysz = sizeof (mhioc_resv_desc_t); 25698 for (i = 0; i < min(resvlist.listlen, resvlist.listsize); 25699 i++, readresv_ptr++, bufp++) { 25700 25701 bcopy(&readresv_ptr->resvkey, &resvdesc.key, 25702 MHIOC_RESV_KEY_SIZE); 25703 resvdesc.type = readresv_ptr->type; 25704 resvdesc.scope = readresv_ptr->scope; 25705 resvdesc.scope_specific_addr = 25706 BE_32(readresv_ptr->scope_specific_addr); 25707 25708 if (ddi_copyout(&resvdesc, bufp, copysz, flag)) { 25709 SD_ERROR(SD_LOG_IOCTL_MHD, un, 25710 "sd_persistent_reservation_in_read_resv: " 25711 "failed ddi_copyout: resvlist\n"); 25712 rval = EFAULT; 25713 goto done; 25714 } 25715 } 25716 done: 25717 sd_ssc_fini(ssc); 25718 /* only free data_bufp if it was allocated */ 25719 if (data_bufp) { 25720 kmem_free(data_bufp, data_len); 25721 } 25722 return (rval); 25723 } 25724 25725 25726 /* 25727 * Function: sr_change_blkmode() 25728 * 25729 * Description: This routine is the driver entry point for handling CD-ROM 25730 * block mode ioctl requests. Support for returning and changing 25731 * the current block size in use by the device is implemented. The 25732 * LBA size is changed via a MODE SELECT Block Descriptor. 25733 * 25734 * This routine issues a mode sense with an allocation length of 25735 * 12 bytes for the mode page header and a single block descriptor. 25736 * 25737 * Arguments: dev - the device 'dev_t' 25738 * cmd - the request type; one of CDROMGBLKMODE (get) or 25739 * CDROMSBLKMODE (set) 25740 * data - current block size or requested block size 25741 * flag - this argument is a pass through to ddi_copyxxx() directly 25742 * from the mode argument of ioctl(). 25743 * 25744 * Return Code: the code returned by sd_send_scsi_cmd() 25745 * EINVAL if invalid arguments are provided 25746 * EFAULT if ddi_copyxxx() fails 25747 * ENXIO if ddi_get_soft_state() fails 25748 * EIO if invalid mode sense block descriptor length 25749 * 25750 */ 25751 25752 static int 25753 sr_change_blkmode(dev_t dev, int cmd, intptr_t data, int flag) 25754 { 25755 struct sd_lun *un = NULL; 25756 struct mode_header *sense_mhp, *select_mhp; 25757 struct block_descriptor *sense_desc, *select_desc; 25758 int current_bsize; 25759 int rval = EINVAL; 25760 uchar_t *sense = NULL; 25761 uchar_t *select = NULL; 25762 sd_ssc_t *ssc; 25763 25764 ASSERT((cmd == CDROMGBLKMODE) || (cmd == CDROMSBLKMODE)); 25765 25766 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 25767 return (ENXIO); 25768 } 25769 25770 /* 25771 * The block length is changed via the Mode Select block descriptor; the 25772 * "Read/Write Error Recovery" mode page (0x1) contents are not actually 25773 * required as part of this routine. Therefore the mode sense allocation 25774 * length is specified to be the length of a mode page header and a 25775 * block descriptor.
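 *
 * Assuming the usual 4-byte mode parameter header and 8-byte block
 * descriptor, the 12-byte (BUFLEN_CHG_BLK_MODE) buffer is laid out
 * as:
 *
 *	bytes 0-3	struct mode_header
 *	bytes 4-11	struct block_descriptor, whose last three
 *			bytes (blksize_hi/mid/lo) carry the logical
 *			block size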
/*
 * Note: The following sr_change_speed() and sr_atapi_change_speed() routines
 * implement driver support for getting and setting the CD speed. The command
 * set used will be based on the device type. If the device has not been
 * identified as MMC the Toshiba vendor specific mode page will be used. If
 * the device is MMC but does not support the Real Time Streaming feature
 * the SET CD SPEED command will be used to set speed and mode page 0x2A will
 * be used to read the speed.
 */

/*
 * Function: sr_change_speed()
 *
 * Description: This routine is the driver entry point for handling CD-ROM
 *		drive speed ioctl requests for devices supporting the Toshiba
 *		vendor specific drive speed mode page. Support for returning
 *		and changing the current drive speed in use by the device is
 *		implemented.
 *
 * Arguments: dev - the device 'dev_t'
 *	      cmd - the request type; one of CDROMGDRVSPEED (get) or
 *		    CDROMSDRVSPEED (set)
 *	      data - current drive speed or requested drive speed
 *	      flag - this argument is a pass through to ddi_copyxxx() directly
 *		     from the mode argument of ioctl().
 *
 * Return Code: the code returned by sd_send_scsi_cmd()
 *		EINVAL if invalid arguments are provided
 *		EFAULT if ddi_copyxxx() fails
 *		ENXIO if fail ddi_get_soft_state
 *		EIO if invalid mode sense block descriptor length
 */

static int
sr_change_speed(dev_t dev, int cmd, intptr_t data, int flag)
{
	struct sd_lun		*un = NULL;
	struct mode_header	*sense_mhp, *select_mhp;
	struct mode_speed	*sense_page, *select_page;
	int			current_speed;
	int			rval = EINVAL;
	int			bd_len;
	uchar_t			*sense = NULL;
	uchar_t			*select = NULL;
	sd_ssc_t		*ssc;

	ASSERT((cmd == CDROMGDRVSPEED) || (cmd == CDROMSDRVSPEED));
	if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) {
		return (ENXIO);
	}

	/*
	 * Note: The drive speed is being modified here according to a Toshiba
	 * vendor specific mode page (0x31).
	 */
	sense = kmem_zalloc(BUFLEN_MODE_CDROM_SPEED, KM_SLEEP);

	ssc = sd_ssc_init(un);
	rval = sd_send_scsi_MODE_SENSE(ssc, CDB_GROUP0, sense,
	    BUFLEN_MODE_CDROM_SPEED, CDROM_MODE_SPEED,
	    SD_PATH_STANDARD);
	sd_ssc_fini(ssc);
	if (rval != 0) {
		scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
		    "sr_change_speed: Mode Sense Failed\n");
		kmem_free(sense, BUFLEN_MODE_CDROM_SPEED);
		return (rval);
	}
	sense_mhp = (struct mode_header *)sense;

	/* Check the block descriptor len to handle only 1 block descriptor */
	bd_len = sense_mhp->bdesc_length;
	if (bd_len > MODE_BLK_DESC_LENGTH) {
		scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
		    "sr_change_speed: Mode Sense returned invalid block "
		    "descriptor length\n");
		kmem_free(sense, BUFLEN_MODE_CDROM_SPEED);
		return (EIO);
	}

	sense_page = (struct mode_speed *)
	    (sense + MODE_HEADER_LENGTH + sense_mhp->bdesc_length);
	current_speed = sense_page->speed;

	/* Process command */
	switch (cmd) {
	case CDROMGDRVSPEED:
		/* Return the drive speed obtained during the mode sense */
		if (current_speed == 0x2) {
			current_speed = CDROM_TWELVE_SPEED;
		}
		if (ddi_copyout(&current_speed, (void *)data,
		    sizeof (int), flag) != 0) {
			rval = EFAULT;
		}
		break;
	case CDROMSDRVSPEED:
		/* Validate the requested drive speed */
		switch ((uchar_t)data) {
		case CDROM_TWELVE_SPEED:
			data = 0x2;
			/*FALLTHROUGH*/
		case CDROM_NORMAL_SPEED:
		case CDROM_DOUBLE_SPEED:
		case CDROM_QUAD_SPEED:
		case CDROM_MAXIMUM_SPEED:
			break;
		default:
			scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
			    "sr_change_speed: "
			    "Drive Speed '%d' Not Supported\n", (uchar_t)data);
			kmem_free(sense, BUFLEN_MODE_CDROM_SPEED);
			return (EINVAL);
		}

		/*
		 * The current drive speed matches the requested drive speed so
		 * there is no need to send the mode select to change the speed
		 */
		if (current_speed == data) {
			break;
		}

		/* Build the select data for the requested drive speed */
		select = kmem_zalloc(BUFLEN_MODE_CDROM_SPEED, KM_SLEEP);
		select_mhp = (struct mode_header *)select;
		select_mhp->bdesc_length = 0;
		select_page =
		    (struct mode_speed *)(select + MODE_HEADER_LENGTH);
		select_page->mode_page.code = CDROM_MODE_SPEED;
		select_page->mode_page.length = 2;
		select_page->speed = (uchar_t)data;

		/* Send the mode select for the requested drive speed */
		ssc = sd_ssc_init(un);
		rval = sd_send_scsi_MODE_SELECT(ssc, CDB_GROUP0, select,
		    MODEPAGE_CDROM_SPEED_LEN + MODE_HEADER_LENGTH,
		    SD_DONTSAVE_PAGE, SD_PATH_STANDARD);
		sd_ssc_fini(ssc);
		if (rval != 0) {
			/*
			 * The mode select failed for the requested drive speed,
			 * so reset the data for the original drive speed and
			 * send it to the target. The error is indicated by the
			 * return value for the failed mode select.
			 */
			scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
			    "sr_change_speed: Mode Select Failed\n");
			select_page->speed = sense_page->speed;
			ssc = sd_ssc_init(un);
			(void) sd_send_scsi_MODE_SELECT(ssc, CDB_GROUP0, select,
			    MODEPAGE_CDROM_SPEED_LEN + MODE_HEADER_LENGTH,
			    SD_DONTSAVE_PAGE, SD_PATH_STANDARD);
			sd_ssc_fini(ssc);
		}
		break;
	default:
		/* should not reach here, but check anyway */
		scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
		    "sr_change_speed: Command '%x' Not Supported\n", cmd);
		rval = EINVAL;
		break;
	}

	if (select) {
		kmem_free(select, BUFLEN_MODE_CDROM_SPEED);
	}
	if (sense) {
		kmem_free(sense, BUFLEN_MODE_CDROM_SPEED);
	}

	return (rval);
}
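
/*
 * Illustrative userland sketch (not part of the driver build): the Toshiba
 * path above is reached through CDROMGDRVSPEED/CDROMSDRVSPEED from
 * <sys/cdio.h>; a get passes an int pointer, a set passes one of the
 * CDROM_*_SPEED codes as the argument value. The helper name is hypothetical.
 */
#if 0
#include <sys/cdio.h>
#include <unistd.h>

static int
set_quad_speed(int fd)
{
	int speed;

	if (ioctl(fd, CDROMGDRVSPEED, &speed) != 0)
		return (-1);
	if (speed == CDROM_QUAD_SPEED)
		return (0);
	return (ioctl(fd, CDROMSDRVSPEED, CDROM_QUAD_SPEED));
}
#endif
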
/*
 * Function: sr_atapi_change_speed()
 *
 * Description: This routine is the driver entry point for handling CD-ROM
 *		drive speed ioctl requests for MMC devices that do not support
 *		the Real Time Streaming feature (0x107).
 *
 *		Note: This routine will use the SET SPEED command which may not
 *		be supported by all devices.
 *
 * Arguments: dev - the device 'dev_t'
 *	      cmd - the request type; one of CDROMGDRVSPEED (get) or
 *		    CDROMSDRVSPEED (set)
 *	      data - current drive speed or requested drive speed
 *	      flag - this argument is a pass through to ddi_copyxxx() directly
 *		     from the mode argument of ioctl().
 *
 * Return Code: the code returned by sd_send_scsi_cmd()
 *		EINVAL if invalid arguments are provided
 *		EFAULT if ddi_copyxxx() fails
 *		ENXIO if fail ddi_get_soft_state
 *		EIO if invalid mode sense block descriptor length
 */

static int
sr_atapi_change_speed(dev_t dev, int cmd, intptr_t data, int flag)
{
	struct sd_lun		*un;
	struct uscsi_cmd	*com = NULL;
	struct mode_header_grp2	*sense_mhp;
	uchar_t			*sense_page;
	uchar_t			*sense = NULL;
	char			cdb[CDB_GROUP5];
	int			bd_len;
	int			current_speed = 0;
	int			max_speed = 0;
	int			rval;
	sd_ssc_t		*ssc;

	ASSERT((cmd == CDROMGDRVSPEED) || (cmd == CDROMSDRVSPEED));

	if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) {
		return (ENXIO);
	}

	sense = kmem_zalloc(BUFLEN_MODE_CDROM_CAP, KM_SLEEP);

	ssc = sd_ssc_init(un);
	rval = sd_send_scsi_MODE_SENSE(ssc, CDB_GROUP1, sense,
	    BUFLEN_MODE_CDROM_CAP, MODEPAGE_CDROM_CAP,
	    SD_PATH_STANDARD);
	sd_ssc_fini(ssc);
	if (rval != 0) {
		scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
		    "sr_atapi_change_speed: Mode Sense Failed\n");
		kmem_free(sense, BUFLEN_MODE_CDROM_CAP);
		return (rval);
	}

	/* Check the block descriptor len to handle only 1 block descriptor */
	sense_mhp = (struct mode_header_grp2 *)sense;
	bd_len = (sense_mhp->bdesc_length_hi << 8) | sense_mhp->bdesc_length_lo;
	if (bd_len > MODE_BLK_DESC_LENGTH) {
		scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
		    "sr_atapi_change_speed: Mode Sense returned invalid "
		    "block descriptor length\n");
		kmem_free(sense, BUFLEN_MODE_CDROM_CAP);
		return (EIO);
	}

	/* Calculate the current and maximum drive speeds */
	sense_page = (uchar_t *)(sense + MODE_HEADER_LENGTH_GRP2 + bd_len);
	current_speed = (sense_page[14] << 8) | sense_page[15];
	max_speed = (sense_page[8] << 8) | sense_page[9];

	/* Process the command */
	switch (cmd) {
	case CDROMGDRVSPEED:
		current_speed /= SD_SPEED_1X;
		if (ddi_copyout(&current_speed, (void *)data,
		    sizeof (int), flag) != 0)
			rval = EFAULT;
		break;
	case CDROMSDRVSPEED:
		/* Convert the speed code to KB/sec */
		switch ((uchar_t)data) {
		case CDROM_NORMAL_SPEED:
			current_speed = SD_SPEED_1X;
			break;
		case CDROM_DOUBLE_SPEED:
			current_speed = 2 * SD_SPEED_1X;
			break;
		case CDROM_QUAD_SPEED:
			current_speed = 4 * SD_SPEED_1X;
			break;
		case CDROM_TWELVE_SPEED:
			current_speed = 12 * SD_SPEED_1X;
			break;
		case CDROM_MAXIMUM_SPEED:
			current_speed = 0xffff;
			break;
		default:
			scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
			    "sr_atapi_change_speed: invalid drive speed %d\n",
			    (uchar_t)data);
			kmem_free(sense, BUFLEN_MODE_CDROM_CAP);
			return (EINVAL);
		}

		/* Check the request against the drive's max speed. */
		if (current_speed != 0xffff) {
			if (current_speed > max_speed) {
				kmem_free(sense, BUFLEN_MODE_CDROM_CAP);
				return (EINVAL);
			}
		}

		/*
		 * Build and send the SET SPEED command
		 *
		 * Note: The SET SPEED (0xBB) command used in this routine is
		 * obsolete per the SCSI MMC spec but still supported in the
		 * MT FUJI vendor spec. Most equipment adheres to MT FUJI,
		 * therefore the command is still implemented in this routine.
		 */
		bzero(cdb, sizeof (cdb));
		cdb[0] = (char)SCMD_SET_CDROM_SPEED;
		cdb[2] = (uchar_t)(current_speed >> 8);
		cdb[3] = (uchar_t)current_speed;
		com = kmem_zalloc(sizeof (*com), KM_SLEEP);
		com->uscsi_cdb = (caddr_t)cdb;
		com->uscsi_cdblen = CDB_GROUP5;
		com->uscsi_bufaddr = NULL;
		com->uscsi_buflen = 0;
		com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT;
		rval = sd_send_scsi_cmd(dev, com, FKIOCTL, 0, SD_PATH_STANDARD);
		break;
	default:
		scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
		    "sr_atapi_change_speed: Command '%x' Not Supported\n", cmd);
		rval = EINVAL;
	}

	if (sense) {
		kmem_free(sense, BUFLEN_MODE_CDROM_CAP);
	}
	if (com) {
		kmem_free(com, sizeof (*com));
	}
	return (rval);
}
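
/*
 * Illustrative userland sketch (not part of the driver build): the same
 * SET CD SPEED (0xBB) command issued directly through the uscsi(7I)
 * pass-through (requires sufficient privilege), mirroring the CDB the
 * routine above builds. The 0xBB opcode and the KB/sec speed in bytes 2-3
 * follow the code above; the KB/sec unit matches the driver's SD_SPEED_1X
 * convention, and 0xffff means "maximum" per the MT FUJI note.
 */
#if 0
#include <sys/types.h>
#include <sys/scsi/impl/uscsi.h>
#include <string.h>
#include <unistd.h>

static int
uscsi_set_cd_speed(int fd, unsigned short kbps)
{
	struct uscsi_cmd ucmd;
	unsigned char cdb[12];

	(void) memset(&ucmd, 0, sizeof (ucmd));
	(void) memset(cdb, 0, sizeof (cdb));
	cdb[0] = 0xBB;			/* SET CD SPEED */
	cdb[2] = (unsigned char)(kbps >> 8);
	cdb[3] = (unsigned char)kbps;	/* e.g. 4 * 176 KB/s for 4x */
	ucmd.uscsi_cdb = (caddr_t)cdb;
	ucmd.uscsi_cdblen = sizeof (cdb);
	ucmd.uscsi_flags = USCSI_SILENT;
	return (ioctl(fd, USCSICMD, &ucmd));
}
#endif
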
/*
 * Function: sr_pause_resume()
 *
 * Description: This routine is the driver entry point for handling CD-ROM
 *		pause/resume ioctl requests. This only affects the audio play
 *		operation.
 *
 * Arguments: dev - the device 'dev_t'
 *	      cmd - the request type; one of CDROMPAUSE or CDROMRESUME, used
 *		    for setting the resume bit of the cdb.
 *
 * Return Code: the code returned by sd_send_scsi_cmd()
 *		EINVAL if invalid mode specified
 *
 */

static int
sr_pause_resume(dev_t dev, int cmd)
{
	struct sd_lun		*un;
	struct uscsi_cmd	*com;
	char			cdb[CDB_GROUP1];
	int			rval;

	if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) {
		return (ENXIO);
	}

	com = kmem_zalloc(sizeof (*com), KM_SLEEP);
	bzero(cdb, CDB_GROUP1);
	cdb[0] = SCMD_PAUSE_RESUME;
	switch (cmd) {
	case CDROMRESUME:
		cdb[8] = 1;
		break;
	case CDROMPAUSE:
		cdb[8] = 0;
		break;
	default:
		scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, "sr_pause_resume:"
		    " Command '%x' Not Supported\n", cmd);
		rval = EINVAL;
		goto done;
	}

	com->uscsi_cdb = cdb;
	com->uscsi_cdblen = CDB_GROUP1;
	com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT;

	rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_SYSSPACE,
	    SD_PATH_STANDARD);

done:
	kmem_free(com, sizeof (*com));
	return (rval);
}
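
/*
 * Illustrative userland sketch (not part of the driver build): pausing and
 * resuming audio playback via the CDROMPAUSE/CDROMRESUME requests handled
 * above. Neither request takes an argument; only the resume bit in the CDB
 * differs, as the routine shows.
 */
#if 0
#include <sys/cdio.h>
#include <unistd.h>

static int
toggle_pause(int fd, int resume)
{
	return (ioctl(fd, resume ? CDROMRESUME : CDROMPAUSE, 0));
}
#endif
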
/*
 * Function: sr_play_msf()
 *
 * Description: This routine is the driver entry point for handling CD-ROM
 *		ioctl requests to output the audio signals at the specified
 *		starting address and continue the audio play until the specified
 *		ending address (CDROMPLAYMSF). The address is in Minute Second
 *		Frame (MSF) format.
 *
 * Arguments: dev - the device 'dev_t'
 *	      data - pointer to user provided audio msf structure,
 *		     specifying start/end addresses.
 *	      flag - this argument is a pass through to ddi_copyxxx()
 *		     directly from the mode argument of ioctl().
 *
 * Return Code: the code returned by sd_send_scsi_cmd()
 *		EFAULT if ddi_copyxxx() fails
 *		ENXIO if fail ddi_get_soft_state
 *		EINVAL if data pointer is NULL
 */

static int
sr_play_msf(dev_t dev, caddr_t data, int flag)
{
	struct sd_lun		*un;
	struct uscsi_cmd	*com;
	struct cdrom_msf	msf_struct;
	struct cdrom_msf	*msf = &msf_struct;
	char			cdb[CDB_GROUP1];
	int			rval;

	if (data == NULL) {
		return (EINVAL);
	}

	if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) {
		return (ENXIO);
	}

	if (ddi_copyin(data, msf, sizeof (struct cdrom_msf), flag)) {
		return (EFAULT);
	}

	com = kmem_zalloc(sizeof (*com), KM_SLEEP);
	bzero(cdb, CDB_GROUP1);
	cdb[0] = SCMD_PLAYAUDIO_MSF;
	if (un->un_f_cfg_playmsf_bcd == TRUE) {
		cdb[3] = BYTE_TO_BCD(msf->cdmsf_min0);
		cdb[4] = BYTE_TO_BCD(msf->cdmsf_sec0);
		cdb[5] = BYTE_TO_BCD(msf->cdmsf_frame0);
		cdb[6] = BYTE_TO_BCD(msf->cdmsf_min1);
		cdb[7] = BYTE_TO_BCD(msf->cdmsf_sec1);
		cdb[8] = BYTE_TO_BCD(msf->cdmsf_frame1);
	} else {
		cdb[3] = msf->cdmsf_min0;
		cdb[4] = msf->cdmsf_sec0;
		cdb[5] = msf->cdmsf_frame0;
		cdb[6] = msf->cdmsf_min1;
		cdb[7] = msf->cdmsf_sec1;
		cdb[8] = msf->cdmsf_frame1;
	}
	com->uscsi_cdb = cdb;
	com->uscsi_cdblen = CDB_GROUP1;
	com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT;
	rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_SYSSPACE,
	    SD_PATH_STANDARD);
	kmem_free(com, sizeof (*com));
	return (rval);
}
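
/*
 * Illustrative userland sketch (not part of the driver build): starting
 * audio play over an MSF range with the CDROMPLAYMSF request and the
 * struct cdrom_msf fields consumed by sr_play_msf() above. The sample
 * range is arbitrary; BCD conversion, when required, is done by the driver.
 */
#if 0
#include <sys/cdio.h>
#include <string.h>
#include <unistd.h>

static int
play_first_minute(int fd)
{
	struct cdrom_msf msf;

	(void) memset(&msf, 0, sizeof (msf));
	msf.cdmsf_min0 = 0;	/* start at 00:02:00 */
	msf.cdmsf_sec0 = 2;
	msf.cdmsf_frame0 = 0;
	msf.cdmsf_min1 = 1;	/* stop at 01:02:00 */
	msf.cdmsf_sec1 = 2;
	msf.cdmsf_frame1 = 0;
	return (ioctl(fd, CDROMPLAYMSF, &msf));
}
#endif
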
/*
 * Function: sr_play_trkind()
 *
 * Description: This routine is the driver entry point for handling CD-ROM
 *		ioctl requests to output the audio signals at the specified
 *		starting address and continue the audio play until the specified
 *		ending address (CDROMPLAYTRKIND). The address is in Track Index
 *		format.
 *
 * Arguments: dev - the device 'dev_t'
 *	      data - pointer to user provided audio track/index structure,
 *		     specifying start/end addresses.
 *	      flag - this argument is a pass through to ddi_copyxxx()
 *		     directly from the mode argument of ioctl().
 *
 * Return Code: the code returned by sd_send_scsi_cmd()
 *		EFAULT if ddi_copyxxx() fails
 *		ENXIO if fail ddi_get_soft_state
 *		EINVAL if data pointer is NULL
 */

static int
sr_play_trkind(dev_t dev, caddr_t data, int flag)
{
	struct cdrom_ti		ti_struct;
	struct cdrom_ti		*ti = &ti_struct;
	struct uscsi_cmd	*com = NULL;
	char			cdb[CDB_GROUP1];
	int			rval;

	if (data == NULL) {
		return (EINVAL);
	}

	if (ddi_copyin(data, ti, sizeof (struct cdrom_ti), flag)) {
		return (EFAULT);
	}

	com = kmem_zalloc(sizeof (*com), KM_SLEEP);
	bzero(cdb, CDB_GROUP1);
	cdb[0] = SCMD_PLAYAUDIO_TI;
	cdb[4] = ti->cdti_trk0;
	cdb[5] = ti->cdti_ind0;
	cdb[7] = ti->cdti_trk1;
	cdb[8] = ti->cdti_ind1;
	com->uscsi_cdb = cdb;
	com->uscsi_cdblen = CDB_GROUP1;
	com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT;
	rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_SYSSPACE,
	    SD_PATH_STANDARD);
	kmem_free(com, sizeof (*com));
	return (rval);
}
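
/*
 * Illustrative userland sketch (not part of the driver build): the
 * track/index variant of audio play, CDROMPLAYTRKIND, using the
 * struct cdrom_ti fields that sr_play_trkind() copies into the CDB.
 */
#if 0
#include <sys/cdio.h>
#include <unistd.h>

static int
play_track(int fd, unsigned char trk)
{
	struct cdrom_ti ti;

	ti.cdti_trk0 = trk;	/* start track/index */
	ti.cdti_ind0 = 1;
	ti.cdti_trk1 = trk;	/* end track/index */
	ti.cdti_ind1 = 1;
	return (ioctl(fd, CDROMPLAYTRKIND, &ti));
}
#endif
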
/*
 * Function: sr_read_all_subcodes()
 *
 * Description: This routine is the driver entry point for handling CD-ROM
 *		ioctl requests to return raw subcode data while the target is
 *		playing audio (CDROMSUBCODE).
 *
 * Arguments: dev - the device 'dev_t'
 *	      data - pointer to user provided cdrom subcode structure,
 *		     specifying the transfer length and address.
 *	      flag - this argument is a pass through to ddi_copyxxx()
 *		     directly from the mode argument of ioctl().
 *
 * Return Code: the code returned by sd_send_scsi_cmd()
 *		EFAULT if ddi_copyxxx() fails
 *		ENXIO if fail ddi_get_soft_state
 *		EINVAL if data pointer is NULL
 */

static int
sr_read_all_subcodes(dev_t dev, caddr_t data, int flag)
{
	struct sd_lun		*un = NULL;
	struct uscsi_cmd	*com = NULL;
	struct cdrom_subcode	*subcode = NULL;
	int			rval;
	size_t			buflen;
	char			cdb[CDB_GROUP5];

#ifdef _MULTI_DATAMODEL
	/* To support ILP32 applications in an LP64 world */
	struct cdrom_subcode32	cdrom_subcode32;
	struct cdrom_subcode32	*cdsc32 = &cdrom_subcode32;
#endif
	if (data == NULL) {
		return (EINVAL);
	}

	if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) {
		return (ENXIO);
	}

	subcode = kmem_zalloc(sizeof (struct cdrom_subcode), KM_SLEEP);

#ifdef _MULTI_DATAMODEL
	switch (ddi_model_convert_from(flag & FMODELS)) {
	case DDI_MODEL_ILP32:
		if (ddi_copyin(data, cdsc32, sizeof (*cdsc32), flag)) {
			scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
			    "sr_read_all_subcodes: ddi_copyin Failed\n");
			kmem_free(subcode, sizeof (struct cdrom_subcode));
			return (EFAULT);
		}
		/* Convert the ILP32 uscsi data from the application to LP64 */
		cdrom_subcode32tocdrom_subcode(cdsc32, subcode);
		break;
	case DDI_MODEL_NONE:
		if (ddi_copyin(data, subcode,
		    sizeof (struct cdrom_subcode), flag)) {
			scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
			    "sr_read_all_subcodes: ddi_copyin Failed\n");
			kmem_free(subcode, sizeof (struct cdrom_subcode));
			return (EFAULT);
		}
		break;
	}
#else /* ! _MULTI_DATAMODEL */
	if (ddi_copyin(data, subcode, sizeof (struct cdrom_subcode), flag)) {
		scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
		    "sr_read_all_subcodes: ddi_copyin Failed\n");
		kmem_free(subcode, sizeof (struct cdrom_subcode));
		return (EFAULT);
	}
#endif /* _MULTI_DATAMODEL */

	/*
	 * Since MMC-2 expects max 3 bytes for length, check if the
	 * length input is greater than 3 bytes
	 */
	if ((subcode->cdsc_length & 0xFF000000) != 0) {
		scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
		    "sr_read_all_subcodes: "
		    "cdrom transfer length too large: %d (limit %d)\n",
		    subcode->cdsc_length, 0xFFFFFF);
		kmem_free(subcode, sizeof (struct cdrom_subcode));
		return (EINVAL);
	}

	buflen = CDROM_BLK_SUBCODE * subcode->cdsc_length;
	com = kmem_zalloc(sizeof (*com), KM_SLEEP);
	bzero(cdb, CDB_GROUP5);

	if (un->un_f_mmc_cap == TRUE) {
		cdb[0] = (char)SCMD_READ_CD;
		cdb[2] = (char)0xff;
		cdb[3] = (char)0xff;
		cdb[4] = (char)0xff;
		cdb[5] = (char)0xff;
		cdb[6] = (((subcode->cdsc_length) & 0x00ff0000) >> 16);
		cdb[7] = (((subcode->cdsc_length) & 0x0000ff00) >> 8);
		cdb[8] = ((subcode->cdsc_length) & 0x000000ff);
		cdb[10] = 1;
	} else {
		/*
		 * Note: A vendor specific command (0xDF) is being used here to
		 * request a read of all subcodes.
		 */
		cdb[0] = (char)SCMD_READ_ALL_SUBCODES;
		cdb[6] = (((subcode->cdsc_length) & 0xff000000) >> 24);
		cdb[7] = (((subcode->cdsc_length) & 0x00ff0000) >> 16);
		cdb[8] = (((subcode->cdsc_length) & 0x0000ff00) >> 8);
		cdb[9] = ((subcode->cdsc_length) & 0x000000ff);
	}
	com->uscsi_cdb = cdb;
	com->uscsi_cdblen = CDB_GROUP5;
	com->uscsi_bufaddr = (caddr_t)subcode->cdsc_addr;
	com->uscsi_buflen = buflen;
	com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT|USCSI_READ;
	rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_USERSPACE,
	    SD_PATH_STANDARD);
	kmem_free(subcode, sizeof (struct cdrom_subcode));
	kmem_free(com, sizeof (*com));
	return (rval);
}
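
/*
 * Illustrative userland sketch (not part of the driver build): requesting
 * raw subcode blocks through CDROMSUBCODE. Here cdsc_length counts 96-byte
 * subcode blocks (CDROM_BLK_SUBCODE in the routine above) and cdsc_addr
 * must point at a buffer sized accordingly; the 96-byte sizing and helper
 * name are assumptions drawn from the code above.
 */
#if 0
#include <sys/cdio.h>
#include <stdlib.h>
#include <unistd.h>

static int
read_subcodes(int fd, unsigned int nblks)
{
	struct cdrom_subcode sc;
	int rv;

	sc.cdsc_length = nblks;
	sc.cdsc_addr = malloc((size_t)nblks * 96);
	if (sc.cdsc_addr == NULL)
		return (-1);
	rv = ioctl(fd, CDROMSUBCODE, &sc);
	free(sc.cdsc_addr);
	return (rv);
}
#endif
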
/*
 * Function: sr_read_subchannel()
 *
 * Description: This routine is the driver entry point for handling CD-ROM
 *		ioctl requests to return the Q sub-channel data of the CD
 *		current position block (CDROMSUBCHNL). The data includes the
 *		track number, index number, absolute CD-ROM address (LBA or MSF
 *		format per the user), track relative CD-ROM address (LBA or MSF
 *		format per the user), control data and audio status.
 *
 * Arguments: dev - the device 'dev_t'
 *	      data - pointer to user provided cdrom sub-channel structure
 *	      flag - this argument is a pass through to ddi_copyxxx()
 *		     directly from the mode argument of ioctl().
26559 * 26560 * Return Code: the code returned by sd_send_scsi_cmd() 26561 * EFAULT if ddi_copyxxx() fails 26562 * ENXIO if fail ddi_get_soft_state 26563 * EINVAL if data pointer is NULL 26564 */ 26565 26566 static int 26567 sr_read_subchannel(dev_t dev, caddr_t data, int flag) 26568 { 26569 struct sd_lun *un; 26570 struct uscsi_cmd *com; 26571 struct cdrom_subchnl subchanel; 26572 struct cdrom_subchnl *subchnl = &subchanel; 26573 char cdb[CDB_GROUP1]; 26574 caddr_t buffer; 26575 int rval; 26576 26577 if (data == NULL) { 26578 return (EINVAL); 26579 } 26580 26581 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL || 26582 (un->un_state == SD_STATE_OFFLINE)) { 26583 return (ENXIO); 26584 } 26585 26586 if (ddi_copyin(data, subchnl, sizeof (struct cdrom_subchnl), flag)) { 26587 return (EFAULT); 26588 } 26589 26590 buffer = kmem_zalloc((size_t)16, KM_SLEEP); 26591 bzero(cdb, CDB_GROUP1); 26592 cdb[0] = SCMD_READ_SUBCHANNEL; 26593 /* Set the MSF bit based on the user requested address format */ 26594 cdb[1] = (subchnl->cdsc_format & CDROM_LBA) ? 0 : 0x02; 26595 /* 26596 * Set the Q bit in byte 2 to indicate that Q sub-channel data be 26597 * returned 26598 */ 26599 cdb[2] = 0x40; 26600 /* 26601 * Set byte 3 to specify the return data format. A value of 0x01 26602 * indicates that the CD-ROM current position should be returned. 26603 */ 26604 cdb[3] = 0x01; 26605 cdb[8] = 0x10; 26606 com = kmem_zalloc(sizeof (*com), KM_SLEEP); 26607 com->uscsi_cdb = cdb; 26608 com->uscsi_cdblen = CDB_GROUP1; 26609 com->uscsi_bufaddr = buffer; 26610 com->uscsi_buflen = 16; 26611 com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT|USCSI_READ; 26612 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_SYSSPACE, 26613 SD_PATH_STANDARD); 26614 if (rval != 0) { 26615 kmem_free(buffer, 16); 26616 kmem_free(com, sizeof (*com)); 26617 return (rval); 26618 } 26619 26620 /* Process the returned Q sub-channel data */ 26621 subchnl->cdsc_audiostatus = buffer[1]; 26622 subchnl->cdsc_adr = (buffer[5] & 0xF0); 26623 subchnl->cdsc_ctrl = (buffer[5] & 0x0F); 26624 subchnl->cdsc_trk = buffer[6]; 26625 subchnl->cdsc_ind = buffer[7]; 26626 if (subchnl->cdsc_format & CDROM_LBA) { 26627 subchnl->cdsc_absaddr.lba = 26628 ((uchar_t)buffer[8] << 24) + ((uchar_t)buffer[9] << 16) + 26629 ((uchar_t)buffer[10] << 8) + ((uchar_t)buffer[11]); 26630 subchnl->cdsc_reladdr.lba = 26631 ((uchar_t)buffer[12] << 24) + ((uchar_t)buffer[13] << 16) + 26632 ((uchar_t)buffer[14] << 8) + ((uchar_t)buffer[15]); 26633 } else if (un->un_f_cfg_readsub_bcd == TRUE) { 26634 subchnl->cdsc_absaddr.msf.minute = BCD_TO_BYTE(buffer[9]); 26635 subchnl->cdsc_absaddr.msf.second = BCD_TO_BYTE(buffer[10]); 26636 subchnl->cdsc_absaddr.msf.frame = BCD_TO_BYTE(buffer[11]); 26637 subchnl->cdsc_reladdr.msf.minute = BCD_TO_BYTE(buffer[13]); 26638 subchnl->cdsc_reladdr.msf.second = BCD_TO_BYTE(buffer[14]); 26639 subchnl->cdsc_reladdr.msf.frame = BCD_TO_BYTE(buffer[15]); 26640 } else { 26641 subchnl->cdsc_absaddr.msf.minute = buffer[9]; 26642 subchnl->cdsc_absaddr.msf.second = buffer[10]; 26643 subchnl->cdsc_absaddr.msf.frame = buffer[11]; 26644 subchnl->cdsc_reladdr.msf.minute = buffer[13]; 26645 subchnl->cdsc_reladdr.msf.second = buffer[14]; 26646 subchnl->cdsc_reladdr.msf.frame = buffer[15]; 26647 } 26648 kmem_free(buffer, 16); 26649 kmem_free(com, sizeof (*com)); 26650 if (ddi_copyout(subchnl, data, sizeof (struct cdrom_subchnl), flag) 26651 != 0) { 26652 return (EFAULT); 26653 } 26654 return (rval); 26655 } 26656 26657 26658 /* 26659 * Function: sr_read_tocentry() 26660 * 
 * Description: This routine is the driver entry point for handling CD-ROM
 *		ioctl requests to read from the Table of Contents (TOC)
 *		(CDROMREADTOCENTRY). This routine provides the ADR and CTRL
 *		fields, the starting address (LBA or MSF format per the user)
 *		and the data mode if the user specified track is a data track.
 *
 *		Note: The READ HEADER (0x44) command used in this routine is
 *		obsolete per the SCSI MMC spec but still supported in the
 *		MT FUJI vendor spec. Most equipment adheres to MT FUJI,
 *		therefore the command is still implemented in this routine.
 *
 * Arguments: dev - the device 'dev_t'
 *	      data - pointer to user provided toc entry structure,
 *		     specifying the track # and the address format
 *		     (LBA or MSF).
 *	      flag - this argument is a pass through to ddi_copyxxx()
 *		     directly from the mode argument of ioctl().
 *
 * Return Code: the code returned by sd_send_scsi_cmd()
 *		EFAULT if ddi_copyxxx() fails
 *		ENXIO if fail ddi_get_soft_state
 *		EINVAL if data pointer is NULL
 */

static int
sr_read_tocentry(dev_t dev, caddr_t data, int flag)
{
	struct sd_lun		*un = NULL;
	struct uscsi_cmd	*com;
	struct cdrom_tocentry	toc_entry;
	struct cdrom_tocentry	*entry = &toc_entry;
	caddr_t			buffer;
	int			rval;
	char			cdb[CDB_GROUP1];

	if (data == NULL) {
		return (EINVAL);
	}

	if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL ||
	    (un->un_state == SD_STATE_OFFLINE)) {
		return (ENXIO);
	}

	if (ddi_copyin(data, entry, sizeof (struct cdrom_tocentry), flag)) {
		return (EFAULT);
	}

	/* Validate the requested track and address format */
	if (!(entry->cdte_format & (CDROM_LBA | CDROM_MSF))) {
		return (EINVAL);
	}

	if (entry->cdte_track == 0) {
		return (EINVAL);
	}

	buffer = kmem_zalloc((size_t)12, KM_SLEEP);
	com = kmem_zalloc(sizeof (*com), KM_SLEEP);
	bzero(cdb, CDB_GROUP1);

	cdb[0] = SCMD_READ_TOC;
	/* Set the MSF bit based on the user requested address format */
	cdb[1] = ((entry->cdte_format & CDROM_LBA) ? 0 : 2);
	if (un->un_f_cfg_read_toc_trk_bcd == TRUE) {
		cdb[6] = BYTE_TO_BCD(entry->cdte_track);
	} else {
		cdb[6] = entry->cdte_track;
	}

	/*
	 * Bytes 7 & 8 are the 12 byte allocation length for a single entry.
	 * (4 byte TOC response header + 8 byte track descriptor)
	 */
	cdb[8] = 12;
	com->uscsi_cdb = cdb;
	com->uscsi_cdblen = CDB_GROUP1;
	com->uscsi_bufaddr = buffer;
	com->uscsi_buflen = 0x0C;
	com->uscsi_flags = (USCSI_DIAGNOSE | USCSI_SILENT | USCSI_READ);
	rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_SYSSPACE,
	    SD_PATH_STANDARD);
	if (rval != 0) {
		kmem_free(buffer, 12);
		kmem_free(com, sizeof (*com));
		return (rval);
	}

	/* Process the toc entry */
	entry->cdte_adr = (buffer[5] & 0xF0) >> 4;
	entry->cdte_ctrl = (buffer[5] & 0x0F);
	if (entry->cdte_format & CDROM_LBA) {
		entry->cdte_addr.lba =
		    ((uchar_t)buffer[8] << 24) + ((uchar_t)buffer[9] << 16) +
		    ((uchar_t)buffer[10] << 8) + ((uchar_t)buffer[11]);
	} else if (un->un_f_cfg_read_toc_addr_bcd == TRUE) {
		entry->cdte_addr.msf.minute = BCD_TO_BYTE(buffer[9]);
		entry->cdte_addr.msf.second = BCD_TO_BYTE(buffer[10]);
		entry->cdte_addr.msf.frame = BCD_TO_BYTE(buffer[11]);
		/*
		 * Send a READ TOC command using the LBA address format to get
		 * the LBA for the track requested so it can be used in the
		 * READ HEADER request
		 *
		 * Note: The MSF bit of the READ HEADER command specifies the
		 * output format. The block address specified in that command
		 * must be in LBA format.
		 */
		cdb[1] = 0;
		rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_SYSSPACE,
		    SD_PATH_STANDARD);
		if (rval != 0) {
			kmem_free(buffer, 12);
			kmem_free(com, sizeof (*com));
			return (rval);
		}
	} else {
		entry->cdte_addr.msf.minute = buffer[9];
		entry->cdte_addr.msf.second = buffer[10];
		entry->cdte_addr.msf.frame = buffer[11];
		/*
		 * Send a READ TOC command using the LBA address format to get
		 * the LBA for the track requested so it can be used in the
		 * READ HEADER request
		 *
		 * Note: The MSF bit of the READ HEADER command specifies the
		 * output format. The block address specified in that command
		 * must be in LBA format.
		 */
		cdb[1] = 0;
		rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_SYSSPACE,
		    SD_PATH_STANDARD);
		if (rval != 0) {
			kmem_free(buffer, 12);
			kmem_free(com, sizeof (*com));
			return (rval);
		}
	}

	/*
	 * Build and send the READ HEADER command to determine the data mode of
	 * the user specified track.
	 */
	if ((entry->cdte_ctrl & CDROM_DATA_TRACK) &&
	    (entry->cdte_track != CDROM_LEADOUT)) {
		bzero(cdb, CDB_GROUP1);
		cdb[0] = SCMD_READ_HEADER;
		cdb[2] = buffer[8];
		cdb[3] = buffer[9];
		cdb[4] = buffer[10];
		cdb[5] = buffer[11];
		cdb[8] = 0x08;
		com->uscsi_buflen = 0x08;
		rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_SYSSPACE,
		    SD_PATH_STANDARD);
		if (rval == 0) {
			entry->cdte_datamode = buffer[0];
		} else {
			/*
			 * The READ HEADER command failed; since the command is
			 * obsoleted in one spec, it's better to return -1 for
			 * an invalid track so that we can still receive the
			 * rest of the TOC data.
			 */
			entry->cdte_datamode = (uchar_t)-1;
		}
	} else {
		entry->cdte_datamode = (uchar_t)-1;
	}

	kmem_free(buffer, 12);
	kmem_free(com, sizeof (*com));
	if (ddi_copyout(entry, data, sizeof (struct cdrom_tocentry), flag) != 0)
		return (EFAULT);

	return (rval);
}
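
/*
 * Illustrative userland sketch (not part of the driver build): fetching one
 * TOC entry via CDROMREADTOCENTRY. The caller picks the track and address
 * format; sr_read_tocentry() fills in the ADR/CTRL fields, the address, and
 * cdte_datamode (or -1 when READ HEADER is not usable).
 */
#if 0
#include <sys/cdio.h>
#include <stdio.h>
#include <unistd.h>

static int
print_track_start(int fd, unsigned char trk)
{
	struct cdrom_tocentry te;

	te.cdte_track = trk;
	te.cdte_format = CDROM_MSF;
	if (ioctl(fd, CDROMREADTOCENTRY, &te) != 0)
		return (-1);
	(void) printf("track %u starts at %02u:%02u:%02u\n", trk,
	    te.cdte_addr.msf.minute, te.cdte_addr.msf.second,
	    te.cdte_addr.msf.frame);
	return (0);
}
#endif
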
/*
 * Function: sr_read_tochdr()
 *
 * Description: This routine is the driver entry point for handling CD-ROM
 *		ioctl requests to read the Table of Contents (TOC) header
 *		(CDROMREADTOCHDR). The TOC header consists of the disk starting
 *		and ending track numbers.
 *
 * Arguments: dev - the device 'dev_t'
 *	      data - pointer to user provided toc header structure,
 *		     specifying the starting and ending track numbers.
 *	      flag - this argument is a pass through to ddi_copyxxx()
 *		     directly from the mode argument of ioctl().
 *
 * Return Code: the code returned by sd_send_scsi_cmd()
 *		EFAULT if ddi_copyxxx() fails
 *		ENXIO if fail ddi_get_soft_state
 *		EINVAL if data pointer is NULL
 */

static int
sr_read_tochdr(dev_t dev, caddr_t data, int flag)
{
	struct sd_lun		*un;
	struct uscsi_cmd	*com;
	struct cdrom_tochdr	toc_header;
	struct cdrom_tochdr	*hdr = &toc_header;
	char			cdb[CDB_GROUP1];
	int			rval;
	caddr_t			buffer;

	if (data == NULL) {
		return (EINVAL);
	}

	if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL ||
	    (un->un_state == SD_STATE_OFFLINE)) {
		return (ENXIO);
	}

	buffer = kmem_zalloc(4, KM_SLEEP);
	bzero(cdb, CDB_GROUP1);
	cdb[0] = SCMD_READ_TOC;
	/*
	 * Specifying a track number of 0x00 in the READ TOC command indicates
	 * that the TOC header should be returned
	 */
	cdb[6] = 0x00;
	/*
	 * Bytes 7 & 8 are the 4 byte allocation length for TOC header.
	 * (2 byte data len + 1 byte starting track # + 1 byte ending track #)
	 */
	cdb[8] = 0x04;
	com = kmem_zalloc(sizeof (*com), KM_SLEEP);
	com->uscsi_cdb = cdb;
	com->uscsi_cdblen = CDB_GROUP1;
	com->uscsi_bufaddr = buffer;
	com->uscsi_buflen = 0x04;
	com->uscsi_timeout = 300;
	com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT|USCSI_READ;

	rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_SYSSPACE,
	    SD_PATH_STANDARD);
	if (un->un_f_cfg_read_toc_trk_bcd == TRUE) {
		hdr->cdth_trk0 = BCD_TO_BYTE(buffer[2]);
		hdr->cdth_trk1 = BCD_TO_BYTE(buffer[3]);
	} else {
		hdr->cdth_trk0 = buffer[2];
		hdr->cdth_trk1 = buffer[3];
	}
	kmem_free(buffer, 4);
	kmem_free(com, sizeof (*com));
	if (ddi_copyout(hdr, data, sizeof (struct cdrom_tochdr), flag) != 0) {
		return (EFAULT);
	}
	return (rval);
}
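
/*
 * Illustrative userland sketch (not part of the driver build): reading the
 * TOC header via CDROMREADTOCHDR to learn the first and last track numbers
 * that sr_read_tochdr() copies out.
 */
#if 0
#include <sys/cdio.h>
#include <stdio.h>
#include <unistd.h>

static int
print_track_range(int fd)
{
	struct cdrom_tochdr th;

	if (ioctl(fd, CDROMREADTOCHDR, &th) != 0)
		return (-1);
	(void) printf("tracks %d through %d\n", th.cdth_trk0, th.cdth_trk1);
	return (0);
}
#endif
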
/*
 * Note: The following sr_read_mode1(), sr_read_cd_mode2(), sr_read_mode2(),
 * sr_read_cdda(), and sr_read_cdxa() routines implement driver support for
 * handling CDROMREAD ioctl requests for mode 1 user data, mode 2 user data,
 * digital audio and extended architecture digital audio. These modes are
 * defined in the IEC908 (Red Book), ISO10149 (Yellow Book), and the SCSI3
 * MMC specs.
 *
 * In addition to support for the various data formats these routines also
 * include support for devices that implement only the direct access READ
 * commands (0x08, 0x28), devices that implement the READ_CD commands
 * (0xBE, 0xD4), and devices that implement the vendor unique READ CDDA and
 * READ CDXA commands (0xD8, 0xDB).
 */

/*
 * Function: sr_read_mode1()
 *
 * Description: This routine is the driver entry point for handling CD-ROM
 *		ioctl read mode1 requests (CDROMREADMODE1).
 *
 * Arguments: dev - the device 'dev_t'
 *	      data - pointer to user provided cd read structure specifying
 *		     the lba buffer address and length.
 *	      flag - this argument is a pass through to ddi_copyxxx()
 *		     directly from the mode argument of ioctl().
 *
 * Return Code: the code returned by sd_send_scsi_cmd()
 *		EFAULT if ddi_copyxxx() fails
 *		ENXIO if fail ddi_get_soft_state
 *		EINVAL if data pointer is NULL
 */

static int
sr_read_mode1(dev_t dev, caddr_t data, int flag)
{
	struct sd_lun		*un;
	struct cdrom_read	mode1_struct;
	struct cdrom_read	*mode1 = &mode1_struct;
	int			rval;
	sd_ssc_t		*ssc;

#ifdef _MULTI_DATAMODEL
	/* To support ILP32 applications in an LP64 world */
	struct cdrom_read32	cdrom_read32;
	struct cdrom_read32	*cdrd32 = &cdrom_read32;
#endif /* _MULTI_DATAMODEL */

	if (data == NULL) {
		return (EINVAL);
	}

	if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL ||
	    (un->un_state == SD_STATE_OFFLINE)) {
		return (ENXIO);
	}

	SD_TRACE(SD_LOG_ATTACH_DETACH, un,
	    "sr_read_mode1: entry: un:0x%p\n", un);

#ifdef _MULTI_DATAMODEL
	switch (ddi_model_convert_from(flag & FMODELS)) {
	case DDI_MODEL_ILP32:
		if (ddi_copyin(data, cdrd32, sizeof (*cdrd32), flag) != 0) {
			return (EFAULT);
		}
		/* Convert the ILP32 uscsi data from the application to LP64 */
		cdrom_read32tocdrom_read(cdrd32, mode1);
		break;
	case DDI_MODEL_NONE:
		if (ddi_copyin(data, mode1, sizeof (struct cdrom_read), flag)) {
			return (EFAULT);
		}
	}
#else /* ! _MULTI_DATAMODEL */
	if (ddi_copyin(data, mode1, sizeof (struct cdrom_read), flag)) {
		return (EFAULT);
	}
#endif /* _MULTI_DATAMODEL */

	ssc = sd_ssc_init(un);
	rval = sd_send_scsi_READ(ssc, mode1->cdread_bufaddr,
	    mode1->cdread_buflen, mode1->cdread_lba, SD_PATH_STANDARD);
	sd_ssc_fini(ssc);

	SD_TRACE(SD_LOG_ATTACH_DETACH, un,
	    "sr_read_mode1: exit: un:0x%p\n", un);

	return (rval);
}
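
/*
 * Illustrative userland sketch (not part of the driver build): a mode 1
 * read through CDROMREADMODE1 with the struct cdrom_read fields that
 * sr_read_mode1() passes to sd_send_scsi_READ(); the helper name and
 * caller-supplied LBA/buffer are placeholders.
 */
#if 0
#include <sys/cdio.h>
#include <unistd.h>

static int
read_mode1_block(int fd, int lba, char *buf, int len)
{
	struct cdrom_read cr;

	cr.cdread_lba = lba;
	cr.cdread_bufaddr = buf;
	cr.cdread_buflen = len;	/* multiple of the 2048-byte user data size */
	return (ioctl(fd, CDROMREADMODE1, &cr));
}
#endif
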
/*
 * Function: sr_read_cd_mode2()
 *
 * Description: This routine is the driver entry point for handling CD-ROM
 *		ioctl read mode2 requests (CDROMREADMODE2) for devices that
 *		support the READ CD (0xBE) command or the 1st generation
 *		READ CD (0xD4) command.
 *
 * Arguments: dev - the device 'dev_t'
 *	      data - pointer to user provided cd read structure specifying
 *		     the lba buffer address and length.
 *	      flag - this argument is a pass through to ddi_copyxxx()
 *		     directly from the mode argument of ioctl().
 *
 * Return Code: the code returned by sd_send_scsi_cmd()
 *		EFAULT if ddi_copyxxx() fails
 *		ENXIO if fail ddi_get_soft_state
 *		EINVAL if data pointer is NULL
 */

static int
sr_read_cd_mode2(dev_t dev, caddr_t data, int flag)
{
	struct sd_lun		*un;
	struct uscsi_cmd	*com;
	struct cdrom_read	mode2_struct;
	struct cdrom_read	*mode2 = &mode2_struct;
	uchar_t			cdb[CDB_GROUP5];
	int			nblocks;
	int			rval;
#ifdef _MULTI_DATAMODEL
	/* To support ILP32 applications in an LP64 world */
	struct cdrom_read32	cdrom_read32;
	struct cdrom_read32	*cdrd32 = &cdrom_read32;
#endif /* _MULTI_DATAMODEL */

	if (data == NULL) {
		return (EINVAL);
	}

	if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL ||
	    (un->un_state == SD_STATE_OFFLINE)) {
		return (ENXIO);
	}

#ifdef _MULTI_DATAMODEL
	switch (ddi_model_convert_from(flag & FMODELS)) {
	case DDI_MODEL_ILP32:
		if (ddi_copyin(data, cdrd32, sizeof (*cdrd32), flag) != 0) {
			return (EFAULT);
		}
		/* Convert the ILP32 uscsi data from the application to LP64 */
		cdrom_read32tocdrom_read(cdrd32, mode2);
		break;
	case DDI_MODEL_NONE:
		if (ddi_copyin(data, mode2, sizeof (*mode2), flag) != 0) {
			return (EFAULT);
		}
		break;
	}

#else /* ! _MULTI_DATAMODEL */
	if (ddi_copyin(data, mode2, sizeof (*mode2), flag) != 0) {
		return (EFAULT);
	}
#endif /* _MULTI_DATAMODEL */

	bzero(cdb, sizeof (cdb));
	if (un->un_f_cfg_read_cd_xd4 == TRUE) {
		/* Read command supported by 1st generation atapi drives */
		cdb[0] = SCMD_READ_CDD4;
	} else {
		/* Universal CD Access Command */
		cdb[0] = SCMD_READ_CD;
	}

	/*
	 * Set expected sector type to: 2336 byte, Mode 2 Yellow Book
	 */
	cdb[1] = CDROM_SECTOR_TYPE_MODE2;

	/* set the start address */
	cdb[2] = (uchar_t)((mode2->cdread_lba >> 24) & 0XFF);
	cdb[3] = (uchar_t)((mode2->cdread_lba >> 16) & 0XFF);
	cdb[4] = (uchar_t)((mode2->cdread_lba >> 8) & 0xFF);
	cdb[5] = (uchar_t)(mode2->cdread_lba & 0xFF);

	/* set the transfer length */
	nblocks = mode2->cdread_buflen / 2336;
	cdb[6] = (uchar_t)(nblocks >> 16);
	cdb[7] = (uchar_t)(nblocks >> 8);
	cdb[8] = (uchar_t)nblocks;

	/* set the filter bits */
	cdb[9] = CDROM_READ_CD_USERDATA;

	com = kmem_zalloc(sizeof (*com), KM_SLEEP);
	com->uscsi_cdb = (caddr_t)cdb;
	com->uscsi_cdblen = sizeof (cdb);
	com->uscsi_bufaddr = mode2->cdread_bufaddr;
	com->uscsi_buflen = mode2->cdread_buflen;
	com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT|USCSI_READ;

	rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_USERSPACE,
	    SD_PATH_STANDARD);
	kmem_free(com, sizeof (*com));
	return (rval);
}


/*
 * Function: sr_read_mode2()
 *
 * Description: This routine is the driver entry point for handling CD-ROM
 *		ioctl read mode2 requests (CDROMREADMODE2) for devices that
 *		do not support the READ CD (0xBE) command.
 *
 * Arguments: dev - the device 'dev_t'
 *	      data - pointer to user provided cd read structure specifying
 *		     the lba buffer address and length.
 *	      flag - this argument is a pass through to ddi_copyxxx()
 *		     directly from the mode argument of ioctl().
27133 * 27134 * Return Code: the code returned by sd_send_scsi_cmd() 27135 * EFAULT if ddi_copyxxx() fails 27136 * ENXIO if fail ddi_get_soft_state 27137 * EINVAL if data pointer is NULL 27138 * EIO if fail to reset block size 27139 * EAGAIN if commands are in progress in the driver 27140 */ 27141 27142 static int 27143 sr_read_mode2(dev_t dev, caddr_t data, int flag) 27144 { 27145 struct sd_lun *un; 27146 struct cdrom_read mode2_struct; 27147 struct cdrom_read *mode2 = &mode2_struct; 27148 int rval; 27149 uint32_t restore_blksize; 27150 struct uscsi_cmd *com; 27151 uchar_t cdb[CDB_GROUP0]; 27152 int nblocks; 27153 27154 #ifdef _MULTI_DATAMODEL 27155 /* To support ILP32 applications in an LP64 world */ 27156 struct cdrom_read32 cdrom_read32; 27157 struct cdrom_read32 *cdrd32 = &cdrom_read32; 27158 #endif /* _MULTI_DATAMODEL */ 27159 27160 if (data == NULL) { 27161 return (EINVAL); 27162 } 27163 27164 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL || 27165 (un->un_state == SD_STATE_OFFLINE)) { 27166 return (ENXIO); 27167 } 27168 27169 /* 27170 * Because this routine will update the device and driver block size 27171 * being used we want to make sure there are no commands in progress. 27172 * If commands are in progress the user will have to try again. 27173 * 27174 * We check for 1 instead of 0 because we increment un_ncmds_in_driver 27175 * in sdioctl to protect commands from sdioctl through to the top of 27176 * sd_uscsi_strategy. See sdioctl for details. 27177 */ 27178 mutex_enter(SD_MUTEX(un)); 27179 if (un->un_ncmds_in_driver != 1) { 27180 mutex_exit(SD_MUTEX(un)); 27181 return (EAGAIN); 27182 } 27183 mutex_exit(SD_MUTEX(un)); 27184 27185 SD_TRACE(SD_LOG_ATTACH_DETACH, un, 27186 "sd_read_mode2: entry: un:0x%p\n", un); 27187 27188 #ifdef _MULTI_DATAMODEL 27189 switch (ddi_model_convert_from(flag & FMODELS)) { 27190 case DDI_MODEL_ILP32: 27191 if (ddi_copyin(data, cdrd32, sizeof (*cdrd32), flag) != 0) { 27192 return (EFAULT); 27193 } 27194 /* Convert the ILP32 uscsi data from the application to LP64 */ 27195 cdrom_read32tocdrom_read(cdrd32, mode2); 27196 break; 27197 case DDI_MODEL_NONE: 27198 if (ddi_copyin(data, mode2, sizeof (*mode2), flag) != 0) { 27199 return (EFAULT); 27200 } 27201 break; 27202 } 27203 #else /* ! 
	if (ddi_copyin(data, mode2, sizeof (*mode2), flag)) {
		return (EFAULT);
	}
#endif /* _MULTI_DATAMODEL */

	/* Store the current target block size for restoration later */
	restore_blksize = un->un_tgt_blocksize;

	/* Change the device and soft state target block size to 2336 */
	if (sr_sector_mode(dev, SD_MODE2_BLKSIZE) != 0) {
		rval = EIO;
		goto done;
	}


	bzero(cdb, sizeof (cdb));

	/* set READ operation */
	cdb[0] = SCMD_READ;

	/* adjust lba for 2kbyte blocks from 512 byte blocks */
	mode2->cdread_lba >>= 2;

	/* set the start address */
	cdb[1] = (uchar_t)((mode2->cdread_lba >> 16) & 0X1F);
	cdb[2] = (uchar_t)((mode2->cdread_lba >> 8) & 0xFF);
	cdb[3] = (uchar_t)(mode2->cdread_lba & 0xFF);

	/* set the transfer length */
	nblocks = mode2->cdread_buflen / 2336;
	cdb[4] = (uchar_t)nblocks & 0xFF;

	/* build command */
	com = kmem_zalloc(sizeof (*com), KM_SLEEP);
	com->uscsi_cdb = (caddr_t)cdb;
	com->uscsi_cdblen = sizeof (cdb);
	com->uscsi_bufaddr = mode2->cdread_bufaddr;
	com->uscsi_buflen = mode2->cdread_buflen;
	com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT|USCSI_READ;

	/*
	 * Issue SCSI command with user space address for read buffer.
	 *
	 * This sends the command through main channel in the driver.
	 *
	 * Since this is accessed via an IOCTL call, we go through the
	 * standard path, so that if the device was powered down, then
	 * it would be 'awakened' to handle the command.
	 */
	rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_USERSPACE,
	    SD_PATH_STANDARD);

	kmem_free(com, sizeof (*com));

	/* Restore the device and soft state target block size */
	if (sr_sector_mode(dev, restore_blksize) != 0) {
		scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
		    "can't do switch back to mode 1\n");
		/*
		 * If sd_send_scsi_READ succeeded we still need to report
		 * an error because we failed to reset the block size
		 */
		if (rval == 0) {
			rval = EIO;
		}
	}

done:
	SD_TRACE(SD_LOG_ATTACH_DETACH, un,
	    "sd_read_mode2: exit: un:0x%p\n", un);

	return (rval);
}
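
/*
 * Illustrative userland sketch (not part of the driver build): a mode 2
 * read through CDROMREADMODE2. Whether sr_read_cd_mode2() or the
 * block-size-switching sr_read_mode2() above services it depends on the
 * device's READ CD support; either way the caller supplies 2336-byte
 * multiples. The EAGAIN retry and its bound are assumptions based on the
 * legacy path's in-progress-command check.
 */
#if 0
#include <sys/cdio.h>
#include <errno.h>
#include <unistd.h>

static int
read_mode2_block(int fd, int lba, char *buf)
{
	struct cdrom_read cr;
	int tries = 3;

	cr.cdread_lba = lba;
	cr.cdread_bufaddr = buf;
	cr.cdread_buflen = 2336;	/* one raw mode 2 sector */
	while (ioctl(fd, CDROMREADMODE2, &cr) != 0) {
		if (errno != EAGAIN || --tries == 0)
			return (-1);
	}
	return (0);
}
#endif
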
/*
 * Function: sr_sector_mode()
 *
 * Description: This utility function is used by sr_read_mode2 to set the
 *		target block size based on the user specified size. This is a
 *		legacy implementation based upon a vendor specific mode page.
 *
 * Arguments: dev - the device 'dev_t'
 *	      blksize - flag indicating if block size is being set to 2336 or
 *		        512.
 *
 * Return Code: the code returned by sd_send_scsi_cmd()
 *		EFAULT if ddi_copyxxx() fails
 *		ENXIO if fail ddi_get_soft_state
 *		EINVAL if data pointer is NULL
 */

static int
sr_sector_mode(dev_t dev, uint32_t blksize)
{
	struct sd_lun	*un;
	uchar_t		*sense;
	uchar_t		*select;
	int		rval;
	sd_ssc_t	*ssc;

	if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL ||
	    (un->un_state == SD_STATE_OFFLINE)) {
		return (ENXIO);
	}

	sense = kmem_zalloc(20, KM_SLEEP);

	/* Note: This is a vendor specific mode page (0x81) */
	ssc = sd_ssc_init(un);
	rval = sd_send_scsi_MODE_SENSE(ssc, CDB_GROUP0, sense, 20, 0x81,
	    SD_PATH_STANDARD);
	sd_ssc_fini(ssc);
	if (rval != 0) {
		SD_ERROR(SD_LOG_IOCTL_RMMEDIA, un,
		    "sr_sector_mode: Mode Sense failed\n");
		kmem_free(sense, 20);
		return (rval);
	}
	select = kmem_zalloc(20, KM_SLEEP);
	select[3] = 0x08;
	select[10] = ((blksize >> 8) & 0xff);
	select[11] = (blksize & 0xff);
	select[12] = 0x01;
	select[13] = 0x06;
	select[14] = sense[14];
	select[15] = sense[15];
	if (blksize == SD_MODE2_BLKSIZE) {
		select[14] |= 0x01;
	}

	ssc = sd_ssc_init(un);
	rval = sd_send_scsi_MODE_SELECT(ssc, CDB_GROUP0, select, 20,
	    SD_DONTSAVE_PAGE, SD_PATH_STANDARD);
	sd_ssc_fini(ssc);
	if (rval != 0) {
		SD_ERROR(SD_LOG_IOCTL_RMMEDIA, un,
		    "sr_sector_mode: Mode Select failed\n");
	} else {
		/*
		 * Only update the softstate block size if we successfully
		 * changed the device block mode.
		 */
		mutex_enter(SD_MUTEX(un));
		sd_update_block_info(un, blksize, 0);
		mutex_exit(SD_MUTEX(un));
	}
	kmem_free(sense, 20);
	kmem_free(select, 20);
	return (rval);
}


/*
 * Function: sr_read_cdda()
 *
 * Description: This routine is the driver entry point for handling CD-ROM
 *		ioctl requests to return CD-DA or subcode data (CDROMCDDA). If
 *		the target supports CDDA these requests are handled via a
 *		vendor specific command (0xD8). If the target does not support
 *		CDDA these requests are handled via the READ CD command (0xBE).
 *
 * Arguments: dev - the device 'dev_t'
 *	      data - pointer to user provided CD-DA structure specifying
 *		     the track starting address, transfer length, and
 *		     subcode options.
 *	      flag - this argument is a pass through to ddi_copyxxx()
 *		     directly from the mode argument of ioctl().
27372 * 27373 * Return Code: the code returned by sd_send_scsi_cmd() 27374 * EFAULT if ddi_copyxxx() fails 27375 * ENXIO if fail ddi_get_soft_state 27376 * EINVAL if invalid arguments are provided 27377 * ENOTTY 27378 */ 27379 27380 static int 27381 sr_read_cdda(dev_t dev, caddr_t data, int flag) 27382 { 27383 struct sd_lun *un; 27384 struct uscsi_cmd *com; 27385 struct cdrom_cdda *cdda; 27386 int rval; 27387 size_t buflen; 27388 char cdb[CDB_GROUP5]; 27389 27390 #ifdef _MULTI_DATAMODEL 27391 /* To support ILP32 applications in an LP64 world */ 27392 struct cdrom_cdda32 cdrom_cdda32; 27393 struct cdrom_cdda32 *cdda32 = &cdrom_cdda32; 27394 #endif /* _MULTI_DATAMODEL */ 27395 27396 if (data == NULL) { 27397 return (EINVAL); 27398 } 27399 27400 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 27401 return (ENXIO); 27402 } 27403 27404 cdda = kmem_zalloc(sizeof (struct cdrom_cdda), KM_SLEEP); 27405 27406 #ifdef _MULTI_DATAMODEL 27407 switch (ddi_model_convert_from(flag & FMODELS)) { 27408 case DDI_MODEL_ILP32: 27409 if (ddi_copyin(data, cdda32, sizeof (*cdda32), flag)) { 27410 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 27411 "sr_read_cdda: ddi_copyin Failed\n"); 27412 kmem_free(cdda, sizeof (struct cdrom_cdda)); 27413 return (EFAULT); 27414 } 27415 /* Convert the ILP32 uscsi data from the application to LP64 */ 27416 cdrom_cdda32tocdrom_cdda(cdda32, cdda); 27417 break; 27418 case DDI_MODEL_NONE: 27419 if (ddi_copyin(data, cdda, sizeof (struct cdrom_cdda), flag)) { 27420 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 27421 "sr_read_cdda: ddi_copyin Failed\n"); 27422 kmem_free(cdda, sizeof (struct cdrom_cdda)); 27423 return (EFAULT); 27424 } 27425 break; 27426 } 27427 #else /* ! _MULTI_DATAMODEL */ 27428 if (ddi_copyin(data, cdda, sizeof (struct cdrom_cdda), flag)) { 27429 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 27430 "sr_read_cdda: ddi_copyin Failed\n"); 27431 kmem_free(cdda, sizeof (struct cdrom_cdda)); 27432 return (EFAULT); 27433 } 27434 #endif /* _MULTI_DATAMODEL */ 27435 27436 /* 27437 * Since MMC-2 expects max 3 bytes for length, check if the 27438 * length input is greater than 3 bytes 27439 */ 27440 if ((cdda->cdda_length & 0xFF000000) != 0) { 27441 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, "sr_read_cdda: " 27442 "cdrom transfer length too large: %d (limit %d)\n", 27443 cdda->cdda_length, 0xFFFFFF); 27444 kmem_free(cdda, sizeof (struct cdrom_cdda)); 27445 return (EINVAL); 27446 } 27447 27448 switch (cdda->cdda_subcode) { 27449 case CDROM_DA_NO_SUBCODE: 27450 buflen = CDROM_BLK_2352 * cdda->cdda_length; 27451 break; 27452 case CDROM_DA_SUBQ: 27453 buflen = CDROM_BLK_2368 * cdda->cdda_length; 27454 break; 27455 case CDROM_DA_ALL_SUBCODE: 27456 buflen = CDROM_BLK_2448 * cdda->cdda_length; 27457 break; 27458 case CDROM_DA_SUBCODE_ONLY: 27459 buflen = CDROM_BLK_SUBCODE * cdda->cdda_length; 27460 break; 27461 default: 27462 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 27463 "sr_read_cdda: Subcode '0x%x' Not Supported\n", 27464 cdda->cdda_subcode); 27465 kmem_free(cdda, sizeof (struct cdrom_cdda)); 27466 return (EINVAL); 27467 } 27468 27469 /* Build and send the command */ 27470 com = kmem_zalloc(sizeof (*com), KM_SLEEP); 27471 bzero(cdb, CDB_GROUP5); 27472 27473 if (un->un_f_cfg_cdda == TRUE) { 27474 cdb[0] = (char)SCMD_READ_CD; 27475 cdb[1] = 0x04; 27476 cdb[2] = (((cdda->cdda_addr) & 0xff000000) >> 24); 27477 cdb[3] = (((cdda->cdda_addr) & 0x00ff0000) >> 16); 27478 cdb[4] = (((cdda->cdda_addr) & 0x0000ff00) >> 8); 27479 cdb[5] = ((cdda->cdda_addr) & 0x000000ff); 27480 
cdb[6] = (((cdda->cdda_length) & 0x00ff0000) >> 16); 27481 cdb[7] = (((cdda->cdda_length) & 0x0000ff00) >> 8); 27482 cdb[8] = ((cdda->cdda_length) & 0x000000ff); 27483 cdb[9] = 0x10; 27484 switch (cdda->cdda_subcode) { 27485 case CDROM_DA_NO_SUBCODE : 27486 cdb[10] = 0x0; 27487 break; 27488 case CDROM_DA_SUBQ : 27489 cdb[10] = 0x2; 27490 break; 27491 case CDROM_DA_ALL_SUBCODE : 27492 cdb[10] = 0x1; 27493 break; 27494 case CDROM_DA_SUBCODE_ONLY : 27495 /* FALLTHROUGH */ 27496 default : 27497 kmem_free(cdda, sizeof (struct cdrom_cdda)); 27498 kmem_free(com, sizeof (*com)); 27499 return (ENOTTY); 27500 } 27501 } else { 27502 cdb[0] = (char)SCMD_READ_CDDA; 27503 cdb[2] = (((cdda->cdda_addr) & 0xff000000) >> 24); 27504 cdb[3] = (((cdda->cdda_addr) & 0x00ff0000) >> 16); 27505 cdb[4] = (((cdda->cdda_addr) & 0x0000ff00) >> 8); 27506 cdb[5] = ((cdda->cdda_addr) & 0x000000ff); 27507 cdb[6] = (((cdda->cdda_length) & 0xff000000) >> 24); 27508 cdb[7] = (((cdda->cdda_length) & 0x00ff0000) >> 16); 27509 cdb[8] = (((cdda->cdda_length) & 0x0000ff00) >> 8); 27510 cdb[9] = ((cdda->cdda_length) & 0x000000ff); 27511 cdb[10] = cdda->cdda_subcode; 27512 } 27513 27514 com->uscsi_cdb = cdb; 27515 com->uscsi_cdblen = CDB_GROUP5; 27516 com->uscsi_bufaddr = (caddr_t)cdda->cdda_data; 27517 com->uscsi_buflen = buflen; 27518 com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT|USCSI_READ; 27519 27520 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_USERSPACE, 27521 SD_PATH_STANDARD); 27522 27523 kmem_free(cdda, sizeof (struct cdrom_cdda)); 27524 kmem_free(com, sizeof (*com)); 27525 return (rval); 27526 } 27527 27528 27529 /* 27530 * Function: sr_read_cdxa() 27531 * 27532 * Description: This routine is the driver entry point for handling CD-ROM 27533 * ioctl requests to return CD-XA (Extended Architecture) data. 27534 * (CDROMCDXA). 27535 * 27536 * Arguments: dev - the device 'dev_t' 27537 * data - pointer to user provided CD-XA structure specifying 27538 * the data starting address, transfer length, and format 27539 * flag - this argument is a pass through to ddi_copyxxx() 27540 * directly from the mode argument of ioctl(). 27541 * 27542 * Return Code: the code returned by sd_send_scsi_cmd() 27543 * EFAULT if ddi_copyxxx() fails 27544 * ENXIO if fail ddi_get_soft_state 27545 * EINVAL if data pointer is NULL 27546 */ 27547 27548 static int 27549 sr_read_cdxa(dev_t dev, caddr_t data, int flag) 27550 { 27551 struct sd_lun *un; 27552 struct uscsi_cmd *com; 27553 struct cdrom_cdxa *cdxa; 27554 int rval; 27555 size_t buflen; 27556 char cdb[CDB_GROUP5]; 27557 uchar_t read_flags; 27558 27559 #ifdef _MULTI_DATAMODEL 27560 /* To support ILP32 applications in an LP64 world */ 27561 struct cdrom_cdxa32 cdrom_cdxa32; 27562 struct cdrom_cdxa32 *cdxa32 = &cdrom_cdxa32; 27563 #endif /* _MULTI_DATAMODEL */ 27564 27565 if (data == NULL) { 27566 return (EINVAL); 27567 } 27568 27569 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 27570 return (ENXIO); 27571 } 27572 27573 cdxa = kmem_zalloc(sizeof (struct cdrom_cdxa), KM_SLEEP); 27574 27575 #ifdef _MULTI_DATAMODEL 27576 switch (ddi_model_convert_from(flag & FMODELS)) { 27577 case DDI_MODEL_ILP32: 27578 if (ddi_copyin(data, cdxa32, sizeof (*cdxa32), flag)) { 27579 kmem_free(cdxa, sizeof (struct cdrom_cdxa)); 27580 return (EFAULT); 27581 } 27582 /* 27583 * Convert the ILP32 uscsi data from the 27584 * application to LP64 for internal use. 
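 * (The ILP32 version of the structure carries a 32-bit data
 * pointer, so its size and layout differ from the native LP64
 * structure; the conversion below normalizes it.)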
27585 */ 27586 cdrom_cdxa32tocdrom_cdxa(cdxa32, cdxa); 27587 break; 27588 case DDI_MODEL_NONE: 27589 if (ddi_copyin(data, cdxa, sizeof (struct cdrom_cdxa), flag)) { 27590 kmem_free(cdxa, sizeof (struct cdrom_cdxa)); 27591 return (EFAULT); 27592 } 27593 break; 27594 } 27595 #else /* ! _MULTI_DATAMODEL */ 27596 if (ddi_copyin(data, cdxa, sizeof (struct cdrom_cdxa), flag)) { 27597 kmem_free(cdxa, sizeof (struct cdrom_cdxa)); 27598 return (EFAULT); 27599 } 27600 #endif /* _MULTI_DATAMODEL */ 27601 27602 /* 27603 * Since MMC-2 expects max 3 bytes for length, check if the 27604 * length input is greater than 3 bytes 27605 */ 27606 if ((cdxa->cdxa_length & 0xFF000000) != 0) { 27607 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, "sr_read_cdxa: " 27608 "cdrom transfer length too large: %d (limit %d)\n", 27609 cdxa->cdxa_length, 0xFFFFFF); 27610 kmem_free(cdxa, sizeof (struct cdrom_cdxa)); 27611 return (EINVAL); 27612 } 27613 27614 switch (cdxa->cdxa_format) { 27615 case CDROM_XA_DATA: 27616 buflen = CDROM_BLK_2048 * cdxa->cdxa_length; 27617 read_flags = 0x10; 27618 break; 27619 case CDROM_XA_SECTOR_DATA: 27620 buflen = CDROM_BLK_2352 * cdxa->cdxa_length; 27621 read_flags = 0xf8; 27622 break; 27623 case CDROM_XA_DATA_W_ERROR: 27624 buflen = CDROM_BLK_2646 * cdxa->cdxa_length; 27625 read_flags = 0xfc; 27626 break; 27627 default: 27628 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 27629 "sr_read_cdxa: Format '0x%x' Not Supported\n", 27630 cdxa->cdxa_format); 27631 kmem_free(cdxa, sizeof (struct cdrom_cdxa)); 27632 return (EINVAL); 27633 } 27634 27635 com = kmem_zalloc(sizeof (*com), KM_SLEEP); 27636 bzero(cdb, CDB_GROUP5); 27637 if (un->un_f_mmc_cap == TRUE) { 27638 cdb[0] = (char)SCMD_READ_CD; 27639 cdb[2] = (((cdxa->cdxa_addr) & 0xff000000) >> 24); 27640 cdb[3] = (((cdxa->cdxa_addr) & 0x00ff0000) >> 16); 27641 cdb[4] = (((cdxa->cdxa_addr) & 0x0000ff00) >> 8); 27642 cdb[5] = ((cdxa->cdxa_addr) & 0x000000ff); 27643 cdb[6] = (((cdxa->cdxa_length) & 0x00ff0000) >> 16); 27644 cdb[7] = (((cdxa->cdxa_length) & 0x0000ff00) >> 8); 27645 cdb[8] = ((cdxa->cdxa_length) & 0x000000ff); 27646 cdb[9] = (char)read_flags; 27647 } else { 27648 /* 27649 * Note: A vendor specific command (0xDB) is being used here to 27650 * request a read of all subcodes.
27651 */ 27652 cdb[0] = (char)SCMD_READ_CDXA; 27653 cdb[2] = (((cdxa->cdxa_addr) & 0xff000000) >> 24); 27654 cdb[3] = (((cdxa->cdxa_addr) & 0x00ff0000) >> 16); 27655 cdb[4] = (((cdxa->cdxa_addr) & 0x0000ff00) >> 8); 27656 cdb[5] = ((cdxa->cdxa_addr) & 0x000000ff); 27657 cdb[6] = (((cdxa->cdxa_length) & 0xff000000) >> 24); 27658 cdb[7] = (((cdxa->cdxa_length) & 0x00ff0000) >> 16); 27659 cdb[8] = (((cdxa->cdxa_length) & 0x0000ff00) >> 8); 27660 cdb[9] = ((cdxa->cdxa_length) & 0x000000ff); 27661 cdb[10] = cdxa->cdxa_format; 27662 } 27663 com->uscsi_cdb = cdb; 27664 com->uscsi_cdblen = CDB_GROUP5; 27665 com->uscsi_bufaddr = (caddr_t)cdxa->cdxa_data; 27666 com->uscsi_buflen = buflen; 27667 com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT|USCSI_READ; 27668 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_USERSPACE, 27669 SD_PATH_STANDARD); 27670 kmem_free(cdxa, sizeof (struct cdrom_cdxa)); 27671 kmem_free(com, sizeof (*com)); 27672 return (rval); 27673 } 27674 27675 27676 /* 27677 * Function: sr_eject() 27678 * 27679 * Description: This routine is the driver entry point for handling CD-ROM 27680 * eject ioctl requests (FDEJECT, DKIOCEJECT, CDROMEJECT) 27681 * 27682 * Arguments: dev - the device 'dev_t' 27683 * 27684 * Return Code: the code returned by sd_send_scsi_cmd() 27685 */ 27686 27687 static int 27688 sr_eject(dev_t dev) 27689 { 27690 struct sd_lun *un; 27691 int rval; 27692 sd_ssc_t *ssc; 27693 27694 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL || 27695 (un->un_state == SD_STATE_OFFLINE)) { 27696 return (ENXIO); 27697 } 27698 27699 /* 27700 * To prevent race conditions with the eject 27701 * command, keep track of an eject command as 27702 * it progresses. If we are already handling 27703 * an eject command in the driver for the given 27704 * unit and another request to eject is received 27705 * immediately return EAGAIN so we don't lose 27706 * the command if the current eject command fails. 27707 */ 27708 mutex_enter(SD_MUTEX(un)); 27709 if (un->un_f_ejecting == TRUE) { 27710 mutex_exit(SD_MUTEX(un)); 27711 return (EAGAIN); 27712 } 27713 un->un_f_ejecting = TRUE; 27714 mutex_exit(SD_MUTEX(un)); 27715 27716 ssc = sd_ssc_init(un); 27717 rval = sd_send_scsi_DOORLOCK(ssc, SD_REMOVAL_ALLOW, 27718 SD_PATH_STANDARD); 27719 sd_ssc_fini(ssc); 27720 27721 if (rval != 0) { 27722 mutex_enter(SD_MUTEX(un)); 27723 un->un_f_ejecting = FALSE; 27724 mutex_exit(SD_MUTEX(un)); 27725 return (rval); 27726 } 27727 27728 ssc = sd_ssc_init(un); 27729 rval = sd_send_scsi_START_STOP_UNIT(ssc, SD_TARGET_EJECT, 27730 SD_PATH_STANDARD); 27731 sd_ssc_fini(ssc); 27732 27733 if (rval == 0) { 27734 mutex_enter(SD_MUTEX(un)); 27735 sr_ejected(un); 27736 un->un_mediastate = DKIO_EJECTED; 27737 un->un_f_ejecting = FALSE; 27738 cv_broadcast(&un->un_state_cv); 27739 mutex_exit(SD_MUTEX(un)); 27740 } else { 27741 mutex_enter(SD_MUTEX(un)); 27742 un->un_f_ejecting = FALSE; 27743 mutex_exit(SD_MUTEX(un)); 27744 } 27745 return (rval); 27746 } 27747 27748 27749 /* 27750 * Function: sr_ejected() 27751 * 27752 * Description: This routine updates the soft state structure to invalidate the 27753 * geometry information after the media has been ejected or a 27754 * media eject has been detected. 
27755 * 27756 * Arguments: un - driver soft state (unit) structure 27757 */ 27758 27759 static void 27760 sr_ejected(struct sd_lun *un) 27761 { 27762 struct sd_errstats *stp; 27763 27764 ASSERT(un != NULL); 27765 ASSERT(mutex_owned(SD_MUTEX(un))); 27766 27767 un->un_f_blockcount_is_valid = FALSE; 27768 un->un_f_tgt_blocksize_is_valid = FALSE; 27769 mutex_exit(SD_MUTEX(un)); 27770 cmlb_invalidate(un->un_cmlbhandle, (void *)SD_PATH_DIRECT_PRIORITY); 27771 mutex_enter(SD_MUTEX(un)); 27772 27773 if (un->un_errstats != NULL) { 27774 stp = (struct sd_errstats *)un->un_errstats->ks_data; 27775 stp->sd_capacity.value.ui64 = 0; 27776 } 27777 } 27778 27779 27780 /* 27781 * Function: sr_check_wp() 27782 * 27783 * Description: This routine checks the write protection of a removable 27784 * media disk and hotpluggable devices via the write protect bit of 27785 * the Mode Page Header device specific field. Some devices choke 27786 * on unsupported mode page. In order to workaround this issue, 27787 * this routine has been implemented to use 0x3f mode page(request 27788 * for all pages) for all device types. 27789 * 27790 * Arguments: dev - the device 'dev_t' 27791 * 27792 * Return Code: int indicating if the device is write protected (1) or not (0) 27793 * 27794 * Context: Kernel thread. 27795 * 27796 */ 27797 27798 static int 27799 sr_check_wp(dev_t dev) 27800 { 27801 struct sd_lun *un; 27802 uchar_t device_specific; 27803 uchar_t *sense; 27804 int hdrlen; 27805 int rval = FALSE; 27806 int status; 27807 sd_ssc_t *ssc; 27808 27809 /* 27810 * Note: The return codes for this routine should be reworked to 27811 * properly handle the case of a NULL softstate. 27812 */ 27813 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 27814 return (FALSE); 27815 } 27816 27817 if (un->un_f_cfg_is_atapi == TRUE) { 27818 /* 27819 * The mode page contents are not required; set the allocation 27820 * length for the mode page header only 27821 */ 27822 hdrlen = MODE_HEADER_LENGTH_GRP2; 27823 sense = kmem_zalloc(hdrlen, KM_SLEEP); 27824 ssc = sd_ssc_init(un); 27825 status = sd_send_scsi_MODE_SENSE(ssc, CDB_GROUP1, sense, hdrlen, 27826 MODEPAGE_ALLPAGES, SD_PATH_STANDARD); 27827 sd_ssc_fini(ssc); 27828 if (status != 0) 27829 goto err_exit; 27830 device_specific = 27831 ((struct mode_header_grp2 *)sense)->device_specific; 27832 } else { 27833 hdrlen = MODE_HEADER_LENGTH; 27834 sense = kmem_zalloc(hdrlen, KM_SLEEP); 27835 ssc = sd_ssc_init(un); 27836 status = sd_send_scsi_MODE_SENSE(ssc, CDB_GROUP0, sense, hdrlen, 27837 MODEPAGE_ALLPAGES, SD_PATH_STANDARD); 27838 sd_ssc_fini(ssc); 27839 if (status != 0) 27840 goto err_exit; 27841 device_specific = 27842 ((struct mode_header *)sense)->device_specific; 27843 } 27844 27845 27846 /* 27847 * Write protect mode sense failed; not all disks 27848 * understand this query. Return FALSE assuming that 27849 * these devices are not writable. 27850 */ 27851 if (device_specific & WRITE_PROTECT) { 27852 rval = TRUE; 27853 } 27854 27855 err_exit: 27856 kmem_free(sense, hdrlen); 27857 return (rval); 27858 } 27859 27860 /* 27861 * Function: sr_volume_ctrl() 27862 * 27863 * Description: This routine is the driver entry point for handling CD-ROM 27864 * audio output volume ioctl requests. (CDROMVOLCTRL) 27865 * 27866 * Arguments: dev - the device 'dev_t' 27867 * data - pointer to user audio volume control structure 27868 * flag - this argument is a pass through to ddi_copyxxx() 27869 * directly from the mode argument of ioctl(). 
27870 * 27871 * Return Code: the code returned by sd_send_scsi_cmd() 27872 * EFAULT if ddi_copyxxx() fails 27873 * ENXIO if fail ddi_get_soft_state 27874 * EINVAL if data pointer is NULL 27875 * 27876 */ 27877 27878 static int 27879 sr_volume_ctrl(dev_t dev, caddr_t data, int flag) 27880 { 27881 struct sd_lun *un; 27882 struct cdrom_volctrl volume; 27883 struct cdrom_volctrl *vol = &volume; 27884 uchar_t *sense_page; 27885 uchar_t *select_page; 27886 uchar_t *sense; 27887 uchar_t *select; 27888 int sense_buflen; 27889 int select_buflen; 27890 int rval; 27891 sd_ssc_t *ssc; 27892 27893 if (data == NULL) { 27894 return (EINVAL); 27895 } 27896 27897 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL || 27898 (un->un_state == SD_STATE_OFFLINE)) { 27899 return (ENXIO); 27900 } 27901 27902 if (ddi_copyin(data, vol, sizeof (struct cdrom_volctrl), flag)) { 27903 return (EFAULT); 27904 } 27905 27906 if ((un->un_f_cfg_is_atapi == TRUE) || (un->un_f_mmc_cap == TRUE)) { 27907 struct mode_header_grp2 *sense_mhp; 27908 struct mode_header_grp2 *select_mhp; 27909 int bd_len; 27910 27911 sense_buflen = MODE_PARAM_LENGTH_GRP2 + MODEPAGE_AUDIO_CTRL_LEN; 27912 select_buflen = MODE_HEADER_LENGTH_GRP2 + 27913 MODEPAGE_AUDIO_CTRL_LEN; 27914 sense = kmem_zalloc(sense_buflen, KM_SLEEP); 27915 select = kmem_zalloc(select_buflen, KM_SLEEP); 27916 ssc = sd_ssc_init(un); 27917 rval = sd_send_scsi_MODE_SENSE(ssc, CDB_GROUP1, sense, 27918 sense_buflen, MODEPAGE_AUDIO_CTRL, 27919 SD_PATH_STANDARD); 27920 sd_ssc_fini(ssc); 27921 27922 if (rval != 0) { 27923 SD_ERROR(SD_LOG_IOCTL_RMMEDIA, un, 27924 "sr_volume_ctrl: Mode Sense Failed\n"); 27925 kmem_free(sense, sense_buflen); 27926 kmem_free(select, select_buflen); 27927 return (rval); 27928 } 27929 sense_mhp = (struct mode_header_grp2 *)sense; 27930 select_mhp = (struct mode_header_grp2 *)select; 27931 bd_len = (sense_mhp->bdesc_length_hi << 8) | 27932 sense_mhp->bdesc_length_lo; 27933 if (bd_len > MODE_BLK_DESC_LENGTH) { 27934 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 27935 "sr_volume_ctrl: Mode Sense returned invalid " 27936 "block descriptor length\n"); 27937 kmem_free(sense, sense_buflen); 27938 kmem_free(select, select_buflen); 27939 return (EIO); 27940 } 27941 sense_page = (uchar_t *) 27942 (sense + MODE_HEADER_LENGTH_GRP2 + bd_len); 27943 select_page = (uchar_t *)(select + MODE_HEADER_LENGTH_GRP2); 27944 select_mhp->length_msb = 0; 27945 select_mhp->length_lsb = 0; 27946 select_mhp->bdesc_length_hi = 0; 27947 select_mhp->bdesc_length_lo = 0; 27948 } else { 27949 struct mode_header *sense_mhp, *select_mhp; 27950 27951 sense_buflen = MODE_PARAM_LENGTH + MODEPAGE_AUDIO_CTRL_LEN; 27952 select_buflen = MODE_HEADER_LENGTH + MODEPAGE_AUDIO_CTRL_LEN; 27953 sense = kmem_zalloc(sense_buflen, KM_SLEEP); 27954 select = kmem_zalloc(select_buflen, KM_SLEEP); 27955 ssc = sd_ssc_init(un); 27956 rval = sd_send_scsi_MODE_SENSE(ssc, CDB_GROUP0, sense, 27957 sense_buflen, MODEPAGE_AUDIO_CTRL, 27958 SD_PATH_STANDARD); 27959 sd_ssc_fini(ssc); 27960 27961 if (rval != 0) { 27962 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 27963 "sr_volume_ctrl: Mode Sense Failed\n"); 27964 kmem_free(sense, sense_buflen); 27965 kmem_free(select, select_buflen); 27966 return (rval); 27967 } 27968 sense_mhp = (struct mode_header *)sense; 27969 select_mhp = (struct mode_header *)select; 27970 if (sense_mhp->bdesc_length > MODE_BLK_DESC_LENGTH) { 27971 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 27972 "sr_volume_ctrl: Mode Sense returned invalid " 27973 "block descriptor length\n"); 27974 
kmem_free(sense, sense_buflen); 27975 kmem_free(select, select_buflen); 27976 return (EIO); 27977 } 27978 sense_page = (uchar_t *) 27979 (sense + MODE_HEADER_LENGTH + sense_mhp->bdesc_length); 27980 select_page = (uchar_t *)(select + MODE_HEADER_LENGTH); 27981 select_mhp->length = 0; 27982 select_mhp->bdesc_length = 0; 27983 } 27984 /* 27985 * Note: An audio control data structure could be created and overlaid 27986 * on the following in place of the array indexing method implemented. 27987 */ 27988 27989 /* Build the select data for the user volume data */ 27990 select_page[0] = MODEPAGE_AUDIO_CTRL; 27991 select_page[1] = 0xE; 27992 /* Set the immediate bit */ 27993 select_page[2] = 0x04; 27994 /* Zero out reserved fields */ 27995 select_page[3] = 0x00; 27996 select_page[4] = 0x00; 27997 /* Return sense data for fields not to be modified */ 27998 select_page[5] = sense_page[5]; 27999 select_page[6] = sense_page[6]; 28000 select_page[7] = sense_page[7]; 28001 /* Set the user specified volume levels for channel 0 and 1 */ 28002 select_page[8] = 0x01; 28003 select_page[9] = vol->channel0; 28004 select_page[10] = 0x02; 28005 select_page[11] = vol->channel1; 28006 /* Channels 2 and 3 are currently unsupported so return the sense data */ 28007 select_page[12] = sense_page[12]; 28008 select_page[13] = sense_page[13]; 28009 select_page[14] = sense_page[14]; 28010 select_page[15] = sense_page[15]; 28011 28012 ssc = sd_ssc_init(un); 28013 if ((un->un_f_cfg_is_atapi == TRUE) || (un->un_f_mmc_cap == TRUE)) { 28014 rval = sd_send_scsi_MODE_SELECT(ssc, CDB_GROUP1, select, 28015 select_buflen, SD_DONTSAVE_PAGE, SD_PATH_STANDARD); 28016 } else { 28017 rval = sd_send_scsi_MODE_SELECT(ssc, CDB_GROUP0, select, 28018 select_buflen, SD_DONTSAVE_PAGE, SD_PATH_STANDARD); 28019 } 28020 sd_ssc_fini(ssc); 28021 28022 kmem_free(sense, sense_buflen); 28023 kmem_free(select, select_buflen); 28024 return (rval); 28025 } 28026 28027 28028 /* 28029 * Function: sr_read_sony_session_offset() 28030 * 28031 * Description: This routine is the driver entry point for handling CD-ROM 28032 * ioctl requests for session offset information. (CDROMREADOFFSET) 28033 * The address of the first track in the last session of a 28034 * multi-session CD-ROM is returned. 28035 * 28036 * Note: This routine uses a vendor specific key value in the 28037 * command control field without implementing any vendor check here 28038 * or in the ioctl routine. 28039 * 28040 * Arguments: dev - the device 'dev_t' 28041 * data - pointer to an int to hold the requested address 28042 * flag - this argument is a pass through to ddi_copyxxx() 28043 * directly from the mode argument of ioctl(). 28044 * 28045 * Return Code: the code returned by sd_send_scsi_cmd() 28046 * EFAULT if ddi_copyxxx() fails 28047 * ENXIO if fail ddi_get_soft_state 28048 * EINVAL if data pointer is NULL 28049 */ 28050 28051 static int 28052 sr_read_sony_session_offset(dev_t dev, caddr_t data, int flag) 28053 { 28054 struct sd_lun *un; 28055 struct uscsi_cmd *com; 28056 caddr_t buffer; 28057 char cdb[CDB_GROUP1]; 28058 int session_offset = 0; 28059 int rval; 28060 28061 if (data == NULL) { 28062 return (EINVAL); 28063 } 28064 28065 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL || 28066 (un->un_state == SD_STATE_OFFLINE)) { 28067 return (ENXIO); 28068 } 28069 28070 buffer = kmem_zalloc((size_t)SONY_SESSION_OFFSET_LEN, KM_SLEEP); 28071 bzero(cdb, CDB_GROUP1); 28072 cdb[0] = SCMD_READ_TOC; 28073 /* 28074 * Bytes 7 & 8 are the 12 byte allocation length for a single entry.
28075 * (4 byte TOC response header + 8 byte response data) 28076 */ 28077 cdb[8] = SONY_SESSION_OFFSET_LEN; 28078 /* Byte 9 is the control byte. A vendor specific value is used */ 28079 cdb[9] = SONY_SESSION_OFFSET_KEY; 28080 com = kmem_zalloc(sizeof (*com), KM_SLEEP); 28081 com->uscsi_cdb = cdb; 28082 com->uscsi_cdblen = CDB_GROUP1; 28083 com->uscsi_bufaddr = buffer; 28084 com->uscsi_buflen = SONY_SESSION_OFFSET_LEN; 28085 com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT|USCSI_READ; 28086 28087 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_SYSSPACE, 28088 SD_PATH_STANDARD); 28089 if (rval != 0) { 28090 kmem_free(buffer, SONY_SESSION_OFFSET_LEN); 28091 kmem_free(com, sizeof (*com)); 28092 return (rval); 28093 } 28094 if (buffer[1] == SONY_SESSION_OFFSET_VALID) { 28095 session_offset = 28096 ((uchar_t)buffer[8] << 24) + ((uchar_t)buffer[9] << 16) + 28097 ((uchar_t)buffer[10] << 8) + ((uchar_t)buffer[11]); 28098 /* 28099 * The offset is returned in current lbasize blocks. Convert it 28100 * to 2k blocks before returning it to the user. 28101 */ 28102 if (un->un_tgt_blocksize == CDROM_BLK_512) { 28103 session_offset >>= 2; 28104 } else if (un->un_tgt_blocksize == CDROM_BLK_1024) { 28105 session_offset >>= 1; 28106 } 28107 } 28108 28109 if (ddi_copyout(&session_offset, data, sizeof (int), flag) != 0) { 28110 rval = EFAULT; 28111 } 28112 28113 kmem_free(buffer, SONY_SESSION_OFFSET_LEN); 28114 kmem_free(com, sizeof (*com)); 28115 return (rval); 28116 } 28117 28118 28119 /* 28120 * Function: sd_wm_cache_constructor() 28121 * 28122 * Description: Cache Constructor for the wmap cache for the read/modify/write 28123 * devices. 28124 * 28125 * Arguments: wm - A pointer to the sd_w_map to be initialized. 28126 * un - sd_lun structure for the device. 28127 * flag - the km flags passed to constructor 28128 * 28129 * Return Code: 0 on success. 28130 * -1 on failure. 28131 */ 28132 28133 /*ARGSUSED*/ 28134 static int 28135 sd_wm_cache_constructor(void *wm, void *un, int flags) 28136 { 28137 bzero(wm, sizeof (struct sd_w_map)); 28138 cv_init(&((struct sd_w_map *)wm)->wm_avail, NULL, CV_DRIVER, NULL); 28139 return (0); 28140 } 28141 28142 28143 /* 28144 * Function: sd_wm_cache_destructor() 28145 * 28146 * Description: Cache destructor for the wmap cache for the read/modify/write 28147 * devices. 28148 * 28149 * Arguments: wm - A pointer to the sd_w_map to be destroyed. 28150 * un - sd_lun structure for the device. 28151 */ 28152 /*ARGSUSED*/ 28153 static void 28154 sd_wm_cache_destructor(void *wm, void *un) 28155 { 28156 cv_destroy(&((struct sd_w_map *)wm)->wm_avail); 28157 } 28158 28159 28160 /* 28161 * Function: sd_range_lock() 28162 * 28163 * Description: Lock the specified range of blocks to ensure 28164 * that a read-modify-write is atomic and no other i/o writes 28165 * to the same location. The range is specified in terms 28166 * of start and end blocks. Block numbers are the actual 28167 * media block numbers and not system block numbers. 28168 * 28169 * Arguments: un - sd_lun structure for the device. 28170 * startb - The starting block number 28171 * endb - The end block number 28172 * typ - type of i/o - simple/read_modify_write 28173 * 28174 * Return Code: wm - pointer to the wmap structure. 28175 * 28176 * Context: This routine can sleep.
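 *
 * The routine is implemented as a small state machine:
 *
 *	SD_WM_CHK_LIST   - look for a busy wmap overlapping the range
 *	SD_WM_LOCK_RANGE - allocate a wmap and link it onto un_wm
 *	SD_WM_WAIT_MAP   - sleep on the busy wmap's wm_avail cv, recheck
 *	SD_WM_DONE       - the range is locked; return the wmap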
28177 */ 28178 28179 static struct sd_w_map * 28180 sd_range_lock(struct sd_lun *un, daddr_t startb, daddr_t endb, ushort_t typ) 28181 { 28182 struct sd_w_map *wmp = NULL; 28183 struct sd_w_map *sl_wmp = NULL; 28184 struct sd_w_map *tmp_wmp; 28185 wm_state state = SD_WM_CHK_LIST; 28186 28187 28188 ASSERT(un != NULL); 28189 ASSERT(!mutex_owned(SD_MUTEX(un))); 28190 28191 mutex_enter(SD_MUTEX(un)); 28192 28193 while (state != SD_WM_DONE) { 28194 28195 switch (state) { 28196 case SD_WM_CHK_LIST: 28197 /* 28198 * This is the starting state. Check the wmap list 28199 * to see if the range is currently available. 28200 */ 28201 if (!(typ & SD_WTYPE_RMW) && !(un->un_rmw_count)) { 28202 /* 28203 * If this is a simple write and no rmw 28204 * i/o is pending, then try to lock the 28205 * range as the range should be available. 28206 */ 28207 state = SD_WM_LOCK_RANGE; 28208 } else { 28209 tmp_wmp = sd_get_range(un, startb, endb); 28210 if (tmp_wmp != NULL) { 28211 if ((wmp != NULL) && ONLIST(un, wmp)) { 28212 /* 28213 * Should not keep onlist wmps 28214 * while waiting; this macro 28215 * also sets wmp = NULL. 28216 */ 28217 FREE_ONLIST_WMAP(un, wmp); 28218 } 28219 /* 28220 * sl_wmp is the wmap on which the wait 28221 * is done. Since tmp_wmp points 28222 * to the in-use wmap, set sl_wmp to 28223 * tmp_wmp and change the state to sleep. 28224 */ 28225 sl_wmp = tmp_wmp; 28226 state = SD_WM_WAIT_MAP; 28227 } else { 28228 state = SD_WM_LOCK_RANGE; 28229 } 28230 28231 } 28232 break; 28233 28234 case SD_WM_LOCK_RANGE: 28235 ASSERT(un->un_wm_cache); 28236 /* 28237 * The range needs to be locked, so try to get a wmap. 28238 * First attempt it with KM_NOSLEEP: we want to avoid 28239 * sleeping if possible, as we would have to release 28240 * the sd mutex in order to sleep. 28241 */ 28242 if (wmp == NULL) 28243 wmp = kmem_cache_alloc(un->un_wm_cache, 28244 KM_NOSLEEP); 28245 if (wmp == NULL) { 28246 mutex_exit(SD_MUTEX(un)); 28247 _NOTE(DATA_READABLE_WITHOUT_LOCK 28248 (sd_lun::un_wm_cache)) 28249 wmp = kmem_cache_alloc(un->un_wm_cache, 28250 KM_SLEEP); 28251 mutex_enter(SD_MUTEX(un)); 28252 /* 28253 * We released the mutex, so recheck by going 28254 * back to the check list state. 28255 */ 28256 state = SD_WM_CHK_LIST; 28257 } else { 28258 /* 28259 * We exit the state machine since we 28260 * have the wmap. Do the housekeeping first: 28261 * place the wmap on the wmap list if it is not 28262 * on it already, and then set the state to done. 28263 */ 28264 wmp->wm_start = startb; 28265 wmp->wm_end = endb; 28266 wmp->wm_flags = typ | SD_WM_BUSY; 28267 if (typ & SD_WTYPE_RMW) { 28268 un->un_rmw_count++; 28269 } 28270 /* 28271 * If not already on the list, link it in. 28272 */ 28273 if (!ONLIST(un, wmp)) { 28274 wmp->wm_next = un->un_wm; 28275 wmp->wm_prev = NULL; 28276 if (wmp->wm_next) 28277 wmp->wm_next->wm_prev = wmp; 28278 un->un_wm = wmp; 28279 } 28280 state = SD_WM_DONE; 28281 } 28282 break; 28283 28284 case SD_WM_WAIT_MAP: 28285 ASSERT(sl_wmp->wm_flags & SD_WM_BUSY); 28286 /* 28287 * Wait is done on sl_wmp, which is set in the 28288 * check_list state. 28289 */ 28290 sl_wmp->wm_wanted_count++; 28291 cv_wait(&sl_wmp->wm_avail, SD_MUTEX(un)); 28292 sl_wmp->wm_wanted_count--; 28293 /* 28294 * We can reuse the memory from the completed sl_wmp 28295 * lock range for our new lock, but only if no one is 28296 * waiting for it.
28297 */ 28298 ASSERT(!(sl_wmp->wm_flags & SD_WM_BUSY)); 28299 if (sl_wmp->wm_wanted_count == 0) { 28300 if (wmp != NULL) 28301 CHK_N_FREEWMP(un, wmp); 28302 wmp = sl_wmp; 28303 } 28304 sl_wmp = NULL; 28305 /* 28306 * After waking up, need to recheck for availability of 28307 * range. 28308 */ 28309 state = SD_WM_CHK_LIST; 28310 break; 28311 28312 default: 28313 panic("sd_range_lock: " 28314 "Unknown state %d in sd_range_lock", state); 28315 /*NOTREACHED*/ 28316 } /* switch(state) */ 28317 28318 } /* while(state != SD_WM_DONE) */ 28319 28320 mutex_exit(SD_MUTEX(un)); 28321 28322 ASSERT(wmp != NULL); 28323 28324 return (wmp); 28325 } 28326 28327 28328 /* 28329 * Function: sd_get_range() 28330 * 28331 * Description: Find if there any overlapping I/O to this one 28332 * Returns the write-map of 1st such I/O, NULL otherwise. 28333 * 28334 * Arguments: un - sd_lun structure for the device. 28335 * startb - The starting block number 28336 * endb - The end block number 28337 * 28338 * Return Code: wm - pointer to the wmap structure. 28339 */ 28340 28341 static struct sd_w_map * 28342 sd_get_range(struct sd_lun *un, daddr_t startb, daddr_t endb) 28343 { 28344 struct sd_w_map *wmp; 28345 28346 ASSERT(un != NULL); 28347 28348 for (wmp = un->un_wm; wmp != NULL; wmp = wmp->wm_next) { 28349 if (!(wmp->wm_flags & SD_WM_BUSY)) { 28350 continue; 28351 } 28352 if ((startb >= wmp->wm_start) && (startb <= wmp->wm_end)) { 28353 break; 28354 } 28355 if ((endb >= wmp->wm_start) && (endb <= wmp->wm_end)) { 28356 break; 28357 } 28358 } 28359 28360 return (wmp); 28361 } 28362 28363 28364 /* 28365 * Function: sd_free_inlist_wmap() 28366 * 28367 * Description: Unlink and free a write map struct. 28368 * 28369 * Arguments: un - sd_lun structure for the device. 28370 * wmp - sd_w_map which needs to be unlinked. 28371 */ 28372 28373 static void 28374 sd_free_inlist_wmap(struct sd_lun *un, struct sd_w_map *wmp) 28375 { 28376 ASSERT(un != NULL); 28377 28378 if (un->un_wm == wmp) { 28379 un->un_wm = wmp->wm_next; 28380 } else { 28381 wmp->wm_prev->wm_next = wmp->wm_next; 28382 } 28383 28384 if (wmp->wm_next) { 28385 wmp->wm_next->wm_prev = wmp->wm_prev; 28386 } 28387 28388 wmp->wm_next = wmp->wm_prev = NULL; 28389 28390 kmem_cache_free(un->un_wm_cache, wmp); 28391 } 28392 28393 28394 /* 28395 * Function: sd_range_unlock() 28396 * 28397 * Description: Unlock the range locked by wm. 28398 * Free write map if nobody else is waiting on it. 28399 * 28400 * Arguments: un - sd_lun structure for the device. 28401 * wmp - sd_w_map which needs to be unlinked. 28402 */ 28403 28404 static void 28405 sd_range_unlock(struct sd_lun *un, struct sd_w_map *wm) 28406 { 28407 ASSERT(un != NULL); 28408 ASSERT(wm != NULL); 28409 ASSERT(!mutex_owned(SD_MUTEX(un))); 28410 28411 mutex_enter(SD_MUTEX(un)); 28412 28413 if (wm->wm_flags & SD_WTYPE_RMW) { 28414 un->un_rmw_count--; 28415 } 28416 28417 if (wm->wm_wanted_count) { 28418 wm->wm_flags = 0; 28419 /* 28420 * Broadcast that the wmap is available now. 28421 */ 28422 cv_broadcast(&wm->wm_avail); 28423 } else { 28424 /* 28425 * If no one is waiting on the map, it should be free'ed. 28426 */ 28427 sd_free_inlist_wmap(un, wm); 28428 } 28429 28430 mutex_exit(SD_MUTEX(un)); 28431 } 28432 28433 28434 /* 28435 * Function: sd_read_modify_write_task 28436 * 28437 * Description: Called from a taskq thread to initiate the write phase of 28438 * a read-modify-write request. This is used for targets where 28439 * un->un_sys_blocksize != un->un_tgt_blocksize. 
28440 * 28441 * Arguments: arg - a pointer to the buf(9S) struct for the write command. 28442 * 28443 * Context: Called under taskq thread context. 28444 */ 28445 28446 static void 28447 sd_read_modify_write_task(void *arg) 28448 { 28449 struct sd_mapblocksize_info *bsp; 28450 struct buf *bp; 28451 struct sd_xbuf *xp; 28452 struct sd_lun *un; 28453 28454 bp = arg; /* The bp is given in arg */ 28455 ASSERT(bp != NULL); 28456 28457 /* Get the pointer to the layer-private data struct */ 28458 xp = SD_GET_XBUF(bp); 28459 ASSERT(xp != NULL); 28460 bsp = xp->xb_private; 28461 ASSERT(bsp != NULL); 28462 28463 un = SD_GET_UN(bp); 28464 ASSERT(un != NULL); 28465 ASSERT(!mutex_owned(SD_MUTEX(un))); 28466 28467 SD_TRACE(SD_LOG_IO_RMMEDIA, un, 28468 "sd_read_modify_write_task: entry: buf:0x%p\n", bp); 28469 28470 /* 28471 * This is the write phase of a read-modify-write request, called 28472 * under the context of a taskq thread in response to the completion 28473 * of the read portion of the rmw request under interrupt 28474 * context. The write request must be sent from here down the iostart 28475 * chain as if it were being sent from sd_mapblocksize_iostart(), so 28476 * we use the layer index saved in the layer-private data area. 28477 */ 28478 SD_NEXT_IOSTART(bsp->mbs_layer_index, un, bp); 28479 28480 SD_TRACE(SD_LOG_IO_RMMEDIA, un, 28481 "sd_read_modify_write_task: exit: buf:0x%p\n", bp); 28482 } 28483 28484 28485 /* 28486 * Function: sddump_do_read_of_rmw() 28487 * 28488 * Description: This routine will be called from sddump. If sddump is called 28489 * with an I/O which is not aligned on a device blocksize boundary, 28490 * then the write has to be converted to a read-modify-write. 28491 * Do the read part here in order to keep sddump simple. 28492 * Note that the sd_mutex is held across the call to this 28493 * routine. 28494 * 28495 * Arguments: un - sd_lun 28496 * blkno - block number in terms of media block size. 28497 * nblk - number of blocks. 28498 * bpp - pointer to pointer to the buf structure. On return 28499 * from this function, *bpp points to the valid buffer 28500 * to which the write has to be done. 28501 * 28502 * Return Code: 0 for success or errno-type return code 28503 */ 28504 28505 static int 28506 sddump_do_read_of_rmw(struct sd_lun *un, uint64_t blkno, uint64_t nblk, 28507 struct buf **bpp) 28508 { 28509 int err; 28510 int i; 28511 int rval; 28512 struct buf *bp; 28513 struct scsi_pkt *pkt = NULL; 28514 uint32_t target_blocksize; 28515 28516 ASSERT(un != NULL); 28517 ASSERT(mutex_owned(SD_MUTEX(un))); 28518 28519 target_blocksize = un->un_tgt_blocksize; 28520 28521 mutex_exit(SD_MUTEX(un)); 28522 28523 bp = scsi_alloc_consistent_buf(SD_ADDRESS(un), (struct buf *)NULL, 28524 (size_t)(nblk * target_blocksize), B_READ, NULL_FUNC, NULL); 28525 if (bp == NULL) { 28526 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 28527 "no resources for dumping; giving up"); 28528 err = ENOMEM; 28529 goto done; 28530 } 28531 28532 rval = sd_setup_rw_pkt(un, &pkt, bp, 0, NULL_FUNC, NULL, 28533 blkno, nblk); 28534 if (rval != 0) { 28535 scsi_free_consistent_buf(bp); 28536 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 28537 "no resources for dumping; giving up"); 28538 err = ENOMEM; 28539 goto done; 28540 } 28541 28542 pkt->pkt_flags |= FLAG_NOINTR; 28543 28544 err = EIO; 28545 for (i = 0; i < SD_NDUMP_RETRIES; i++) { 28546 28547 /* 28548 * Scsi_poll returns 0 (success) if the command completes and 28549 * the status block is STATUS_GOOD.
We should only check 28550 * errors if this condition is not true. Even then we should 28551 * send our own request sense packet only if we have a check 28552 * condition and auto request sense has not been performed by 28553 * the hba. 28554 */ 28555 SD_TRACE(SD_LOG_DUMP, un, "sddump: sending read\n"); 28556 28557 if ((sd_scsi_poll(un, pkt) == 0) && (pkt->pkt_resid == 0)) { 28558 err = 0; 28559 break; 28560 } 28561 28562 /* 28563 * Check CMD_DEV_GONE 1st, give up if device is gone, 28564 * no need to read RQS data. 28565 */ 28566 if (pkt->pkt_reason == CMD_DEV_GONE) { 28567 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 28568 "Error while dumping state with rmw..." 28569 "Device is gone\n"); 28570 break; 28571 } 28572 28573 if (SD_GET_PKT_STATUS(pkt) == STATUS_CHECK) { 28574 SD_INFO(SD_LOG_DUMP, un, 28575 "sddump: read failed with CHECK, try # %d\n", i); 28576 if (((pkt->pkt_state & STATE_ARQ_DONE) == 0)) { 28577 (void) sd_send_polled_RQS(un); 28578 } 28579 28580 continue; 28581 } 28582 28583 if (SD_GET_PKT_STATUS(pkt) == STATUS_BUSY) { 28584 int reset_retval = 0; 28585 28586 SD_INFO(SD_LOG_DUMP, un, 28587 "sddump: read failed with BUSY, try # %d\n", i); 28588 28589 if (un->un_f_lun_reset_enabled == TRUE) { 28590 reset_retval = scsi_reset(SD_ADDRESS(un), 28591 RESET_LUN); 28592 } 28593 if (reset_retval == 0) { 28594 (void) scsi_reset(SD_ADDRESS(un), RESET_TARGET); 28595 } 28596 (void) sd_send_polled_RQS(un); 28597 28598 } else { 28599 SD_INFO(SD_LOG_DUMP, un, 28600 "sddump: read failed with 0x%x, try # %d\n", 28601 SD_GET_PKT_STATUS(pkt), i); 28602 mutex_enter(SD_MUTEX(un)); 28603 sd_reset_target(un, pkt); 28604 mutex_exit(SD_MUTEX(un)); 28605 } 28606 28607 /* 28608 * If we are not getting anywhere with lun/target resets, 28609 * let's reset the bus. 28610 */ 28611 if (i > SD_NDUMP_RETRIES/2) { 28612 (void) scsi_reset(SD_ADDRESS(un), RESET_ALL); 28613 (void) sd_send_polled_RQS(un); 28614 } 28615 28616 } 28617 scsi_destroy_pkt(pkt); 28618 28619 if (err != 0) { 28620 scsi_free_consistent_buf(bp); 28621 *bpp = NULL; 28622 } else { 28623 *bpp = bp; 28624 } 28625 28626 done: 28627 mutex_enter(SD_MUTEX(un)); 28628 return (err); 28629 } 28630 28631 28632 /* 28633 * Function: sd_failfast_flushq 28634 * 28635 * Description: Take all bp's on the wait queue that have B_FAILFAST set 28636 * in b_flags and move them onto the failfast queue, then kick 28637 * off a thread to return all bp's on the failfast queue to 28638 * their owners with an error set. 28639 * 28640 * Arguments: un - pointer to the soft state struct for the instance. 28641 * 28642 * Context: may execute in interrupt context. 28643 */ 28644 28645 static void 28646 sd_failfast_flushq(struct sd_lun *un) 28647 { 28648 struct buf *bp; 28649 struct buf *next_waitq_bp; 28650 struct buf *prev_waitq_bp = NULL; 28651 28652 ASSERT(un != NULL); 28653 ASSERT(mutex_owned(SD_MUTEX(un))); 28654 ASSERT(un->un_failfast_state == SD_FAILFAST_ACTIVE); 28655 ASSERT(un->un_failfast_bp == NULL); 28656 28657 SD_TRACE(SD_LOG_IO_FAILFAST, un, 28658 "sd_failfast_flushq: entry: un:0x%p\n", un); 28659 28660 /* 28661 * Check if we should flush all bufs when entering failfast state, or 28662 * just those with B_FAILFAST set. 28663 */ 28664 if (sd_failfast_flushctl & SD_FAILFAST_FLUSH_ALL_BUFS) { 28665 /* 28666 * Move *all* bp's on the wait queue to the failfast flush 28667 * queue, including those that do NOT have B_FAILFAST set. 
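 * The waitq is spliced onto the failfast queue in constant time by
 * relinking the head and tail pointers; the loop below only walks
 * the moved bufs to update their wait queue kstats.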
28668 */ 28669 if (un->un_failfast_headp == NULL) { 28670 ASSERT(un->un_failfast_tailp == NULL); 28671 un->un_failfast_headp = un->un_waitq_headp; 28672 } else { 28673 ASSERT(un->un_failfast_tailp != NULL); 28674 un->un_failfast_tailp->av_forw = un->un_waitq_headp; 28675 } 28676 28677 un->un_failfast_tailp = un->un_waitq_tailp; 28678 28679 /* update kstat for each bp moved out of the waitq */ 28680 for (bp = un->un_waitq_headp; bp != NULL; bp = bp->av_forw) { 28681 SD_UPDATE_KSTATS(un, kstat_waitq_exit, bp); 28682 } 28683 28684 /* empty the waitq */ 28685 un->un_waitq_headp = un->un_waitq_tailp = NULL; 28686 28687 } else { 28688 /* 28689 * Go thru the wait queue, pick off all entries with 28690 * B_FAILFAST set, and move these onto the failfast queue. 28691 */ 28692 for (bp = un->un_waitq_headp; bp != NULL; bp = next_waitq_bp) { 28693 /* 28694 * Save the pointer to the next bp on the wait queue, 28695 * so we get to it on the next iteration of this loop. 28696 */ 28697 next_waitq_bp = bp->av_forw; 28698 28699 /* 28700 * If this bp from the wait queue does NOT have 28701 * B_FAILFAST set, just move on to the next element 28702 * in the wait queue. Note, this is the only place 28703 * where it is correct to set prev_waitq_bp. 28704 */ 28705 if ((bp->b_flags & B_FAILFAST) == 0) { 28706 prev_waitq_bp = bp; 28707 continue; 28708 } 28709 28710 /* 28711 * Remove the bp from the wait queue. 28712 */ 28713 if (bp == un->un_waitq_headp) { 28714 /* The bp is the first element of the waitq. */ 28715 un->un_waitq_headp = next_waitq_bp; 28716 if (un->un_waitq_headp == NULL) { 28717 /* The wait queue is now empty */ 28718 un->un_waitq_tailp = NULL; 28719 } 28720 } else { 28721 /* 28722 * The bp is either somewhere in the middle 28723 * or at the end of the wait queue. 28724 */ 28725 ASSERT(un->un_waitq_headp != NULL); 28726 ASSERT(prev_waitq_bp != NULL); 28727 ASSERT((prev_waitq_bp->b_flags & B_FAILFAST) 28728 == 0); 28729 if (bp == un->un_waitq_tailp) { 28730 /* bp is the last entry on the waitq. */ 28731 ASSERT(next_waitq_bp == NULL); 28732 un->un_waitq_tailp = prev_waitq_bp; 28733 } 28734 prev_waitq_bp->av_forw = next_waitq_bp; 28735 } 28736 bp->av_forw = NULL; 28737 28738 /* 28739 * update kstat since the bp is moved out of 28740 * the waitq 28741 */ 28742 SD_UPDATE_KSTATS(un, kstat_waitq_exit, bp); 28743 28744 /* 28745 * Now put the bp onto the failfast queue. 28746 */ 28747 if (un->un_failfast_headp == NULL) { 28748 /* failfast queue is currently empty */ 28749 ASSERT(un->un_failfast_tailp == NULL); 28750 un->un_failfast_headp = 28751 un->un_failfast_tailp = bp; 28752 } else { 28753 /* Add the bp to the end of the failfast q */ 28754 ASSERT(un->un_failfast_tailp != NULL); 28755 ASSERT(un->un_failfast_tailp->b_flags & 28756 B_FAILFAST); 28757 un->un_failfast_tailp->av_forw = bp; 28758 un->un_failfast_tailp = bp; 28759 } 28760 } 28761 } 28762 28763 /* 28764 * Now return all bp's on the failfast queue to their owners. 28765 */ 28766 while ((bp = un->un_failfast_headp) != NULL) { 28767 28768 un->un_failfast_headp = bp->av_forw; 28769 if (un->un_failfast_headp == NULL) { 28770 un->un_failfast_tailp = NULL; 28771 } 28772 28773 /* 28774 * We want to return the bp with a failure error code, but 28775 * we do not want a call to sd_start_cmds() to occur here, 28776 * so use sd_return_failed_command_no_restart() instead of 28777 * sd_return_failed_command(). 28778 */ 28779 sd_return_failed_command_no_restart(un, bp, EIO); 28780 } 28781 28782 /* Flush the xbuf queues if required. 
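 * ddi_xbuf_flushq() applies sd_failfast_flushq_callback() below to
 * each buf still held at the xbuf layer and flushes those for which
 * the callback returns TRUE.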
*/ 28783 if (sd_failfast_flushctl & SD_FAILFAST_FLUSH_ALL_QUEUES) { 28784 ddi_xbuf_flushq(un->un_xbuf_attr, sd_failfast_flushq_callback); 28785 } 28786 28787 SD_TRACE(SD_LOG_IO_FAILFAST, un, 28788 "sd_failfast_flushq: exit: un:0x%p\n", un); 28789 } 28790 28791 28792 /* 28793 * Function: sd_failfast_flushq_callback 28794 * 28795 * Description: Return TRUE if the given bp meets the criteria for failfast 28796 * flushing. Used with ddi_xbuf_flushq(9F). 28797 * 28798 * Arguments: bp - ptr to buf struct to be examined. 28799 * 28800 * Context: Any 28801 */ 28802 28803 static int 28804 sd_failfast_flushq_callback(struct buf *bp) 28805 { 28806 /* 28807 * Return TRUE if (1) we want to flush ALL bufs when the failfast 28808 * state is entered; OR (2) the given bp has B_FAILFAST set. 28809 */ 28810 return (((sd_failfast_flushctl & SD_FAILFAST_FLUSH_ALL_BUFS) || 28811 (bp->b_flags & B_FAILFAST)) ? TRUE : FALSE); 28812 } 28813 28814 28815 28816 /* 28817 * Function: sd_setup_next_xfer 28818 * 28819 * Description: Prepare next I/O operation using DMA_PARTIAL 28820 * 28821 */ 28822 28823 static int 28824 sd_setup_next_xfer(struct sd_lun *un, struct buf *bp, 28825 struct scsi_pkt *pkt, struct sd_xbuf *xp) 28826 { 28827 ssize_t num_blks_not_xfered; 28828 daddr_t strt_blk_num; 28829 ssize_t bytes_not_xfered; 28830 int rval; 28831 28832 ASSERT(pkt->pkt_resid == 0); 28833 28834 /* 28835 * Calculate next block number and amount to be transferred. 28836 * 28837 * How much data NOT transferred to the HBA yet. 28838 */ 28839 bytes_not_xfered = xp->xb_dma_resid; 28840 28841 /* 28842 * Figure out how many blocks NOT transferred to the HBA yet. 28843 */ 28844 num_blks_not_xfered = SD_BYTES2TGTBLOCKS(un, bytes_not_xfered); 28845 28846 /* 28847 * Set the starting block number to the end of what WAS transferred. 28848 */ 28849 strt_blk_num = xp->xb_blkno + 28850 SD_BYTES2TGTBLOCKS(un, bp->b_bcount - bytes_not_xfered); 28851 28852 /* 28853 * Move pkt to the next portion of the xfer. sd_setup_next_rw_pkt 28854 * will call scsi_initpkt with NULL_FUNC so we do not have to release 28855 * the disk mutex here. 28856 */ 28857 rval = sd_setup_next_rw_pkt(un, pkt, bp, 28858 strt_blk_num, num_blks_not_xfered); 28859 28860 if (rval == 0) { 28861 28862 /* 28863 * Success. 28864 * 28865 * Adjust things if there are still more blocks to be 28866 * transferred. 28867 */ 28868 xp->xb_dma_resid = pkt->pkt_resid; 28869 pkt->pkt_resid = 0; 28870 28871 return (1); 28872 } 28873 28874 /* 28875 * There's really only one possible error return from 28876 * sd_setup_next_rw_pkt, which occurs when scsi_init_pkt 28877 * returns NULL. 28878 */ 28879 ASSERT(rval == SD_PKT_ALLOC_FAILURE); 28880 28881 bp->b_resid = bp->b_bcount; 28882 bp->b_flags |= B_ERROR; 28883 28884 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 28885 "Error setting up next portion of DMA transfer\n"); 28886 28887 return (0); 28888 } 28889 28890 /* 28891 * Function: sd_panic_for_res_conflict 28892 * 28893 * Description: Call panic with a string formatted with "Reservation Conflict" 28894 * and a human readable identifier indicating the SD instance 28895 * that experienced the reservation conflict. 28896 * 28897 * Arguments: un - pointer to the soft state struct for the instance. 28898 * 28899 * Context: may execute in interrupt context.
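 * The panic string deliberately includes the full device path,
 * obtained via ddi_pathname(9F), so the disk that saw the
 * reservation conflict can be identified from the panic message.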
28900 */ 28901 28902 #define SD_RESV_CONFLICT_FMT_LEN 40 28903 void 28904 sd_panic_for_res_conflict(struct sd_lun *un) 28905 { 28906 char panic_str[SD_RESV_CONFLICT_FMT_LEN+MAXPATHLEN]; 28907 char path_str[MAXPATHLEN]; 28908 28909 (void) snprintf(panic_str, sizeof (panic_str), 28910 "Reservation Conflict\nDisk: %s", 28911 ddi_pathname(SD_DEVINFO(un), path_str)); 28912 28913 panic(panic_str); 28914 } 28915 28916 /* 28917 * Note: The following sd_faultinjection_ioctl() routines implement 28918 * driver support for fault injection, used for error analysis 28919 * by causing faults in multiple layers of the driver. 28920 * 28921 */ 28922 28923 #ifdef SD_FAULT_INJECTION 28924 static uint_t sd_fault_injection_on = 0; 28925 28926 /* 28927 * Function: sd_faultinjection_ioctl() 28928 * 28929 * Description: This routine is the driver entry point for handling 28930 * faultinjection ioctls to inject errors into the 28931 * layer model. 28932 * 28933 * Arguments: cmd - the ioctl cmd received 28934 * arg - the argument from the user; also used to return data 28935 */ 28936 28937 static void 28938 sd_faultinjection_ioctl(int cmd, intptr_t arg, struct sd_lun *un) { 28939 28940 uint_t i = 0; 28941 uint_t rval; 28942 28943 SD_TRACE(SD_LOG_IOERR, un, "sd_faultinjection_ioctl: entry\n"); 28944 28945 mutex_enter(SD_MUTEX(un)); 28946 28947 switch (cmd) { 28948 case SDIOCRUN: 28949 /* Allow pushed faults to be injected */ 28950 SD_INFO(SD_LOG_SDTEST, un, 28951 "sd_faultinjection_ioctl: Injecting Fault Run\n"); 28952 28953 sd_fault_injection_on = 1; 28954 28955 SD_INFO(SD_LOG_IOERR, un, 28956 "sd_faultinjection_ioctl: run finished\n"); 28957 break; 28958 28959 case SDIOCSTART: 28960 /* Start Injection Session */ 28961 SD_INFO(SD_LOG_SDTEST, un, 28962 "sd_faultinjection_ioctl: Injecting Fault Start\n"); 28963 28964 sd_fault_injection_on = 0; 28965 un->sd_injection_mask = 0xFFFFFFFF; 28966 for (i = 0; i < SD_FI_MAX_ERROR; i++) { 28967 un->sd_fi_fifo_pkt[i] = NULL; 28968 un->sd_fi_fifo_xb[i] = NULL; 28969 un->sd_fi_fifo_un[i] = NULL; 28970 un->sd_fi_fifo_arq[i] = NULL; 28971 } 28972 un->sd_fi_fifo_start = 0; 28973 un->sd_fi_fifo_end = 0; 28974 28975 mutex_enter(&(un->un_fi_mutex)); 28976 un->sd_fi_log[0] = '\0'; 28977 un->sd_fi_buf_len = 0; 28978 mutex_exit(&(un->un_fi_mutex)); 28979 28980 SD_INFO(SD_LOG_IOERR, un, 28981 "sd_faultinjection_ioctl: start finished\n"); 28982 break; 28983 28984 case SDIOCSTOP: 28985 /* Stop Injection Session */ 28986 SD_INFO(SD_LOG_SDTEST, un, 28987 "sd_faultinjection_ioctl: Injecting Fault Stop\n"); 28988 sd_fault_injection_on = 0; 28989 un->sd_injection_mask = 0x0; 28990 28991 /* Empty stray or unused structs from the fifo */ 28992 for (i = 0; i < SD_FI_MAX_ERROR; i++) { 28993 if (un->sd_fi_fifo_pkt[i] != NULL) { 28994 kmem_free(un->sd_fi_fifo_pkt[i], 28995 sizeof (struct sd_fi_pkt)); 28996 } 28997 if (un->sd_fi_fifo_xb[i] != NULL) { 28998 kmem_free(un->sd_fi_fifo_xb[i], 28999 sizeof (struct sd_fi_xb)); 29000 } 29001 if (un->sd_fi_fifo_un[i] != NULL) { 29002 kmem_free(un->sd_fi_fifo_un[i], 29003 sizeof (struct sd_fi_un)); 29004 } 29005 if (un->sd_fi_fifo_arq[i] != NULL) { 29006 kmem_free(un->sd_fi_fifo_arq[i], 29007 sizeof (struct sd_fi_arq)); 29008 } 29009 un->sd_fi_fifo_pkt[i] = NULL; 29010 un->sd_fi_fifo_un[i] = NULL; 29011 un->sd_fi_fifo_xb[i] = NULL; 29012 un->sd_fi_fifo_arq[i] = NULL; 29013 } 29014 un->sd_fi_fifo_start = 0; 29015 un->sd_fi_fifo_end = 0; 29016 29017 SD_INFO(SD_LOG_IOERR, un, 29018 "sd_faultinjection_ioctl: stop finished\n"); 29019 break; 29020 29021 case SDIOCINSERTPKT: 29022 /*
Store a packet struct to be pushed onto fifo */ 29023 SD_INFO(SD_LOG_SDTEST, un, 29024 "sd_faultinjection_ioctl: Injecting Fault Insert Pkt\n"); 29025 29026 i = un->sd_fi_fifo_end % SD_FI_MAX_ERROR; 29027 29028 sd_fault_injection_on = 0; 29029 29030 /* No more than SD_FI_MAX_ERROR entries allowed in the queue */ 29031 if (un->sd_fi_fifo_pkt[i] != NULL) { 29032 kmem_free(un->sd_fi_fifo_pkt[i], 29033 sizeof (struct sd_fi_pkt)); 29034 } 29035 if (arg != NULL) { 29036 un->sd_fi_fifo_pkt[i] = 29037 kmem_alloc(sizeof (struct sd_fi_pkt), KM_NOSLEEP); 29038 if (un->sd_fi_fifo_pkt[i] == NULL) { 29039 /* Alloc failed; don't store anything */ 29040 break; 29041 } 29042 rval = ddi_copyin((void *)arg, un->sd_fi_fifo_pkt[i], 29043 sizeof (struct sd_fi_pkt), 0); 29044 if (rval == -1) { 29045 kmem_free(un->sd_fi_fifo_pkt[i], 29046 sizeof (struct sd_fi_pkt)); 29047 un->sd_fi_fifo_pkt[i] = NULL; 29048 } 29049 } else { 29050 SD_INFO(SD_LOG_IOERR, un, 29051 "sd_faultinjection_ioctl: pkt null\n"); 29052 } 29053 break; 29054 29055 case SDIOCINSERTXB: 29056 /* Store an xb struct to be pushed onto fifo */ 29057 SD_INFO(SD_LOG_SDTEST, un, 29058 "sd_faultinjection_ioctl: Injecting Fault Insert XB\n"); 29059 29060 i = un->sd_fi_fifo_end % SD_FI_MAX_ERROR; 29061 29062 sd_fault_injection_on = 0; 29063 29064 if (un->sd_fi_fifo_xb[i] != NULL) { 29065 kmem_free(un->sd_fi_fifo_xb[i], 29066 sizeof (struct sd_fi_xb)); 29067 un->sd_fi_fifo_xb[i] = NULL; 29068 } 29069 if (arg != NULL) { 29070 un->sd_fi_fifo_xb[i] = 29071 kmem_alloc(sizeof (struct sd_fi_xb), KM_NOSLEEP); 29072 if (un->sd_fi_fifo_xb[i] == NULL) { 29073 /* Alloc failed; don't store anything */ 29074 break; 29075 } 29076 rval = ddi_copyin((void *)arg, un->sd_fi_fifo_xb[i], 29077 sizeof (struct sd_fi_xb), 0); 29078 29079 if (rval == -1) { 29080 kmem_free(un->sd_fi_fifo_xb[i], 29081 sizeof (struct sd_fi_xb)); 29082 un->sd_fi_fifo_xb[i] = NULL; 29083 } 29084 } else { 29085 SD_INFO(SD_LOG_IOERR, un, 29086 "sd_faultinjection_ioctl: xb null\n"); 29087 } 29088 break; 29089 29090 case SDIOCINSERTUN: 29091 /* Store a un struct to be pushed onto fifo */ 29092 SD_INFO(SD_LOG_SDTEST, un, 29093 "sd_faultinjection_ioctl: Injecting Fault Insert UN\n"); 29094 29095 i = un->sd_fi_fifo_end % SD_FI_MAX_ERROR; 29096 29097 sd_fault_injection_on = 0; 29098 29099 if (un->sd_fi_fifo_un[i] != NULL) { 29100 kmem_free(un->sd_fi_fifo_un[i], 29101 sizeof (struct sd_fi_un)); 29102 un->sd_fi_fifo_un[i] = NULL; 29103 } 29104 if (arg != NULL) { 29105 un->sd_fi_fifo_un[i] = 29106 kmem_alloc(sizeof (struct sd_fi_un), KM_NOSLEEP); 29107 if (un->sd_fi_fifo_un[i] == NULL) { 29108 /* Alloc failed; don't store anything */ 29109 break; 29110 } 29111 rval = ddi_copyin((void *)arg, un->sd_fi_fifo_un[i], 29112 sizeof (struct sd_fi_un), 0); 29113 if (rval == -1) { 29114 kmem_free(un->sd_fi_fifo_un[i], 29115 sizeof (struct sd_fi_un)); 29116 un->sd_fi_fifo_un[i] = NULL; 29117 } 29118 29119 } else { 29120 SD_INFO(SD_LOG_IOERR, un, 29121 "sd_faultinjection_ioctl: un null\n"); 29122 } 29123 29124 break; 29125 29126 case SDIOCINSERTARQ: 29127 /* Store an arq struct to be pushed onto fifo */ 29128 SD_INFO(SD_LOG_SDTEST, un, 29129 "sd_faultinjection_ioctl: Injecting Fault Insert ARQ\n"); 29130 i = un->sd_fi_fifo_end % SD_FI_MAX_ERROR; 29131 29132 sd_fault_injection_on = 0; 29133 29134 if (un->sd_fi_fifo_arq[i] != NULL) { 29135 kmem_free(un->sd_fi_fifo_arq[i], 29136 sizeof (struct sd_fi_arq)); 29137 un->sd_fi_fifo_arq[i] = NULL; 29138 } 29139 if (arg != NULL) { 29140 un->sd_fi_fifo_arq[i] = 29141 kmem_alloc(sizeof (struct
sd_fi_arq), KM_NOSLEEP); 29142 if (un->sd_fi_fifo_arq[i] == NULL) { 29143 /* Alloc failed; don't store anything */ 29144 break; 29145 } 29146 rval = ddi_copyin((void *)arg, un->sd_fi_fifo_arq[i], 29147 sizeof (struct sd_fi_arq), 0); 29148 if (rval == -1) { 29149 kmem_free(un->sd_fi_fifo_arq[i], 29150 sizeof (struct sd_fi_arq)); 29151 un->sd_fi_fifo_arq[i] = NULL; 29152 } 29153 29154 } else { 29155 SD_INFO(SD_LOG_IOERR, un, 29156 "sd_faultinjection_ioctl: arq null\n"); 29157 } 29158 29159 break; 29160 29161 case SDIOCPUSH: 29162 /* Push stored xb, pkt, un, and arq onto fifo */ 29163 sd_fault_injection_on = 0; 29164 29165 if (arg != NULL) { 29166 rval = ddi_copyin((void *)arg, &i, sizeof (uint_t), 0); 29167 if (rval != -1 && 29168 un->sd_fi_fifo_end + i < SD_FI_MAX_ERROR) { 29169 un->sd_fi_fifo_end += i; 29170 } 29171 } else { 29172 SD_INFO(SD_LOG_IOERR, un, 29173 "sd_faultinjection_ioctl: push arg null\n"); 29174 if (un->sd_fi_fifo_end + i < SD_FI_MAX_ERROR) { 29175 un->sd_fi_fifo_end++; 29176 } 29177 } 29178 SD_INFO(SD_LOG_IOERR, un, 29179 "sd_faultinjection_ioctl: push to end=%d\n", 29180 un->sd_fi_fifo_end); 29181 break; 29182 29183 case SDIOCRETRIEVE: 29184 /* Return buffer of log from Injection session */ 29185 SD_INFO(SD_LOG_SDTEST, un, 29186 "sd_faultinjection_ioctl: Injecting Fault Retrieve"); 29187 29188 sd_fault_injection_on = 0; 29189 29190 mutex_enter(&(un->un_fi_mutex)); 29191 rval = ddi_copyout(un->sd_fi_log, (void *)arg, 29192 un->sd_fi_buf_len+1, 0); 29193 mutex_exit(&(un->un_fi_mutex)); 29194 29195 if (rval == -1) { 29196 /* 29197 * arg is possibly invalid; setting 29198 * it to NULL for return 29199 */ 29200 arg = NULL; 29201 } 29202 break; 29203 } 29204 29205 mutex_exit(SD_MUTEX(un)); 29206 SD_TRACE(SD_LOG_IOERR, un, "sd_faultinjection_ioctl:" 29207 " exit\n"); 29208 } 29209 29210 29211 /* 29212 * Function: sd_injection_log() 29213 * 29214 * Description: This routine adds buf to the existing injection log 29215 * for retrieval via faultinjection_ioctl for use in fault 29216 * detection and recovery. 29217 * 29218 * Arguments: buf - the string to add to the log 29219 */ 29220 29221 static void 29222 sd_injection_log(char *buf, struct sd_lun *un) 29223 { 29224 uint_t len; 29225 29226 ASSERT(un != NULL); 29227 ASSERT(buf != NULL); 29228 29229 mutex_enter(&(un->un_fi_mutex)); 29230 29231 len = min(strlen(buf), 255); 29232 /* Add logged value to Injection log to be returned later */ 29233 if (len + un->sd_fi_buf_len < SD_FI_MAX_BUF) { 29234 uint_t offset = strlen((char *)un->sd_fi_log); 29235 char *destp = (char *)un->sd_fi_log + offset; 29236 int i; 29237 for (i = 0; i < len; i++) { 29238 *destp++ = *buf++; 29239 } 29240 un->sd_fi_buf_len += len; 29241 un->sd_fi_log[un->sd_fi_buf_len] = '\0'; 29242 } 29243 29244 mutex_exit(&(un->un_fi_mutex)); 29245 } 29246 29247 29248 /* 29249 * Function: sd_faultinjection() 29250 * 29251 * Description: This routine takes the pkt and changes its 29252 * content based on error injection scenario.
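 * The fifo entries staged by sd_faultinjection_ioctl() are consumed
 * here, one set per completed command; any non-default fields they
 * carry are patched into the pkt, xbuf, un and ARQ data via the
 * SD_CONDSET() macro before the normal completion path sees them.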
29253 * 29254 * Arguments: pktp - packet to be changed 29255 */ 29256 29257 static void 29258 sd_faultinjection(struct scsi_pkt *pktp) 29259 { 29260 uint_t i; 29261 struct sd_fi_pkt *fi_pkt; 29262 struct sd_fi_xb *fi_xb; 29263 struct sd_fi_un *fi_un; 29264 struct sd_fi_arq *fi_arq; 29265 struct buf *bp; 29266 struct sd_xbuf *xb; 29267 struct sd_lun *un; 29268 29269 ASSERT(pktp != NULL); 29270 29271 /* pull bp xb and un from pktp */ 29272 bp = (struct buf *)pktp->pkt_private; 29273 xb = SD_GET_XBUF(bp); 29274 un = SD_GET_UN(bp); 29275 29276 ASSERT(un != NULL); 29277 29278 mutex_enter(SD_MUTEX(un)); 29279 29280 SD_TRACE(SD_LOG_SDTEST, un, 29281 "sd_faultinjection: entry Injection from sdintr\n"); 29282 29283 /* if injection is off return */ 29284 if (sd_fault_injection_on == 0 || 29285 un->sd_fi_fifo_start == un->sd_fi_fifo_end) { 29286 mutex_exit(SD_MUTEX(un)); 29287 return; 29288 } 29289 29290 SD_INFO(SD_LOG_SDTEST, un, 29291 "sd_faultinjection: is working for copying\n"); 29292 29293 /* take next set off fifo */ 29294 i = un->sd_fi_fifo_start % SD_FI_MAX_ERROR; 29295 29296 fi_pkt = un->sd_fi_fifo_pkt[i]; 29297 fi_xb = un->sd_fi_fifo_xb[i]; 29298 fi_un = un->sd_fi_fifo_un[i]; 29299 fi_arq = un->sd_fi_fifo_arq[i]; 29300 29301 29302 /* set variables accordingly */ 29303 /* set pkt if it was on fifo */ 29304 if (fi_pkt != NULL) { 29305 SD_CONDSET(pktp, pkt, pkt_flags, "pkt_flags"); 29306 SD_CONDSET(*pktp, pkt, pkt_scbp, "pkt_scbp"); 29307 if (fi_pkt->pkt_cdbp != 0xff) 29308 SD_CONDSET(*pktp, pkt, pkt_cdbp, "pkt_cdbp"); 29309 SD_CONDSET(pktp, pkt, pkt_state, "pkt_state"); 29310 SD_CONDSET(pktp, pkt, pkt_statistics, "pkt_statistics"); 29311 SD_CONDSET(pktp, pkt, pkt_reason, "pkt_reason"); 29312 29313 } 29314 /* set xb if it was on fifo */ 29315 if (fi_xb != NULL) { 29316 SD_CONDSET(xb, xb, xb_blkno, "xb_blkno"); 29317 SD_CONDSET(xb, xb, xb_dma_resid, "xb_dma_resid"); 29318 if (fi_xb->xb_retry_count != 0) 29319 SD_CONDSET(xb, xb, xb_retry_count, "xb_retry_count"); 29320 SD_CONDSET(xb, xb, xb_victim_retry_count, 29321 "xb_victim_retry_count"); 29322 SD_CONDSET(xb, xb, xb_sense_status, "xb_sense_status"); 29323 SD_CONDSET(xb, xb, xb_sense_state, "xb_sense_state"); 29324 SD_CONDSET(xb, xb, xb_sense_resid, "xb_sense_resid"); 29325 29326 /* copy in block data from sense */ 29327 /* 29328 * if (fi_xb->xb_sense_data[0] != -1) { 29329 * bcopy(fi_xb->xb_sense_data, xb->xb_sense_data, 29330 * SENSE_LENGTH); 29331 * } 29332 */ 29333 bcopy(fi_xb->xb_sense_data, xb->xb_sense_data, SENSE_LENGTH); 29334 29335 /* copy in extended sense codes */ 29336 SD_CONDSET(((struct scsi_extended_sense *)xb->xb_sense_data), 29337 xb, es_code, "es_code"); 29338 SD_CONDSET(((struct scsi_extended_sense *)xb->xb_sense_data), 29339 xb, es_key, "es_key"); 29340 SD_CONDSET(((struct scsi_extended_sense *)xb->xb_sense_data), 29341 xb, es_add_code, "es_add_code"); 29342 SD_CONDSET(((struct scsi_extended_sense *)xb->xb_sense_data), 29343 xb, es_qual_code, "es_qual_code"); 29344 struct scsi_extended_sense *esp; 29345 esp = (struct scsi_extended_sense *)xb->xb_sense_data; 29346 esp->es_class = CLASS_EXTENDED_SENSE; 29347 } 29348 29349 /* set un if it was on fifo */ 29350 if (fi_un != NULL) { 29351 SD_CONDSET(un->un_sd->sd_inq, un, inq_rmb, "inq_rmb"); 29352 SD_CONDSET(un, un, un_ctype, "un_ctype"); 29353 SD_CONDSET(un, un, un_reset_retry_count, 29354 "un_reset_retry_count"); 29355 SD_CONDSET(un, un, un_reservation_type, "un_reservation_type"); 29356 SD_CONDSET(un, un, un_resvd_status, "un_resvd_status"); 29357 SD_CONDSET(un, un, 
un_f_arq_enabled, "un_f_arq_enabled"); 29358 SD_CONDSET(un, un, un_f_allow_bus_device_reset, 29359 "un_f_allow_bus_device_reset"); 29360 SD_CONDSET(un, un, un_f_opt_queueing, "un_f_opt_queueing"); 29361 29362 } 29363 29364 /* copy in auto request sense if it was on fifo */ 29365 if (fi_arq != NULL) { 29366 bcopy(fi_arq, pktp->pkt_scbp, sizeof (struct sd_fi_arq)); 29367 } 29368 29369 /* free structs */ 29370 if (un->sd_fi_fifo_pkt[i] != NULL) { 29371 kmem_free(un->sd_fi_fifo_pkt[i], sizeof (struct sd_fi_pkt)); 29372 } 29373 if (un->sd_fi_fifo_xb[i] != NULL) { 29374 kmem_free(un->sd_fi_fifo_xb[i], sizeof (struct sd_fi_xb)); 29375 } 29376 if (un->sd_fi_fifo_un[i] != NULL) { 29377 kmem_free(un->sd_fi_fifo_un[i], sizeof (struct sd_fi_un)); 29378 } 29379 if (un->sd_fi_fifo_arq[i] != NULL) { 29380 kmem_free(un->sd_fi_fifo_arq[i], sizeof (struct sd_fi_arq)); 29381 } 29382 29383 /* 29384 * kmem_free does not gurantee to set to NULL 29385 * since we uses these to determine if we set 29386 * values or not lets confirm they are always 29387 * NULL after free 29388 */ 29389 un->sd_fi_fifo_pkt[i] = NULL; 29390 un->sd_fi_fifo_un[i] = NULL; 29391 un->sd_fi_fifo_xb[i] = NULL; 29392 un->sd_fi_fifo_arq[i] = NULL; 29393 29394 un->sd_fi_fifo_start++; 29395 29396 mutex_exit(SD_MUTEX(un)); 29397 29398 SD_INFO(SD_LOG_SDTEST, un, "sd_faultinjection: exit\n"); 29399 } 29400 29401 #endif /* SD_FAULT_INJECTION */ 29402 29403 /* 29404 * This routine is invoked in sd_unit_attach(). Before calling it, the 29405 * properties in conf file should be processed already, and "hotpluggable" 29406 * property was processed also. 29407 * 29408 * The sd driver distinguishes 3 different type of devices: removable media, 29409 * non-removable media, and hotpluggable. Below the differences are defined: 29410 * 29411 * 1. Device ID 29412 * 29413 * The device ID of a device is used to identify this device. Refer to 29414 * ddi_devid_register(9F). 29415 * 29416 * For a non-removable media disk device which can provide 0x80 or 0x83 29417 * VPD page (refer to INQUIRY command of SCSI SPC specification), a unique 29418 * device ID is created to identify this device. For other non-removable 29419 * media devices, a default device ID is created only if this device has 29420 * at least 2 alter cylinders. Otherwise, this device has no devid. 29421 * 29422 * ------------------------------------------------------- 29423 * removable media hotpluggable | Can Have Device ID 29424 * ------------------------------------------------------- 29425 * false false | Yes 29426 * false true | Yes 29427 * true x | No 29428 * ------------------------------------------------------ 29429 * 29430 * 29431 * 2. SCSI group 4 commands 29432 * 29433 * In SCSI specs, only some commands in group 4 command set can use 29434 * 8-byte addresses that can be used to access >2TB storage spaces. 29435 * Other commands have no such capability. Without supporting group4, 29436 * it is impossible to make full use of storage spaces of a disk with 29437 * capacity larger than 2TB. 29438 * 29439 * ----------------------------------------------- 29440 * removable media hotpluggable LP64 | Group 29441 * ----------------------------------------------- 29442 * false false false | 1 29443 * false false true | 4 29444 * false true false | 1 29445 * false true true | 4 29446 * true x x | 5 29447 * ----------------------------------------------- 29448 * 29449 * 29450 * 3. 
Check for VTOC Label 29451 * 29452 * If a direct-access disk has no EFI label, sd will check whether it has a 29453 * valid VTOC label. Now, sd also does that check for removable media 29454 * and hotpluggable devices. 29455 * 29456 * -------------------------------------------------------------- 29457 * Direct-Access removable media hotpluggable | Check Label 29458 * -------------------------------------------------------------- 29459 * false false false | No 29460 * false false true | No 29461 * false true false | Yes 29462 * false true true | Yes 29463 * true x x | Yes 29464 * -------------------------------------------------------------- 29465 * 29466 * 29467 * 4. Building default VTOC label 29468 * 29469 * As section 3 says, sd checks whether some kinds of devices have a VTOC 29470 * label. If those devices have no valid VTOC label, sd(7d) will attempt to 29471 * create a default VTOC label for them. Currently sd creates a default VTOC 29472 * label for all devices on the x86 platform (VTOC_16), but only for removable 29473 * media devices on SPARC (VTOC_8). 29474 * 29475 * ----------------------------------------------------------- 29476 * removable media hotpluggable platform | Default Label 29477 * ----------------------------------------------------------- 29478 * false false sparc | No * false false x86 | Yes 29479 * false true x86 | Yes 29480 * false true sparc | Yes 29481 * true x x | Yes 29482 * ----------------------------------------------------------- 29483 * 29484 * 29485 * 5. Supported blocksizes of target devices 29486 * 29487 * sd supports non-512-byte blocksizes for removable media devices only. 29488 * For other devices, only a 512-byte blocksize is supported. This may 29489 * change in the near future because some RAID devices require a 29490 * non-512-byte blocksize. 29491 * 29492 * ----------------------------------------------------------- 29493 * removable media hotpluggable | non-512-byte blocksize 29494 * ----------------------------------------------------------- 29495 * false false | No 29496 * false true | No 29497 * true x | Yes 29498 * ----------------------------------------------------------- 29499 * 29500 * 29501 * 6. Automatic mount & unmount 29502 * 29503 * The sd(7d) driver provides the DKIOCREMOVABLE ioctl, which is used to 29504 * query whether a device is a removable media device. It returns 1 for 29505 * removable media devices, and 0 for others. 29506 * 29507 * The automatic mounting subsystem should distinguish between the types 29508 * of devices and apply automounting policies to each. 29509 * 29510 * 29511 * 7. fdisk partition management 29512 * 29513 * Fdisk is the traditional partitioning method on the x86 platform. The 29514 * sd(7d) driver supports fdisk partitions only on x86; on SPARC, sd 29515 * doesn't support fdisk partitions at all. Note: pcfs(7fs) can recognize 29516 * fdisk partitions on both the x86 and SPARC platforms. 29517 * 29518 * ----------------------------------------------------------- 29519 * platform removable media USB/1394 | fdisk supported 29520 * ----------------------------------------------------------- 29521 * x86 X X | true 29522 * ------------------------------------------------------------ 29523 * sparc X X | false 29524 * ------------------------------------------------------------ 29525 * 29526 * 29527 * 8. MBOOT/MBR 29528 * 29529 * Although sd(7d) doesn't support fdisk on the SPARC platform, it does 29530 * support reading/writing the mboot for removable media devices on SPARC. 
29531 * 29532 * ----------------------------------------------------------- 29533 * platform removable media USB/1394 | mboot supported 29534 * ----------------------------------------------------------- 29535 * x86 X X | true 29536 * ------------------------------------------------------------ 29537 * sparc false false | false 29538 * sparc false true | true 29539 * sparc true false | true 29540 * sparc true true | true 29541 * ------------------------------------------------------------ 29542 * 29543 * 29544 * 9. Error handling when opening a device 29545 * 29546 * If opening a disk device fails, an errno is returned. For some kinds 29547 * of errors, a different errno is returned depending on whether the 29548 * device is a removable media device. This brings USB/1394 hard disks in 29549 * line with expected hard disk behavior. It is not expected to break any 29550 * applications. 29551 * 29552 * ------------------------------------------------------ 29553 * removable media hotpluggable | errno 29554 * ------------------------------------------------------ 29555 * false false | EIO 29556 * false true | EIO 29557 * true x | ENXIO 29558 * ------------------------------------------------------ 29559 * 29560 * 29561 * 10. ioctls: DKIOCEJECT, CDROMEJECT 29562 * 29563 * These ioctls are applicable only to removable media devices. 29564 * 29565 * ----------------------------------------------------------- 29566 * removable media hotpluggable | DKIOCEJECT, CDROMEJECT 29567 * ----------------------------------------------------------- 29568 * false false | No 29569 * false true | No 29570 * true x | Yes 29571 * ----------------------------------------------------------- 29572 * 29573 * 29574 * 11. Kstats for partitions 29575 * 29576 * sd creates partition kstats for non-removable media devices. USB and 29577 * FireWire hard disks now have partition kstats. 29578 * 29579 * ------------------------------------------------------ 29580 * removable media hotpluggable | kstat 29581 * ------------------------------------------------------ 29582 * false false | Yes 29583 * false true | Yes 29584 * true x | No 29585 * ------------------------------------------------------ 29586 * 29587 * 29588 * 12. Removable media & hotpluggable properties 29589 * 29590 * The sd driver creates a "removable-media" property for removable media 29591 * devices. Parent nexus drivers create a "hotpluggable" property if 29592 * they support hotplugging. 29593 * 29594 * --------------------------------------------------------------------- 29595 * removable media hotpluggable | "removable-media" "hotpluggable" 29596 * --------------------------------------------------------------------- 29597 * false false | No No 29598 * false true | No Yes 29599 * true false | Yes No 29600 * true true | Yes Yes 29601 * --------------------------------------------------------------------- 29602 * 29603 * 29604 * 13. Power Management 29605 * 29606 * sd only power manages removable media devices or devices that support 29607 * LOG_SENSE or have a "pm-capable" property (PSARC/2002/250). 29608 * 29609 * A parent nexus that supports hotplugging can also set "pm-capable" 29610 * if the disk can be power managed. 
29611 * 29612 * ------------------------------------------------------------ 29613 * removable media hotpluggable pm-capable | power manage 29614 * ------------------------------------------------------------ 29615 * false false false | No 29616 * false false true | Yes 29617 * false true false | No 29618 * false true true | Yes 29619 * true x x | Yes 29620 * ------------------------------------------------------------ 29621 * 29622 * USB and FireWire hard disks can now be power managed independently 29623 * of the framebuffer. 29624 * 29625 * 29626 * 14. Support for USB disks with capacity larger than 1TB 29627 * 29628 * Currently, sd doesn't permit a fixed disk device with a capacity 29629 * larger than 1TB to be used in a 32-bit operating system environment. 29630 * However, sd doesn't apply this restriction to removable media devices. 29631 * Instead, it assumes that removable media devices cannot have a capacity 29632 * larger than 1TB. Therefore, using those devices on a 32-bit system is 29633 * only partially supported, which can cause unexpected results. 29634 * 29635 * --------------------------------------------------------------------- 29636 * removable media USB/1394 | Capacity > 1TB | Used in 32-bit env 29637 * --------------------------------------------------------------------- 29638 * false false | true | No 29639 * false true | true | No 29640 * true false | true | Yes 29641 * true true | true | Yes 29642 * --------------------------------------------------------------------- 29643 * 29644 * 29645 * 15. Check write-protection at open time 29646 * 29647 * When a removable media device is opened for writing without the NDELAY 29648 * flag, sd will check whether the device is writable. If the device is 29649 * write-protected, the open will fail. 29650 * 29651 * ------------------------------------------------------------ 29652 * removable media USB/1394 | WP Check 29653 * ------------------------------------------------------------ 29654 * false false | No 29655 * false true | No 29656 * true false | Yes 29657 * true true | Yes 29658 * ------------------------------------------------------------ 29659 * 29660 * 29661 * 16. syslog when a corrupted VTOC is encountered 29662 * 29663 * Currently, if an invalid VTOC is encountered, sd prints a syslog 29664 * message only for fixed SCSI disks. 29665 * ------------------------------------------------------------ 29666 * removable media USB/1394 | print syslog 29667 * ------------------------------------------------------------ 29668 * false false | Yes 29669 * false true | No 29670 * true false | No 29671 * true true | No 29672 * ------------------------------------------------------------ 29673 */ 29674 static void 29675 sd_set_unit_attributes(struct sd_lun *un, dev_info_t *devi) 29676 { 29677 int pm_capable_prop; 29678 29679 ASSERT(un->un_sd); 29680 ASSERT(un->un_sd->sd_inq); 29681 29682 /* 29683 * Enable SYNC CACHE support for all devices. 29684 */ 29685 un->un_f_sync_cache_supported = TRUE; 29686 29687 /* 29688 * Set the sync cache required flag to false. 29689 * This ensures that no SYNC CACHE is sent 29690 * when there have been no writes. 29691 */ 29692 un->un_f_sync_cache_required = FALSE; 29693 29694 if (un->un_sd->sd_inq->inq_rmb) { 29695 /* 29696 * The media of this device is removable, and for this kind 29697 * of device it is possible to change the medium after opening 29698 * it. Thus we should support this operation. 
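 *
 * (Illustrative sketch only: the flags set below are consumed by later
 * code paths. For example, an open-time write-protect check gated on
 * un_f_chk_wp_open might look roughly like
 *
 *	if (un->un_f_chk_wp_open && (flag & FWRITE) &&
 *	    !(flag & (FNDELAY | FNONBLOCK)) && sr_check_wp(dev)) {
 *		rval = EROFS;
 *		goto open_fail;
 *	}
 *
 * where sr_check_wp() is assumed here to return non-zero for a
 * write-protected medium; the actual check lives in the open path.)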
29699 */ 29700 un->un_f_has_removable_media = TRUE; 29701 29702 /* 29703 * Support non-512-byte blocksizes on removable media devices. 29704 */ 29705 un->un_f_non_devbsize_supported = TRUE; 29706 29707 /* 29708 * Assume that all removable media devices support DOOR_LOCK. 29709 */ 29710 un->un_f_doorlock_supported = TRUE; 29711 29712 /* 29713 * A removable media device may be opened with the NDELAY flag 29714 * when there is no media in the drive; in this case we don't 29715 * care whether the device is writable. Without the NDELAY 29716 * flag, however, we need to check whether the media is write-protected. 29717 */ 29718 un->un_f_chk_wp_open = TRUE; 29719 29720 /* 29721 * Need to start a SCSI watch thread to monitor media state; 29722 * when media is inserted or ejected, notify syseventd. 29723 */ 29724 un->un_f_monitor_media_state = TRUE; 29725 29726 /* 29727 * Some devices don't support the START_STOP_UNIT command. 29728 * Therefore, we'd better check whether a device supports it 29729 * before sending it. 29730 */ 29731 un->un_f_check_start_stop = TRUE; 29732 29733 /* 29734 * Support the eject media ioctls: 29735 * FDEJECT, DKIOCEJECT, CDROMEJECT 29736 */ 29737 un->un_f_eject_media_supported = TRUE; 29738 29739 /* 29740 * Because many removable-media devices don't support 29741 * LOG_SENSE, we cannot use this command to check whether 29742 * a removable media device supports power management. 29743 * We assume that they support power management via the 29744 * START_STOP_UNIT command and can be spun up and down 29745 * without limitations. 29746 */ 29747 un->un_f_pm_supported = TRUE; 29748 29749 /* 29750 * Need to create a zero-length (Boolean) property 29751 * removable-media for the removable media devices. 29752 * Note that the return value of the property is not 29753 * checked: if we are unable to create the property, 29754 * we do not want the attach to fail altogether. This is 29755 * consistent with other property creation in attach. 29756 */ 29757 (void) ddi_prop_create(DDI_DEV_T_NONE, devi, 29758 DDI_PROP_CANSLEEP, "removable-media", NULL, 0); 29759 29760 } else { 29761 /* 29762 * Create a device ID for the device. 29763 */ 29764 un->un_f_devid_supported = TRUE; 29765 29766 /* 29767 * Spin up non-removable-media devices once they are attached. 29768 */ 29769 un->un_f_attach_spinup = TRUE; 29770 29771 /* 29772 * According to the SCSI specification, sense data has two 29773 * formats: fixed format and descriptor format. At present, we 29774 * don't support descriptor-format sense data for removable 29775 * media. 29776 */ 29777 if (SD_INQUIRY(un)->inq_dtype == DTYPE_DIRECT) { 29778 un->un_f_descr_format_supported = TRUE; 29779 } 29780 29781 /* 29782 * kstats are created only for non-removable media devices. 29783 * 29784 * Set this in sd.conf to 0 in order to disable kstats. The 29785 * default is 1, so they are enabled by default. 29786 */ 29787 un->un_f_pkstats_enabled = (ddi_prop_get_int(DDI_DEV_T_ANY, 29788 SD_DEVINFO(un), DDI_PROP_DONTPASS, 29789 "enable-partition-kstats", 1)); 29790 29791 /* 29792 * Check whether the HBA has set the "pm-capable" property. 29793 * If "pm-capable" exists and is non-zero then we can 29794 * power manage the device without checking the start/stop 29795 * cycle count log sense page. 29796 * 29797 * If "pm-capable" exists and is SD_PM_CAPABLE_FALSE (0), 29798 * then we should not power manage the device. 29799 * 29800 * If "pm-capable" doesn't exist, then pm_capable_prop will 29801 * be set to SD_PM_CAPABLE_UNDEFINED (-1). 
In this case, 29802 * sd will check the start/stop cycle count log sense page 29803 * and power manage the device if the cycle count limit has 29804 * not been exceeded. 29805 */ 29806 pm_capable_prop = ddi_prop_get_int(DDI_DEV_T_ANY, devi, 29807 DDI_PROP_DONTPASS, "pm-capable", SD_PM_CAPABLE_UNDEFINED); 29808 if (pm_capable_prop == SD_PM_CAPABLE_UNDEFINED) { 29809 un->un_f_log_sense_supported = TRUE; 29810 } else { 29811 /* 29812 * pm-capable property exists. 29813 * 29814 * Convert "TRUE" values for pm_capable_prop to 29815 * SD_PM_CAPABLE_TRUE (1) to make it easier to check 29816 * later. "TRUE" values are any values except 29817 * SD_PM_CAPABLE_FALSE (0) and 29818 * SD_PM_CAPABLE_UNDEFINED (-1) 29819 */ 29820 if (pm_capable_prop == SD_PM_CAPABLE_FALSE) { 29821 un->un_f_log_sense_supported = FALSE; 29822 } else { 29823 un->un_f_pm_supported = TRUE; 29824 } 29825 29826 SD_INFO(SD_LOG_ATTACH_DETACH, un, 29827 "sd_unit_attach: un:0x%p pm-capable " 29828 "property set to %d.\n", un, un->un_f_pm_supported); 29829 } 29830 } 29831 29832 if (un->un_f_is_hotpluggable) { 29833 29834 /* 29835 * Have to watch hotpluggable devices as well, since 29836 * that's the only way for userland applications to 29837 * detect hot removal while device is busy/mounted. 29838 */ 29839 un->un_f_monitor_media_state = TRUE; 29840 29841 un->un_f_check_start_stop = TRUE; 29842 29843 } 29844 } 29845 29846 /* 29847 * sd_tg_rdwr: 29848 * Provides rdwr access for cmlb via sd_tgops. The start_block is 29849 * in sys block size, req_length in bytes. 29850 * 29851 */ 29852 static int 29853 sd_tg_rdwr(dev_info_t *devi, uchar_t cmd, void *bufaddr, 29854 diskaddr_t start_block, size_t reqlength, void *tg_cookie) 29855 { 29856 struct sd_lun *un; 29857 int path_flag = (int)(uintptr_t)tg_cookie; 29858 char *dkl = NULL; 29859 diskaddr_t real_addr = start_block; 29860 diskaddr_t first_byte, end_block; 29861 29862 size_t buffer_size = reqlength; 29863 int rval = 0; 29864 diskaddr_t cap; 29865 uint32_t lbasize; 29866 sd_ssc_t *ssc; 29867 29868 un = ddi_get_soft_state(sd_state, ddi_get_instance(devi)); 29869 if (un == NULL) 29870 return (ENXIO); 29871 29872 if (cmd != TG_READ && cmd != TG_WRITE) 29873 return (EINVAL); 29874 29875 ssc = sd_ssc_init(un); 29876 mutex_enter(SD_MUTEX(un)); 29877 if (un->un_f_tgt_blocksize_is_valid == FALSE) { 29878 mutex_exit(SD_MUTEX(un)); 29879 rval = sd_send_scsi_READ_CAPACITY(ssc, (uint64_t *)&cap, 29880 &lbasize, path_flag); 29881 if (rval != 0) 29882 goto done1; 29883 mutex_enter(SD_MUTEX(un)); 29884 sd_update_block_info(un, lbasize, cap); 29885 if ((un->un_f_tgt_blocksize_is_valid == FALSE)) { 29886 mutex_exit(SD_MUTEX(un)); 29887 rval = EIO; 29888 goto done; 29889 } 29890 } 29891 29892 if (NOT_DEVBSIZE(un)) { 29893 /* 29894 * sys_blocksize != tgt_blocksize, need to re-adjust 29895 * blkno and save the index to beginning of dk_label 29896 */ 29897 first_byte = SD_SYSBLOCKS2BYTES(un, start_block); 29898 real_addr = first_byte / un->un_tgt_blocksize; 29899 29900 end_block = (first_byte + reqlength + 29901 un->un_tgt_blocksize - 1) / un->un_tgt_blocksize; 29902 29903 /* round up buffer size to multiple of target block size */ 29904 buffer_size = (end_block - real_addr) * un->un_tgt_blocksize; 29905 29906 SD_TRACE(SD_LOG_IO_PARTITION, un, "sd_tg_rdwr", 29907 "label_addr: 0x%x allocation size: 0x%x\n", 29908 real_addr, buffer_size); 29909 29910 if (((first_byte % un->un_tgt_blocksize) != 0) || 29911 (reqlength % un->un_tgt_blocksize) != 0) 29912 /* the request is not aligned */ 29913 dkl = 
kmem_zalloc(buffer_size, KM_SLEEP); 29914 } 29915 29916 /* 29917 * The MMC standard allows READ CAPACITY to be 29918 * inaccurate by a bounded amount (in the interest of 29919 * response latency). As a result, failed READs are 29920 * commonplace (due to the reading of metadata and not 29921 * data). Depending on the per-Vendor/drive Sense data, 29922 * the failed READ can cause many (unnecessary) retries. 29923 */ 29924 29925 if (ISCD(un) && (cmd == TG_READ) && 29926 (un->un_f_blockcount_is_valid == TRUE) && 29927 ((start_block == (un->un_blockcount - 1))|| 29928 (start_block == (un->un_blockcount - 2)))) { 29929 path_flag = SD_PATH_DIRECT_PRIORITY; 29930 } 29931 29932 mutex_exit(SD_MUTEX(un)); 29933 if (cmd == TG_READ) { 29934 rval = sd_send_scsi_READ(ssc, (dkl != NULL)? dkl: bufaddr, 29935 buffer_size, real_addr, path_flag); 29936 if (dkl != NULL) 29937 bcopy(dkl + SD_TGTBYTEOFFSET(un, start_block, 29938 real_addr), bufaddr, reqlength); 29939 } else { 29940 if (dkl) { 29941 rval = sd_send_scsi_READ(ssc, dkl, buffer_size, 29942 real_addr, path_flag); 29943 if (rval) { 29944 goto done1; 29945 } 29946 bcopy(bufaddr, dkl + SD_TGTBYTEOFFSET(un, start_block, 29947 real_addr), reqlength); 29948 } 29949 rval = sd_send_scsi_WRITE(ssc, (dkl != NULL)? dkl: bufaddr, 29950 buffer_size, real_addr, path_flag); 29951 } 29952 29953 done1: 29954 if (dkl != NULL) 29955 kmem_free(dkl, buffer_size); 29956 29957 if (rval != 0) { 29958 if (rval == EIO) 29959 sd_ssc_assessment(ssc, SD_FMT_STATUS_CHECK); 29960 else 29961 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 29962 } 29963 done: 29964 sd_ssc_fini(ssc); 29965 return (rval); 29966 } 29967 29968 29969 static int 29970 sd_tg_getinfo(dev_info_t *devi, int cmd, void *arg, void *tg_cookie) 29971 { 29972 29973 struct sd_lun *un; 29974 diskaddr_t cap; 29975 uint32_t lbasize; 29976 int path_flag = (int)(uintptr_t)tg_cookie; 29977 int ret = 0; 29978 29979 un = ddi_get_soft_state(sd_state, ddi_get_instance(devi)); 29980 if (un == NULL) 29981 return (ENXIO); 29982 29983 switch (cmd) { 29984 case TG_GETPHYGEOM: 29985 case TG_GETVIRTGEOM: 29986 case TG_GETCAPACITY: 29987 case TG_GETBLOCKSIZE: 29988 mutex_enter(SD_MUTEX(un)); 29989 29990 if ((un->un_f_blockcount_is_valid == TRUE) && 29991 (un->un_f_tgt_blocksize_is_valid == TRUE)) { 29992 cap = un->un_blockcount; 29993 lbasize = un->un_tgt_blocksize; 29994 mutex_exit(SD_MUTEX(un)); 29995 } else { 29996 sd_ssc_t *ssc; 29997 mutex_exit(SD_MUTEX(un)); 29998 ssc = sd_ssc_init(un); 29999 ret = sd_send_scsi_READ_CAPACITY(ssc, (uint64_t *)&cap, 30000 &lbasize, path_flag); 30001 if (ret != 0) { 30002 if (ret == EIO) 30003 sd_ssc_assessment(ssc, 30004 SD_FMT_STATUS_CHECK); 30005 else 30006 sd_ssc_assessment(ssc, 30007 SD_FMT_IGNORE); 30008 sd_ssc_fini(ssc); 30009 return (ret); 30010 } 30011 sd_ssc_fini(ssc); 30012 mutex_enter(SD_MUTEX(un)); 30013 sd_update_block_info(un, lbasize, cap); 30014 if ((un->un_f_blockcount_is_valid == FALSE) || 30015 (un->un_f_tgt_blocksize_is_valid == FALSE)) { 30016 mutex_exit(SD_MUTEX(un)); 30017 return (EIO); 30018 } 30019 mutex_exit(SD_MUTEX(un)); 30020 } 30021 30022 if (cmd == TG_GETCAPACITY) { 30023 *(diskaddr_t *)arg = cap; 30024 return (0); 30025 } 30026 30027 if (cmd == TG_GETBLOCKSIZE) { 30028 *(uint32_t *)arg = lbasize; 30029 return (0); 30030 } 30031 30032 if (cmd == TG_GETPHYGEOM) 30033 ret = sd_get_physical_geometry(un, (cmlb_geom_t *)arg, 30034 cap, lbasize, path_flag); 30035 else 30036 /* TG_GETVIRTGEOM */ 30037 ret = sd_get_virtual_geometry(un, 30038 (cmlb_geom_t *)arg, cap, lbasize); 30039 
30040 return (ret); 30041 30042 case TG_GETATTR: 30043 mutex_enter(SD_MUTEX(un)); 30044 ((tg_attribute_t *)arg)->media_is_writable = 30045 un->un_f_mmc_writable_media; 30046 mutex_exit(SD_MUTEX(un)); 30047 return (0); 30048 default: 30049 return (ENOTTY); 30050 30051 } 30052 } 30053 30054 /* 30055 * Function: sd_ssc_ereport_post 30056 * 30057 * Description: Called when the sd driver needs to post an ereport. 30058 * 30059 * Context: Kernel thread or interrupt context. 30060 */ 30061 static void 30062 sd_ssc_ereport_post(sd_ssc_t *ssc, enum sd_driver_assessment drv_assess) 30063 { 30064 int uscsi_path_instance = 0; 30065 uchar_t uscsi_pkt_reason; 30066 uint32_t uscsi_pkt_state; 30067 uint32_t uscsi_pkt_statistics; 30068 uint64_t uscsi_ena; 30069 uchar_t op_code; 30070 uint8_t *sensep; 30071 union scsi_cdb *cdbp; 30072 uint_t cdblen = 0; 30073 uint_t senlen = 0; 30074 struct sd_lun *un; 30075 dev_info_t *dip; 30076 char *devid; 30077 int ssc_invalid_flags = SSC_FLAGS_INVALID_PKT_REASON | 30078 SSC_FLAGS_INVALID_STATUS | 30079 SSC_FLAGS_INVALID_SENSE | 30080 SSC_FLAGS_INVALID_DATA; 30081 char assessment[16]; 30082 30083 ASSERT(ssc != NULL); 30084 ASSERT(ssc->ssc_uscsi_cmd != NULL); 30085 ASSERT(ssc->ssc_uscsi_info != NULL); 30086 30087 un = ssc->ssc_un; 30088 ASSERT(un != NULL); 30089 30090 dip = un->un_sd->sd_dev; 30091 30092 /* 30093 * Get the devid: 30094 * devid will only be passed to non-transport error reports. 30095 */ 30096 devid = DEVI(dip)->devi_devid_str; 30097 30098 /* 30099 * If we are syncing or dumping, the command will not be executed, 30100 * so we bypass this situation. 30101 */ 30102 if (ddi_in_panic() || (un->un_state == SD_STATE_SUSPENDED) || 30103 (un->un_state == SD_STATE_DUMPING)) 30104 return; 30105 30106 uscsi_pkt_reason = ssc->ssc_uscsi_info->ui_pkt_reason; 30107 uscsi_path_instance = ssc->ssc_uscsi_cmd->uscsi_path_instance; 30108 uscsi_pkt_state = ssc->ssc_uscsi_info->ui_pkt_state; 30109 uscsi_pkt_statistics = ssc->ssc_uscsi_info->ui_pkt_statistics; 30110 uscsi_ena = ssc->ssc_uscsi_info->ui_ena; 30111 30112 sensep = (uint8_t *)ssc->ssc_uscsi_cmd->uscsi_rqbuf; 30113 cdbp = (union scsi_cdb *)ssc->ssc_uscsi_cmd->uscsi_cdb; 30114 30115 /* In rare cases, e.g. DOORLOCK, the cdb could be NULL */ 30116 if (cdbp == NULL) { 30117 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 30118 "sd_ssc_ereport_post: empty cdb\n"); 30119 return; 30120 } 30121 30122 op_code = cdbp->scc_cmd; 30123 30124 cdblen = (int)ssc->ssc_uscsi_cmd->uscsi_cdblen; 30125 senlen = (int)(ssc->ssc_uscsi_cmd->uscsi_rqlen - 30126 ssc->ssc_uscsi_cmd->uscsi_rqresid); 30127 30128 if (senlen > 0) 30129 ASSERT(sensep != NULL); 30130 30131 /* 30132 * Map drv_assess to the corresponding assessment string. 30133 * SD_FM_DRV_FATAL will be mapped to "fail" or "fatal" depending 30134 * on the sense key returned. 30135 */ 30136 switch (drv_assess) { 30137 case SD_FM_DRV_RECOVERY: 30138 (void) sprintf(assessment, "%s", "recovered"); 30139 break; 30140 case SD_FM_DRV_RETRY: 30141 (void) sprintf(assessment, "%s", "retry"); 30142 break; 30143 case SD_FM_DRV_NOTICE: 30144 (void) sprintf(assessment, "%s", "info"); 30145 break; 30146 case SD_FM_DRV_FATAL: 30147 default: 30148 (void) sprintf(assessment, "%s", "unknown"); 30149 } 30150 /* 30151 * If drv_assess == SD_FM_DRV_RECOVERY, this should be a recovered 30152 * command, and we will post ereport.io.scsi.cmd.disk.recovered; 30153 * driver-assessment will always be "recovered" here.
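 *
 * As an illustrative sketch of the resulting payload (the field names
 * match the scsi_fm_ereport_post() call below; the concrete values are
 * assumptions for the example), a recovered READ(10) might produce:
 *
 *	ereport.io.scsi.cmd.disk.recovered
 *		driver-assessment = "recovered"
 *		op-code = 0x28
 *		cdb = 0x28 0x00 0x00 0x00 0x10 0x00 0x00 0x00 0x08 0x00
 *		pkt-reason = 0 (CMD_CMPLT)
 *		pkt-state / pkt-stats as captured in ssc_uscsi_info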
30154 */ 30155 if (drv_assess == SD_FM_DRV_RECOVERY) { 30156 scsi_fm_ereport_post(un->un_sd, uscsi_path_instance, 30157 "cmd.disk.recovered", uscsi_ena, devid, DDI_NOSLEEP, 30158 FM_VERSION, DATA_TYPE_UINT8, FM_EREPORT_VERS0, 30159 "driver-assessment", DATA_TYPE_STRING, assessment, 30160 "op-code", DATA_TYPE_UINT8, op_code, 30161 "cdb", DATA_TYPE_UINT8_ARRAY, 30162 cdblen, ssc->ssc_uscsi_cmd->uscsi_cdb, 30163 "pkt-reason", DATA_TYPE_UINT8, uscsi_pkt_reason, 30164 "pkt-state", DATA_TYPE_UINT32, uscsi_pkt_state, 30165 "pkt-stats", DATA_TYPE_UINT32, uscsi_pkt_statistics, 30166 NULL); 30167 return; 30168 } 30169 30170 /* 30171 * If there is un-expected/un-decodable data, we should post 30172 * ereport.io.scsi.cmd.disk.dev.uderr. 30173 * driver-assessment will be set based on parameter drv_assess. 30174 * SSC_FLAGS_INVALID_SENSE - invalid sense data sent back. 30175 * SSC_FLAGS_INVALID_PKT_REASON - invalid pkt-reason encountered. 30176 * SSC_FLAGS_INVALID_STATUS - invalid stat-code encountered. 30177 * SSC_FLAGS_INVALID_DATA - invalid data sent back. 30178 */ 30179 if (ssc->ssc_flags & ssc_invalid_flags) { 30180 if (ssc->ssc_flags & SSC_FLAGS_INVALID_SENSE) { 30181 scsi_fm_ereport_post(un->un_sd, uscsi_path_instance, 30182 "cmd.disk.dev.uderr", uscsi_ena, devid, DDI_NOSLEEP, 30183 FM_VERSION, DATA_TYPE_UINT8, FM_EREPORT_VERS0, 30184 "driver-assessment", DATA_TYPE_STRING, 30185 drv_assess == SD_FM_DRV_FATAL ? 30186 "fail" : assessment, 30187 "op-code", DATA_TYPE_UINT8, op_code, 30188 "cdb", DATA_TYPE_UINT8_ARRAY, 30189 cdblen, ssc->ssc_uscsi_cmd->uscsi_cdb, 30190 "pkt-reason", DATA_TYPE_UINT8, uscsi_pkt_reason, 30191 "pkt-state", DATA_TYPE_UINT32, uscsi_pkt_state, 30192 "pkt-stats", DATA_TYPE_UINT32, 30193 uscsi_pkt_statistics, 30194 "stat-code", DATA_TYPE_UINT8, 30195 ssc->ssc_uscsi_cmd->uscsi_status, 30196 "un-decode-info", DATA_TYPE_STRING, 30197 ssc->ssc_info, 30198 "un-decode-value", DATA_TYPE_UINT8_ARRAY, 30199 senlen, sensep, 30200 NULL); 30201 } else { 30202 /* 30203 * For other type of invalid data, the 30204 * un-decode-value field would be empty because the 30205 * un-decodable content could be seen from upper 30206 * level payload or inside un-decode-info. 30207 */ 30208 scsi_fm_ereport_post(un->un_sd, uscsi_path_instance, 30209 "cmd.disk.dev.uderr", uscsi_ena, devid, DDI_NOSLEEP, 30210 FM_VERSION, DATA_TYPE_UINT8, FM_EREPORT_VERS0, 30211 "driver-assessment", DATA_TYPE_STRING, 30212 drv_assess == SD_FM_DRV_FATAL ? 30213 "fail" : assessment, 30214 "op-code", DATA_TYPE_UINT8, op_code, 30215 "cdb", DATA_TYPE_UINT8_ARRAY, 30216 cdblen, ssc->ssc_uscsi_cmd->uscsi_cdb, 30217 "pkt-reason", DATA_TYPE_UINT8, uscsi_pkt_reason, 30218 "pkt-state", DATA_TYPE_UINT32, uscsi_pkt_state, 30219 "pkt-stats", DATA_TYPE_UINT32, 30220 uscsi_pkt_statistics, 30221 "stat-code", DATA_TYPE_UINT8, 30222 ssc->ssc_uscsi_cmd->uscsi_status, 30223 "un-decode-info", DATA_TYPE_STRING, 30224 ssc->ssc_info, 30225 "un-decode-value", DATA_TYPE_UINT8_ARRAY, 30226 0, NULL, 30227 NULL); 30228 } 30229 ssc->ssc_flags &= ~ssc_invalid_flags; 30230 return; 30231 } 30232 30233 if (uscsi_pkt_reason != CMD_CMPLT || 30234 (ssc->ssc_flags & SSC_FLAGS_TRAN_ABORT)) { 30235 /* 30236 * pkt-reason != CMD_CMPLT or SSC_FLAGS_TRAN_ABORT was 30237 * set inside sd_start_cmds due to errors(bad packet or 30238 * fatal transport error), we should take it as a 30239 * transport error, so we post ereport.io.scsi.cmd.disk.tran. 30240 * driver-assessment will be set based on drv_assess. 
30241 * We will set devid to NULL because it is a transport 30242 * error. 30243 */ 30244 if (ssc->ssc_flags & SSC_FLAGS_TRAN_ABORT) 30245 ssc->ssc_flags &= ~SSC_FLAGS_TRAN_ABORT; 30246 30247 scsi_fm_ereport_post(un->un_sd, uscsi_path_instance, 30248 "cmd.disk.tran", uscsi_ena, NULL, DDI_NOSLEEP, FM_VERSION, 30249 DATA_TYPE_UINT8, FM_EREPORT_VERS0, 30250 "driver-assessment", DATA_TYPE_STRING, 30251 drv_assess == SD_FM_DRV_FATAL ? "fail" : assessment, 30252 "op-code", DATA_TYPE_UINT8, op_code, 30253 "cdb", DATA_TYPE_UINT8_ARRAY, 30254 cdblen, ssc->ssc_uscsi_cmd->uscsi_cdb, 30255 "pkt-reason", DATA_TYPE_UINT8, uscsi_pkt_reason, 30256 "pkt-state", DATA_TYPE_UINT8, uscsi_pkt_state, 30257 "pkt-stats", DATA_TYPE_UINT32, uscsi_pkt_statistics, 30258 NULL); 30259 } else { 30260 /* 30261 * If we got here, we have a completed command, and we need 30262 * to further investigate the sense data to see what kind 30263 * of ereport we should post. 30264 * Post ereport.io.scsi.cmd.disk.dev.rqs.merr 30265 * if sense-key == 0x3. 30266 * Post ereport.io.scsi.cmd.disk.dev.rqs.derr otherwise. 30267 * driver-assessment will be set based on the parameter 30268 * drv_assess. 30269 */ 30270 if (senlen > 0) { 30271 /* 30272 * Here we have sense data available. 30273 */ 30274 uint8_t sense_key; 30275 sense_key = scsi_sense_key(sensep); 30276 if (sense_key == 0x3) { 30277 /* 30278 * sense-key == 0x3(medium error), 30279 * driver-assessment should be "fatal" if 30280 * drv_assess is SD_FM_DRV_FATAL. 30281 */ 30282 scsi_fm_ereport_post(un->un_sd, 30283 uscsi_path_instance, 30284 "cmd.disk.dev.rqs.merr", 30285 uscsi_ena, devid, DDI_NOSLEEP, FM_VERSION, 30286 DATA_TYPE_UINT8, FM_EREPORT_VERS0, 30287 "driver-assessment", 30288 DATA_TYPE_STRING, 30289 drv_assess == SD_FM_DRV_FATAL ? 30290 "fatal" : assessment, 30291 "op-code", 30292 DATA_TYPE_UINT8, op_code, 30293 "cdb", 30294 DATA_TYPE_UINT8_ARRAY, cdblen, 30295 ssc->ssc_uscsi_cmd->uscsi_cdb, 30296 "pkt-reason", 30297 DATA_TYPE_UINT8, uscsi_pkt_reason, 30298 "pkt-state", 30299 DATA_TYPE_UINT8, uscsi_pkt_state, 30300 "pkt-stats", 30301 DATA_TYPE_UINT32, 30302 uscsi_pkt_statistics, 30303 "stat-code", 30304 DATA_TYPE_UINT8, 30305 ssc->ssc_uscsi_cmd->uscsi_status, 30306 "key", 30307 DATA_TYPE_UINT8, 30308 scsi_sense_key(sensep), 30309 "asc", 30310 DATA_TYPE_UINT8, 30311 scsi_sense_asc(sensep), 30312 "ascq", 30313 DATA_TYPE_UINT8, 30314 scsi_sense_ascq(sensep), 30315 "sense-data", 30316 DATA_TYPE_UINT8_ARRAY, 30317 senlen, sensep, 30318 "lba", 30319 DATA_TYPE_UINT64, 30320 ssc->ssc_uscsi_info->ui_lba, 30321 NULL); 30322 } else { 30323 /* 30324 * if sense-key == 0x4(hardware 30325 * error), driver-assessment should 30326 * be "fatal" if drv_assess is 30327 * SD_FM_DRV_FATAL. 30328 */ 30329 scsi_fm_ereport_post(un->un_sd, 30330 uscsi_path_instance, 30331 "cmd.disk.dev.rqs.derr", 30332 uscsi_ena, devid, DDI_NOSLEEP, 30333 FM_VERSION, 30334 DATA_TYPE_UINT8, FM_EREPORT_VERS0, 30335 "driver-assessment", 30336 DATA_TYPE_STRING, 30337 drv_assess == SD_FM_DRV_FATAL ? 30338 (sense_key == 0x4 ? 
30339 "fatal" : "fail") : assessment, 30340 "op-code", 30341 DATA_TYPE_UINT8, op_code, 30342 "cdb", 30343 DATA_TYPE_UINT8_ARRAY, cdblen, 30344 ssc->ssc_uscsi_cmd->uscsi_cdb, 30345 "pkt-reason", 30346 DATA_TYPE_UINT8, uscsi_pkt_reason, 30347 "pkt-state", 30348 DATA_TYPE_UINT8, uscsi_pkt_state, 30349 "pkt-stats", 30350 DATA_TYPE_UINT32, 30351 uscsi_pkt_statistics, 30352 "stat-code", 30353 DATA_TYPE_UINT8, 30354 ssc->ssc_uscsi_cmd->uscsi_status, 30355 "key", 30356 DATA_TYPE_UINT8, 30357 scsi_sense_key(sensep), 30358 "asc", 30359 DATA_TYPE_UINT8, 30360 scsi_sense_asc(sensep), 30361 "ascq", 30362 DATA_TYPE_UINT8, 30363 scsi_sense_ascq(sensep), 30364 "sense-data", 30365 DATA_TYPE_UINT8_ARRAY, 30366 senlen, sensep, 30367 NULL); 30368 } 30369 } else { 30370 /* 30371 * For stat_code == STATUS_GOOD, this is not a 30372 * hardware error. 30373 */ 30374 if (ssc->ssc_uscsi_cmd->uscsi_status == STATUS_GOOD) 30375 return; 30376 30377 /* 30378 * Post ereport.io.scsi.cmd.disk.dev.serr if we got the 30379 * stat-code but with sense data unavailable. 30380 * driver-assessment will be set based on parameter 30381 * drv_assess. 30382 */ 30383 scsi_fm_ereport_post(un->un_sd, 30384 uscsi_path_instance, "cmd.disk.dev.serr", uscsi_ena, 30385 devid, DDI_NOSLEEP, FM_VERSION, DATA_TYPE_UINT8, 30386 FM_EREPORT_VERS0, 30387 "driver-assessment", DATA_TYPE_STRING, 30388 drv_assess == SD_FM_DRV_FATAL ? "fail" : assessment, 30389 "op-code", DATA_TYPE_UINT8, op_code, 30390 "cdb", 30391 DATA_TYPE_UINT8_ARRAY, 30392 cdblen, ssc->ssc_uscsi_cmd->uscsi_cdb, 30393 "pkt-reason", 30394 DATA_TYPE_UINT8, uscsi_pkt_reason, 30395 "pkt-state", 30396 DATA_TYPE_UINT8, uscsi_pkt_state, 30397 "pkt-stats", 30398 DATA_TYPE_UINT32, uscsi_pkt_statistics, 30399 "stat-code", 30400 DATA_TYPE_UINT8, 30401 ssc->ssc_uscsi_cmd->uscsi_status, 30402 NULL); 30403 } 30404 } 30405 } 30406 30407 /* 30408 * Function: sd_ssc_extract_info 30409 * 30410 * Description: Extract information available to help generate ereport. 30411 * 30412 * Context: Kernel thread or interrupt context. 30413 */ 30414 static void 30415 sd_ssc_extract_info(sd_ssc_t *ssc, struct sd_lun *un, struct scsi_pkt *pktp, 30416 struct buf *bp, struct sd_xbuf *xp) 30417 { 30418 size_t senlen = 0; 30419 union scsi_cdb *cdbp; 30420 int path_instance; 30421 /* 30422 * Need scsi_cdb_size array to determine the cdb length. 30423 */ 30424 extern uchar_t scsi_cdb_size[]; 30425 30426 ASSERT(un != NULL); 30427 ASSERT(pktp != NULL); 30428 ASSERT(bp != NULL); 30429 ASSERT(xp != NULL); 30430 ASSERT(ssc != NULL); 30431 ASSERT(mutex_owned(SD_MUTEX(un))); 30432 30433 /* 30434 * Transfer the cdb buffer pointer here. 30435 */ 30436 cdbp = (union scsi_cdb *)pktp->pkt_cdbp; 30437 30438 ssc->ssc_uscsi_cmd->uscsi_cdblen = scsi_cdb_size[GETGROUP(cdbp)]; 30439 ssc->ssc_uscsi_cmd->uscsi_cdb = (caddr_t)cdbp; 30440 30441 /* 30442 * Transfer the sense data buffer pointer if sense data is available, 30443 * calculate the sense data length first. 30444 */ 30445 if ((xp->xb_sense_state & STATE_XARQ_DONE) || 30446 (xp->xb_sense_state & STATE_ARQ_DONE)) { 30447 /* 30448 * For arq case, we will enter here. 30449 */ 30450 if (xp->xb_sense_state & STATE_XARQ_DONE) { 30451 senlen = MAX_SENSE_LENGTH - xp->xb_sense_resid; 30452 } else { 30453 senlen = SENSE_LENGTH; 30454 } 30455 } else { 30456 /* 30457 * For non-arq case, we will enter this branch. 
30458 */ 30459 if (SD_GET_PKT_STATUS(pktp) == STATUS_CHECK && 30460 (xp->xb_sense_state & STATE_XFERRED_DATA)) { 30461 senlen = SENSE_LENGTH - xp->xb_sense_resid; 30462 } 30463 30464 } 30465 30466 ssc->ssc_uscsi_cmd->uscsi_rqlen = (senlen & 0xff); 30467 ssc->ssc_uscsi_cmd->uscsi_rqresid = 0; 30468 ssc->ssc_uscsi_cmd->uscsi_rqbuf = (caddr_t)xp->xb_sense_data; 30469 30470 ssc->ssc_uscsi_cmd->uscsi_status = ((*(pktp)->pkt_scbp) & STATUS_MASK); 30471 30472 /* 30473 * Only transfer path_instance when scsi_pkt was properly allocated. 30474 */ 30475 path_instance = pktp->pkt_path_instance; 30476 if (scsi_pkt_allocated_correctly(pktp) && path_instance) 30477 ssc->ssc_uscsi_cmd->uscsi_path_instance = path_instance; 30478 else 30479 ssc->ssc_uscsi_cmd->uscsi_path_instance = 0; 30480 30481 /* 30482 * Copy in the other fields we may need when posting the ereport. 30483 */ 30484 ssc->ssc_uscsi_info->ui_pkt_reason = pktp->pkt_reason; 30485 ssc->ssc_uscsi_info->ui_pkt_state = pktp->pkt_state; 30486 ssc->ssc_uscsi_info->ui_pkt_statistics = pktp->pkt_statistics; 30487 ssc->ssc_uscsi_info->ui_lba = (uint64_t)SD_GET_BLKNO(bp); 30488 30489 /* 30490 * Do not create an ena for a command that completed successfully with 30491 * no sense data, lest it be mistakenly recognized as recovered. 30492 */ 30493 if ((pktp->pkt_reason == CMD_CMPLT) && 30494 (ssc->ssc_uscsi_cmd->uscsi_status == STATUS_GOOD) && 30495 (senlen == 0)) { 30496 return; 30497 } 30498 30499 /* 30500 * To associate the ereports of a single command execution flow, we 30501 * need a shared ena for the specific command. 30502 */ 30503 if (xp->xb_ena == 0) 30504 xp->xb_ena = fm_ena_generate(0, FM_ENA_FMT1); 30505 ssc->ssc_uscsi_info->ui_ena = xp->xb_ena; 30506 } 30507
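/*
 * Two illustrative sketches (assumptions for exposition, not driver code):
 *
 * 1) Sense length arithmetic in sd_ssc_extract_info() above: with an
 *    assumed MAX_SENSE_LENGTH of 255 and xb_sense_resid of 200, the
 *    XARQ case yields senlen = 255 - 200 = 55; with an assumed
 *    SENSE_LENGTH of 20 and xb_sense_resid of 2, the non-arq CHECK
 *    CONDITION case yields senlen = 20 - 2 = 18.
 *
 * 2) How the shared ena ties ereports together. A caller following this
 *    pattern might look roughly like:
 *
 *	mutex_enter(SD_MUTEX(un));
 *	sd_ssc_extract_info(ssc, un, pktp, bp, xp);
 *	mutex_exit(SD_MUTEX(un));
 *	sd_ssc_ereport_post(ssc, SD_FM_DRV_RETRY);	first attempt failed
 *	... command is retried and succeeds ...
 *	sd_ssc_ereport_post(ssc, SD_FM_DRV_RECOVERY);	retry succeeded
 *
 *    Both posts carry xp->xb_ena (via ssc_uscsi_info->ui_ena), so FMA
 *    can correlate them as one fault event.
 */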