1 /* 2 * CDDL HEADER START 3 * 4 * The contents of this file are subject to the terms of the 5 * Common Development and Distribution License (the "License"). 6 * You may not use this file except in compliance with the License. 7 * 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE 9 * or http://www.opensolaris.org/os/licensing. 10 * See the License for the specific language governing permissions 11 * and limitations under the License. 12 * 13 * When distributing Covered Code, include this CDDL HEADER in each 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE. 15 * If applicable, add the following below this CDDL HEADER, with the 16 * fields enclosed by brackets "[]" replaced with your own identifying 17 * information: Portions Copyright [yyyy] [name of copyright owner] 18 * 19 * CDDL HEADER END 20 */ 21 22 /* 23 * Copyright 2009 Sun Microsystems, Inc. All rights reserved. 24 * Use is subject to license terms. 25 */ 26 27 /* 28 * SCSI disk target driver. 29 */ 30 #include <sys/scsi/scsi.h> 31 #include <sys/dkbad.h> 32 #include <sys/dklabel.h> 33 #include <sys/dkio.h> 34 #include <sys/fdio.h> 35 #include <sys/cdio.h> 36 #include <sys/mhd.h> 37 #include <sys/vtoc.h> 38 #include <sys/dktp/fdisk.h> 39 #include <sys/kstat.h> 40 #include <sys/vtrace.h> 41 #include <sys/note.h> 42 #include <sys/thread.h> 43 #include <sys/proc.h> 44 #include <sys/efi_partition.h> 45 #include <sys/var.h> 46 #include <sys/aio_req.h> 47 48 #ifdef __lock_lint 49 #define _LP64 50 #define __amd64 51 #endif 52 53 #if (defined(__fibre)) 54 /* Note: is there a leadville version of the following? */ 55 #include <sys/fc4/fcal_linkapp.h> 56 #endif 57 #include <sys/taskq.h> 58 #include <sys/uuid.h> 59 #include <sys/byteorder.h> 60 #include <sys/sdt.h> 61 62 #include "sd_xbuf.h" 63 64 #include <sys/scsi/targets/sddef.h> 65 #include <sys/cmlb.h> 66 #include <sys/sysevent/eventdefs.h> 67 #include <sys/sysevent/dev.h> 68 69 #include <sys/fm/protocol.h> 70 71 /* 72 * Loadable module info. 73 */ 74 #if (defined(__fibre)) 75 #define SD_MODULE_NAME "SCSI SSA/FCAL Disk Driver" 76 char _depends_on[] = "misc/scsi misc/cmlb drv/fcp"; 77 #else /* !__fibre */ 78 #define SD_MODULE_NAME "SCSI Disk Driver" 79 char _depends_on[] = "misc/scsi misc/cmlb"; 80 #endif /* !__fibre */ 81 82 /* 83 * Define the interconnect type, to allow the driver to distinguish 84 * between parallel SCSI (sd) and fibre channel (ssd) behaviors. 85 * 86 * This is really for backward compatibility. In the future, the driver 87 * should actually check the "interconnect-type" property as reported by 88 * the HBA; however at present this property is not defined by all HBAs, 89 * so we will use this #define (1) to permit the driver to run in 90 * backward-compatibility mode; and (2) to print a notification message 91 * if an FC HBA does not support the "interconnect-type" property. The 92 * behavior of the driver will be to assume parallel SCSI behaviors unless 93 * the "interconnect-type" property is defined by the HBA **AND** has a 94 * value of either INTERCONNECT_FIBRE, INTERCONNECT_SSA, or 95 * INTERCONNECT_FABRIC, in which case the driver will assume Fibre 96 * Channel behaviors (as per the old ssd). (Note that the 97 * INTERCONNECT_1394 and INTERCONNECT_USB types are not supported and 98 * will result in the driver assuming parallel SCSI behaviors.) 
99 * 100 * (see common/sys/scsi/impl/services.h) 101 * 102 * Note: For ssd semantics, don't use INTERCONNECT_FABRIC as the default 103 * since some FC HBAs may already support that, and there is some code in 104 * the driver that already looks for it. Using INTERCONNECT_FABRIC as the 105 * default would confuse that code, and besides things should work fine 106 * anyways if the FC HBA already reports INTERCONNECT_FABRIC for the 107 * "interconnect_type" property. 108 * 109 */ 110 #if (defined(__fibre)) 111 #define SD_DEFAULT_INTERCONNECT_TYPE SD_INTERCONNECT_FIBRE 112 #else 113 #define SD_DEFAULT_INTERCONNECT_TYPE SD_INTERCONNECT_PARALLEL 114 #endif 115 116 /* 117 * The name of the driver, established from the module name in _init. 118 */ 119 static char *sd_label = NULL; 120 121 /* 122 * Driver name is unfortunately prefixed on some driver.conf properties. 123 */ 124 #if (defined(__fibre)) 125 #define sd_max_xfer_size ssd_max_xfer_size 126 #define sd_config_list ssd_config_list 127 static char *sd_max_xfer_size = "ssd_max_xfer_size"; 128 static char *sd_config_list = "ssd-config-list"; 129 #else 130 static char *sd_max_xfer_size = "sd_max_xfer_size"; 131 static char *sd_config_list = "sd-config-list"; 132 #endif 133 134 /* 135 * Driver global variables 136 */ 137 138 #if (defined(__fibre)) 139 /* 140 * These #defines are to avoid namespace collisions that occur because this 141 * code is currently used to compile two separate driver modules: sd and ssd. 142 * All global variables need to be treated this way (even if declared static) 143 * in order to allow the debugger to resolve the names properly. 144 * It is anticipated that in the near future the ssd module will be obsoleted, 145 * at which time this namespace issue should go away. 146 */ 147 #define sd_state ssd_state 148 #define sd_io_time ssd_io_time 149 #define sd_failfast_enable ssd_failfast_enable 150 #define sd_ua_retry_count ssd_ua_retry_count 151 #define sd_report_pfa ssd_report_pfa 152 #define sd_max_throttle ssd_max_throttle 153 #define sd_min_throttle ssd_min_throttle 154 #define sd_rot_delay ssd_rot_delay 155 156 #define sd_retry_on_reservation_conflict \ 157 ssd_retry_on_reservation_conflict 158 #define sd_reinstate_resv_delay ssd_reinstate_resv_delay 159 #define sd_resv_conflict_name ssd_resv_conflict_name 160 161 #define sd_component_mask ssd_component_mask 162 #define sd_level_mask ssd_level_mask 163 #define sd_debug_un ssd_debug_un 164 #define sd_error_level ssd_error_level 165 166 #define sd_xbuf_active_limit ssd_xbuf_active_limit 167 #define sd_xbuf_reserve_limit ssd_xbuf_reserve_limit 168 169 #define sd_tr ssd_tr 170 #define sd_reset_throttle_timeout ssd_reset_throttle_timeout 171 #define sd_qfull_throttle_timeout ssd_qfull_throttle_timeout 172 #define sd_qfull_throttle_enable ssd_qfull_throttle_enable 173 #define sd_check_media_time ssd_check_media_time 174 #define sd_wait_cmds_complete ssd_wait_cmds_complete 175 #define sd_label_mutex ssd_label_mutex 176 #define sd_detach_mutex ssd_detach_mutex 177 #define sd_log_buf ssd_log_buf 178 #define sd_log_mutex ssd_log_mutex 179 180 #define sd_disk_table ssd_disk_table 181 #define sd_disk_table_size ssd_disk_table_size 182 #define sd_sense_mutex ssd_sense_mutex 183 #define sd_cdbtab ssd_cdbtab 184 185 #define sd_cb_ops ssd_cb_ops 186 #define sd_ops ssd_ops 187 #define sd_additional_codes ssd_additional_codes 188 #define sd_tgops ssd_tgops 189 190 #define sd_minor_data ssd_minor_data 191 #define sd_minor_data_efi ssd_minor_data_efi 192 193 #define sd_tq ssd_tq 194 
#define sd_wmr_tq ssd_wmr_tq 195 #define sd_taskq_name ssd_taskq_name 196 #define sd_wmr_taskq_name ssd_wmr_taskq_name 197 #define sd_taskq_minalloc ssd_taskq_minalloc 198 #define sd_taskq_maxalloc ssd_taskq_maxalloc 199 200 #define sd_dump_format_string ssd_dump_format_string 201 202 #define sd_iostart_chain ssd_iostart_chain 203 #define sd_iodone_chain ssd_iodone_chain 204 205 #define sd_pm_idletime ssd_pm_idletime 206 207 #define sd_force_pm_supported ssd_force_pm_supported 208 209 #define sd_dtype_optical_bind ssd_dtype_optical_bind 210 211 #define sd_ssc_init ssd_ssc_init 212 #define sd_ssc_send ssd_ssc_send 213 #define sd_ssc_fini ssd_ssc_fini 214 #define sd_ssc_assessment ssd_ssc_assessment 215 #define sd_ssc_post ssd_ssc_post 216 #define sd_ssc_print ssd_ssc_print 217 #define sd_ssc_ereport_post ssd_ssc_ereport_post 218 #define sd_ssc_set_info ssd_ssc_set_info 219 #define sd_ssc_extract_info ssd_ssc_extract_info 220 221 #endif 222 223 #ifdef SDDEBUG 224 int sd_force_pm_supported = 0; 225 #endif /* SDDEBUG */ 226 227 void *sd_state = NULL; 228 int sd_io_time = SD_IO_TIME; 229 int sd_failfast_enable = 1; 230 int sd_ua_retry_count = SD_UA_RETRY_COUNT; 231 int sd_report_pfa = 1; 232 int sd_max_throttle = SD_MAX_THROTTLE; 233 int sd_min_throttle = SD_MIN_THROTTLE; 234 int sd_rot_delay = 4; /* Default 4ms Rotation delay */ 235 int sd_qfull_throttle_enable = TRUE; 236 237 int sd_retry_on_reservation_conflict = 1; 238 int sd_reinstate_resv_delay = SD_REINSTATE_RESV_DELAY; 239 _NOTE(SCHEME_PROTECTS_DATA("safe sharing", sd_reinstate_resv_delay)) 240 241 static int sd_dtype_optical_bind = -1; 242 243 /* Note: the following is not a bug, it really is "sd_" and not "ssd_" */ 244 static char *sd_resv_conflict_name = "sd_retry_on_reservation_conflict"; 245 246 /* 247 * Global data for debug logging. To enable debug printing, sd_component_mask 248 * and sd_level_mask should be set to the desired bit patterns as outlined in 249 * sddef.h. 250 */ 251 uint_t sd_component_mask = 0x0; 252 uint_t sd_level_mask = 0x0; 253 struct sd_lun *sd_debug_un = NULL; 254 uint_t sd_error_level = SCSI_ERR_RETRYABLE; 255 256 /* Note: these may go away in the future... */ 257 static uint32_t sd_xbuf_active_limit = 512; 258 static uint32_t sd_xbuf_reserve_limit = 16; 259 260 static struct sd_resv_reclaim_request sd_tr = { NULL, NULL, NULL, 0, 0, 0 }; 261 262 /* 263 * Timer value used to reset the throttle after it has been reduced 264 * (typically in response to TRAN_BUSY or STATUS_QFULL) 265 */ 266 static int sd_reset_throttle_timeout = SD_RESET_THROTTLE_TIMEOUT; 267 static int sd_qfull_throttle_timeout = SD_QFULL_THROTTLE_TIMEOUT; 268 269 /* 270 * Interval value associated with the media change scsi watch. 271 */ 272 static int sd_check_media_time = 3000000; 273 274 /* 275 * Wait value used for in progress operations during a DDI_SUSPEND 276 */ 277 static int sd_wait_cmds_complete = SD_WAIT_CMDS_COMPLETE; 278 279 /* 280 * sd_label_mutex protects a static buffer used in the disk label 281 * component of the driver 282 */ 283 static kmutex_t sd_label_mutex; 284 285 /* 286 * sd_detach_mutex protects un_layer_count, un_detach_count, and 287 * un_opens_in_progress in the sd_lun structure. 
288 */ 289 static kmutex_t sd_detach_mutex; 290 291 _NOTE(MUTEX_PROTECTS_DATA(sd_detach_mutex, 292 sd_lun::{un_layer_count un_detach_count un_opens_in_progress})) 293 294 /* 295 * Global buffer and mutex for debug logging 296 */ 297 static char sd_log_buf[1024]; 298 static kmutex_t sd_log_mutex; 299 300 /* 301 * Structs and globals for recording attached lun information. 302 * This maintains a chain. Each node in the chain represents a SCSI controller. 303 * The structure records the number of luns attached to each target connected 304 * with the controller. 305 * For parallel scsi device only. 306 */ 307 struct sd_scsi_hba_tgt_lun { 308 struct sd_scsi_hba_tgt_lun *next; 309 dev_info_t *pdip; 310 int nlun[NTARGETS_WIDE]; 311 }; 312 313 /* 314 * Flag to indicate the lun is attached or detached 315 */ 316 #define SD_SCSI_LUN_ATTACH 0 317 #define SD_SCSI_LUN_DETACH 1 318 319 static kmutex_t sd_scsi_target_lun_mutex; 320 static struct sd_scsi_hba_tgt_lun *sd_scsi_target_lun_head = NULL; 321 322 _NOTE(MUTEX_PROTECTS_DATA(sd_scsi_target_lun_mutex, 323 sd_scsi_hba_tgt_lun::next sd_scsi_hba_tgt_lun::pdip)) 324 325 _NOTE(MUTEX_PROTECTS_DATA(sd_scsi_target_lun_mutex, 326 sd_scsi_target_lun_head)) 327 328 /* 329 * "Smart" Probe Caching structs, globals, #defines, etc. 330 * For parallel scsi and non-self-identify device only. 331 */ 332 333 /* 334 * The following resources and routines are implemented to support 335 * "smart" probing, which caches the scsi_probe() results in an array, 336 * in order to help avoid long probe times. 337 */ 338 struct sd_scsi_probe_cache { 339 struct sd_scsi_probe_cache *next; 340 dev_info_t *pdip; 341 int cache[NTARGETS_WIDE]; 342 }; 343 344 static kmutex_t sd_scsi_probe_cache_mutex; 345 static struct sd_scsi_probe_cache *sd_scsi_probe_cache_head = NULL; 346 347 /* 348 * Really we only need protection on the head of the linked list, but 349 * better safe than sorry. 
350 */ 351 _NOTE(MUTEX_PROTECTS_DATA(sd_scsi_probe_cache_mutex, 352 sd_scsi_probe_cache::next sd_scsi_probe_cache::pdip)) 353 354 _NOTE(MUTEX_PROTECTS_DATA(sd_scsi_probe_cache_mutex, 355 sd_scsi_probe_cache_head)) 356 357 358 /* 359 * Vendor specific data name property declarations 360 */ 361 362 #if defined(__fibre) || defined(__i386) ||defined(__amd64) 363 364 static sd_tunables seagate_properties = { 365 SEAGATE_THROTTLE_VALUE, 366 0, 367 0, 368 0, 369 0, 370 0, 371 0, 372 0, 373 0 374 }; 375 376 377 static sd_tunables fujitsu_properties = { 378 FUJITSU_THROTTLE_VALUE, 379 0, 380 0, 381 0, 382 0, 383 0, 384 0, 385 0, 386 0 387 }; 388 389 static sd_tunables ibm_properties = { 390 IBM_THROTTLE_VALUE, 391 0, 392 0, 393 0, 394 0, 395 0, 396 0, 397 0, 398 0 399 }; 400 401 static sd_tunables purple_properties = { 402 PURPLE_THROTTLE_VALUE, 403 0, 404 0, 405 PURPLE_BUSY_RETRIES, 406 PURPLE_RESET_RETRY_COUNT, 407 PURPLE_RESERVE_RELEASE_TIME, 408 0, 409 0, 410 0 411 }; 412 413 static sd_tunables sve_properties = { 414 SVE_THROTTLE_VALUE, 415 0, 416 0, 417 SVE_BUSY_RETRIES, 418 SVE_RESET_RETRY_COUNT, 419 SVE_RESERVE_RELEASE_TIME, 420 SVE_MIN_THROTTLE_VALUE, 421 SVE_DISKSORT_DISABLED_FLAG, 422 0 423 }; 424 425 static sd_tunables maserati_properties = { 426 0, 427 0, 428 0, 429 0, 430 0, 431 0, 432 0, 433 MASERATI_DISKSORT_DISABLED_FLAG, 434 MASERATI_LUN_RESET_ENABLED_FLAG 435 }; 436 437 static sd_tunables pirus_properties = { 438 PIRUS_THROTTLE_VALUE, 439 0, 440 PIRUS_NRR_COUNT, 441 PIRUS_BUSY_RETRIES, 442 PIRUS_RESET_RETRY_COUNT, 443 0, 444 PIRUS_MIN_THROTTLE_VALUE, 445 PIRUS_DISKSORT_DISABLED_FLAG, 446 PIRUS_LUN_RESET_ENABLED_FLAG 447 }; 448 449 #endif 450 451 #if (defined(__sparc) && !defined(__fibre)) || \ 452 (defined(__i386) || defined(__amd64)) 453 454 455 static sd_tunables elite_properties = { 456 ELITE_THROTTLE_VALUE, 457 0, 458 0, 459 0, 460 0, 461 0, 462 0, 463 0, 464 0 465 }; 466 467 static sd_tunables st31200n_properties = { 468 ST31200N_THROTTLE_VALUE, 469 0, 470 0, 471 0, 472 0, 473 0, 474 0, 475 0, 476 0 477 }; 478 479 #endif /* Fibre or not */ 480 481 static sd_tunables lsi_properties_scsi = { 482 LSI_THROTTLE_VALUE, 483 0, 484 LSI_NOTREADY_RETRIES, 485 0, 486 0, 487 0, 488 0, 489 0, 490 0 491 }; 492 493 static sd_tunables symbios_properties = { 494 SYMBIOS_THROTTLE_VALUE, 495 0, 496 SYMBIOS_NOTREADY_RETRIES, 497 0, 498 0, 499 0, 500 0, 501 0, 502 0 503 }; 504 505 static sd_tunables lsi_properties = { 506 0, 507 0, 508 LSI_NOTREADY_RETRIES, 509 0, 510 0, 511 0, 512 0, 513 0, 514 0 515 }; 516 517 static sd_tunables lsi_oem_properties = { 518 0, 519 0, 520 LSI_OEM_NOTREADY_RETRIES, 521 0, 522 0, 523 0, 524 0, 525 0, 526 0, 527 1 528 }; 529 530 531 532 #if (defined(SD_PROP_TST)) 533 534 #define SD_TST_CTYPE_VAL CTYPE_CDROM 535 #define SD_TST_THROTTLE_VAL 16 536 #define SD_TST_NOTREADY_VAL 12 537 #define SD_TST_BUSY_VAL 60 538 #define SD_TST_RST_RETRY_VAL 36 539 #define SD_TST_RSV_REL_TIME 60 540 541 static sd_tunables tst_properties = { 542 SD_TST_THROTTLE_VAL, 543 SD_TST_CTYPE_VAL, 544 SD_TST_NOTREADY_VAL, 545 SD_TST_BUSY_VAL, 546 SD_TST_RST_RETRY_VAL, 547 SD_TST_RSV_REL_TIME, 548 0, 549 0, 550 0 551 }; 552 #endif 553 554 /* This is similar to the ANSI toupper implementation */ 555 #define SD_TOUPPER(C) (((C) >= 'a' && (C) <= 'z') ? (C) - 'a' + 'A' : (C)) 556 557 /* 558 * Static Driver Configuration Table 559 * 560 * This is the table of disks which need throttle adjustment (or, perhaps 561 * something else as defined by the flags at a future time.) 
device_id 562 * is a string consisting of concatenated vid (vendor), pid (product/model) 563 * and revision strings as defined in the scsi_inquiry structure. Offsets of 564 * the parts of the string are as defined by the sizes in the scsi_inquiry 565 * structure. Device type is searched as far as the device_id string is 566 * defined. Flags defines which values are to be set in the driver from the 567 * properties list. 568 * 569 * Entries below which begin and end with a "*" are a special case. 570 * These do not have a specific vendor, and the string which follows 571 * can appear anywhere in the 16 byte PID portion of the inquiry data. 572 * 573 * Entries below which begin and end with a " " (blank) are a special 574 * case. The comparison function will treat multiple consecutive blanks 575 * as equivalent to a single blank. For example, this causes a 576 * sd_disk_table entry of " NEC CDROM " to match a device's id string 577 * of "NEC CDROM". 578 * 579 * Note: The MD21 controller type has been obsoleted. 580 * ST318202F is a Legacy device 581 * MAM3182FC, MAM3364FC, MAM3738FC do not appear to have ever been 582 * made with an FC connection. The entries here are a legacy. 583 */ 584 static sd_disk_config_t sd_disk_table[] = { 585 #if defined(__fibre) || defined(__i386) || defined(__amd64) 586 { "SEAGATE ST34371FC", SD_CONF_BSET_THROTTLE, &seagate_properties }, 587 { "SEAGATE ST19171FC", SD_CONF_BSET_THROTTLE, &seagate_properties }, 588 { "SEAGATE ST39102FC", SD_CONF_BSET_THROTTLE, &seagate_properties }, 589 { "SEAGATE ST39103FC", SD_CONF_BSET_THROTTLE, &seagate_properties }, 590 { "SEAGATE ST118273F", SD_CONF_BSET_THROTTLE, &seagate_properties }, 591 { "SEAGATE ST318202F", SD_CONF_BSET_THROTTLE, &seagate_properties }, 592 { "SEAGATE ST318203F", SD_CONF_BSET_THROTTLE, &seagate_properties }, 593 { "SEAGATE ST136403F", SD_CONF_BSET_THROTTLE, &seagate_properties }, 594 { "SEAGATE ST318304F", SD_CONF_BSET_THROTTLE, &seagate_properties }, 595 { "SEAGATE ST336704F", SD_CONF_BSET_THROTTLE, &seagate_properties }, 596 { "SEAGATE ST373405F", SD_CONF_BSET_THROTTLE, &seagate_properties }, 597 { "SEAGATE ST336605F", SD_CONF_BSET_THROTTLE, &seagate_properties }, 598 { "SEAGATE ST336752F", SD_CONF_BSET_THROTTLE, &seagate_properties }, 599 { "SEAGATE ST318452F", SD_CONF_BSET_THROTTLE, &seagate_properties }, 600 { "FUJITSU MAG3091F", SD_CONF_BSET_THROTTLE, &fujitsu_properties }, 601 { "FUJITSU MAG3182F", SD_CONF_BSET_THROTTLE, &fujitsu_properties }, 602 { "FUJITSU MAA3182F", SD_CONF_BSET_THROTTLE, &fujitsu_properties }, 603 { "FUJITSU MAF3364F", SD_CONF_BSET_THROTTLE, &fujitsu_properties }, 604 { "FUJITSU MAL3364F", SD_CONF_BSET_THROTTLE, &fujitsu_properties }, 605 { "FUJITSU MAL3738F", SD_CONF_BSET_THROTTLE, &fujitsu_properties }, 606 { "FUJITSU MAM3182FC", SD_CONF_BSET_THROTTLE, &fujitsu_properties }, 607 { "FUJITSU MAM3364FC", SD_CONF_BSET_THROTTLE, &fujitsu_properties }, 608 { "FUJITSU MAM3738FC", SD_CONF_BSET_THROTTLE, &fujitsu_properties }, 609 { "IBM DDYFT1835", SD_CONF_BSET_THROTTLE, &ibm_properties }, 610 { "IBM DDYFT3695", SD_CONF_BSET_THROTTLE, &ibm_properties }, 611 { "IBM IC35LF2D2", SD_CONF_BSET_THROTTLE, &ibm_properties }, 612 { "IBM IC35LF2PR", SD_CONF_BSET_THROTTLE, &ibm_properties }, 613 { "IBM 1724-100", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties }, 614 { "IBM 1726-2xx", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties }, 615 { "IBM 1726-22x", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties }, 616 { "IBM 1726-4xx", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties }, 617 { "IBM 1726-42x", 
SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties }, 618 { "IBM 1726-3xx", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties }, 619 { "IBM 3526", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties }, 620 { "IBM 3542", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties }, 621 { "IBM 3552", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties }, 622 { "IBM 1722", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties }, 623 { "IBM 1742", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties }, 624 { "IBM 1815", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties }, 625 { "IBM FAStT", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties }, 626 { "IBM 1814", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties }, 627 { "IBM 1814-200", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties }, 628 { "IBM 1818", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties }, 629 { "DELL MD3000", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties }, 630 { "DELL MD3000i", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties }, 631 { "LSI INF", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties }, 632 { "ENGENIO INF", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties }, 633 { "SGI TP", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties }, 634 { "SGI IS", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties }, 635 { "*CSM100_*", SD_CONF_BSET_NRR_COUNT | 636 SD_CONF_BSET_CACHE_IS_NV, &lsi_oem_properties }, 637 { "*CSM200_*", SD_CONF_BSET_NRR_COUNT | 638 SD_CONF_BSET_CACHE_IS_NV, &lsi_oem_properties }, 639 { "Fujitsu SX300", SD_CONF_BSET_THROTTLE, &lsi_oem_properties }, 640 { "LSI", SD_CONF_BSET_NRR_COUNT, &lsi_properties }, 641 { "SUN T3", SD_CONF_BSET_THROTTLE | 642 SD_CONF_BSET_BSY_RETRY_COUNT| 643 SD_CONF_BSET_RST_RETRIES| 644 SD_CONF_BSET_RSV_REL_TIME, 645 &purple_properties }, 646 { "SUN SESS01", SD_CONF_BSET_THROTTLE | 647 SD_CONF_BSET_BSY_RETRY_COUNT| 648 SD_CONF_BSET_RST_RETRIES| 649 SD_CONF_BSET_RSV_REL_TIME| 650 SD_CONF_BSET_MIN_THROTTLE| 651 SD_CONF_BSET_DISKSORT_DISABLED, 652 &sve_properties }, 653 { "SUN T4", SD_CONF_BSET_THROTTLE | 654 SD_CONF_BSET_BSY_RETRY_COUNT| 655 SD_CONF_BSET_RST_RETRIES| 656 SD_CONF_BSET_RSV_REL_TIME, 657 &purple_properties }, 658 { "SUN SVE01", SD_CONF_BSET_DISKSORT_DISABLED | 659 SD_CONF_BSET_LUN_RESET_ENABLED, 660 &maserati_properties }, 661 { "SUN SE6920", SD_CONF_BSET_THROTTLE | 662 SD_CONF_BSET_NRR_COUNT| 663 SD_CONF_BSET_BSY_RETRY_COUNT| 664 SD_CONF_BSET_RST_RETRIES| 665 SD_CONF_BSET_MIN_THROTTLE| 666 SD_CONF_BSET_DISKSORT_DISABLED| 667 SD_CONF_BSET_LUN_RESET_ENABLED, 668 &pirus_properties }, 669 { "SUN SE6940", SD_CONF_BSET_THROTTLE | 670 SD_CONF_BSET_NRR_COUNT| 671 SD_CONF_BSET_BSY_RETRY_COUNT| 672 SD_CONF_BSET_RST_RETRIES| 673 SD_CONF_BSET_MIN_THROTTLE| 674 SD_CONF_BSET_DISKSORT_DISABLED| 675 SD_CONF_BSET_LUN_RESET_ENABLED, 676 &pirus_properties }, 677 { "SUN StorageTek 6920", SD_CONF_BSET_THROTTLE | 678 SD_CONF_BSET_NRR_COUNT| 679 SD_CONF_BSET_BSY_RETRY_COUNT| 680 SD_CONF_BSET_RST_RETRIES| 681 SD_CONF_BSET_MIN_THROTTLE| 682 SD_CONF_BSET_DISKSORT_DISABLED| 683 SD_CONF_BSET_LUN_RESET_ENABLED, 684 &pirus_properties }, 685 { "SUN StorageTek 6940", SD_CONF_BSET_THROTTLE | 686 SD_CONF_BSET_NRR_COUNT| 687 SD_CONF_BSET_BSY_RETRY_COUNT| 688 SD_CONF_BSET_RST_RETRIES| 689 SD_CONF_BSET_MIN_THROTTLE| 690 SD_CONF_BSET_DISKSORT_DISABLED| 691 SD_CONF_BSET_LUN_RESET_ENABLED, 692 &pirus_properties }, 693 { "SUN PSX1000", SD_CONF_BSET_THROTTLE | 694 SD_CONF_BSET_NRR_COUNT| 695 SD_CONF_BSET_BSY_RETRY_COUNT| 696 SD_CONF_BSET_RST_RETRIES| 697 SD_CONF_BSET_MIN_THROTTLE| 698 SD_CONF_BSET_DISKSORT_DISABLED| 699 SD_CONF_BSET_LUN_RESET_ENABLED, 700 &pirus_properties }, 701 { "SUN SE6330", SD_CONF_BSET_THROTTLE | 702 
SD_CONF_BSET_NRR_COUNT| 703 SD_CONF_BSET_BSY_RETRY_COUNT| 704 SD_CONF_BSET_RST_RETRIES| 705 SD_CONF_BSET_MIN_THROTTLE| 706 SD_CONF_BSET_DISKSORT_DISABLED| 707 SD_CONF_BSET_LUN_RESET_ENABLED, 708 &pirus_properties }, 709 { "SUN STK6580_6780", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties }, 710 { "STK OPENstorage", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties }, 711 { "STK OpenStorage", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties }, 712 { "STK BladeCtlr", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties }, 713 { "STK FLEXLINE", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties }, 714 { "SYMBIOS", SD_CONF_BSET_NRR_COUNT, &symbios_properties }, 715 #endif /* fibre or NON-sparc platforms */ 716 #if ((defined(__sparc) && !defined(__fibre)) ||\ 717 (defined(__i386) || defined(__amd64))) 718 { "SEAGATE ST42400N", SD_CONF_BSET_THROTTLE, &elite_properties }, 719 { "SEAGATE ST31200N", SD_CONF_BSET_THROTTLE, &st31200n_properties }, 720 { "SEAGATE ST41600N", SD_CONF_BSET_TUR_CHECK, NULL }, 721 { "CONNER CP30540", SD_CONF_BSET_NOCACHE, NULL }, 722 { "*SUN0104*", SD_CONF_BSET_FAB_DEVID, NULL }, 723 { "*SUN0207*", SD_CONF_BSET_FAB_DEVID, NULL }, 724 { "*SUN0327*", SD_CONF_BSET_FAB_DEVID, NULL }, 725 { "*SUN0340*", SD_CONF_BSET_FAB_DEVID, NULL }, 726 { "*SUN0424*", SD_CONF_BSET_FAB_DEVID, NULL }, 727 { "*SUN0669*", SD_CONF_BSET_FAB_DEVID, NULL }, 728 { "*SUN1.0G*", SD_CONF_BSET_FAB_DEVID, NULL }, 729 { "SYMBIOS INF-01-00 ", SD_CONF_BSET_FAB_DEVID, NULL }, 730 { "SYMBIOS", SD_CONF_BSET_THROTTLE|SD_CONF_BSET_NRR_COUNT, 731 &symbios_properties }, 732 { "LSI", SD_CONF_BSET_THROTTLE | SD_CONF_BSET_NRR_COUNT, 733 &lsi_properties_scsi }, 734 #if defined(__i386) || defined(__amd64) 735 { " NEC CD-ROM DRIVE:260 ", (SD_CONF_BSET_PLAYMSF_BCD 736 | SD_CONF_BSET_READSUB_BCD 737 | SD_CONF_BSET_READ_TOC_ADDR_BCD 738 | SD_CONF_BSET_NO_READ_HEADER 739 | SD_CONF_BSET_READ_CD_XD4), NULL }, 740 741 { " NEC CD-ROM DRIVE:270 ", (SD_CONF_BSET_PLAYMSF_BCD 742 | SD_CONF_BSET_READSUB_BCD 743 | SD_CONF_BSET_READ_TOC_ADDR_BCD 744 | SD_CONF_BSET_NO_READ_HEADER 745 | SD_CONF_BSET_READ_CD_XD4), NULL }, 746 #endif /* __i386 || __amd64 */ 747 #endif /* sparc NON-fibre or NON-sparc platforms */ 748 749 #if (defined(SD_PROP_TST)) 750 { "VENDOR PRODUCT ", (SD_CONF_BSET_THROTTLE 751 | SD_CONF_BSET_CTYPE 752 | SD_CONF_BSET_NRR_COUNT 753 | SD_CONF_BSET_FAB_DEVID 754 | SD_CONF_BSET_NOCACHE 755 | SD_CONF_BSET_BSY_RETRY_COUNT 756 | SD_CONF_BSET_PLAYMSF_BCD 757 | SD_CONF_BSET_READSUB_BCD 758 | SD_CONF_BSET_READ_TOC_TRK_BCD 759 | SD_CONF_BSET_READ_TOC_ADDR_BCD 760 | SD_CONF_BSET_NO_READ_HEADER 761 | SD_CONF_BSET_READ_CD_XD4 762 | SD_CONF_BSET_RST_RETRIES 763 | SD_CONF_BSET_RSV_REL_TIME 764 | SD_CONF_BSET_TUR_CHECK), &tst_properties}, 765 #endif 766 }; 767 768 static const int sd_disk_table_size = 769 sizeof (sd_disk_table)/ sizeof (sd_disk_config_t); 770 771 772 773 #define SD_INTERCONNECT_PARALLEL 0 774 #define SD_INTERCONNECT_FABRIC 1 775 #define SD_INTERCONNECT_FIBRE 2 776 #define SD_INTERCONNECT_SSA 3 777 #define SD_INTERCONNECT_SATA 4 778 #define SD_IS_PARALLEL_SCSI(un) \ 779 ((un)->un_interconnect_type == SD_INTERCONNECT_PARALLEL) 780 #define SD_IS_SERIAL(un) \ 781 ((un)->un_interconnect_type == SD_INTERCONNECT_SATA) 782 783 /* 784 * Definitions used by device id registration routines 785 */ 786 #define VPD_HEAD_OFFSET 3 /* size of head for vpd page */ 787 #define VPD_PAGE_LENGTH 3 /* offset for pge length data */ 788 #define VPD_MODE_PAGE 1 /* offset into vpd pg for "page code" */ 789 790 static kmutex_t sd_sense_mutex = {0}; 791 792 /* 793 * Macros for 
updates of the driver state 794 */ 795 #define New_state(un, s) \ 796 (un)->un_last_state = (un)->un_state, (un)->un_state = (s) 797 #define Restore_state(un) \ 798 { uchar_t tmp = (un)->un_last_state; New_state((un), tmp); } 799 800 static struct sd_cdbinfo sd_cdbtab[] = { 801 { CDB_GROUP0, 0x00, 0x1FFFFF, 0xFF, }, 802 { CDB_GROUP1, SCMD_GROUP1, 0xFFFFFFFF, 0xFFFF, }, 803 { CDB_GROUP5, SCMD_GROUP5, 0xFFFFFFFF, 0xFFFFFFFF, }, 804 { CDB_GROUP4, SCMD_GROUP4, 0xFFFFFFFFFFFFFFFF, 0xFFFFFFFF, }, 805 }; 806 807 /* 808 * Specifies the number of seconds that must have elapsed since the last 809 * cmd. has completed for a device to be declared idle to the PM framework. 810 */ 811 static int sd_pm_idletime = 1; 812 813 /* 814 * Internal function prototypes 815 */ 816 817 #if (defined(__fibre)) 818 /* 819 * These #defines are to avoid namespace collisions that occur because this 820 * code is currently used to compile two separate driver modules: sd and ssd. 821 * All function names need to be treated this way (even if declared static) 822 * in order to allow the debugger to resolve the names properly. 823 * It is anticipated that in the near future the ssd module will be obsoleted, 824 * at which time this ugliness should go away. 825 */ 826 #define sd_log_trace ssd_log_trace 827 #define sd_log_info ssd_log_info 828 #define sd_log_err ssd_log_err 829 #define sdprobe ssdprobe 830 #define sdinfo ssdinfo 831 #define sd_prop_op ssd_prop_op 832 #define sd_scsi_probe_cache_init ssd_scsi_probe_cache_init 833 #define sd_scsi_probe_cache_fini ssd_scsi_probe_cache_fini 834 #define sd_scsi_clear_probe_cache ssd_scsi_clear_probe_cache 835 #define sd_scsi_probe_with_cache ssd_scsi_probe_with_cache 836 #define sd_scsi_target_lun_init ssd_scsi_target_lun_init 837 #define sd_scsi_target_lun_fini ssd_scsi_target_lun_fini 838 #define sd_scsi_get_target_lun_count ssd_scsi_get_target_lun_count 839 #define sd_scsi_update_lun_on_target ssd_scsi_update_lun_on_target 840 #define sd_spin_up_unit ssd_spin_up_unit 841 #define sd_enable_descr_sense ssd_enable_descr_sense 842 #define sd_reenable_dsense_task ssd_reenable_dsense_task 843 #define sd_set_mmc_caps ssd_set_mmc_caps 844 #define sd_read_unit_properties ssd_read_unit_properties 845 #define sd_process_sdconf_file ssd_process_sdconf_file 846 #define sd_process_sdconf_table ssd_process_sdconf_table 847 #define sd_sdconf_id_match ssd_sdconf_id_match 848 #define sd_blank_cmp ssd_blank_cmp 849 #define sd_chk_vers1_data ssd_chk_vers1_data 850 #define sd_set_vers1_properties ssd_set_vers1_properties 851 852 #define sd_get_physical_geometry ssd_get_physical_geometry 853 #define sd_get_virtual_geometry ssd_get_virtual_geometry 854 #define sd_update_block_info ssd_update_block_info 855 #define sd_register_devid ssd_register_devid 856 #define sd_get_devid ssd_get_devid 857 #define sd_create_devid ssd_create_devid 858 #define sd_write_deviceid ssd_write_deviceid 859 #define sd_check_vpd_page_support ssd_check_vpd_page_support 860 #define sd_setup_pm ssd_setup_pm 861 #define sd_create_pm_components ssd_create_pm_components 862 #define sd_ddi_suspend ssd_ddi_suspend 863 #define sd_ddi_pm_suspend ssd_ddi_pm_suspend 864 #define sd_ddi_resume ssd_ddi_resume 865 #define sd_ddi_pm_resume ssd_ddi_pm_resume 866 #define sdpower ssdpower 867 #define sdattach ssdattach 868 #define sddetach ssddetach 869 #define sd_unit_attach ssd_unit_attach 870 #define sd_unit_detach ssd_unit_detach 871 #define sd_set_unit_attributes ssd_set_unit_attributes 872 #define sd_create_errstats ssd_create_errstats 
873 #define sd_set_errstats ssd_set_errstats 874 #define sd_set_pstats ssd_set_pstats 875 #define sddump ssddump 876 #define sd_scsi_poll ssd_scsi_poll 877 #define sd_send_polled_RQS ssd_send_polled_RQS 878 #define sd_ddi_scsi_poll ssd_ddi_scsi_poll 879 #define sd_init_event_callbacks ssd_init_event_callbacks 880 #define sd_event_callback ssd_event_callback 881 #define sd_cache_control ssd_cache_control 882 #define sd_get_write_cache_enabled ssd_get_write_cache_enabled 883 #define sd_get_nv_sup ssd_get_nv_sup 884 #define sd_make_device ssd_make_device 885 #define sdopen ssdopen 886 #define sdclose ssdclose 887 #define sd_ready_and_valid ssd_ready_and_valid 888 #define sdmin ssdmin 889 #define sdread ssdread 890 #define sdwrite ssdwrite 891 #define sdaread ssdaread 892 #define sdawrite ssdawrite 893 #define sdstrategy ssdstrategy 894 #define sdioctl ssdioctl 895 #define sd_mapblockaddr_iostart ssd_mapblockaddr_iostart 896 #define sd_mapblocksize_iostart ssd_mapblocksize_iostart 897 #define sd_checksum_iostart ssd_checksum_iostart 898 #define sd_checksum_uscsi_iostart ssd_checksum_uscsi_iostart 899 #define sd_pm_iostart ssd_pm_iostart 900 #define sd_core_iostart ssd_core_iostart 901 #define sd_mapblockaddr_iodone ssd_mapblockaddr_iodone 902 #define sd_mapblocksize_iodone ssd_mapblocksize_iodone 903 #define sd_checksum_iodone ssd_checksum_iodone 904 #define sd_checksum_uscsi_iodone ssd_checksum_uscsi_iodone 905 #define sd_pm_iodone ssd_pm_iodone 906 #define sd_initpkt_for_buf ssd_initpkt_for_buf 907 #define sd_destroypkt_for_buf ssd_destroypkt_for_buf 908 #define sd_setup_rw_pkt ssd_setup_rw_pkt 909 #define sd_setup_next_rw_pkt ssd_setup_next_rw_pkt 910 #define sd_buf_iodone ssd_buf_iodone 911 #define sd_uscsi_strategy ssd_uscsi_strategy 912 #define sd_initpkt_for_uscsi ssd_initpkt_for_uscsi 913 #define sd_destroypkt_for_uscsi ssd_destroypkt_for_uscsi 914 #define sd_uscsi_iodone ssd_uscsi_iodone 915 #define sd_xbuf_strategy ssd_xbuf_strategy 916 #define sd_xbuf_init ssd_xbuf_init 917 #define sd_pm_entry ssd_pm_entry 918 #define sd_pm_exit ssd_pm_exit 919 920 #define sd_pm_idletimeout_handler ssd_pm_idletimeout_handler 921 #define sd_pm_timeout_handler ssd_pm_timeout_handler 922 923 #define sd_add_buf_to_waitq ssd_add_buf_to_waitq 924 #define sdintr ssdintr 925 #define sd_start_cmds ssd_start_cmds 926 #define sd_send_scsi_cmd ssd_send_scsi_cmd 927 #define sd_bioclone_alloc ssd_bioclone_alloc 928 #define sd_bioclone_free ssd_bioclone_free 929 #define sd_shadow_buf_alloc ssd_shadow_buf_alloc 930 #define sd_shadow_buf_free ssd_shadow_buf_free 931 #define sd_print_transport_rejected_message \ 932 ssd_print_transport_rejected_message 933 #define sd_retry_command ssd_retry_command 934 #define sd_set_retry_bp ssd_set_retry_bp 935 #define sd_send_request_sense_command ssd_send_request_sense_command 936 #define sd_start_retry_command ssd_start_retry_command 937 #define sd_start_direct_priority_command \ 938 ssd_start_direct_priority_command 939 #define sd_return_failed_command ssd_return_failed_command 940 #define sd_return_failed_command_no_restart \ 941 ssd_return_failed_command_no_restart 942 #define sd_return_command ssd_return_command 943 #define sd_sync_with_callback ssd_sync_with_callback 944 #define sdrunout ssdrunout 945 #define sd_mark_rqs_busy ssd_mark_rqs_busy 946 #define sd_mark_rqs_idle ssd_mark_rqs_idle 947 #define sd_reduce_throttle ssd_reduce_throttle 948 #define sd_restore_throttle ssd_restore_throttle 949 #define sd_print_incomplete_msg ssd_print_incomplete_msg 950 #define 
sd_init_cdb_limits ssd_init_cdb_limits 951 #define sd_pkt_status_good ssd_pkt_status_good 952 #define sd_pkt_status_check_condition ssd_pkt_status_check_condition 953 #define sd_pkt_status_busy ssd_pkt_status_busy 954 #define sd_pkt_status_reservation_conflict \ 955 ssd_pkt_status_reservation_conflict 956 #define sd_pkt_status_qfull ssd_pkt_status_qfull 957 #define sd_handle_request_sense ssd_handle_request_sense 958 #define sd_handle_auto_request_sense ssd_handle_auto_request_sense 959 #define sd_print_sense_failed_msg ssd_print_sense_failed_msg 960 #define sd_validate_sense_data ssd_validate_sense_data 961 #define sd_decode_sense ssd_decode_sense 962 #define sd_print_sense_msg ssd_print_sense_msg 963 #define sd_sense_key_no_sense ssd_sense_key_no_sense 964 #define sd_sense_key_recoverable_error ssd_sense_key_recoverable_error 965 #define sd_sense_key_not_ready ssd_sense_key_not_ready 966 #define sd_sense_key_medium_or_hardware_error \ 967 ssd_sense_key_medium_or_hardware_error 968 #define sd_sense_key_illegal_request ssd_sense_key_illegal_request 969 #define sd_sense_key_unit_attention ssd_sense_key_unit_attention 970 #define sd_sense_key_fail_command ssd_sense_key_fail_command 971 #define sd_sense_key_blank_check ssd_sense_key_blank_check 972 #define sd_sense_key_aborted_command ssd_sense_key_aborted_command 973 #define sd_sense_key_default ssd_sense_key_default 974 #define sd_print_retry_msg ssd_print_retry_msg 975 #define sd_print_cmd_incomplete_msg ssd_print_cmd_incomplete_msg 976 #define sd_pkt_reason_cmd_incomplete ssd_pkt_reason_cmd_incomplete 977 #define sd_pkt_reason_cmd_tran_err ssd_pkt_reason_cmd_tran_err 978 #define sd_pkt_reason_cmd_reset ssd_pkt_reason_cmd_reset 979 #define sd_pkt_reason_cmd_aborted ssd_pkt_reason_cmd_aborted 980 #define sd_pkt_reason_cmd_timeout ssd_pkt_reason_cmd_timeout 981 #define sd_pkt_reason_cmd_unx_bus_free ssd_pkt_reason_cmd_unx_bus_free 982 #define sd_pkt_reason_cmd_tag_reject ssd_pkt_reason_cmd_tag_reject 983 #define sd_pkt_reason_default ssd_pkt_reason_default 984 #define sd_reset_target ssd_reset_target 985 #define sd_start_stop_unit_callback ssd_start_stop_unit_callback 986 #define sd_start_stop_unit_task ssd_start_stop_unit_task 987 #define sd_taskq_create ssd_taskq_create 988 #define sd_taskq_delete ssd_taskq_delete 989 #define sd_target_change_task ssd_target_change_task 990 #define sd_log_lun_expansion_event ssd_log_lun_expansion_event 991 #define sd_media_change_task ssd_media_change_task 992 #define sd_handle_mchange ssd_handle_mchange 993 #define sd_send_scsi_DOORLOCK ssd_send_scsi_DOORLOCK 994 #define sd_send_scsi_READ_CAPACITY ssd_send_scsi_READ_CAPACITY 995 #define sd_send_scsi_READ_CAPACITY_16 ssd_send_scsi_READ_CAPACITY_16 996 #define sd_send_scsi_GET_CONFIGURATION ssd_send_scsi_GET_CONFIGURATION 997 #define sd_send_scsi_feature_GET_CONFIGURATION \ 998 sd_send_scsi_feature_GET_CONFIGURATION 999 #define sd_send_scsi_START_STOP_UNIT ssd_send_scsi_START_STOP_UNIT 1000 #define sd_send_scsi_INQUIRY ssd_send_scsi_INQUIRY 1001 #define sd_send_scsi_TEST_UNIT_READY ssd_send_scsi_TEST_UNIT_READY 1002 #define sd_send_scsi_PERSISTENT_RESERVE_IN \ 1003 ssd_send_scsi_PERSISTENT_RESERVE_IN 1004 #define sd_send_scsi_PERSISTENT_RESERVE_OUT \ 1005 ssd_send_scsi_PERSISTENT_RESERVE_OUT 1006 #define sd_send_scsi_SYNCHRONIZE_CACHE ssd_send_scsi_SYNCHRONIZE_CACHE 1007 #define sd_send_scsi_SYNCHRONIZE_CACHE_biodone \ 1008 ssd_send_scsi_SYNCHRONIZE_CACHE_biodone 1009 #define sd_send_scsi_MODE_SENSE ssd_send_scsi_MODE_SENSE 1010 #define 
sd_send_scsi_MODE_SELECT ssd_send_scsi_MODE_SELECT 1011 #define sd_send_scsi_RDWR ssd_send_scsi_RDWR 1012 #define sd_send_scsi_LOG_SENSE ssd_send_scsi_LOG_SENSE 1013 #define sd_alloc_rqs ssd_alloc_rqs 1014 #define sd_free_rqs ssd_free_rqs 1015 #define sd_dump_memory ssd_dump_memory 1016 #define sd_get_media_info ssd_get_media_info 1017 #define sd_dkio_ctrl_info ssd_dkio_ctrl_info 1018 #define sd_nvpair_str_decode ssd_nvpair_str_decode 1019 #define sd_strtok_r ssd_strtok_r 1020 #define sd_set_properties ssd_set_properties 1021 #define sd_get_tunables_from_conf ssd_get_tunables_from_conf 1022 #define sd_setup_next_xfer ssd_setup_next_xfer 1023 #define sd_dkio_get_temp ssd_dkio_get_temp 1024 #define sd_check_mhd ssd_check_mhd 1025 #define sd_mhd_watch_cb ssd_mhd_watch_cb 1026 #define sd_mhd_watch_incomplete ssd_mhd_watch_incomplete 1027 #define sd_sname ssd_sname 1028 #define sd_mhd_resvd_recover ssd_mhd_resvd_recover 1029 #define sd_resv_reclaim_thread ssd_resv_reclaim_thread 1030 #define sd_take_ownership ssd_take_ownership 1031 #define sd_reserve_release ssd_reserve_release 1032 #define sd_rmv_resv_reclaim_req ssd_rmv_resv_reclaim_req 1033 #define sd_mhd_reset_notify_cb ssd_mhd_reset_notify_cb 1034 #define sd_persistent_reservation_in_read_keys \ 1035 ssd_persistent_reservation_in_read_keys 1036 #define sd_persistent_reservation_in_read_resv \ 1037 ssd_persistent_reservation_in_read_resv 1038 #define sd_mhdioc_takeown ssd_mhdioc_takeown 1039 #define sd_mhdioc_failfast ssd_mhdioc_failfast 1040 #define sd_mhdioc_release ssd_mhdioc_release 1041 #define sd_mhdioc_register_devid ssd_mhdioc_register_devid 1042 #define sd_mhdioc_inkeys ssd_mhdioc_inkeys 1043 #define sd_mhdioc_inresv ssd_mhdioc_inresv 1044 #define sr_change_blkmode ssr_change_blkmode 1045 #define sr_change_speed ssr_change_speed 1046 #define sr_atapi_change_speed ssr_atapi_change_speed 1047 #define sr_pause_resume ssr_pause_resume 1048 #define sr_play_msf ssr_play_msf 1049 #define sr_play_trkind ssr_play_trkind 1050 #define sr_read_all_subcodes ssr_read_all_subcodes 1051 #define sr_read_subchannel ssr_read_subchannel 1052 #define sr_read_tocentry ssr_read_tocentry 1053 #define sr_read_tochdr ssr_read_tochdr 1054 #define sr_read_cdda ssr_read_cdda 1055 #define sr_read_cdxa ssr_read_cdxa 1056 #define sr_read_mode1 ssr_read_mode1 1057 #define sr_read_mode2 ssr_read_mode2 1058 #define sr_read_cd_mode2 ssr_read_cd_mode2 1059 #define sr_sector_mode ssr_sector_mode 1060 #define sr_eject ssr_eject 1061 #define sr_ejected ssr_ejected 1062 #define sr_check_wp ssr_check_wp 1063 #define sd_check_media ssd_check_media 1064 #define sd_media_watch_cb ssd_media_watch_cb 1065 #define sd_delayed_cv_broadcast ssd_delayed_cv_broadcast 1066 #define sr_volume_ctrl ssr_volume_ctrl 1067 #define sr_read_sony_session_offset ssr_read_sony_session_offset 1068 #define sd_log_page_supported ssd_log_page_supported 1069 #define sd_check_for_writable_cd ssd_check_for_writable_cd 1070 #define sd_wm_cache_constructor ssd_wm_cache_constructor 1071 #define sd_wm_cache_destructor ssd_wm_cache_destructor 1072 #define sd_range_lock ssd_range_lock 1073 #define sd_get_range ssd_get_range 1074 #define sd_free_inlist_wmap ssd_free_inlist_wmap 1075 #define sd_range_unlock ssd_range_unlock 1076 #define sd_read_modify_write_task ssd_read_modify_write_task 1077 #define sddump_do_read_of_rmw ssddump_do_read_of_rmw 1078 1079 #define sd_iostart_chain ssd_iostart_chain 1080 #define sd_iodone_chain ssd_iodone_chain 1081 #define sd_initpkt_map ssd_initpkt_map 1082 #define 
sd_destroypkt_map ssd_destroypkt_map 1083 #define sd_chain_type_map ssd_chain_type_map 1084 #define sd_chain_index_map ssd_chain_index_map 1085 1086 #define sd_failfast_flushctl ssd_failfast_flushctl 1087 #define sd_failfast_flushq ssd_failfast_flushq 1088 #define sd_failfast_flushq_callback ssd_failfast_flushq_callback 1089 1090 #define sd_is_lsi ssd_is_lsi 1091 #define sd_tg_rdwr ssd_tg_rdwr 1092 #define sd_tg_getinfo ssd_tg_getinfo 1093 1094 #endif /* #if (defined(__fibre)) */ 1095 1096 1097 int _init(void); 1098 int _fini(void); 1099 int _info(struct modinfo *modinfop); 1100 1101 /*PRINTFLIKE3*/ 1102 static void sd_log_trace(uint_t comp, struct sd_lun *un, const char *fmt, ...); 1103 /*PRINTFLIKE3*/ 1104 static void sd_log_info(uint_t comp, struct sd_lun *un, const char *fmt, ...); 1105 /*PRINTFLIKE3*/ 1106 static void sd_log_err(uint_t comp, struct sd_lun *un, const char *fmt, ...); 1107 1108 static int sdprobe(dev_info_t *devi); 1109 static int sdinfo(dev_info_t *dip, ddi_info_cmd_t infocmd, void *arg, 1110 void **result); 1111 static int sd_prop_op(dev_t dev, dev_info_t *dip, ddi_prop_op_t prop_op, 1112 int mod_flags, char *name, caddr_t valuep, int *lengthp); 1113 1114 /* 1115 * Smart probe for parallel scsi 1116 */ 1117 static void sd_scsi_probe_cache_init(void); 1118 static void sd_scsi_probe_cache_fini(void); 1119 static void sd_scsi_clear_probe_cache(void); 1120 static int sd_scsi_probe_with_cache(struct scsi_device *devp, int (*fn)()); 1121 1122 /* 1123 * Attached luns on target for parallel scsi 1124 */ 1125 static void sd_scsi_target_lun_init(void); 1126 static void sd_scsi_target_lun_fini(void); 1127 static int sd_scsi_get_target_lun_count(dev_info_t *dip, int target); 1128 static void sd_scsi_update_lun_on_target(dev_info_t *dip, int target, int flag); 1129 1130 static int sd_spin_up_unit(sd_ssc_t *ssc); 1131 1132 /* 1133 * Using sd_ssc_init to establish sd_ssc_t struct 1134 * Using sd_ssc_send to send uscsi internal command 1135 * Using sd_ssc_fini to free sd_ssc_t struct 1136 */ 1137 static sd_ssc_t *sd_ssc_init(struct sd_lun *un); 1138 static int sd_ssc_send(sd_ssc_t *ssc, struct uscsi_cmd *incmd, 1139 int flag, enum uio_seg dataspace, int path_flag); 1140 static void sd_ssc_fini(sd_ssc_t *ssc); 1141 1142 /* 1143 * Using sd_ssc_assessment to set correct type-of-assessment 1144 * Using sd_ssc_post to post ereport & system log 1145 * sd_ssc_post will call sd_ssc_print to print system log 1146 * sd_ssc_post will call sd_ssd_ereport_post to post ereport 1147 */ 1148 static void sd_ssc_assessment(sd_ssc_t *ssc, 1149 enum sd_type_assessment tp_assess); 1150 1151 static void sd_ssc_post(sd_ssc_t *ssc, enum sd_driver_assessment sd_assess); 1152 static void sd_ssc_print(sd_ssc_t *ssc, int sd_severity); 1153 static void sd_ssc_ereport_post(sd_ssc_t *ssc, 1154 enum sd_driver_assessment drv_assess); 1155 1156 /* 1157 * Using sd_ssc_set_info to mark an un-decodable-data error. 1158 * Using sd_ssc_extract_info to transfer information from internal 1159 * data structures to sd_ssc_t. 
1160 */ 1161 static void sd_ssc_set_info(sd_ssc_t *ssc, int ssc_flags, uint_t comp, 1162 const char *fmt, ...); 1163 static void sd_ssc_extract_info(sd_ssc_t *ssc, struct sd_lun *un, 1164 struct scsi_pkt *pktp, struct buf *bp, struct sd_xbuf *xp); 1165 1166 static int sd_send_scsi_cmd(dev_t dev, struct uscsi_cmd *incmd, int flag, 1167 enum uio_seg dataspace, int path_flag); 1168 1169 #ifdef _LP64 1170 static void sd_enable_descr_sense(sd_ssc_t *ssc); 1171 static void sd_reenable_dsense_task(void *arg); 1172 #endif /* _LP64 */ 1173 1174 static void sd_set_mmc_caps(sd_ssc_t *ssc); 1175 1176 static void sd_read_unit_properties(struct sd_lun *un); 1177 static int sd_process_sdconf_file(struct sd_lun *un); 1178 static void sd_nvpair_str_decode(struct sd_lun *un, char *nvpair_str); 1179 static char *sd_strtok_r(char *string, const char *sepset, char **lasts); 1180 static void sd_set_properties(struct sd_lun *un, char *name, char *value); 1181 static void sd_get_tunables_from_conf(struct sd_lun *un, int flags, 1182 int *data_list, sd_tunables *values); 1183 static void sd_process_sdconf_table(struct sd_lun *un); 1184 static int sd_sdconf_id_match(struct sd_lun *un, char *id, int idlen); 1185 static int sd_blank_cmp(struct sd_lun *un, char *id, int idlen); 1186 static int sd_chk_vers1_data(struct sd_lun *un, int flags, int *prop_list, 1187 int list_len, char *dataname_ptr); 1188 static void sd_set_vers1_properties(struct sd_lun *un, int flags, 1189 sd_tunables *prop_list); 1190 1191 static void sd_register_devid(sd_ssc_t *ssc, dev_info_t *devi, 1192 int reservation_flag); 1193 static int sd_get_devid(sd_ssc_t *ssc); 1194 static ddi_devid_t sd_create_devid(sd_ssc_t *ssc); 1195 static int sd_write_deviceid(sd_ssc_t *ssc); 1196 static int sd_get_devid_page(struct sd_lun *un, uchar_t *wwn, int *len); 1197 static int sd_check_vpd_page_support(sd_ssc_t *ssc); 1198 1199 static void sd_setup_pm(sd_ssc_t *ssc, dev_info_t *devi); 1200 static void sd_create_pm_components(dev_info_t *devi, struct sd_lun *un); 1201 1202 static int sd_ddi_suspend(dev_info_t *devi); 1203 static int sd_ddi_pm_suspend(struct sd_lun *un); 1204 static int sd_ddi_resume(dev_info_t *devi); 1205 static int sd_ddi_pm_resume(struct sd_lun *un); 1206 static int sdpower(dev_info_t *devi, int component, int level); 1207 1208 static int sdattach(dev_info_t *devi, ddi_attach_cmd_t cmd); 1209 static int sddetach(dev_info_t *devi, ddi_detach_cmd_t cmd); 1210 static int sd_unit_attach(dev_info_t *devi); 1211 static int sd_unit_detach(dev_info_t *devi); 1212 1213 static void sd_set_unit_attributes(struct sd_lun *un, dev_info_t *devi); 1214 static void sd_create_errstats(struct sd_lun *un, int instance); 1215 static void sd_set_errstats(struct sd_lun *un); 1216 static void sd_set_pstats(struct sd_lun *un); 1217 1218 static int sddump(dev_t dev, caddr_t addr, daddr_t blkno, int nblk); 1219 static int sd_scsi_poll(struct sd_lun *un, struct scsi_pkt *pkt); 1220 static int sd_send_polled_RQS(struct sd_lun *un); 1221 static int sd_ddi_scsi_poll(struct scsi_pkt *pkt); 1222 1223 #if (defined(__fibre)) 1224 /* 1225 * Event callbacks (photon) 1226 */ 1227 static void sd_init_event_callbacks(struct sd_lun *un); 1228 static void sd_event_callback(dev_info_t *, ddi_eventcookie_t, void *, void *); 1229 #endif 1230 1231 /* 1232 * Defines for sd_cache_control 1233 */ 1234 1235 #define SD_CACHE_ENABLE 1 1236 #define SD_CACHE_DISABLE 0 1237 #define SD_CACHE_NOCHANGE -1 1238 1239 static int sd_cache_control(sd_ssc_t *ssc, int rcd_flag, int wce_flag); 1240 static 
int sd_get_write_cache_enabled(sd_ssc_t *ssc, int *is_enabled); 1241 static void sd_get_nv_sup(sd_ssc_t *ssc); 1242 static dev_t sd_make_device(dev_info_t *devi); 1243 1244 static void sd_update_block_info(struct sd_lun *un, uint32_t lbasize, 1245 uint64_t capacity); 1246 1247 /* 1248 * Driver entry point functions. 1249 */ 1250 static int sdopen(dev_t *dev_p, int flag, int otyp, cred_t *cred_p); 1251 static int sdclose(dev_t dev, int flag, int otyp, cred_t *cred_p); 1252 static int sd_ready_and_valid(sd_ssc_t *ssc, int part); 1253 1254 static void sdmin(struct buf *bp); 1255 static int sdread(dev_t dev, struct uio *uio, cred_t *cred_p); 1256 static int sdwrite(dev_t dev, struct uio *uio, cred_t *cred_p); 1257 static int sdaread(dev_t dev, struct aio_req *aio, cred_t *cred_p); 1258 static int sdawrite(dev_t dev, struct aio_req *aio, cred_t *cred_p); 1259 1260 static int sdstrategy(struct buf *bp); 1261 static int sdioctl(dev_t, int, intptr_t, int, cred_t *, int *); 1262 1263 /* 1264 * Function prototypes for layering functions in the iostart chain. 1265 */ 1266 static void sd_mapblockaddr_iostart(int index, struct sd_lun *un, 1267 struct buf *bp); 1268 static void sd_mapblocksize_iostart(int index, struct sd_lun *un, 1269 struct buf *bp); 1270 static void sd_checksum_iostart(int index, struct sd_lun *un, struct buf *bp); 1271 static void sd_checksum_uscsi_iostart(int index, struct sd_lun *un, 1272 struct buf *bp); 1273 static void sd_pm_iostart(int index, struct sd_lun *un, struct buf *bp); 1274 static void sd_core_iostart(int index, struct sd_lun *un, struct buf *bp); 1275 1276 /* 1277 * Function prototypes for layering functions in the iodone chain. 1278 */ 1279 static void sd_buf_iodone(int index, struct sd_lun *un, struct buf *bp); 1280 static void sd_uscsi_iodone(int index, struct sd_lun *un, struct buf *bp); 1281 static void sd_mapblockaddr_iodone(int index, struct sd_lun *un, 1282 struct buf *bp); 1283 static void sd_mapblocksize_iodone(int index, struct sd_lun *un, 1284 struct buf *bp); 1285 static void sd_checksum_iodone(int index, struct sd_lun *un, struct buf *bp); 1286 static void sd_checksum_uscsi_iodone(int index, struct sd_lun *un, 1287 struct buf *bp); 1288 static void sd_pm_iodone(int index, struct sd_lun *un, struct buf *bp); 1289 1290 /* 1291 * Prototypes for functions to support buf(9S) based IO. 1292 */ 1293 static void sd_xbuf_strategy(struct buf *bp, ddi_xbuf_t xp, void *arg); 1294 static int sd_initpkt_for_buf(struct buf *, struct scsi_pkt **); 1295 static void sd_destroypkt_for_buf(struct buf *); 1296 static int sd_setup_rw_pkt(struct sd_lun *un, struct scsi_pkt **pktpp, 1297 struct buf *bp, int flags, 1298 int (*callback)(caddr_t), caddr_t callback_arg, 1299 diskaddr_t lba, uint32_t blockcount); 1300 static int sd_setup_next_rw_pkt(struct sd_lun *un, struct scsi_pkt *pktp, 1301 struct buf *bp, diskaddr_t lba, uint32_t blockcount); 1302 1303 /* 1304 * Prototypes for functions to support USCSI IO. 1305 */ 1306 static int sd_uscsi_strategy(struct buf *bp); 1307 static int sd_initpkt_for_uscsi(struct buf *, struct scsi_pkt **); 1308 static void sd_destroypkt_for_uscsi(struct buf *); 1309 1310 static void sd_xbuf_init(struct sd_lun *un, struct buf *bp, struct sd_xbuf *xp, 1311 uchar_t chain_type, void *pktinfop); 1312 1313 static int sd_pm_entry(struct sd_lun *un); 1314 static void sd_pm_exit(struct sd_lun *un); 1315 1316 static void sd_pm_idletimeout_handler(void *arg); 1317 1318 /* 1319 * sd_core internal functions (used at the sd_core_io layer). 
1320 */ 1321 static void sd_add_buf_to_waitq(struct sd_lun *un, struct buf *bp); 1322 static void sdintr(struct scsi_pkt *pktp); 1323 static void sd_start_cmds(struct sd_lun *un, struct buf *immed_bp); 1324 1325 static int sd_send_scsi_cmd(dev_t dev, struct uscsi_cmd *incmd, int flag, 1326 enum uio_seg dataspace, int path_flag); 1327 1328 static struct buf *sd_bioclone_alloc(struct buf *bp, size_t datalen, 1329 daddr_t blkno, int (*func)(struct buf *)); 1330 static struct buf *sd_shadow_buf_alloc(struct buf *bp, size_t datalen, 1331 uint_t bflags, daddr_t blkno, int (*func)(struct buf *)); 1332 static void sd_bioclone_free(struct buf *bp); 1333 static void sd_shadow_buf_free(struct buf *bp); 1334 1335 static void sd_print_transport_rejected_message(struct sd_lun *un, 1336 struct sd_xbuf *xp, int code); 1337 static void sd_print_incomplete_msg(struct sd_lun *un, struct buf *bp, 1338 void *arg, int code); 1339 static void sd_print_sense_failed_msg(struct sd_lun *un, struct buf *bp, 1340 void *arg, int code); 1341 static void sd_print_cmd_incomplete_msg(struct sd_lun *un, struct buf *bp, 1342 void *arg, int code); 1343 1344 static void sd_retry_command(struct sd_lun *un, struct buf *bp, 1345 int retry_check_flag, 1346 void (*user_funcp)(struct sd_lun *un, struct buf *bp, void *argp, 1347 int c), 1348 void *user_arg, int failure_code, clock_t retry_delay, 1349 void (*statp)(kstat_io_t *)); 1350 1351 static void sd_set_retry_bp(struct sd_lun *un, struct buf *bp, 1352 clock_t retry_delay, void (*statp)(kstat_io_t *)); 1353 1354 static void sd_send_request_sense_command(struct sd_lun *un, struct buf *bp, 1355 struct scsi_pkt *pktp); 1356 static void sd_start_retry_command(void *arg); 1357 static void sd_start_direct_priority_command(void *arg); 1358 static void sd_return_failed_command(struct sd_lun *un, struct buf *bp, 1359 int errcode); 1360 static void sd_return_failed_command_no_restart(struct sd_lun *un, 1361 struct buf *bp, int errcode); 1362 static void sd_return_command(struct sd_lun *un, struct buf *bp); 1363 static void sd_sync_with_callback(struct sd_lun *un); 1364 static int sdrunout(caddr_t arg); 1365 1366 static void sd_mark_rqs_busy(struct sd_lun *un, struct buf *bp); 1367 static struct buf *sd_mark_rqs_idle(struct sd_lun *un, struct sd_xbuf *xp); 1368 1369 static void sd_reduce_throttle(struct sd_lun *un, int throttle_type); 1370 static void sd_restore_throttle(void *arg); 1371 1372 static void sd_init_cdb_limits(struct sd_lun *un); 1373 1374 static void sd_pkt_status_good(struct sd_lun *un, struct buf *bp, 1375 struct sd_xbuf *xp, struct scsi_pkt *pktp); 1376 1377 /* 1378 * Error handling functions 1379 */ 1380 static void sd_pkt_status_check_condition(struct sd_lun *un, struct buf *bp, 1381 struct sd_xbuf *xp, struct scsi_pkt *pktp); 1382 static void sd_pkt_status_busy(struct sd_lun *un, struct buf *bp, 1383 struct sd_xbuf *xp, struct scsi_pkt *pktp); 1384 static void sd_pkt_status_reservation_conflict(struct sd_lun *un, 1385 struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp); 1386 static void sd_pkt_status_qfull(struct sd_lun *un, struct buf *bp, 1387 struct sd_xbuf *xp, struct scsi_pkt *pktp); 1388 1389 static void sd_handle_request_sense(struct sd_lun *un, struct buf *bp, 1390 struct sd_xbuf *xp, struct scsi_pkt *pktp); 1391 static void sd_handle_auto_request_sense(struct sd_lun *un, struct buf *bp, 1392 struct sd_xbuf *xp, struct scsi_pkt *pktp); 1393 static int sd_validate_sense_data(struct sd_lun *un, struct buf *bp, 1394 struct sd_xbuf *xp, size_t actual_len); 
1395 static void sd_decode_sense(struct sd_lun *un, struct buf *bp, 1396 struct sd_xbuf *xp, struct scsi_pkt *pktp); 1397 1398 static void sd_print_sense_msg(struct sd_lun *un, struct buf *bp, 1399 void *arg, int code); 1400 1401 static void sd_sense_key_no_sense(struct sd_lun *un, struct buf *bp, 1402 struct sd_xbuf *xp, struct scsi_pkt *pktp); 1403 static void sd_sense_key_recoverable_error(struct sd_lun *un, 1404 uint8_t *sense_datap, 1405 struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp); 1406 static void sd_sense_key_not_ready(struct sd_lun *un, 1407 uint8_t *sense_datap, 1408 struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp); 1409 static void sd_sense_key_medium_or_hardware_error(struct sd_lun *un, 1410 uint8_t *sense_datap, 1411 struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp); 1412 static void sd_sense_key_illegal_request(struct sd_lun *un, struct buf *bp, 1413 struct sd_xbuf *xp, struct scsi_pkt *pktp); 1414 static void sd_sense_key_unit_attention(struct sd_lun *un, 1415 uint8_t *sense_datap, 1416 struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp); 1417 static void sd_sense_key_fail_command(struct sd_lun *un, struct buf *bp, 1418 struct sd_xbuf *xp, struct scsi_pkt *pktp); 1419 static void sd_sense_key_blank_check(struct sd_lun *un, struct buf *bp, 1420 struct sd_xbuf *xp, struct scsi_pkt *pktp); 1421 static void sd_sense_key_aborted_command(struct sd_lun *un, struct buf *bp, 1422 struct sd_xbuf *xp, struct scsi_pkt *pktp); 1423 static void sd_sense_key_default(struct sd_lun *un, 1424 uint8_t *sense_datap, 1425 struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp); 1426 1427 static void sd_print_retry_msg(struct sd_lun *un, struct buf *bp, 1428 void *arg, int flag); 1429 1430 static void sd_pkt_reason_cmd_incomplete(struct sd_lun *un, struct buf *bp, 1431 struct sd_xbuf *xp, struct scsi_pkt *pktp); 1432 static void sd_pkt_reason_cmd_tran_err(struct sd_lun *un, struct buf *bp, 1433 struct sd_xbuf *xp, struct scsi_pkt *pktp); 1434 static void sd_pkt_reason_cmd_reset(struct sd_lun *un, struct buf *bp, 1435 struct sd_xbuf *xp, struct scsi_pkt *pktp); 1436 static void sd_pkt_reason_cmd_aborted(struct sd_lun *un, struct buf *bp, 1437 struct sd_xbuf *xp, struct scsi_pkt *pktp); 1438 static void sd_pkt_reason_cmd_timeout(struct sd_lun *un, struct buf *bp, 1439 struct sd_xbuf *xp, struct scsi_pkt *pktp); 1440 static void sd_pkt_reason_cmd_unx_bus_free(struct sd_lun *un, struct buf *bp, 1441 struct sd_xbuf *xp, struct scsi_pkt *pktp); 1442 static void sd_pkt_reason_cmd_tag_reject(struct sd_lun *un, struct buf *bp, 1443 struct sd_xbuf *xp, struct scsi_pkt *pktp); 1444 static void sd_pkt_reason_default(struct sd_lun *un, struct buf *bp, 1445 struct sd_xbuf *xp, struct scsi_pkt *pktp); 1446 1447 static void sd_reset_target(struct sd_lun *un, struct scsi_pkt *pktp); 1448 1449 static void sd_start_stop_unit_callback(void *arg); 1450 static void sd_start_stop_unit_task(void *arg); 1451 1452 static void sd_taskq_create(void); 1453 static void sd_taskq_delete(void); 1454 static void sd_target_change_task(void *arg); 1455 static void sd_log_lun_expansion_event(struct sd_lun *un, int km_flag); 1456 static void sd_media_change_task(void *arg); 1457 1458 static int sd_handle_mchange(struct sd_lun *un); 1459 static int sd_send_scsi_DOORLOCK(sd_ssc_t *ssc, int flag, int path_flag); 1460 static int sd_send_scsi_READ_CAPACITY(sd_ssc_t *ssc, uint64_t *capp, 1461 uint32_t *lbap, int path_flag); 1462 static int sd_send_scsi_READ_CAPACITY_16(sd_ssc_t *ssc, uint64_t 
*capp, 1463 uint32_t *lbap, int path_flag); 1464 static int sd_send_scsi_START_STOP_UNIT(sd_ssc_t *ssc, int flag, 1465 int path_flag); 1466 static int sd_send_scsi_INQUIRY(sd_ssc_t *ssc, uchar_t *bufaddr, 1467 size_t buflen, uchar_t evpd, uchar_t page_code, size_t *residp); 1468 static int sd_send_scsi_TEST_UNIT_READY(sd_ssc_t *ssc, int flag); 1469 static int sd_send_scsi_PERSISTENT_RESERVE_IN(sd_ssc_t *ssc, 1470 uchar_t usr_cmd, uint16_t data_len, uchar_t *data_bufp); 1471 static int sd_send_scsi_PERSISTENT_RESERVE_OUT(sd_ssc_t *ssc, 1472 uchar_t usr_cmd, uchar_t *usr_bufp); 1473 static int sd_send_scsi_SYNCHRONIZE_CACHE(struct sd_lun *un, 1474 struct dk_callback *dkc); 1475 static int sd_send_scsi_SYNCHRONIZE_CACHE_biodone(struct buf *bp); 1476 static int sd_send_scsi_GET_CONFIGURATION(sd_ssc_t *ssc, 1477 struct uscsi_cmd *ucmdbuf, uchar_t *rqbuf, uint_t rqbuflen, 1478 uchar_t *bufaddr, uint_t buflen, int path_flag); 1479 static int sd_send_scsi_feature_GET_CONFIGURATION(sd_ssc_t *ssc, 1480 struct uscsi_cmd *ucmdbuf, uchar_t *rqbuf, uint_t rqbuflen, 1481 uchar_t *bufaddr, uint_t buflen, char feature, int path_flag); 1482 static int sd_send_scsi_MODE_SENSE(sd_ssc_t *ssc, int cdbsize, 1483 uchar_t *bufaddr, size_t buflen, uchar_t page_code, int path_flag); 1484 static int sd_send_scsi_MODE_SELECT(sd_ssc_t *ssc, int cdbsize, 1485 uchar_t *bufaddr, size_t buflen, uchar_t save_page, int path_flag); 1486 static int sd_send_scsi_RDWR(sd_ssc_t *ssc, uchar_t cmd, void *bufaddr, 1487 size_t buflen, daddr_t start_block, int path_flag); 1488 #define sd_send_scsi_READ(ssc, bufaddr, buflen, start_block, path_flag) \ 1489 sd_send_scsi_RDWR(ssc, SCMD_READ, bufaddr, buflen, start_block, \ 1490 path_flag) 1491 #define sd_send_scsi_WRITE(ssc, bufaddr, buflen, start_block, path_flag)\ 1492 sd_send_scsi_RDWR(ssc, SCMD_WRITE, bufaddr, buflen, start_block,\ 1493 path_flag) 1494 1495 static int sd_send_scsi_LOG_SENSE(sd_ssc_t *ssc, uchar_t *bufaddr, 1496 uint16_t buflen, uchar_t page_code, uchar_t page_control, 1497 uint16_t param_ptr, int path_flag); 1498 1499 static int sd_alloc_rqs(struct scsi_device *devp, struct sd_lun *un); 1500 static void sd_free_rqs(struct sd_lun *un); 1501 1502 static void sd_dump_memory(struct sd_lun *un, uint_t comp, char *title, 1503 uchar_t *data, int len, int fmt); 1504 static void sd_panic_for_res_conflict(struct sd_lun *un); 1505 1506 /* 1507 * Disk Ioctl Function Prototypes 1508 */ 1509 static int sd_get_media_info(dev_t dev, caddr_t arg, int flag); 1510 static int sd_dkio_ctrl_info(dev_t dev, caddr_t arg, int flag); 1511 static int sd_dkio_get_temp(dev_t dev, caddr_t arg, int flag); 1512 1513 /* 1514 * Multi-host Ioctl Prototypes 1515 */ 1516 static int sd_check_mhd(dev_t dev, int interval); 1517 static int sd_mhd_watch_cb(caddr_t arg, struct scsi_watch_result *resultp); 1518 static void sd_mhd_watch_incomplete(struct sd_lun *un, struct scsi_pkt *pkt); 1519 static char *sd_sname(uchar_t status); 1520 static void sd_mhd_resvd_recover(void *arg); 1521 static void sd_resv_reclaim_thread(); 1522 static int sd_take_ownership(dev_t dev, struct mhioctkown *p); 1523 static int sd_reserve_release(dev_t dev, int cmd); 1524 static void sd_rmv_resv_reclaim_req(dev_t dev); 1525 static void sd_mhd_reset_notify_cb(caddr_t arg); 1526 static int sd_persistent_reservation_in_read_keys(struct sd_lun *un, 1527 mhioc_inkeys_t *usrp, int flag); 1528 static int sd_persistent_reservation_in_read_resv(struct sd_lun *un, 1529 mhioc_inresvs_t *usrp, int flag); 1530 static int sd_mhdioc_takeown(dev_t 
dev, caddr_t arg, int flag); 1531 static int sd_mhdioc_failfast(dev_t dev, caddr_t arg, int flag); 1532 static int sd_mhdioc_release(dev_t dev); 1533 static int sd_mhdioc_register_devid(dev_t dev); 1534 static int sd_mhdioc_inkeys(dev_t dev, caddr_t arg, int flag); 1535 static int sd_mhdioc_inresv(dev_t dev, caddr_t arg, int flag); 1536 1537 /* 1538 * SCSI removable prototypes 1539 */ 1540 static int sr_change_blkmode(dev_t dev, int cmd, intptr_t data, int flag); 1541 static int sr_change_speed(dev_t dev, int cmd, intptr_t data, int flag); 1542 static int sr_atapi_change_speed(dev_t dev, int cmd, intptr_t data, int flag); 1543 static int sr_pause_resume(dev_t dev, int mode); 1544 static int sr_play_msf(dev_t dev, caddr_t data, int flag); 1545 static int sr_play_trkind(dev_t dev, caddr_t data, int flag); 1546 static int sr_read_all_subcodes(dev_t dev, caddr_t data, int flag); 1547 static int sr_read_subchannel(dev_t dev, caddr_t data, int flag); 1548 static int sr_read_tocentry(dev_t dev, caddr_t data, int flag); 1549 static int sr_read_tochdr(dev_t dev, caddr_t data, int flag); 1550 static int sr_read_cdda(dev_t dev, caddr_t data, int flag); 1551 static int sr_read_cdxa(dev_t dev, caddr_t data, int flag); 1552 static int sr_read_mode1(dev_t dev, caddr_t data, int flag); 1553 static int sr_read_mode2(dev_t dev, caddr_t data, int flag); 1554 static int sr_read_cd_mode2(dev_t dev, caddr_t data, int flag); 1555 static int sr_sector_mode(dev_t dev, uint32_t blksize); 1556 static int sr_eject(dev_t dev); 1557 static void sr_ejected(register struct sd_lun *un); 1558 static int sr_check_wp(dev_t dev); 1559 static int sd_check_media(dev_t dev, enum dkio_state state); 1560 static int sd_media_watch_cb(caddr_t arg, struct scsi_watch_result *resultp); 1561 static void sd_delayed_cv_broadcast(void *arg); 1562 static int sr_volume_ctrl(dev_t dev, caddr_t data, int flag); 1563 static int sr_read_sony_session_offset(dev_t dev, caddr_t data, int flag); 1564 1565 static int sd_log_page_supported(sd_ssc_t *ssc, int log_page); 1566 1567 /* 1568 * Function Prototype for the non-512 support (DVDRAM, MO etc.) functions. 1569 */ 1570 static void sd_check_for_writable_cd(sd_ssc_t *ssc, int path_flag); 1571 static int sd_wm_cache_constructor(void *wm, void *un, int flags); 1572 static void sd_wm_cache_destructor(void *wm, void *un); 1573 static struct sd_w_map *sd_range_lock(struct sd_lun *un, daddr_t startb, 1574 daddr_t endb, ushort_t typ); 1575 static struct sd_w_map *sd_get_range(struct sd_lun *un, daddr_t startb, 1576 daddr_t endb); 1577 static void sd_free_inlist_wmap(struct sd_lun *un, struct sd_w_map *wmp); 1578 static void sd_range_unlock(struct sd_lun *un, struct sd_w_map *wm); 1579 static void sd_read_modify_write_task(void * arg); 1580 static int 1581 sddump_do_read_of_rmw(struct sd_lun *un, uint64_t blkno, uint64_t nblk, 1582 struct buf **bpp); 1583 1584 1585 /* 1586 * Function prototypes for failfast support. 
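 *
 * The failfast machinery is tuned through the SD_FAILFAST_* constants and
 * the sd_failfast_flushctl bitmask defined below. As a sketch (illustrative
 * only), both optional flush behaviors could be selected from /etc/system
 * with:
 *
 *	set sd:sd_failfast_flushctl = 0x3
 *
 * i.e. SD_FAILFAST_FLUSH_ALL_BUFS | SD_FAILFAST_FLUSH_ALL_QUEUES.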
1587 */ 1588 static void sd_failfast_flushq(struct sd_lun *un); 1589 static int sd_failfast_flushq_callback(struct buf *bp); 1590 1591 /* 1592 * Function prototypes to check for lsi devices 1593 */ 1594 static void sd_is_lsi(struct sd_lun *un); 1595 1596 /* 1597 * Function prototypes for partial DMA support 1598 */ 1599 static int sd_setup_next_xfer(struct sd_lun *un, struct buf *bp, 1600 struct scsi_pkt *pkt, struct sd_xbuf *xp); 1601 1602 1603 /* Function prototypes for cmlb */ 1604 static int sd_tg_rdwr(dev_info_t *devi, uchar_t cmd, void *bufaddr, 1605 diskaddr_t start_block, size_t reqlength, void *tg_cookie); 1606 1607 static int sd_tg_getinfo(dev_info_t *devi, int cmd, void *arg, void *tg_cookie); 1608 1609 /* 1610 * Constants for failfast support: 1611 * 1612 * SD_FAILFAST_INACTIVE: Instance is currently in a normal state, with NO 1613 * failfast processing being performed. 1614 * 1615 * SD_FAILFAST_ACTIVE: Instance is in the failfast state and is performing 1616 * failfast processing on all bufs with B_FAILFAST set. 1617 */ 1618 1619 #define SD_FAILFAST_INACTIVE 0 1620 #define SD_FAILFAST_ACTIVE 1 1621 1622 /* 1623 * Bitmask to control behavior of buf(9S) flushes when a transition to 1624 * the failfast state occurs. Optional bits include: 1625 * 1626 * SD_FAILFAST_FLUSH_ALL_BUFS: When set, flush ALL bufs including those that 1627 * do NOT have B_FAILFAST set. When clear, only bufs with B_FAILFAST will 1628 * be flushed. 1629 * 1630 * SD_FAILFAST_FLUSH_ALL_QUEUES: When set, flush any/all other queues in the 1631 * driver, in addition to the regular wait queue. This includes the xbuf 1632 * queues. When clear, only the driver's wait queue will be flushed. 1633 */ 1634 #define SD_FAILFAST_FLUSH_ALL_BUFS 0x01 1635 #define SD_FAILFAST_FLUSH_ALL_QUEUES 0x02 1636 1637 /* 1638 * The default behavior is to only flush bufs that have B_FAILFAST set, but 1639 * to flush all queues within the driver. 1640 */ 1641 static int sd_failfast_flushctl = SD_FAILFAST_FLUSH_ALL_QUEUES; 1642 1643 1644 /* 1645 * SD Testing Fault Injection 1646 */ 1647 #ifdef SD_FAULT_INJECTION 1648 static void sd_faultinjection_ioctl(int cmd, intptr_t arg, struct sd_lun *un); 1649 static void sd_faultinjection(struct scsi_pkt *pktp); 1650 static void sd_injection_log(char *buf, struct sd_lun *un); 1651 #endif 1652 1653 /* 1654 * Device driver ops vector 1655 */ 1656 static struct cb_ops sd_cb_ops = { 1657 sdopen, /* open */ 1658 sdclose, /* close */ 1659 sdstrategy, /* strategy */ 1660 nodev, /* print */ 1661 sddump, /* dump */ 1662 sdread, /* read */ 1663 sdwrite, /* write */ 1664 sdioctl, /* ioctl */ 1665 nodev, /* devmap */ 1666 nodev, /* mmap */ 1667 nodev, /* segmap */ 1668 nochpoll, /* poll */ 1669 sd_prop_op, /* cb_prop_op */ 1670 0, /* streamtab */ 1671 D_64BIT | D_MP | D_NEW | D_HOTPLUG, /* Driver compatibility flags */ 1672 CB_REV, /* cb_rev */ 1673 sdaread, /* async I/O read entry point */ 1674 sdawrite /* async I/O write entry point */ 1675 }; 1676 1677 struct dev_ops sd_ops = { 1678 DEVO_REV, /* devo_rev, */ 1679 0, /* refcnt */ 1680 sdinfo, /* info */ 1681 nulldev, /* identify */ 1682 sdprobe, /* probe */ 1683 sdattach, /* attach */ 1684 sddetach, /* detach */ 1685 nodev, /* reset */ 1686 &sd_cb_ops, /* driver operations */ 1687 NULL, /* bus operations */ 1688 sdpower, /* power */ 1689 ddi_quiesce_not_needed, /* quiesce */ 1690 }; 1691 1692 /* 1693 * This is the loadable module wrapper. 
1694 */
1695 #include <sys/modctl.h>
1696
1697 #ifndef XPV_HVM_DRIVER
1698 static struct modldrv modldrv = {
1699 	&mod_driverops, /* Type of module. This one is a driver */
1700 	SD_MODULE_NAME, /* Module name. */
1701 	&sd_ops /* driver ops */
1702 };
1703
1704 static struct modlinkage modlinkage = {
1705 	MODREV_1, &modldrv, NULL
1706 };
1707
1708 #else /* XPV_HVM_DRIVER */
1709 static struct modlmisc modlmisc = {
1710 	&mod_miscops, /* Type of module. This one is a misc */
1711 	"HVM " SD_MODULE_NAME, /* Module name. */
1712 };
1713
1714 static struct modlinkage modlinkage = {
1715 	MODREV_1, &modlmisc, NULL
1716 };
1717
1718 #endif /* XPV_HVM_DRIVER */
1719
1720 static cmlb_tg_ops_t sd_tgops = {
1721 	TG_DK_OPS_VERSION_1,
1722 	sd_tg_rdwr,
1723 	sd_tg_getinfo
1724 };
1725
1726 static struct scsi_asq_key_strings sd_additional_codes[] = {
1727 	0x81, 0, "Logical Unit is Reserved",
1728 	0x85, 0, "Audio Address Not Valid",
1729 	0xb6, 0, "Media Load Mechanism Failed",
1730 	0xB9, 0, "Audio Play Operation Aborted",
1731 	0xbf, 0, "Buffer Overflow for Read All Subcodes Command",
1732 	0x53, 2, "Medium removal prevented",
1733 	0x6f, 0, "Authentication failed during key exchange",
1734 	0x6f, 1, "Key not present",
1735 	0x6f, 2, "Key not established",
1736 	0x6f, 3, "Read without proper authentication",
1737 	0x6f, 4, "Mismatched region to this logical unit",
1738 	0x6f, 5, "Region reset count error",
1739 	0xffff, 0x0, NULL
1740 };
1741
1742
1743 /*
1744  * Struct for passing printing information for sense data messages
1745  */
1746 struct sd_sense_info {
1747 	int ssi_severity;
1748 	int ssi_pfa_flag;
1749 };
1750
1751 /*
1752  * Table of function pointers for iostart-side routines. Separate "chains"
1753  * of layered function calls are formed by placing the function pointers
1754  * sequentially in the desired order. Functions are called according to an
1755  * incrementing table index ordering. The last function in each chain must
1756  * be sd_core_iostart(). The corresponding iodone-side routines are expected
1757  * in the sd_iodone_chain[] array.
1758  *
1759  * Note: It may seem more natural to organize both the iostart and iodone
1760  * functions together, into an array of structures (or some similar
1761  * organization) with a common index, rather than two separate arrays which
1762  * must be maintained in synchronization. The purpose of this division is
1763  * to achieve improved performance: individual arrays allow for more
1764  * effective cache line utilization on certain platforms.
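 *
 * As an illustrative sketch (not compiled; all names as declared in this
 * file): an instance using the PM-enabled disk chain dispatches a buf via
 *
 *	SD_BEGIN_IOSTART(SD_CHAIN_DISK_IOSTART, un, bp);
 *
 * which calls sd_mapblockaddr_iostart(0, un, bp); that routine passes the
 * buf along with SD_NEXT_IOSTART(0, un, bp), reaching sd_pm_iostart(1, un,
 * bp), and so on until sd_core_iostart() transports the command. (The
 * SD_BEGIN_IOSTART and SD_NEXT_IOSTART macros are defined later in this
 * file.)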
1765 */ 1766 1767 typedef void (*sd_chain_t)(int index, struct sd_lun *un, struct buf *bp); 1768 1769 1770 static sd_chain_t sd_iostart_chain[] = { 1771 1772 /* Chain for buf IO for disk drive targets (PM enabled) */ 1773 sd_mapblockaddr_iostart, /* Index: 0 */ 1774 sd_pm_iostart, /* Index: 1 */ 1775 sd_core_iostart, /* Index: 2 */ 1776 1777 /* Chain for buf IO for disk drive targets (PM disabled) */ 1778 sd_mapblockaddr_iostart, /* Index: 3 */ 1779 sd_core_iostart, /* Index: 4 */ 1780 1781 /* Chain for buf IO for removable-media targets (PM enabled) */ 1782 sd_mapblockaddr_iostart, /* Index: 5 */ 1783 sd_mapblocksize_iostart, /* Index: 6 */ 1784 sd_pm_iostart, /* Index: 7 */ 1785 sd_core_iostart, /* Index: 8 */ 1786 1787 /* Chain for buf IO for removable-media targets (PM disabled) */ 1788 sd_mapblockaddr_iostart, /* Index: 9 */ 1789 sd_mapblocksize_iostart, /* Index: 10 */ 1790 sd_core_iostart, /* Index: 11 */ 1791 1792 /* Chain for buf IO for disk drives with checksumming (PM enabled) */ 1793 sd_mapblockaddr_iostart, /* Index: 12 */ 1794 sd_checksum_iostart, /* Index: 13 */ 1795 sd_pm_iostart, /* Index: 14 */ 1796 sd_core_iostart, /* Index: 15 */ 1797 1798 /* Chain for buf IO for disk drives with checksumming (PM disabled) */ 1799 sd_mapblockaddr_iostart, /* Index: 16 */ 1800 sd_checksum_iostart, /* Index: 17 */ 1801 sd_core_iostart, /* Index: 18 */ 1802 1803 /* Chain for USCSI commands (all targets) */ 1804 sd_pm_iostart, /* Index: 19 */ 1805 sd_core_iostart, /* Index: 20 */ 1806 1807 /* Chain for checksumming USCSI commands (all targets) */ 1808 sd_checksum_uscsi_iostart, /* Index: 21 */ 1809 sd_pm_iostart, /* Index: 22 */ 1810 sd_core_iostart, /* Index: 23 */ 1811 1812 /* Chain for "direct" USCSI commands (all targets) */ 1813 sd_core_iostart, /* Index: 24 */ 1814 1815 /* Chain for "direct priority" USCSI commands (all targets) */ 1816 sd_core_iostart, /* Index: 25 */ 1817 }; 1818 1819 /* 1820 * Macros to locate the first function of each iostart chain in the 1821 * sd_iostart_chain[] array. These are located by the index in the array. 1822 */ 1823 #define SD_CHAIN_DISK_IOSTART 0 1824 #define SD_CHAIN_DISK_IOSTART_NO_PM 3 1825 #define SD_CHAIN_RMMEDIA_IOSTART 5 1826 #define SD_CHAIN_RMMEDIA_IOSTART_NO_PM 9 1827 #define SD_CHAIN_CHKSUM_IOSTART 12 1828 #define SD_CHAIN_CHKSUM_IOSTART_NO_PM 16 1829 #define SD_CHAIN_USCSI_CMD_IOSTART 19 1830 #define SD_CHAIN_USCSI_CHKSUM_IOSTART 21 1831 #define SD_CHAIN_DIRECT_CMD_IOSTART 24 1832 #define SD_CHAIN_PRIORITY_CMD_IOSTART 25 1833 1834 1835 /* 1836 * Table of function pointers for the iodone-side routines for the driver- 1837 * internal layering mechanism. The calling sequence for iodone routines 1838 * uses a decrementing table index, so the last routine called in a chain 1839 * must be at the lowest array index location for that chain. The last 1840 * routine for each chain must be either sd_buf_iodone() (for buf(9S) IOs) 1841 * or sd_uscsi_iodone() (for uscsi IOs). Other than this, the ordering 1842 * of the functions in an iodone side chain must correspond to the ordering 1843 * of the iostart routines for that chain. Note that there is no iodone 1844 * side routine that corresponds to sd_core_iostart(), so there is no 1845 * entry in the table for this. 
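 *
 * A sketch of the decrementing traversal (illustrative only): a buf that
 * was started on the PM-enabled disk chain carries xb_chain_iodone ==
 * SD_CHAIN_DISK_IODONE (index 2, defined below), so completion processing
 * proceeds as
 *
 *	SD_BEGIN_IODONE(2, un, bp)	invokes sd_pm_iodone(2, un, bp)
 *	SD_NEXT_IODONE(2, un, bp)	invokes sd_mapblockaddr_iodone(1, un, bp)
 *	SD_NEXT_IODONE(1, un, bp)	invokes sd_buf_iodone(0, un, bp)
 *
 * with sd_buf_iodone() finally returning the buf to its originator.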
1846 */
1847
1848 static sd_chain_t sd_iodone_chain[] = {
1849
1850 	/* Chain for buf IO for disk drive targets (PM enabled) */
1851 	sd_buf_iodone, /* Index: 0 */
1852 	sd_mapblockaddr_iodone, /* Index: 1 */
1853 	sd_pm_iodone, /* Index: 2 */
1854
1855 	/* Chain for buf IO for disk drive targets (PM disabled) */
1856 	sd_buf_iodone, /* Index: 3 */
1857 	sd_mapblockaddr_iodone, /* Index: 4 */
1858
1859 	/* Chain for buf IO for removable-media targets (PM enabled) */
1860 	sd_buf_iodone, /* Index: 5 */
1861 	sd_mapblockaddr_iodone, /* Index: 6 */
1862 	sd_mapblocksize_iodone, /* Index: 7 */
1863 	sd_pm_iodone, /* Index: 8 */
1864
1865 	/* Chain for buf IO for removable-media targets (PM disabled) */
1866 	sd_buf_iodone, /* Index: 9 */
1867 	sd_mapblockaddr_iodone, /* Index: 10 */
1868 	sd_mapblocksize_iodone, /* Index: 11 */
1869
1870 	/* Chain for buf IO for disk drives with checksumming (PM enabled) */
1871 	sd_buf_iodone, /* Index: 12 */
1872 	sd_mapblockaddr_iodone, /* Index: 13 */
1873 	sd_checksum_iodone, /* Index: 14 */
1874 	sd_pm_iodone, /* Index: 15 */
1875
1876 	/* Chain for buf IO for disk drives with checksumming (PM disabled) */
1877 	sd_buf_iodone, /* Index: 16 */
1878 	sd_mapblockaddr_iodone, /* Index: 17 */
1879 	sd_checksum_iodone, /* Index: 18 */
1880
1881 	/* Chain for USCSI commands (non-checksum targets) */
1882 	sd_uscsi_iodone, /* Index: 19 */
1883 	sd_pm_iodone, /* Index: 20 */
1884
1885 	/* Chain for USCSI commands (checksum targets) */
1886 	sd_uscsi_iodone, /* Index: 21 */
1887 	sd_checksum_uscsi_iodone, /* Index: 22 */
1888 	sd_pm_iodone, /* Index: 23 */
1889
1890 	/* Chain for "direct" USCSI commands (all targets) */
1891 	sd_uscsi_iodone, /* Index: 24 */
1892
1893 	/* Chain for "direct priority" USCSI commands (all targets) */
1894 	sd_uscsi_iodone, /* Index: 25 */
1895 };
1896
1897
1898 /*
1899  * Macros to locate the "first" function in the sd_iodone_chain[] array for
1900  * each iodone-side chain. These are located by the array index, but as the
1901  * iodone side functions are called in a decrementing-index order, the
1902  * highest index number in each chain must be specified (as these correspond
1903  * to the first function in the iodone chain that will be called by the core
1904  * at IO completion time).
1905  */
1906
1907 #define SD_CHAIN_DISK_IODONE 2
1908 #define SD_CHAIN_DISK_IODONE_NO_PM 4
1909 #define SD_CHAIN_RMMEDIA_IODONE 8
1910 #define SD_CHAIN_RMMEDIA_IODONE_NO_PM 11
1911 #define SD_CHAIN_CHKSUM_IODONE 15
1912 #define SD_CHAIN_CHKSUM_IODONE_NO_PM 18
1913 #define SD_CHAIN_USCSI_CMD_IODONE 20
1914 #define SD_CHAIN_USCSI_CHKSUM_IODONE 23
1915 #define SD_CHAIN_DIRECT_CMD_IODONE 24
1916 #define SD_CHAIN_PRIORITY_CMD_IODONE 25
1917
1918
1919
1920
1921 /*
1922  * Array to map a layering chain index to the appropriate initpkt routine.
1923  * The redundant entries are present so that the index used for accessing
1924  * the above sd_iostart_chain and sd_iodone_chain tables can be used directly
1925  * with this table as well.
1926 */
1927 typedef int (*sd_initpkt_t)(struct buf *, struct scsi_pkt **);
1928
1929 static sd_initpkt_t sd_initpkt_map[] = {
1930
1931 	/* Chain for buf IO for disk drive targets (PM enabled) */
1932 	sd_initpkt_for_buf, /* Index: 0 */
1933 	sd_initpkt_for_buf, /* Index: 1 */
1934 	sd_initpkt_for_buf, /* Index: 2 */
1935
1936 	/* Chain for buf IO for disk drive targets (PM disabled) */
1937 	sd_initpkt_for_buf, /* Index: 3 */
1938 	sd_initpkt_for_buf, /* Index: 4 */
1939
1940 	/* Chain for buf IO for removable-media targets (PM enabled) */
1941 	sd_initpkt_for_buf, /* Index: 5 */
1942 	sd_initpkt_for_buf, /* Index: 6 */
1943 	sd_initpkt_for_buf, /* Index: 7 */
1944 	sd_initpkt_for_buf, /* Index: 8 */
1945
1946 	/* Chain for buf IO for removable-media targets (PM disabled) */
1947 	sd_initpkt_for_buf, /* Index: 9 */
1948 	sd_initpkt_for_buf, /* Index: 10 */
1949 	sd_initpkt_for_buf, /* Index: 11 */
1950
1951 	/* Chain for buf IO for disk drives with checksumming (PM enabled) */
1952 	sd_initpkt_for_buf, /* Index: 12 */
1953 	sd_initpkt_for_buf, /* Index: 13 */
1954 	sd_initpkt_for_buf, /* Index: 14 */
1955 	sd_initpkt_for_buf, /* Index: 15 */
1956
1957 	/* Chain for buf IO for disk drives with checksumming (PM disabled) */
1958 	sd_initpkt_for_buf, /* Index: 16 */
1959 	sd_initpkt_for_buf, /* Index: 17 */
1960 	sd_initpkt_for_buf, /* Index: 18 */
1961
1962 	/* Chain for USCSI commands (non-checksum targets) */
1963 	sd_initpkt_for_uscsi, /* Index: 19 */
1964 	sd_initpkt_for_uscsi, /* Index: 20 */
1965
1966 	/* Chain for USCSI commands (checksum targets) */
1967 	sd_initpkt_for_uscsi, /* Index: 21 */
1968 	sd_initpkt_for_uscsi, /* Index: 22 */
1969 	sd_initpkt_for_uscsi, /* Index: 23 */
1970
1971 	/* Chain for "direct" USCSI commands (all targets) */
1972 	sd_initpkt_for_uscsi, /* Index: 24 */
1973
1974 	/* Chain for "direct priority" USCSI commands (all targets) */
1975 	sd_initpkt_for_uscsi, /* Index: 25 */
1976
1977 };
1978
1979
1980 /*
1981  * Array to map a layering chain index to the appropriate destroypkt routine.
1982  * The redundant entries are present so that the index used for accessing
1983  * the above sd_iostart_chain and sd_iodone_chain tables can be used directly
1984  * with this table as well.
1985 */
1986 typedef void (*sd_destroypkt_t)(struct buf *);
1987
1988 static sd_destroypkt_t sd_destroypkt_map[] = {
1989
1990 	/* Chain for buf IO for disk drive targets (PM enabled) */
1991 	sd_destroypkt_for_buf, /* Index: 0 */
1992 	sd_destroypkt_for_buf, /* Index: 1 */
1993 	sd_destroypkt_for_buf, /* Index: 2 */
1994
1995 	/* Chain for buf IO for disk drive targets (PM disabled) */
1996 	sd_destroypkt_for_buf, /* Index: 3 */
1997 	sd_destroypkt_for_buf, /* Index: 4 */
1998
1999 	/* Chain for buf IO for removable-media targets (PM enabled) */
2000 	sd_destroypkt_for_buf, /* Index: 5 */
2001 	sd_destroypkt_for_buf, /* Index: 6 */
2002 	sd_destroypkt_for_buf, /* Index: 7 */
2003 	sd_destroypkt_for_buf, /* Index: 8 */
2004
2005 	/* Chain for buf IO for removable-media targets (PM disabled) */
2006 	sd_destroypkt_for_buf, /* Index: 9 */
2007 	sd_destroypkt_for_buf, /* Index: 10 */
2008 	sd_destroypkt_for_buf, /* Index: 11 */
2009
2010 	/* Chain for buf IO for disk drives with checksumming (PM enabled) */
2011 	sd_destroypkt_for_buf, /* Index: 12 */
2012 	sd_destroypkt_for_buf, /* Index: 13 */
2013 	sd_destroypkt_for_buf, /* Index: 14 */
2014 	sd_destroypkt_for_buf, /* Index: 15 */
2015
2016 	/* Chain for buf IO for disk drives with checksumming (PM disabled) */
2017 	sd_destroypkt_for_buf, /* Index: 16 */
2018 	sd_destroypkt_for_buf, /* Index: 17 */
2019 	sd_destroypkt_for_buf, /* Index: 18 */
2020
2021 	/* Chain for USCSI commands (non-checksum targets) */
2022 	sd_destroypkt_for_uscsi, /* Index: 19 */
2023 	sd_destroypkt_for_uscsi, /* Index: 20 */
2024
2025 	/* Chain for USCSI commands (checksum targets) */
2026 	sd_destroypkt_for_uscsi, /* Index: 21 */
2027 	sd_destroypkt_for_uscsi, /* Index: 22 */
2028 	sd_destroypkt_for_uscsi, /* Index: 23 */
2029
2030 	/* Chain for "direct" USCSI commands (all targets) */
2031 	sd_destroypkt_for_uscsi, /* Index: 24 */
2032
2033 	/* Chain for "direct priority" USCSI commands (all targets) */
2034 	sd_destroypkt_for_uscsi, /* Index: 25 */
2035
2036 };
2037
2038
2039
2040 /*
2041  * Array to map a layering chain index to the appropriate chain "type".
2042  * The chain type indicates a specific property/usage of the chain.
2043  * The redundant entries are present so that the index used for accessing
2044  * the above sd_iostart_chain and sd_iodone_chain tables can be used directly
2045  * with this table as well.
2046 */
2047
2048 #define SD_CHAIN_NULL 0 /* for the special RQS cmd */
2049 #define SD_CHAIN_BUFIO 1 /* regular buf IO */
2050 #define SD_CHAIN_USCSI 2 /* regular USCSI commands */
2051 #define SD_CHAIN_DIRECT 3 /* uscsi, w/ bypass power mgt */
2052 #define SD_CHAIN_DIRECT_PRIORITY 4 /* uscsi, w/ bypass power mgt */
2053 /* (for error recovery) */
2054
2055 static int sd_chain_type_map[] = {
2056
2057 	/* Chain for buf IO for disk drive targets (PM enabled) */
2058 	SD_CHAIN_BUFIO, /* Index: 0 */
2059 	SD_CHAIN_BUFIO, /* Index: 1 */
2060 	SD_CHAIN_BUFIO, /* Index: 2 */
2061
2062 	/* Chain for buf IO for disk drive targets (PM disabled) */
2063 	SD_CHAIN_BUFIO, /* Index: 3 */
2064 	SD_CHAIN_BUFIO, /* Index: 4 */
2065
2066 	/* Chain for buf IO for removable-media targets (PM enabled) */
2067 	SD_CHAIN_BUFIO, /* Index: 5 */
2068 	SD_CHAIN_BUFIO, /* Index: 6 */
2069 	SD_CHAIN_BUFIO, /* Index: 7 */
2070 	SD_CHAIN_BUFIO, /* Index: 8 */
2071
2072 	/* Chain for buf IO for removable-media targets (PM disabled) */
2073 	SD_CHAIN_BUFIO, /* Index: 9 */
2074 	SD_CHAIN_BUFIO, /* Index: 10 */
2075 	SD_CHAIN_BUFIO, /* Index: 11 */
2076
2077 	/* Chain for buf IO for disk drives with checksumming (PM enabled) */
2078 	SD_CHAIN_BUFIO, /* Index: 12 */
2079 	SD_CHAIN_BUFIO, /* Index: 13 */
2080 	SD_CHAIN_BUFIO, /* Index: 14 */
2081 	SD_CHAIN_BUFIO, /* Index: 15 */
2082
2083 	/* Chain for buf IO for disk drives with checksumming (PM disabled) */
2084 	SD_CHAIN_BUFIO, /* Index: 16 */
2085 	SD_CHAIN_BUFIO, /* Index: 17 */
2086 	SD_CHAIN_BUFIO, /* Index: 18 */
2087
2088 	/* Chain for USCSI commands (non-checksum targets) */
2089 	SD_CHAIN_USCSI, /* Index: 19 */
2090 	SD_CHAIN_USCSI, /* Index: 20 */
2091
2092 	/* Chain for USCSI commands (checksum targets) */
2093 	SD_CHAIN_USCSI, /* Index: 21 */
2094 	SD_CHAIN_USCSI, /* Index: 22 */
2095 	SD_CHAIN_USCSI, /* Index: 23 */
2096
2097 	/* Chain for "direct" USCSI commands (all targets) */
2098 	SD_CHAIN_DIRECT, /* Index: 24 */
2099
2100 	/* Chain for "direct priority" USCSI commands (all targets) */
2101 	SD_CHAIN_DIRECT_PRIORITY, /* Index: 25 */
2102 };
2103
2104
2105 /* Macro to return TRUE if the IO has come from the sd_buf_iostart() chain. */
2106 #define SD_IS_BUFIO(xp) \
2107 	(sd_chain_type_map[(xp)->xb_chain_iostart] == SD_CHAIN_BUFIO)
2108
2109 /* Macro to return TRUE if the IO has come from the "direct priority" chain. */
2110 #define SD_IS_DIRECT_PRIORITY(xp) \
2111 	(sd_chain_type_map[(xp)->xb_chain_iostart] == SD_CHAIN_DIRECT_PRIORITY)
2112
2113
2114
2115 /*
2116  * Struct, array, and macros to map a specific chain to the appropriate
2117  * layering indexes in the sd_iostart_chain[] and sd_iodone_chain[] arrays.
2118  *
2119  * The sd_chain_index_map[] array is used at attach time to set the various
2120  * un_xxx_chain type members of the sd_lun softstate to the specific layering
2121  * chain to be used with the instance. This allows different instances to use
2122  * different chains for buf IO, uscsi IO, etc. Also, since the xb_chain_iostart
2123  * and xb_chain_iodone index values in the sd_xbuf are initialized to these
2124  * values at sd_xbuf init time, this allows (1) layering chains to be changed
2125  * dynamically and without the use of locking; and (2) a layer to update the
2126  * xb_chain_io[start|done] member in a given xbuf with its current index value,
2127  * to allow for deferred processing of an IO within the same chain from a
2128  * different execution context.
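 *
 * For illustration only (a sketch assuming a non-removable disk with PM
 * enabled and no checksumming), the attach-time selection and the xbuf
 * initialization amount to:
 *
 *	un->un_buf_chain_type = SD_CHAIN_INFO_DISK;
 *	xp->xb_chain_iostart =
 *	    sd_chain_index_map[un->un_buf_chain_type].sci_iostart_index;
 *	xp->xb_chain_iodone =
 *	    sd_chain_index_map[un->un_buf_chain_type].sci_iodone_index;
 *
 * (sd_chain_index_map[] and its sci_* members are defined just below.)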
2129 */ 2130 2131 struct sd_chain_index { 2132 int sci_iostart_index; 2133 int sci_iodone_index; 2134 }; 2135 2136 static struct sd_chain_index sd_chain_index_map[] = { 2137 { SD_CHAIN_DISK_IOSTART, SD_CHAIN_DISK_IODONE }, 2138 { SD_CHAIN_DISK_IOSTART_NO_PM, SD_CHAIN_DISK_IODONE_NO_PM }, 2139 { SD_CHAIN_RMMEDIA_IOSTART, SD_CHAIN_RMMEDIA_IODONE }, 2140 { SD_CHAIN_RMMEDIA_IOSTART_NO_PM, SD_CHAIN_RMMEDIA_IODONE_NO_PM }, 2141 { SD_CHAIN_CHKSUM_IOSTART, SD_CHAIN_CHKSUM_IODONE }, 2142 { SD_CHAIN_CHKSUM_IOSTART_NO_PM, SD_CHAIN_CHKSUM_IODONE_NO_PM }, 2143 { SD_CHAIN_USCSI_CMD_IOSTART, SD_CHAIN_USCSI_CMD_IODONE }, 2144 { SD_CHAIN_USCSI_CHKSUM_IOSTART, SD_CHAIN_USCSI_CHKSUM_IODONE }, 2145 { SD_CHAIN_DIRECT_CMD_IOSTART, SD_CHAIN_DIRECT_CMD_IODONE }, 2146 { SD_CHAIN_PRIORITY_CMD_IOSTART, SD_CHAIN_PRIORITY_CMD_IODONE }, 2147 }; 2148 2149 2150 /* 2151 * The following are indexes into the sd_chain_index_map[] array. 2152 */ 2153 2154 /* un->un_buf_chain_type must be set to one of these */ 2155 #define SD_CHAIN_INFO_DISK 0 2156 #define SD_CHAIN_INFO_DISK_NO_PM 1 2157 #define SD_CHAIN_INFO_RMMEDIA 2 2158 #define SD_CHAIN_INFO_RMMEDIA_NO_PM 3 2159 #define SD_CHAIN_INFO_CHKSUM 4 2160 #define SD_CHAIN_INFO_CHKSUM_NO_PM 5 2161 2162 /* un->un_uscsi_chain_type must be set to one of these */ 2163 #define SD_CHAIN_INFO_USCSI_CMD 6 2164 /* USCSI with PM disabled is the same as DIRECT */ 2165 #define SD_CHAIN_INFO_USCSI_CMD_NO_PM 8 2166 #define SD_CHAIN_INFO_USCSI_CHKSUM 7 2167 2168 /* un->un_direct_chain_type must be set to one of these */ 2169 #define SD_CHAIN_INFO_DIRECT_CMD 8 2170 2171 /* un->un_priority_chain_type must be set to one of these */ 2172 #define SD_CHAIN_INFO_PRIORITY_CMD 9 2173 2174 /* size for devid inquiries */ 2175 #define MAX_INQUIRY_SIZE 0xF0 2176 2177 /* 2178 * Macros used by functions to pass a given buf(9S) struct along to the 2179 * next function in the layering chain for further processing. 2180 * 2181 * In the following macros, passing more than three arguments to the called 2182 * routines causes the optimizer for the SPARC compiler to stop doing tail 2183 * call elimination which results in significant performance degradation. 2184 */ 2185 #define SD_BEGIN_IOSTART(index, un, bp) \ 2186 ((*(sd_iostart_chain[index]))(index, un, bp)) 2187 2188 #define SD_BEGIN_IODONE(index, un, bp) \ 2189 ((*(sd_iodone_chain[index]))(index, un, bp)) 2190 2191 #define SD_NEXT_IOSTART(index, un, bp) \ 2192 ((*(sd_iostart_chain[(index) + 1]))((index) + 1, un, bp)) 2193 2194 #define SD_NEXT_IODONE(index, un, bp) \ 2195 ((*(sd_iodone_chain[(index) - 1]))((index) - 1, un, bp)) 2196 2197 /* 2198 * Function: _init 2199 * 2200 * Description: This is the driver _init(9E) entry point. 2201 * 2202 * Return Code: Returns the value from mod_install(9F) or 2203 * ddi_soft_state_init(9F) as appropriate. 2204 * 2205 * Context: Called when driver module loaded. 
2206 */ 2207 2208 int 2209 _init(void) 2210 { 2211 int err; 2212 2213 /* establish driver name from module name */ 2214 sd_label = (char *)mod_modname(&modlinkage); 2215 2216 #ifndef XPV_HVM_DRIVER 2217 err = ddi_soft_state_init(&sd_state, sizeof (struct sd_lun), 2218 SD_MAXUNIT); 2219 if (err != 0) { 2220 return (err); 2221 } 2222 2223 #else /* XPV_HVM_DRIVER */ 2224 /* Remove the leading "hvm_" from the module name */ 2225 ASSERT(strncmp(sd_label, "hvm_", strlen("hvm_")) == 0); 2226 sd_label += strlen("hvm_"); 2227 2228 #endif /* XPV_HVM_DRIVER */ 2229 2230 mutex_init(&sd_detach_mutex, NULL, MUTEX_DRIVER, NULL); 2231 mutex_init(&sd_log_mutex, NULL, MUTEX_DRIVER, NULL); 2232 mutex_init(&sd_label_mutex, NULL, MUTEX_DRIVER, NULL); 2233 2234 mutex_init(&sd_tr.srq_resv_reclaim_mutex, NULL, MUTEX_DRIVER, NULL); 2235 cv_init(&sd_tr.srq_resv_reclaim_cv, NULL, CV_DRIVER, NULL); 2236 cv_init(&sd_tr.srq_inprocess_cv, NULL, CV_DRIVER, NULL); 2237 2238 /* 2239 * it's ok to init here even for fibre device 2240 */ 2241 sd_scsi_probe_cache_init(); 2242 2243 sd_scsi_target_lun_init(); 2244 2245 /* 2246 * Creating taskq before mod_install ensures that all callers (threads) 2247 * that enter the module after a successful mod_install encounter 2248 * a valid taskq. 2249 */ 2250 sd_taskq_create(); 2251 2252 err = mod_install(&modlinkage); 2253 if (err != 0) { 2254 /* delete taskq if install fails */ 2255 sd_taskq_delete(); 2256 2257 mutex_destroy(&sd_detach_mutex); 2258 mutex_destroy(&sd_log_mutex); 2259 mutex_destroy(&sd_label_mutex); 2260 2261 mutex_destroy(&sd_tr.srq_resv_reclaim_mutex); 2262 cv_destroy(&sd_tr.srq_resv_reclaim_cv); 2263 cv_destroy(&sd_tr.srq_inprocess_cv); 2264 2265 sd_scsi_probe_cache_fini(); 2266 2267 sd_scsi_target_lun_fini(); 2268 2269 #ifndef XPV_HVM_DRIVER 2270 ddi_soft_state_fini(&sd_state); 2271 #endif /* !XPV_HVM_DRIVER */ 2272 return (err); 2273 } 2274 2275 return (err); 2276 } 2277 2278 2279 /* 2280 * Function: _fini 2281 * 2282 * Description: This is the driver _fini(9E) entry point. 2283 * 2284 * Return Code: Returns the value from mod_remove(9F) 2285 * 2286 * Context: Called when driver module is unloaded. 2287 */ 2288 2289 int 2290 _fini(void) 2291 { 2292 int err; 2293 2294 if ((err = mod_remove(&modlinkage)) != 0) { 2295 return (err); 2296 } 2297 2298 sd_taskq_delete(); 2299 2300 mutex_destroy(&sd_detach_mutex); 2301 mutex_destroy(&sd_log_mutex); 2302 mutex_destroy(&sd_label_mutex); 2303 mutex_destroy(&sd_tr.srq_resv_reclaim_mutex); 2304 2305 sd_scsi_probe_cache_fini(); 2306 2307 sd_scsi_target_lun_fini(); 2308 2309 cv_destroy(&sd_tr.srq_resv_reclaim_cv); 2310 cv_destroy(&sd_tr.srq_inprocess_cv); 2311 2312 #ifndef XPV_HVM_DRIVER 2313 ddi_soft_state_fini(&sd_state); 2314 #endif /* !XPV_HVM_DRIVER */ 2315 2316 return (err); 2317 } 2318 2319 2320 /* 2321 * Function: _info 2322 * 2323 * Description: This is the driver _info(9E) entry point. 2324 * 2325 * Arguments: modinfop - pointer to the driver modinfo structure 2326 * 2327 * Return Code: Returns the value from mod_info(9F). 2328 * 2329 * Context: Kernel thread context 2330 */ 2331 2332 int 2333 _info(struct modinfo *modinfop) 2334 { 2335 return (mod_info(&modlinkage, modinfop)); 2336 } 2337 2338 2339 /* 2340 * The following routines implement the driver message logging facility. 2341 * They provide component- and level- based debug output filtering. 2342 * Output may also be restricted to messages for a single instance by 2343 * specifying a soft state pointer in sd_debug_un. 
If sd_debug_un is set
2344  * to NULL, then messages for all instances are printed.
2345  *
2346  * These routines have been cloned from each other due to the language
2347  * constraints of macros and variable argument list processing.
2348  */
2349
2350
2351 /*
2352  * Function: sd_log_err
2353  *
2354  * Description: This routine is called by the SD_ERROR macro for debug
2355  *		logging of error conditions.
2356  *
2357  * Arguments: comp - driver component being logged
2358  *		un - pointer to driver soft state (unit) structure
2359  *		fmt - error string and format to be logged
2360  */
2361
2362 static void
2363 sd_log_err(uint_t comp, struct sd_lun *un, const char *fmt, ...)
2364 {
2365 	va_list ap;
2366 	dev_info_t *dev;
2367
2368 	ASSERT(un != NULL);
2369 	dev = SD_DEVINFO(un);
2370 	ASSERT(dev != NULL);
2371
2372 	/*
2373 	 * Filter messages based on the global component and level masks.
2374 	 * Also print if un matches the value of sd_debug_un, or if
2375 	 * sd_debug_un is set to NULL.
2376 	 */
2377 	if ((sd_component_mask & comp) && (sd_level_mask & SD_LOGMASK_ERROR) &&
2378 	    ((sd_debug_un == NULL) || (sd_debug_un == un))) {
2379 		mutex_enter(&sd_log_mutex);
2380 		va_start(ap, fmt);
2381 		(void) vsprintf(sd_log_buf, fmt, ap);
2382 		va_end(ap);
2383 		scsi_log(dev, sd_label, CE_CONT, "%s", sd_log_buf);
2384 		mutex_exit(&sd_log_mutex);
2385 	}
2386 #ifdef SD_FAULT_INJECTION
2387 	_NOTE(DATA_READABLE_WITHOUT_LOCK(sd_lun::sd_injection_mask));
2388 	if (un->sd_injection_mask & comp) {
2389 		mutex_enter(&sd_log_mutex);
2390 		va_start(ap, fmt);
2391 		(void) vsprintf(sd_log_buf, fmt, ap);
2392 		va_end(ap);
2393 		sd_injection_log(sd_log_buf, un);
2394 		mutex_exit(&sd_log_mutex);
2395 	}
2396 #endif
2397 }
2398
2399
2400 /*
2401  * Function: sd_log_info
2402  *
2403  * Description: This routine is called by the SD_INFO macro for debug
2404  *		logging of general purpose informational conditions.
2405  *
2406  * Arguments: comp - driver component being logged
2407  *		un - pointer to driver soft state (unit) structure
2408  *		fmt - info string and format to be logged
2409  */
2410
2411 static void
2412 sd_log_info(uint_t component, struct sd_lun *un, const char *fmt, ...)
2413 {
2414 	va_list ap;
2415 	dev_info_t *dev;
2416
2417 	ASSERT(un != NULL);
2418 	dev = SD_DEVINFO(un);
2419 	ASSERT(dev != NULL);
2420
2421 	/*
2422 	 * Filter messages based on the global component and level masks.
2423 	 * Also print if un matches the value of sd_debug_un, or if
2424 	 * sd_debug_un is set to NULL.
2425 	 */
2426 	if ((sd_component_mask & component) &&
2427 	    (sd_level_mask & SD_LOGMASK_INFO) &&
2428 	    ((sd_debug_un == NULL) || (sd_debug_un == un))) {
2429 		mutex_enter(&sd_log_mutex);
2430 		va_start(ap, fmt);
2431 		(void) vsprintf(sd_log_buf, fmt, ap);
2432 		va_end(ap);
2433 		scsi_log(dev, sd_label, CE_CONT, "%s", sd_log_buf);
2434 		mutex_exit(&sd_log_mutex);
2435 	}
2436 #ifdef SD_FAULT_INJECTION
2437 	_NOTE(DATA_READABLE_WITHOUT_LOCK(sd_lun::sd_injection_mask));
2438 	if (un->sd_injection_mask & component) {
2439 		mutex_enter(&sd_log_mutex);
2440 		va_start(ap, fmt);
2441 		(void) vsprintf(sd_log_buf, fmt, ap);
2442 		va_end(ap);
2443 		sd_injection_log(sd_log_buf, un);
2444 		mutex_exit(&sd_log_mutex);
2445 	}
2446 #endif
2447 }
2448
2449
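/*
 * Usage sketch for the logging macros (illustrative only; SD_ERROR,
 * SD_INFO, SD_TRACE and the SD_LOG_* component bits are assumed to be
 * defined in sddef.h):
 *
 *	SD_TRACE(SD_LOG_IO, un, "sdstrategy: entry\n");
 *	SD_ERROR(SD_LOG_IO, un, "sdintr: unexpected pkt state\n");
 *
 * A message is emitted only when its component bit is set in
 * sd_component_mask, the matching severity bit is set in sd_level_mask,
 * and sd_debug_un is either NULL or equal to this instance's soft state
 * pointer.
 */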
2450 /*
2451  * Function: sd_log_trace
2452  *
2453  * Description: This routine is called by the SD_TRACE macro for debug
2454  *		logging of trace conditions (i.e. function entry/exit).
2455  *
2456  * Arguments: comp - driver component being logged
2457  *		un - pointer to driver soft state (unit) structure
2458  *		fmt - trace string and format to be logged
2459  */
2460
2461 static void
2462 sd_log_trace(uint_t component, struct sd_lun *un, const char *fmt, ...)
2463 {
2464 	va_list ap;
2465 	dev_info_t *dev;
2466
2467 	ASSERT(un != NULL);
2468 	dev = SD_DEVINFO(un);
2469 	ASSERT(dev != NULL);
2470
2471 	/*
2472 	 * Filter messages based on the global component and level masks.
2473 	 * Also print if un matches the value of sd_debug_un, or if
2474 	 * sd_debug_un is set to NULL.
2475 	 */
2476 	if ((sd_component_mask & component) &&
2477 	    (sd_level_mask & SD_LOGMASK_TRACE) &&
2478 	    ((sd_debug_un == NULL) || (sd_debug_un == un))) {
2479 		mutex_enter(&sd_log_mutex);
2480 		va_start(ap, fmt);
2481 		(void) vsprintf(sd_log_buf, fmt, ap);
2482 		va_end(ap);
2483 		scsi_log(dev, sd_label, CE_CONT, "%s", sd_log_buf);
2484 		mutex_exit(&sd_log_mutex);
2485 	}
2486 #ifdef SD_FAULT_INJECTION
2487 	_NOTE(DATA_READABLE_WITHOUT_LOCK(sd_lun::sd_injection_mask));
2488 	if (un->sd_injection_mask & component) {
2489 		mutex_enter(&sd_log_mutex);
2490 		va_start(ap, fmt);
2491 		(void) vsprintf(sd_log_buf, fmt, ap);
2492 		va_end(ap);
2493 		sd_injection_log(sd_log_buf, un);
2494 		mutex_exit(&sd_log_mutex);
2495 	}
2496 #endif
2497 }
2498
2499
2500 /*
2501  * Function: sdprobe
2502  *
2503  * Description: This is the driver probe(9e) entry point function.
2504  *
2505  * Arguments: devi - opaque device info handle
2506  *
2507  * Return Code: DDI_PROBE_SUCCESS: If the probe was successful.
2508  *		DDI_PROBE_FAILURE: If the probe failed.
2509  *		DDI_PROBE_PARTIAL: If the instance is not present now,
2510  *		but may be present in the future.
2511  */
2512
2513 static int
2514 sdprobe(dev_info_t *devi)
2515 {
2516 	struct scsi_device *devp;
2517 	int rval;
2518 #ifndef XPV_HVM_DRIVER
2519 	int instance = ddi_get_instance(devi);
2520 #endif /* !XPV_HVM_DRIVER */
2521
2522 	/*
2523 	 * if it wasn't for pln, sdprobe could actually be nulldev
2524 	 * in the "__fibre" case.
2525 	 */
2526 	if (ddi_dev_is_sid(devi) == DDI_SUCCESS) {
2527 		return (DDI_PROBE_DONTCARE);
2528 	}
2529
2530 	devp = ddi_get_driver_private(devi);
2531
2532 	if (devp == NULL) {
2533 		/* Ooops... nexus driver is mis-configured... */
2534 		return (DDI_PROBE_FAILURE);
2535 	}
2536
2537 #ifndef XPV_HVM_DRIVER
2538 	if (ddi_get_soft_state(sd_state, instance) != NULL) {
2539 		return (DDI_PROBE_PARTIAL);
2540 	}
2541 #endif /* !XPV_HVM_DRIVER */
2542
2543 	/*
2544 	 * Call the SCSA utility probe routine to see if we actually
2545 	 * have a target at this SCSI nexus.
2546 	 */
2547 	switch (sd_scsi_probe_with_cache(devp, NULL_FUNC)) {
2548 	case SCSIPROBE_EXISTS:
2549 		switch (devp->sd_inq->inq_dtype) {
2550 		case DTYPE_DIRECT:
2551 			rval = DDI_PROBE_SUCCESS;
2552 			break;
2553 		case DTYPE_RODIRECT:
2554 			/* CDs etc.
Can be removable media */ 2555 rval = DDI_PROBE_SUCCESS; 2556 break; 2557 case DTYPE_OPTICAL: 2558 /* 2559 * Rewritable optical driver HP115AA 2560 * Can also be removable media 2561 */ 2562 2563 /* 2564 * Do not attempt to bind to DTYPE_OPTICAL if 2565 * pre solaris 9 sparc sd behavior is required 2566 * 2567 * If first time through and sd_dtype_optical_bind 2568 * has not been set in /etc/system check properties 2569 */ 2570 2571 if (sd_dtype_optical_bind < 0) { 2572 sd_dtype_optical_bind = ddi_prop_get_int 2573 (DDI_DEV_T_ANY, devi, 0, 2574 "optical-device-bind", 1); 2575 } 2576 2577 if (sd_dtype_optical_bind == 0) { 2578 rval = DDI_PROBE_FAILURE; 2579 } else { 2580 rval = DDI_PROBE_SUCCESS; 2581 } 2582 break; 2583 2584 case DTYPE_NOTPRESENT: 2585 default: 2586 rval = DDI_PROBE_FAILURE; 2587 break; 2588 } 2589 break; 2590 default: 2591 rval = DDI_PROBE_PARTIAL; 2592 break; 2593 } 2594 2595 /* 2596 * This routine checks for resource allocation prior to freeing, 2597 * so it will take care of the "smart probing" case where a 2598 * scsi_probe() may or may not have been issued and will *not* 2599 * free previously-freed resources. 2600 */ 2601 scsi_unprobe(devp); 2602 return (rval); 2603 } 2604 2605 2606 /* 2607 * Function: sdinfo 2608 * 2609 * Description: This is the driver getinfo(9e) entry point function. 2610 * Given the device number, return the devinfo pointer from 2611 * the scsi_device structure or the instance number 2612 * associated with the dev_t. 2613 * 2614 * Arguments: dip - pointer to device info structure 2615 * infocmd - command argument (DDI_INFO_DEVT2DEVINFO, 2616 * DDI_INFO_DEVT2INSTANCE) 2617 * arg - driver dev_t 2618 * resultp - user buffer for request response 2619 * 2620 * Return Code: DDI_SUCCESS 2621 * DDI_FAILURE 2622 */ 2623 /* ARGSUSED */ 2624 static int 2625 sdinfo(dev_info_t *dip, ddi_info_cmd_t infocmd, void *arg, void **result) 2626 { 2627 struct sd_lun *un; 2628 dev_t dev; 2629 int instance; 2630 int error; 2631 2632 switch (infocmd) { 2633 case DDI_INFO_DEVT2DEVINFO: 2634 dev = (dev_t)arg; 2635 instance = SDUNIT(dev); 2636 if ((un = ddi_get_soft_state(sd_state, instance)) == NULL) { 2637 return (DDI_FAILURE); 2638 } 2639 *result = (void *) SD_DEVINFO(un); 2640 error = DDI_SUCCESS; 2641 break; 2642 case DDI_INFO_DEVT2INSTANCE: 2643 dev = (dev_t)arg; 2644 instance = SDUNIT(dev); 2645 *result = (void *)(uintptr_t)instance; 2646 error = DDI_SUCCESS; 2647 break; 2648 default: 2649 error = DDI_FAILURE; 2650 } 2651 return (error); 2652 } 2653 2654 /* 2655 * Function: sd_prop_op 2656 * 2657 * Description: This is the driver prop_op(9e) entry point function. 2658 * Return the number of blocks for the partition in question 2659 * or forward the request to the property facilities. 
2660 * 2661 * Arguments: dev - device number 2662 * dip - pointer to device info structure 2663 * prop_op - property operator 2664 * mod_flags - DDI_PROP_DONTPASS, don't pass to parent 2665 * name - pointer to property name 2666 * valuep - pointer or address of the user buffer 2667 * lengthp - property length 2668 * 2669 * Return Code: DDI_PROP_SUCCESS 2670 * DDI_PROP_NOT_FOUND 2671 * DDI_PROP_UNDEFINED 2672 * DDI_PROP_NO_MEMORY 2673 * DDI_PROP_BUF_TOO_SMALL 2674 */ 2675 2676 static int 2677 sd_prop_op(dev_t dev, dev_info_t *dip, ddi_prop_op_t prop_op, int mod_flags, 2678 char *name, caddr_t valuep, int *lengthp) 2679 { 2680 struct sd_lun *un; 2681 2682 if ((un = ddi_get_soft_state(sd_state, ddi_get_instance(dip))) == NULL) 2683 return (ddi_prop_op(dev, dip, prop_op, mod_flags, 2684 name, valuep, lengthp)); 2685 2686 return (cmlb_prop_op(un->un_cmlbhandle, 2687 dev, dip, prop_op, mod_flags, name, valuep, lengthp, 2688 SDPART(dev), (void *)SD_PATH_DIRECT)); 2689 } 2690 2691 /* 2692 * The following functions are for smart probing: 2693 * sd_scsi_probe_cache_init() 2694 * sd_scsi_probe_cache_fini() 2695 * sd_scsi_clear_probe_cache() 2696 * sd_scsi_probe_with_cache() 2697 */ 2698 2699 /* 2700 * Function: sd_scsi_probe_cache_init 2701 * 2702 * Description: Initializes the probe response cache mutex and head pointer. 2703 * 2704 * Context: Kernel thread context 2705 */ 2706 2707 static void 2708 sd_scsi_probe_cache_init(void) 2709 { 2710 mutex_init(&sd_scsi_probe_cache_mutex, NULL, MUTEX_DRIVER, NULL); 2711 sd_scsi_probe_cache_head = NULL; 2712 } 2713 2714 2715 /* 2716 * Function: sd_scsi_probe_cache_fini 2717 * 2718 * Description: Frees all resources associated with the probe response cache. 2719 * 2720 * Context: Kernel thread context 2721 */ 2722 2723 static void 2724 sd_scsi_probe_cache_fini(void) 2725 { 2726 struct sd_scsi_probe_cache *cp; 2727 struct sd_scsi_probe_cache *ncp; 2728 2729 /* Clean up our smart probing linked list */ 2730 for (cp = sd_scsi_probe_cache_head; cp != NULL; cp = ncp) { 2731 ncp = cp->next; 2732 kmem_free(cp, sizeof (struct sd_scsi_probe_cache)); 2733 } 2734 sd_scsi_probe_cache_head = NULL; 2735 mutex_destroy(&sd_scsi_probe_cache_mutex); 2736 } 2737 2738 2739 /* 2740 * Function: sd_scsi_clear_probe_cache 2741 * 2742 * Description: This routine clears the probe response cache. This is 2743 * done when open() returns ENXIO so that when deferred 2744 * attach is attempted (possibly after a device has been 2745 * turned on) we will retry the probe. Since we don't know 2746 * which target we failed to open, we just clear the 2747 * entire cache. 2748 * 2749 * Context: Kernel thread context 2750 */ 2751 2752 static void 2753 sd_scsi_clear_probe_cache(void) 2754 { 2755 struct sd_scsi_probe_cache *cp; 2756 int i; 2757 2758 mutex_enter(&sd_scsi_probe_cache_mutex); 2759 for (cp = sd_scsi_probe_cache_head; cp != NULL; cp = cp->next) { 2760 /* 2761 * Reset all entries to SCSIPROBE_EXISTS. This will 2762 * force probing to be performed the next time 2763 * sd_scsi_probe_with_cache is called. 2764 */ 2765 for (i = 0; i < NTARGETS_WIDE; i++) { 2766 cp->cache[i] = SCSIPROBE_EXISTS; 2767 } 2768 } 2769 mutex_exit(&sd_scsi_probe_cache_mutex); 2770 } 2771 2772 2773 /* 2774 * Function: sd_scsi_probe_with_cache 2775 * 2776 * Description: This routine implements support for a scsi device probe 2777 * with cache. The driver maintains a cache of the target 2778 * responses to scsi probes. 
If we get no response from a 2779 * target during a probe inquiry, we remember that, and we 2780 * avoid additional calls to scsi_probe on non-zero LUNs 2781 * on the same target until the cache is cleared. By doing 2782 * so we avoid the 1/4 sec selection timeout for nonzero 2783 * LUNs. lun0 of a target is always probed. 2784 * 2785 * Arguments: devp - Pointer to a scsi_device(9S) structure 2786 * waitfunc - indicates what the allocator routines should 2787 * do when resources are not available. This value 2788 * is passed on to scsi_probe() when that routine 2789 * is called. 2790 * 2791 * Return Code: SCSIPROBE_NORESP if a NORESP in probe response cache; 2792 * otherwise the value returned by scsi_probe(9F). 2793 * 2794 * Context: Kernel thread context 2795 */ 2796 2797 static int 2798 sd_scsi_probe_with_cache(struct scsi_device *devp, int (*waitfn)()) 2799 { 2800 struct sd_scsi_probe_cache *cp; 2801 dev_info_t *pdip = ddi_get_parent(devp->sd_dev); 2802 int lun, tgt; 2803 2804 lun = ddi_prop_get_int(DDI_DEV_T_ANY, devp->sd_dev, DDI_PROP_DONTPASS, 2805 SCSI_ADDR_PROP_LUN, 0); 2806 tgt = ddi_prop_get_int(DDI_DEV_T_ANY, devp->sd_dev, DDI_PROP_DONTPASS, 2807 SCSI_ADDR_PROP_TARGET, -1); 2808 2809 /* Make sure caching enabled and target in range */ 2810 if ((tgt < 0) || (tgt >= NTARGETS_WIDE)) { 2811 /* do it the old way (no cache) */ 2812 return (scsi_probe(devp, waitfn)); 2813 } 2814 2815 mutex_enter(&sd_scsi_probe_cache_mutex); 2816 2817 /* Find the cache for this scsi bus instance */ 2818 for (cp = sd_scsi_probe_cache_head; cp != NULL; cp = cp->next) { 2819 if (cp->pdip == pdip) { 2820 break; 2821 } 2822 } 2823 2824 /* If we can't find a cache for this pdip, create one */ 2825 if (cp == NULL) { 2826 int i; 2827 2828 cp = kmem_zalloc(sizeof (struct sd_scsi_probe_cache), 2829 KM_SLEEP); 2830 cp->pdip = pdip; 2831 cp->next = sd_scsi_probe_cache_head; 2832 sd_scsi_probe_cache_head = cp; 2833 for (i = 0; i < NTARGETS_WIDE; i++) { 2834 cp->cache[i] = SCSIPROBE_EXISTS; 2835 } 2836 } 2837 2838 mutex_exit(&sd_scsi_probe_cache_mutex); 2839 2840 /* Recompute the cache for this target if LUN zero */ 2841 if (lun == 0) { 2842 cp->cache[tgt] = SCSIPROBE_EXISTS; 2843 } 2844 2845 /* Don't probe if cache remembers a NORESP from a previous LUN. */ 2846 if (cp->cache[tgt] != SCSIPROBE_EXISTS) { 2847 return (SCSIPROBE_NORESP); 2848 } 2849 2850 /* Do the actual probe; save & return the result */ 2851 return (cp->cache[tgt] = scsi_probe(devp, waitfn)); 2852 } 2853 2854 2855 /* 2856 * Function: sd_scsi_target_lun_init 2857 * 2858 * Description: Initializes the attached lun chain mutex and head pointer. 
2859  *
2860  * Context: Kernel thread context
2861  */
2862
2863 static void
2864 sd_scsi_target_lun_init(void)
2865 {
2866 	mutex_init(&sd_scsi_target_lun_mutex, NULL, MUTEX_DRIVER, NULL);
2867 	sd_scsi_target_lun_head = NULL;
2868 }
2869
2870
2871 /*
2872  * Function: sd_scsi_target_lun_fini
2873  *
2874  * Description: Frees all resources associated with the attached lun
2875  *		chain.
2876  *
2877  * Context: Kernel thread context
2878  */
2879
2880 static void
2881 sd_scsi_target_lun_fini(void)
2882 {
2883 	struct sd_scsi_hba_tgt_lun *cp;
2884 	struct sd_scsi_hba_tgt_lun *ncp;
2885
2886 	for (cp = sd_scsi_target_lun_head; cp != NULL; cp = ncp) {
2887 		ncp = cp->next;
2888 		kmem_free(cp, sizeof (struct sd_scsi_hba_tgt_lun));
2889 	}
2890 	sd_scsi_target_lun_head = NULL;
2891 	mutex_destroy(&sd_scsi_target_lun_mutex);
2892 }
2893
2894
2895 /*
2896  * Function: sd_scsi_get_target_lun_count
2897  *
2898  * Description: This routine will check in the attached lun chain to see
2899  *		how many luns are attached on the required SCSI controller
2900  *		and target. Currently, some capabilities, such as tagged
2901  *		queueing, are set by the HBA on a per-target basis, so all
2902  *		luns on a target share the same capabilities. Based on this
2903  *		assumption, sd should only set these capabilities once per
2904  *		target. This function is called when sd needs to decide how
2905  *		many luns are already attached on a target.
2906  *
2907  * Arguments: dip - Pointer to the system's dev_info_t for the SCSI
2908  *		controller device.
2909  *		target - The target ID on the controller's SCSI bus.
2910  *
2911  * Return Code: The number of luns attached on the required target and
2912  *		controller.
2913  *		-1 if target ID is not in parallel SCSI scope or the given
2914  *		dip is not in the chain.
2915  *
2916  * Context: Kernel thread context
2917  */
2918
2919 static int
2920 sd_scsi_get_target_lun_count(dev_info_t *dip, int target)
2921 {
2922 	struct sd_scsi_hba_tgt_lun *cp;
2923
2924 	if ((target < 0) || (target >= NTARGETS_WIDE)) {
2925 		return (-1);
2926 	}
2927
2928 	mutex_enter(&sd_scsi_target_lun_mutex);
2929
2930 	for (cp = sd_scsi_target_lun_head; cp != NULL; cp = cp->next) {
2931 		if (cp->pdip == dip) {
2932 			break;
2933 		}
2934 	}
2935
2936 	mutex_exit(&sd_scsi_target_lun_mutex);
2937
2938 	if (cp == NULL) {
2939 		return (-1);
2940 	}
2941
2942 	return (cp->nlun[target]);
2943 }
2944
2945
2946 /*
2947  * Function: sd_scsi_update_lun_on_target
2948  *
2949  * Description: This routine is used to update the attached lun chain when a
2950  *		lun is attached or detached on a target.
2951  *
2952  * Arguments: dip - Pointer to the system's dev_info_t for the SCSI
2953  *		controller device.
2954  *		target - The target ID on the controller's SCSI bus.
2955  *		flag - Indicates whether the lun is being attached or detached.
2956  *
2957  * Context: Kernel thread context
2958  */
2959
2960 static void
2961 sd_scsi_update_lun_on_target(dev_info_t *dip, int target, int flag)
2962 {
2963 	struct sd_scsi_hba_tgt_lun *cp;
2964
2965 	mutex_enter(&sd_scsi_target_lun_mutex);
2966
2967 	for (cp = sd_scsi_target_lun_head; cp != NULL; cp = cp->next) {
2968 		if (cp->pdip == dip) {
2969 			break;
2970 		}
2971 	}
2972
2973 	if ((cp == NULL) && (flag == SD_SCSI_LUN_ATTACH)) {
2974 		cp = kmem_zalloc(sizeof (struct sd_scsi_hba_tgt_lun),
2975 		    KM_SLEEP);
2976 		cp->pdip = dip;
2977 		cp->next = sd_scsi_target_lun_head;
2978 		sd_scsi_target_lun_head = cp;
2979 	}
2980
2981 	mutex_exit(&sd_scsi_target_lun_mutex);
2982
2983 	if (cp != NULL) {
2984 		if (flag == SD_SCSI_LUN_ATTACH) {
2985 			cp->nlun[target]++;
2986 		} else {
2987 			cp->nlun[target]--;
2988 		}
2989 	}
2990 }
2991
2992
2993 /*
2994  * Function: sd_spin_up_unit
2995  *
2996  * Description: Issues the following commands to spin-up the device:
2997  *		START STOP UNIT, and INQUIRY.
2998  *
2999  * Arguments: ssc - ssc contains pointer to driver soft state (unit)
3000  *		structure for this target.
3001  *
3002  * Return Code: 0 - success
3003  *		EIO - failure
3004  *		EACCES - reservation conflict
3005  *
3006  * Context: Kernel thread context
3007  */
3008
3009 static int
3010 sd_spin_up_unit(sd_ssc_t *ssc)
3011 {
3012 	size_t resid = 0;
3013 	int has_conflict = FALSE;
3014 	uchar_t *bufaddr;
3015 	int status;
3016 	struct sd_lun *un;
3017
3018 	ASSERT(ssc != NULL);
3019 	un = ssc->ssc_un;
3020 	ASSERT(un != NULL);
3021
3022 	/*
3023 	 * Send a throwaway START UNIT command.
3024 	 *
3025 	 * If we fail on this, we don't care presently what precisely
3026 	 * is wrong. EMC's arrays will also fail this with a check
3027 	 * condition (0x2/0x4/0x3) if the device is "inactive," but
3028 	 * we don't want to fail the attach because it may become
3029 	 * "active" later.
3030 	 */
3031 	status = sd_send_scsi_START_STOP_UNIT(ssc, SD_TARGET_START,
3032 	    SD_PATH_DIRECT);
3033
3034 	if (status != 0) {
3035 		if (status == EACCES)
3036 			has_conflict = TRUE;
3037 		sd_ssc_assessment(ssc, SD_FMT_IGNORE);
3038 	}
3039
3040 	/*
3041 	 * Send another INQUIRY command to the target. This is necessary for
3042 	 * non-removable media direct access devices because their INQUIRY data
3043 	 * may not be fully qualified until they are spun up (perhaps via the
3044 	 * START command above). (Note: This seems to be needed for some
3045 	 * legacy devices only.) The INQUIRY command should succeed even if a
3046 	 * Reservation Conflict is present.
3047 	 */
3048 	bufaddr = kmem_zalloc(SUN_INQSIZE, KM_SLEEP);
3049
3050 	if (sd_send_scsi_INQUIRY(ssc, bufaddr, SUN_INQSIZE, 0, 0, &resid)
3051 	    != 0) {
3052 		kmem_free(bufaddr, SUN_INQSIZE);
3053 		sd_ssc_assessment(ssc, SD_FMT_STATUS_CHECK);
3054 		return (EIO);
3055 	}
3056
3057 	/*
3058 	 * If we got enough INQUIRY data, copy it over the old INQUIRY data.
3059 	 * Note that this routine does not return a failure here even if the
3060 	 * INQUIRY command did not return any data. This is a legacy behavior.
3061 	 */
3062 	if ((SUN_INQSIZE - resid) >= SUN_MIN_INQLEN) {
3063 		bcopy(bufaddr, SD_INQUIRY(un), SUN_INQSIZE);
3064 	}
3065
3066 	kmem_free(bufaddr, SUN_INQSIZE);
3067
3068 	/* If we hit a reservation conflict above, tell the caller. */
3069 	if (has_conflict == TRUE) {
3070 		return (EACCES);
3071 	}
3072
3073 	return (0);
3074 }
3075
3076 #ifdef _LP64
3077 /*
3078  * Function: sd_enable_descr_sense
3079  *
3080  * Description: This routine attempts to select descriptor sense format
3081  *		using the Control mode page.
Devices that support 64 bit 3082 * LBAs (for >2TB luns) should also implement descriptor 3083 * sense data so we will call this function whenever we see 3084 * a lun larger than 2TB. If for some reason the device 3085 * supports 64 bit LBAs but doesn't support descriptor sense 3086 * presumably the mode select will fail. Everything will 3087 * continue to work normally except that we will not get 3088 * complete sense data for commands that fail with an LBA 3089 * larger than 32 bits. 3090 * 3091 * Arguments: ssc - ssc contains pointer to driver soft state (unit) 3092 * structure for this target. 3093 * 3094 * Context: Kernel thread context only 3095 */ 3096 3097 static void 3098 sd_enable_descr_sense(sd_ssc_t *ssc) 3099 { 3100 uchar_t *header; 3101 struct mode_control_scsi3 *ctrl_bufp; 3102 size_t buflen; 3103 size_t bd_len; 3104 int status; 3105 struct sd_lun *un; 3106 3107 ASSERT(ssc != NULL); 3108 un = ssc->ssc_un; 3109 ASSERT(un != NULL); 3110 3111 /* 3112 * Read MODE SENSE page 0xA, Control Mode Page 3113 */ 3114 buflen = MODE_HEADER_LENGTH + MODE_BLK_DESC_LENGTH + 3115 sizeof (struct mode_control_scsi3); 3116 header = kmem_zalloc(buflen, KM_SLEEP); 3117 3118 status = sd_send_scsi_MODE_SENSE(ssc, CDB_GROUP0, header, buflen, 3119 MODEPAGE_CTRL_MODE, SD_PATH_DIRECT); 3120 3121 if (status != 0) { 3122 SD_ERROR(SD_LOG_COMMON, un, 3123 "sd_enable_descr_sense: mode sense ctrl page failed\n"); 3124 goto eds_exit; 3125 } 3126 3127 /* 3128 * Determine size of Block Descriptors in order to locate 3129 * the mode page data. ATAPI devices return 0, SCSI devices 3130 * should return MODE_BLK_DESC_LENGTH. 3131 */ 3132 bd_len = ((struct mode_header *)header)->bdesc_length; 3133 3134 /* Clear the mode data length field for MODE SELECT */ 3135 ((struct mode_header *)header)->length = 0; 3136 3137 ctrl_bufp = (struct mode_control_scsi3 *) 3138 (header + MODE_HEADER_LENGTH + bd_len); 3139 3140 /* 3141 * If the page length is smaller than the expected value, 3142 * the target device doesn't support D_SENSE. Bail out here. 3143 */ 3144 if (ctrl_bufp->mode_page.length < 3145 sizeof (struct mode_control_scsi3) - 2) { 3146 SD_ERROR(SD_LOG_COMMON, un, 3147 "sd_enable_descr_sense: enable D_SENSE failed\n"); 3148 goto eds_exit; 3149 } 3150 3151 /* 3152 * Clear PS bit for MODE SELECT 3153 */ 3154 ctrl_bufp->mode_page.ps = 0; 3155 3156 /* 3157 * Set D_SENSE to enable descriptor sense format. 
3158 */ 3159 ctrl_bufp->d_sense = 1; 3160 3161 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 3162 3163 /* 3164 * Use MODE SELECT to commit the change to the D_SENSE bit 3165 */ 3166 status = sd_send_scsi_MODE_SELECT(ssc, CDB_GROUP0, header, 3167 buflen, SD_DONTSAVE_PAGE, SD_PATH_DIRECT); 3168 3169 if (status != 0) { 3170 SD_INFO(SD_LOG_COMMON, un, 3171 "sd_enable_descr_sense: mode select ctrl page failed\n"); 3172 } else { 3173 kmem_free(header, buflen); 3174 return; 3175 } 3176 3177 eds_exit: 3178 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 3179 kmem_free(header, buflen); 3180 } 3181 3182 /* 3183 * Function: sd_reenable_dsense_task 3184 * 3185 * Description: Re-enable descriptor sense after device or bus reset 3186 * 3187 * Context: Executes in a taskq() thread context 3188 */ 3189 static void 3190 sd_reenable_dsense_task(void *arg) 3191 { 3192 struct sd_lun *un = arg; 3193 sd_ssc_t *ssc; 3194 3195 ASSERT(un != NULL); 3196 3197 ssc = sd_ssc_init(un); 3198 sd_enable_descr_sense(ssc); 3199 sd_ssc_fini(ssc); 3200 } 3201 #endif /* _LP64 */ 3202 3203 /* 3204 * Function: sd_set_mmc_caps 3205 * 3206 * Description: This routine determines if the device is MMC compliant and if 3207 * the device supports CDDA via a mode sense of the CDVD 3208 * capabilities mode page. Also checks if the device is a 3209 * dvdram writable device. 3210 * 3211 * Arguments: ssc - ssc contains pointer to driver soft state (unit) 3212 * structure for this target. 3213 * 3214 * Context: Kernel thread context only 3215 */ 3216 3217 static void 3218 sd_set_mmc_caps(sd_ssc_t *ssc) 3219 { 3220 struct mode_header_grp2 *sense_mhp; 3221 uchar_t *sense_page; 3222 caddr_t buf; 3223 int bd_len; 3224 int status; 3225 struct uscsi_cmd com; 3226 int rtn; 3227 uchar_t *out_data_rw, *out_data_hd; 3228 uchar_t *rqbuf_rw, *rqbuf_hd; 3229 struct sd_lun *un; 3230 3231 ASSERT(ssc != NULL); 3232 un = ssc->ssc_un; 3233 ASSERT(un != NULL); 3234 3235 /* 3236 * The flags which will be set in this function are - mmc compliant, 3237 * dvdram writable device, cdda support. Initialize them to FALSE 3238 * and if a capability is detected - it will be set to TRUE. 3239 */ 3240 un->un_f_mmc_cap = FALSE; 3241 un->un_f_dvdram_writable_device = FALSE; 3242 un->un_f_cfg_cdda = FALSE; 3243 3244 buf = kmem_zalloc(BUFLEN_MODE_CDROM_CAP, KM_SLEEP); 3245 status = sd_send_scsi_MODE_SENSE(ssc, CDB_GROUP1, (uchar_t *)buf, 3246 BUFLEN_MODE_CDROM_CAP, MODEPAGE_CDROM_CAP, SD_PATH_DIRECT); 3247 3248 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 3249 3250 if (status != 0) { 3251 /* command failed; just return */ 3252 kmem_free(buf, BUFLEN_MODE_CDROM_CAP); 3253 return; 3254 } 3255 /* 3256 * If the mode sense request for the CDROM CAPABILITIES 3257 * page (0x2A) succeeds the device is assumed to be MMC. 3258 */ 3259 un->un_f_mmc_cap = TRUE; 3260 3261 /* Get to the page data */ 3262 sense_mhp = (struct mode_header_grp2 *)buf; 3263 bd_len = (sense_mhp->bdesc_length_hi << 8) | 3264 sense_mhp->bdesc_length_lo; 3265 if (bd_len > MODE_BLK_DESC_LENGTH) { 3266 /* 3267 * We did not get back the expected block descriptor 3268 * length so we cannot determine if the device supports 3269 * CDDA. However, we still indicate the device is MMC 3270 * according to the successful response to the page 3271 * 0x2A mode sense request. 
3272 	 */
3273 		scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
3274 		    "sd_set_mmc_caps: Mode Sense returned "
3275 		    "invalid block descriptor length\n");
3276 		kmem_free(buf, BUFLEN_MODE_CDROM_CAP);
3277 		return;
3278 	}
3279 
3280 	/* See if read CDDA is supported */
3281 	sense_page = (uchar_t *)(buf + MODE_HEADER_LENGTH_GRP2 +
3282 	    bd_len);
3283 	un->un_f_cfg_cdda = (sense_page[5] & 0x01) ? TRUE : FALSE;
3284 
3285 	/* See if writing DVD RAM is supported. */
3286 	un->un_f_dvdram_writable_device = (sense_page[3] & 0x20) ? TRUE : FALSE;
3287 	if (un->un_f_dvdram_writable_device == TRUE) {
3288 		kmem_free(buf, BUFLEN_MODE_CDROM_CAP);
3289 		return;
3290 	}
3291 
3292 	/*
3293 	 * If the device presents DVD or CD capabilities in the mode
3294 	 * page, we can return here since a RRD will not have
3295 	 * these capabilities.
3296 	 */
3297 	if ((sense_page[2] & 0x3f) || (sense_page[3] & 0x3f)) {
3298 		kmem_free(buf, BUFLEN_MODE_CDROM_CAP);
3299 		return;
3300 	}
3301 	kmem_free(buf, BUFLEN_MODE_CDROM_CAP);
3302 
3303 	/*
3304 	 * If un->un_f_dvdram_writable_device is still FALSE,
3305 	 * check for a Removable Rigid Disk (RRD). A RRD
3306 	 * device is identified by the features RANDOM_WRITABLE and
3307 	 * HARDWARE_DEFECT_MANAGEMENT.
3308 	 */
3309 	out_data_rw = kmem_zalloc(SD_CURRENT_FEATURE_LEN, KM_SLEEP);
3310 	rqbuf_rw = kmem_zalloc(SENSE_LENGTH, KM_SLEEP);
3311 
3312 	rtn = sd_send_scsi_feature_GET_CONFIGURATION(ssc, &com, rqbuf_rw,
3313 	    SENSE_LENGTH, out_data_rw, SD_CURRENT_FEATURE_LEN,
3314 	    RANDOM_WRITABLE, SD_PATH_STANDARD);
3315 
3316 	sd_ssc_assessment(ssc, SD_FMT_IGNORE);
3317 
3318 	if (rtn != 0) {
3319 		kmem_free(out_data_rw, SD_CURRENT_FEATURE_LEN);
3320 		kmem_free(rqbuf_rw, SENSE_LENGTH);
3321 		return;
3322 	}
3323 
3324 	out_data_hd = kmem_zalloc(SD_CURRENT_FEATURE_LEN, KM_SLEEP);
3325 	rqbuf_hd = kmem_zalloc(SENSE_LENGTH, KM_SLEEP);
3326 
3327 	rtn = sd_send_scsi_feature_GET_CONFIGURATION(ssc, &com, rqbuf_hd,
3328 	    SENSE_LENGTH, out_data_hd, SD_CURRENT_FEATURE_LEN,
3329 	    HARDWARE_DEFECT_MANAGEMENT, SD_PATH_STANDARD);
3330 
3331 	sd_ssc_assessment(ssc, SD_FMT_IGNORE);
3332 
3333 	if (rtn == 0) {
3334 		/*
3335 		 * We have good information, check for random writable
3336 		 * and hardware defect features.
3337 		 */
3338 		if ((out_data_rw[9] & RANDOM_WRITABLE) &&
3339 		    (out_data_hd[9] & HARDWARE_DEFECT_MANAGEMENT)) {
3340 			un->un_f_dvdram_writable_device = TRUE;
3341 		}
3342 	}
3343 
3344 	kmem_free(out_data_rw, SD_CURRENT_FEATURE_LEN);
3345 	kmem_free(rqbuf_rw, SENSE_LENGTH);
3346 	kmem_free(out_data_hd, SD_CURRENT_FEATURE_LEN);
3347 	kmem_free(rqbuf_hd, SENSE_LENGTH);
3348 }
3349 
3350 /*
3351  * Function: sd_check_for_writable_cd
3352  *
3353  * Description: This routine determines if the media in the device is
3354  *		writable or not. It uses the GET CONFIGURATION command (0x46)
3355  *		to determine if the media is writable.
3356  *
3357  * Arguments: ssc - ssc contains pointer to driver soft state (unit) structure
3358  *		path_flag - SD_PATH_DIRECT to use the USCSI "direct"
3359  *		chain and the normal command waitq, or
3360  *		SD_PATH_DIRECT_PRIORITY to use the USCSI
3361  *		"direct" chain and bypass the normal command
3362  *		waitq.
3363  *
3364  * Context: Never called at interrupt context.
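 *
 *		Note: a reported current profile of 0x0012 (DVD-RAM, per
 *		the MMC spec) is treated as writable media in the check
 *		below.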
3365  */
3366 
3367 static void
3368 sd_check_for_writable_cd(sd_ssc_t *ssc, int path_flag)
3369 {
3370 	struct uscsi_cmd	com;
3371 	uchar_t			*out_data;
3372 	uchar_t			*rqbuf;
3373 	int			rtn;
3374 	uchar_t			*out_data_rw, *out_data_hd;
3375 	uchar_t			*rqbuf_rw, *rqbuf_hd;
3376 	struct mode_header_grp2	*sense_mhp;
3377 	uchar_t			*sense_page;
3378 	caddr_t			buf;
3379 	int			bd_len;
3380 	int			status;
3381 	struct sd_lun		*un;
3382 
3383 	ASSERT(ssc != NULL);
3384 	un = ssc->ssc_un;
3385 	ASSERT(un != NULL);
3386 	ASSERT(mutex_owned(SD_MUTEX(un)));
3387 
3388 	/*
3389 	 * Initialize writable media to FALSE; it is set to TRUE only if
3390 	 * the configuration info tells us the media is writable.
3391 	 */
3392 	un->un_f_mmc_writable_media = FALSE;
3393 	mutex_exit(SD_MUTEX(un));
3394 
3395 	out_data = kmem_zalloc(SD_PROFILE_HEADER_LEN, KM_SLEEP);
3396 	rqbuf = kmem_zalloc(SENSE_LENGTH, KM_SLEEP);
3397 
3398 	rtn = sd_send_scsi_GET_CONFIGURATION(ssc, &com, rqbuf, SENSE_LENGTH,
3399 	    out_data, SD_PROFILE_HEADER_LEN, path_flag);
3400 
3401 	if (rtn != 0)
3402 		sd_ssc_assessment(ssc, SD_FMT_IGNORE);
3403 
3404 	mutex_enter(SD_MUTEX(un));
3405 	if (rtn == 0) {
3406 		/*
3407 		 * We have good information, check for writable DVD.
3408 		 */
3409 		if ((out_data[6] == 0) && (out_data[7] == 0x12)) {
3410 			un->un_f_mmc_writable_media = TRUE;
3411 			kmem_free(out_data, SD_PROFILE_HEADER_LEN);
3412 			kmem_free(rqbuf, SENSE_LENGTH);
3413 			return;
3414 		}
3415 	}
3416 
3417 	kmem_free(out_data, SD_PROFILE_HEADER_LEN);
3418 	kmem_free(rqbuf, SENSE_LENGTH);
3419 
3420 	/*
3421 	 * Determine if this is a RRD type device.
3422 	 */
3423 	mutex_exit(SD_MUTEX(un));
3424 	buf = kmem_zalloc(BUFLEN_MODE_CDROM_CAP, KM_SLEEP);
3425 	status = sd_send_scsi_MODE_SENSE(ssc, CDB_GROUP1, (uchar_t *)buf,
3426 	    BUFLEN_MODE_CDROM_CAP, MODEPAGE_CDROM_CAP, path_flag);
3427 
3428 	sd_ssc_assessment(ssc, SD_FMT_IGNORE);
3429 
3430 	mutex_enter(SD_MUTEX(un));
3431 	if (status != 0) {
3432 		/* command failed; just return */
3433 		kmem_free(buf, BUFLEN_MODE_CDROM_CAP);
3434 		return;
3435 	}
3436 
3437 	/* Get to the page data */
3438 	sense_mhp = (struct mode_header_grp2 *)buf;
3439 	bd_len = (sense_mhp->bdesc_length_hi << 8) | sense_mhp->bdesc_length_lo;
3440 	if (bd_len > MODE_BLK_DESC_LENGTH) {
3441 		/*
3442 		 * We did not get back the expected block descriptor length so
3443 		 * we cannot check the mode page.
3444 		 */
3445 		scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
3446 		    "sd_check_for_writable_cd: Mode Sense returned "
3447 		    "invalid block descriptor length\n");
3448 		kmem_free(buf, BUFLEN_MODE_CDROM_CAP);
3449 		return;
3450 	}
3451 
3452 	/*
3453 	 * If the device presents DVD or CD capabilities in the mode
3454 	 * page, we can return here since a RRD device will not have
3455 	 * these capabilities.
3456 	 */
3457 	sense_page = (uchar_t *)(buf + MODE_HEADER_LENGTH_GRP2 + bd_len);
3458 	if ((sense_page[2] & 0x3f) || (sense_page[3] & 0x3f)) {
3459 		kmem_free(buf, BUFLEN_MODE_CDROM_CAP);
3460 		return;
3461 	}
3462 	kmem_free(buf, BUFLEN_MODE_CDROM_CAP);
3463 
3464 	/*
3465 	 * If un->un_f_mmc_writable_media is still FALSE,
3466 	 * check for RRD type media. A RRD device is identified
3467 	 * by the features RANDOM_WRITABLE and HARDWARE_DEFECT_MANAGEMENT.
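	 * (These are MMC feature codes 0x0020 and 0x0024, respectively.)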
3468 	 */
3469 	mutex_exit(SD_MUTEX(un));
3470 	out_data_rw = kmem_zalloc(SD_CURRENT_FEATURE_LEN, KM_SLEEP);
3471 	rqbuf_rw = kmem_zalloc(SENSE_LENGTH, KM_SLEEP);
3472 
3473 	rtn = sd_send_scsi_feature_GET_CONFIGURATION(ssc, &com, rqbuf_rw,
3474 	    SENSE_LENGTH, out_data_rw, SD_CURRENT_FEATURE_LEN,
3475 	    RANDOM_WRITABLE, path_flag);
3476 
3477 	sd_ssc_assessment(ssc, SD_FMT_IGNORE);
3478 	if (rtn != 0) {
3479 		kmem_free(out_data_rw, SD_CURRENT_FEATURE_LEN);
3480 		kmem_free(rqbuf_rw, SENSE_LENGTH);
3481 		mutex_enter(SD_MUTEX(un));
3482 		return;
3483 	}
3484 
3485 	out_data_hd = kmem_zalloc(SD_CURRENT_FEATURE_LEN, KM_SLEEP);
3486 	rqbuf_hd = kmem_zalloc(SENSE_LENGTH, KM_SLEEP);
3487 
3488 	rtn = sd_send_scsi_feature_GET_CONFIGURATION(ssc, &com, rqbuf_hd,
3489 	    SENSE_LENGTH, out_data_hd, SD_CURRENT_FEATURE_LEN,
3490 	    HARDWARE_DEFECT_MANAGEMENT, path_flag);
3491 
3492 	sd_ssc_assessment(ssc, SD_FMT_IGNORE);
3493 	mutex_enter(SD_MUTEX(un));
3494 	if (rtn == 0) {
3495 		/*
3496 		 * We have good information, check for random writable
3497 		 * and hardware defect features as current.
3498 		 */
3499 		if ((out_data_rw[9] & RANDOM_WRITABLE) &&
3500 		    (out_data_rw[10] & 0x1) &&
3501 		    (out_data_hd[9] & HARDWARE_DEFECT_MANAGEMENT) &&
3502 		    (out_data_hd[10] & 0x1)) {
3503 			un->un_f_mmc_writable_media = TRUE;
3504 		}
3505 	}
3506 
3507 	kmem_free(out_data_rw, SD_CURRENT_FEATURE_LEN);
3508 	kmem_free(rqbuf_rw, SENSE_LENGTH);
3509 	kmem_free(out_data_hd, SD_CURRENT_FEATURE_LEN);
3510 	kmem_free(rqbuf_hd, SENSE_LENGTH);
3511 }
3512 
3513 /*
3514  * Function: sd_read_unit_properties
3515  *
3516  * Description: The following implements a property lookup mechanism.
3517  *		Properties for particular disks (keyed on vendor, model
3518  *		and rev numbers) are sought in the sd.conf file via
3519  *		sd_process_sdconf_file(), and if not found there, are
3520  *		looked for in a list hardcoded in this driver via
3521  *		sd_process_sdconf_table(). Once located, the properties
3522  *		are used to update the driver unit structure.
3523  *
3524  * Arguments: un - driver soft state (unit) structure
3525  */
3526 
3527 static void
3528 sd_read_unit_properties(struct sd_lun *un)
3529 {
3530 	/*
3531 	 * sd_process_sdconf_file returns SD_FAILURE if it cannot find
3532 	 * the "sd-config-list" property (from the sd.conf file) or if
3533 	 * there was not a match for the inquiry vid/pid. If this event
3534 	 * occurs, the static driver configuration table is searched for
3535 	 * a match.
3536 	 */
3537 	ASSERT(un != NULL);
3538 	if (sd_process_sdconf_file(un) == SD_FAILURE) {
3539 		sd_process_sdconf_table(un);
3540 	}
3541 
3542 	/* check for LSI device */
3543 	sd_is_lsi(un);
3544 
3545 
3546 }
3547 
3548 
3549 /*
3550  * Function: sd_process_sdconf_file
3551  *
3552  * Description: Use ddi_prop_lookup(9F) to obtain the properties from the
3553  *		driver's config file (i.e., sd.conf) and update the driver
3554  *		soft state structure accordingly.
3555  *
3556  * Arguments: un - driver soft state (unit) structure
3557  *
3558  * Return Code: SD_SUCCESS - The properties were successfully set according
3559  *		to the driver configuration file.
3560  *		SD_FAILURE - The driver config list was not obtained or
3561  *		there was no vid/pid match. This indicates that
3562  *		the static config table should be used.
3563  *
3564  * The config file has a property, "sd-config-list". Currently we support
3565  * two kinds of formats. For both formats, the value of this property
3566  * is a list of duplets:
3567  *
3568  *  sd-config-list=
3569  *	<duplet>
3570  *	[,<duplet>]*;
3571  *
3572  * For the improved format, we have
3573  *
3574  *	<duplet>:= "<vid+pid>","<tunable-list>"
3575  *
3576  * and
3577  *
3578  *	<tunable-list>:= <tunable> [, <tunable> ]*;
3579  *	<tunable>:= <name> : <value>
3580  *
3581  * The <vid+pid> is the string that is returned by the target device on a
3582  * SCSI inquiry command; the <tunable-list> contains one or more tunables
3583  * to apply to all target devices with the specified <vid+pid>.
3584  *
3585  * Each <tunable> is a "<name> : <value>" pair.
3586  *
3587  * For the old format, the structure of each duplet is as follows:
3588  *
3589  *	<duplet>:= "<vid+pid>","<data-property-name_list>"
3590  *
3591  * The first entry of the duplet is the device ID string (the concatenated
3592  * vid & pid; not to be confused with a device_id). This is defined in
3593  * the same way as in the sd_disk_table.
3594  *
3595  * The second part of the duplet is a string that identifies a
3596  * data-property-name-list. The data-property-name-list is defined as
3597  * follows:
3598  *
3599  *	<data-property-name-list>:=<data-property-name> [<data-property-name>]
3600  *
3601  * The syntax of <data-property-name> depends on the <version> field.
3602  *
3603  * If version = SD_CONF_VERSION_1, we have the following syntax:
3604  *
3605  *	<data-property-name>:=<version>,<flags>,<prop0>,<prop1>,.....<propN>
3606  *
3607  * where the prop0 value will be used to set prop0 if bit0 is set in the
3608  * flags, prop1 if bit1 is set, etc., and N = SD_CONF_MAX_ITEMS - 1
3609  *
3610  */
3611 
3612 static int
3613 sd_process_sdconf_file(struct sd_lun *un)
3614 {
3615 	char	**config_list = NULL;
3616 	uint_t	nelements;
3617 	char	*vidptr;
3618 	int	vidlen;
3619 	char	*dnlist_ptr;
3620 	char	*dataname_ptr;
3621 	char	*dataname_lasts;
3622 	int	*data_list = NULL;
3623 	uint_t	data_list_len;
3624 	int	rval = SD_FAILURE;
3625 	int	i;
3626 
3627 	ASSERT(un != NULL);
3628 
3629 	/* Obtain the configuration list associated with the .conf file */
3630 	if (ddi_prop_lookup_string_array(DDI_DEV_T_ANY, SD_DEVINFO(un),
3631 	    DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, sd_config_list,
3632 	    &config_list, &nelements) != DDI_PROP_SUCCESS) {
3633 		return (SD_FAILURE);
3634 	}
3635 
3636 	/*
3637 	 * Compare vids in each duplet to the inquiry vid - if a match is
3638 	 * made, get the data value and update the soft state structure
3639 	 * accordingly.
3640 	 *
3641 	 * Each duplet should appear as a pair of strings; return SD_FAILURE
3642 	 * otherwise.
3643 	 */
3644 	if (nelements & 1) {
3645 		scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
3646 		    "sd-config-list should be specified as pairs of strings.\n");
3647 		if (config_list)
3648 			ddi_prop_free(config_list);
3649 		return (SD_FAILURE);
3650 	}
3651 
3652 	for (i = 0; i < nelements; i += 2) {
3653 		/*
3654 		 * Note: The assumption here is that each vid entry is on
3655 		 * a unique line from its associated duplet.
3656 		 */
3657 		vidptr = config_list[i];
3658 		vidlen = (int)strlen(vidptr);
3659 		if ((vidlen == 0) ||
3660 		    (sd_sdconf_id_match(un, vidptr, vidlen) != SD_SUCCESS)) {
3661 			continue;
3662 		}
3663 
3664 		/*
3665 		 * dnlist contains 1 or more blank separated
3666 		 * data-property-name entries
3667 		 */
3668 		dnlist_ptr = config_list[i + 1];
3669 
3670 		if (strchr(dnlist_ptr, ':') != NULL) {
3671 			/*
3672 			 * Decode the improved format sd-config-list.
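			 * For example, a hypothetical duplet of
			 *   "ACME    ROADRUNNER      ","retries-busy:6, throttle-max:256"
			 * (vid/pid illustrative only) is decoded here one
			 * name:value tunable at a time.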
3673 */ 3674 sd_nvpair_str_decode(un, dnlist_ptr); 3675 } else { 3676 /* 3677 * The old format sd-config-list, loop through all 3678 * data-property-name entries in the 3679 * data-property-name-list 3680 * setting the properties for each. 3681 */ 3682 for (dataname_ptr = sd_strtok_r(dnlist_ptr, " \t", 3683 &dataname_lasts); dataname_ptr != NULL; 3684 dataname_ptr = sd_strtok_r(NULL, " \t", 3685 &dataname_lasts)) { 3686 int version; 3687 3688 SD_INFO(SD_LOG_ATTACH_DETACH, un, 3689 "sd_process_sdconf_file: disk:%s, " 3690 "data:%s\n", vidptr, dataname_ptr); 3691 3692 /* Get the data list */ 3693 if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, 3694 SD_DEVINFO(un), 0, dataname_ptr, &data_list, 3695 &data_list_len) != DDI_PROP_SUCCESS) { 3696 SD_INFO(SD_LOG_ATTACH_DETACH, un, 3697 "sd_process_sdconf_file: data " 3698 "property (%s) has no value\n", 3699 dataname_ptr); 3700 continue; 3701 } 3702 3703 version = data_list[0]; 3704 3705 if (version == SD_CONF_VERSION_1) { 3706 sd_tunables values; 3707 3708 /* Set the properties */ 3709 if (sd_chk_vers1_data(un, data_list[1], 3710 &data_list[2], data_list_len, 3711 dataname_ptr) == SD_SUCCESS) { 3712 sd_get_tunables_from_conf(un, 3713 data_list[1], &data_list[2], 3714 &values); 3715 sd_set_vers1_properties(un, 3716 data_list[1], &values); 3717 rval = SD_SUCCESS; 3718 } else { 3719 rval = SD_FAILURE; 3720 } 3721 } else { 3722 scsi_log(SD_DEVINFO(un), sd_label, 3723 CE_WARN, "data property %s version " 3724 "0x%x is invalid.", 3725 dataname_ptr, version); 3726 rval = SD_FAILURE; 3727 } 3728 if (data_list) 3729 ddi_prop_free(data_list); 3730 } 3731 } 3732 } 3733 3734 /* free up the memory allocated by ddi_prop_lookup_string_array(). */ 3735 if (config_list) { 3736 ddi_prop_free(config_list); 3737 } 3738 3739 return (rval); 3740 } 3741 3742 /* 3743 * Function: sd_nvpair_str_decode() 3744 * 3745 * Description: Parse the improved format sd-config-list to get 3746 * each entry of tunable, which includes a name-value pair. 3747 * Then call sd_set_properties() to set the property. 3748 * 3749 * Arguments: un - driver soft state (unit) structure 3750 * nvpair_str - the tunable list 3751 */ 3752 static void 3753 sd_nvpair_str_decode(struct sd_lun *un, char *nvpair_str) 3754 { 3755 char *nv, *name, *value, *token; 3756 char *nv_lasts, *v_lasts, *x_lasts; 3757 3758 for (nv = sd_strtok_r(nvpair_str, ",", &nv_lasts); nv != NULL; 3759 nv = sd_strtok_r(NULL, ",", &nv_lasts)) { 3760 token = sd_strtok_r(nv, ":", &v_lasts); 3761 name = sd_strtok_r(token, " \t", &x_lasts); 3762 token = sd_strtok_r(NULL, ":", &v_lasts); 3763 value = sd_strtok_r(token, " \t", &x_lasts); 3764 if (name == NULL || value == NULL) { 3765 SD_INFO(SD_LOG_ATTACH_DETACH, un, 3766 "sd_nvpair_str_decode: " 3767 "name or value is not valid!\n"); 3768 } else { 3769 sd_set_properties(un, name, value); 3770 } 3771 } 3772 } 3773 3774 /* 3775 * Function: sd_strtok_r() 3776 * 3777 * Description: This function uses strpbrk and strspn to break 3778 * string into tokens on sequentially subsequent calls. Return 3779 * NULL when no non-separator characters remain. The first 3780 * argument is NULL for subsequent calls. 
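 *
 *		For example: with a writable buffer holding "a,b" and a
 *		sepset of ",", successive calls return "a", then "b",
 *		then NULL; the buffer is modified in place, as with
 *		strtok_r(3C).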
3781 */ 3782 static char * 3783 sd_strtok_r(char *string, const char *sepset, char **lasts) 3784 { 3785 char *q, *r; 3786 3787 /* First or subsequent call */ 3788 if (string == NULL) 3789 string = *lasts; 3790 3791 if (string == NULL) 3792 return (NULL); 3793 3794 /* Skip leading separators */ 3795 q = string + strspn(string, sepset); 3796 3797 if (*q == '\0') 3798 return (NULL); 3799 3800 if ((r = strpbrk(q, sepset)) == NULL) 3801 *lasts = NULL; 3802 else { 3803 *r = '\0'; 3804 *lasts = r + 1; 3805 } 3806 return (q); 3807 } 3808 3809 /* 3810 * Function: sd_set_properties() 3811 * 3812 * Description: Set device properties based on the improved 3813 * format sd-config-list. 3814 * 3815 * Arguments: un - driver soft state (unit) structure 3816 * name - supported tunable name 3817 * value - tunable value 3818 */ 3819 static void 3820 sd_set_properties(struct sd_lun *un, char *name, char *value) 3821 { 3822 char *endptr = NULL; 3823 long val = 0; 3824 3825 if (strcasecmp(name, "cache-nonvolatile") == 0) { 3826 if (strcasecmp(value, "true") == 0) { 3827 un->un_f_suppress_cache_flush = TRUE; 3828 } else if (strcasecmp(value, "false") == 0) { 3829 un->un_f_suppress_cache_flush = FALSE; 3830 } else { 3831 goto value_invalid; 3832 } 3833 SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_set_properties: " 3834 "suppress_cache_flush flag set to %d\n", 3835 un->un_f_suppress_cache_flush); 3836 return; 3837 } 3838 3839 if (strcasecmp(name, "controller-type") == 0) { 3840 if (ddi_strtol(value, &endptr, 0, &val) == 0) { 3841 un->un_ctype = val; 3842 } else { 3843 goto value_invalid; 3844 } 3845 SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_set_properties: " 3846 "ctype set to %d\n", un->un_ctype); 3847 return; 3848 } 3849 3850 if (strcasecmp(name, "delay-busy") == 0) { 3851 if (ddi_strtol(value, &endptr, 0, &val) == 0) { 3852 un->un_busy_timeout = drv_usectohz(val / 1000); 3853 } else { 3854 goto value_invalid; 3855 } 3856 SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_set_properties: " 3857 "busy_timeout set to %d\n", un->un_busy_timeout); 3858 return; 3859 } 3860 3861 if (strcasecmp(name, "disksort") == 0) { 3862 if (strcasecmp(value, "true") == 0) { 3863 un->un_f_disksort_disabled = FALSE; 3864 } else if (strcasecmp(value, "false") == 0) { 3865 un->un_f_disksort_disabled = TRUE; 3866 } else { 3867 goto value_invalid; 3868 } 3869 SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_set_properties: " 3870 "disksort disabled flag set to %d\n", 3871 un->un_f_disksort_disabled); 3872 return; 3873 } 3874 3875 if (strcasecmp(name, "timeout-releasereservation") == 0) { 3876 if (ddi_strtol(value, &endptr, 0, &val) == 0) { 3877 un->un_reserve_release_time = val; 3878 } else { 3879 goto value_invalid; 3880 } 3881 SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_set_properties: " 3882 "reservation release timeout set to %d\n", 3883 un->un_reserve_release_time); 3884 return; 3885 } 3886 3887 if (strcasecmp(name, "reset-lun") == 0) { 3888 if (strcasecmp(value, "true") == 0) { 3889 un->un_f_lun_reset_enabled = TRUE; 3890 } else if (strcasecmp(value, "false") == 0) { 3891 un->un_f_lun_reset_enabled = FALSE; 3892 } else { 3893 goto value_invalid; 3894 } 3895 SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_set_properties: " 3896 "lun reset enabled flag set to %d\n", 3897 un->un_f_lun_reset_enabled); 3898 return; 3899 } 3900 3901 if (strcasecmp(name, "retries-busy") == 0) { 3902 if (ddi_strtol(value, &endptr, 0, &val) == 0) { 3903 un->un_busy_retry_count = val; 3904 } else { 3905 goto value_invalid; 3906 } 3907 SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_set_properties: " 3908 "busy retry 
count set to %d\n", un->un_busy_retry_count);
3909 		return;
3910 	}
3911 
3912 	if (strcasecmp(name, "retries-timeout") == 0) {
3913 		if (ddi_strtol(value, &endptr, 0, &val) == 0) {
3914 			un->un_retry_count = val;
3915 		} else {
3916 			goto value_invalid;
3917 		}
3918 		SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_set_properties: "
3919 		    "timeout retry count set to %d\n", un->un_retry_count);
3920 		return;
3921 	}
3922 
3923 	if (strcasecmp(name, "retries-notready") == 0) {
3924 		if (ddi_strtol(value, &endptr, 0, &val) == 0) {
3925 			un->un_notready_retry_count = val;
3926 		} else {
3927 			goto value_invalid;
3928 		}
3929 		SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_set_properties: "
3930 		    "notready retry count set to %d\n",
3931 		    un->un_notready_retry_count);
3932 		return;
3933 	}
3934 
3935 	if (strcasecmp(name, "retries-reset") == 0) {
3936 		if (ddi_strtol(value, &endptr, 0, &val) == 0) {
3937 			un->un_reset_retry_count = val;
3938 		} else {
3939 			goto value_invalid;
3940 		}
3941 		SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_set_properties: "
3942 		    "reset retry count set to %d\n",
3943 		    un->un_reset_retry_count);
3944 		return;
3945 	}
3946 
3947 	if (strcasecmp(name, "throttle-max") == 0) {
3948 		if (ddi_strtol(value, &endptr, 0, &val) == 0) {
3949 			un->un_saved_throttle = un->un_throttle = val;
3950 		} else {
3951 			goto value_invalid;
3952 		}
3953 		SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_set_properties: "
3954 		    "throttle set to %d\n", un->un_throttle);
3955 	}
3956 
3957 	if (strcasecmp(name, "throttle-min") == 0) {
3958 		if (ddi_strtol(value, &endptr, 0, &val) == 0) {
3959 			un->un_min_throttle = val;
3960 		} else {
3961 			goto value_invalid;
3962 		}
3963 		SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_set_properties: "
3964 		    "min throttle set to %d\n", un->un_min_throttle);
3965 	}
3966 
3967 	/*
3968 	 * Validate the throttle values.
3969 	 * If any of the numbers are invalid, set everything to defaults.
3970 	 */
3971 	if ((un->un_throttle < SD_LOWEST_VALID_THROTTLE) ||
3972 	    (un->un_min_throttle < SD_LOWEST_VALID_THROTTLE) ||
3973 	    (un->un_min_throttle > un->un_throttle)) {
3974 		un->un_saved_throttle = un->un_throttle = sd_max_throttle;
3975 		un->un_min_throttle = sd_min_throttle;
3976 	}
3977 	return;
3978 
3979 value_invalid:
3980 	SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_set_properties: "
3981 	    "value of prop %s is invalid\n", name);
3982 }
3983 
3984 /*
3985  * Function: sd_get_tunables_from_conf()
3986  *
3987  * Description: This function reads the data list from the sd.conf file
3988  *		and pulls the values that can have numeric values as
3989  *		arguments and places the values in the appropriate
3990  *		sd_tunables member.
3991  *		Since the order of the data list members varies across
3992  *		platforms, this function reads them from the data list in
3993  *		a platform-specific order and places them into the correct
3994  *		sd_tunables member that is consistent across all platforms.
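 *
 *		For example (hypothetical): if SD_CONF_BSET_THROTTLE is
 *		bit 0, a data list of "1,0x1,10" (version 1, flags 0x1,
 *		prop0 = 10) would place 10 into sdt_throttle below; in
 *		general, flag bit i selects data_list[i].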
3995 */ 3996 static void 3997 sd_get_tunables_from_conf(struct sd_lun *un, int flags, int *data_list, 3998 sd_tunables *values) 3999 { 4000 int i; 4001 int mask; 4002 4003 bzero(values, sizeof (sd_tunables)); 4004 4005 for (i = 0; i < SD_CONF_MAX_ITEMS; i++) { 4006 4007 mask = 1 << i; 4008 if (mask > flags) { 4009 break; 4010 } 4011 4012 switch (mask & flags) { 4013 case 0: /* This mask bit not set in flags */ 4014 continue; 4015 case SD_CONF_BSET_THROTTLE: 4016 values->sdt_throttle = data_list[i]; 4017 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4018 "sd_get_tunables_from_conf: throttle = %d\n", 4019 values->sdt_throttle); 4020 break; 4021 case SD_CONF_BSET_CTYPE: 4022 values->sdt_ctype = data_list[i]; 4023 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4024 "sd_get_tunables_from_conf: ctype = %d\n", 4025 values->sdt_ctype); 4026 break; 4027 case SD_CONF_BSET_NRR_COUNT: 4028 values->sdt_not_rdy_retries = data_list[i]; 4029 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4030 "sd_get_tunables_from_conf: not_rdy_retries = %d\n", 4031 values->sdt_not_rdy_retries); 4032 break; 4033 case SD_CONF_BSET_BSY_RETRY_COUNT: 4034 values->sdt_busy_retries = data_list[i]; 4035 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4036 "sd_get_tunables_from_conf: busy_retries = %d\n", 4037 values->sdt_busy_retries); 4038 break; 4039 case SD_CONF_BSET_RST_RETRIES: 4040 values->sdt_reset_retries = data_list[i]; 4041 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4042 "sd_get_tunables_from_conf: reset_retries = %d\n", 4043 values->sdt_reset_retries); 4044 break; 4045 case SD_CONF_BSET_RSV_REL_TIME: 4046 values->sdt_reserv_rel_time = data_list[i]; 4047 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4048 "sd_get_tunables_from_conf: reserv_rel_time = %d\n", 4049 values->sdt_reserv_rel_time); 4050 break; 4051 case SD_CONF_BSET_MIN_THROTTLE: 4052 values->sdt_min_throttle = data_list[i]; 4053 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4054 "sd_get_tunables_from_conf: min_throttle = %d\n", 4055 values->sdt_min_throttle); 4056 break; 4057 case SD_CONF_BSET_DISKSORT_DISABLED: 4058 values->sdt_disk_sort_dis = data_list[i]; 4059 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4060 "sd_get_tunables_from_conf: disk_sort_dis = %d\n", 4061 values->sdt_disk_sort_dis); 4062 break; 4063 case SD_CONF_BSET_LUN_RESET_ENABLED: 4064 values->sdt_lun_reset_enable = data_list[i]; 4065 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4066 "sd_get_tunables_from_conf: lun_reset_enable = %d" 4067 "\n", values->sdt_lun_reset_enable); 4068 break; 4069 case SD_CONF_BSET_CACHE_IS_NV: 4070 values->sdt_suppress_cache_flush = data_list[i]; 4071 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4072 "sd_get_tunables_from_conf: \ 4073 suppress_cache_flush = %d" 4074 "\n", values->sdt_suppress_cache_flush); 4075 break; 4076 } 4077 } 4078 } 4079 4080 /* 4081 * Function: sd_process_sdconf_table 4082 * 4083 * Description: Search the static configuration table for a match on the 4084 * inquiry vid/pid and update the driver soft state structure 4085 * according to the table property values for the device. 
4086 * 4087 * The form of a configuration table entry is: 4088 * <vid+pid>,<flags>,<property-data> 4089 * "SEAGATE ST42400N",1,0x40000, 4090 * 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1; 4091 * 4092 * Arguments: un - driver soft state (unit) structure 4093 */ 4094 4095 static void 4096 sd_process_sdconf_table(struct sd_lun *un) 4097 { 4098 char *id = NULL; 4099 int table_index; 4100 int idlen; 4101 4102 ASSERT(un != NULL); 4103 for (table_index = 0; table_index < sd_disk_table_size; 4104 table_index++) { 4105 id = sd_disk_table[table_index].device_id; 4106 idlen = strlen(id); 4107 if (idlen == 0) { 4108 continue; 4109 } 4110 4111 /* 4112 * The static configuration table currently does not 4113 * implement version 10 properties. Additionally, 4114 * multiple data-property-name entries are not 4115 * implemented in the static configuration table. 4116 */ 4117 if (sd_sdconf_id_match(un, id, idlen) == SD_SUCCESS) { 4118 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4119 "sd_process_sdconf_table: disk %s\n", id); 4120 sd_set_vers1_properties(un, 4121 sd_disk_table[table_index].flags, 4122 sd_disk_table[table_index].properties); 4123 break; 4124 } 4125 } 4126 } 4127 4128 4129 /* 4130 * Function: sd_sdconf_id_match 4131 * 4132 * Description: This local function implements a case sensitive vid/pid 4133 * comparison as well as the boundary cases of wild card and 4134 * multiple blanks. 4135 * 4136 * Note: An implicit assumption made here is that the scsi 4137 * inquiry structure will always keep the vid, pid and 4138 * revision strings in consecutive sequence, so they can be 4139 * read as a single string. If this assumption is not the 4140 * case, a separate string, to be used for the check, needs 4141 * to be built with these strings concatenated. 4142 * 4143 * Arguments: un - driver soft state (unit) structure 4144 * id - table or config file vid/pid 4145 * idlen - length of the vid/pid (bytes) 4146 * 4147 * Return Code: SD_SUCCESS - Indicates a match with the inquiry vid/pid 4148 * SD_FAILURE - Indicates no match with the inquiry vid/pid 4149 */ 4150 4151 static int 4152 sd_sdconf_id_match(struct sd_lun *un, char *id, int idlen) 4153 { 4154 struct scsi_inquiry *sd_inq; 4155 int rval = SD_SUCCESS; 4156 4157 ASSERT(un != NULL); 4158 sd_inq = un->un_sd->sd_inq; 4159 ASSERT(id != NULL); 4160 4161 /* 4162 * We use the inq_vid as a pointer to a buffer containing the 4163 * vid and pid and use the entire vid/pid length of the table 4164 * entry for the comparison. This works because the inq_pid 4165 * data member follows inq_vid in the scsi_inquiry structure. 4166 */ 4167 if (strncasecmp(sd_inq->inq_vid, id, idlen) != 0) { 4168 /* 4169 * The user id string is compared to the inquiry vid/pid 4170 * using a case insensitive comparison and ignoring 4171 * multiple spaces. 4172 */ 4173 rval = sd_blank_cmp(un, id, idlen); 4174 if (rval != SD_SUCCESS) { 4175 /* 4176 * User id strings that start and end with a "*" 4177 * are a special case. These do not have a 4178 * specific vendor, and the product string can 4179 * appear anywhere in the 16 byte PID portion of 4180 * the inquiry data. This is a simple strstr() 4181 * type search for the user id in the inquiry data. 
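		 * For example, a hypothetical table entry of "*CD-ROM*"
		 * would match any inquiry pid containing "CD-ROM".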
4182 */ 4183 if ((id[0] == '*') && (id[idlen - 1] == '*')) { 4184 char *pidptr = &id[1]; 4185 int i; 4186 int j; 4187 int pidstrlen = idlen - 2; 4188 j = sizeof (SD_INQUIRY(un)->inq_pid) - 4189 pidstrlen; 4190 4191 if (j < 0) { 4192 return (SD_FAILURE); 4193 } 4194 for (i = 0; i < j; i++) { 4195 if (bcmp(&SD_INQUIRY(un)->inq_pid[i], 4196 pidptr, pidstrlen) == 0) { 4197 rval = SD_SUCCESS; 4198 break; 4199 } 4200 } 4201 } 4202 } 4203 } 4204 return (rval); 4205 } 4206 4207 4208 /* 4209 * Function: sd_blank_cmp 4210 * 4211 * Description: If the id string starts and ends with a space, treat 4212 * multiple consecutive spaces as equivalent to a single 4213 * space. For example, this causes a sd_disk_table entry 4214 * of " NEC CDROM " to match a device's id string of 4215 * "NEC CDROM". 4216 * 4217 * Note: The success exit condition for this routine is if 4218 * the pointer to the table entry is '\0' and the cnt of 4219 * the inquiry length is zero. This will happen if the inquiry 4220 * string returned by the device is padded with spaces to be 4221 * exactly 24 bytes in length (8 byte vid + 16 byte pid). The 4222 * SCSI spec states that the inquiry string is to be padded with 4223 * spaces. 4224 * 4225 * Arguments: un - driver soft state (unit) structure 4226 * id - table or config file vid/pid 4227 * idlen - length of the vid/pid (bytes) 4228 * 4229 * Return Code: SD_SUCCESS - Indicates a match with the inquiry vid/pid 4230 * SD_FAILURE - Indicates no match with the inquiry vid/pid 4231 */ 4232 4233 static int 4234 sd_blank_cmp(struct sd_lun *un, char *id, int idlen) 4235 { 4236 char *p1; 4237 char *p2; 4238 int cnt; 4239 cnt = sizeof (SD_INQUIRY(un)->inq_vid) + 4240 sizeof (SD_INQUIRY(un)->inq_pid); 4241 4242 ASSERT(un != NULL); 4243 p2 = un->un_sd->sd_inq->inq_vid; 4244 ASSERT(id != NULL); 4245 p1 = id; 4246 4247 if ((id[0] == ' ') && (id[idlen - 1] == ' ')) { 4248 /* 4249 * Note: string p1 is terminated by a NUL but string p2 4250 * isn't. The end of p2 is determined by cnt. 4251 */ 4252 for (;;) { 4253 /* skip over any extra blanks in both strings */ 4254 while ((*p1 != '\0') && (*p1 == ' ')) { 4255 p1++; 4256 } 4257 while ((cnt != 0) && (*p2 == ' ')) { 4258 p2++; 4259 cnt--; 4260 } 4261 4262 /* compare the two strings */ 4263 if ((cnt == 0) || 4264 (SD_TOUPPER(*p1) != SD_TOUPPER(*p2))) { 4265 break; 4266 } 4267 while ((cnt > 0) && 4268 (SD_TOUPPER(*p1) == SD_TOUPPER(*p2))) { 4269 p1++; 4270 p2++; 4271 cnt--; 4272 } 4273 } 4274 } 4275 4276 /* return SD_SUCCESS if both strings match */ 4277 return (((*p1 == '\0') && (cnt == 0)) ? 
SD_SUCCESS : SD_FAILURE);
4278 }
4279 
4280 
4281 /*
4282  * Function: sd_chk_vers1_data
4283  *
4284  * Description: Verify the version 1 device properties provided by the
4285  *		user via the configuration file
4286  *
4287  * Arguments: un - driver soft state (unit) structure
4288  *	      flags - integer mask indicating properties to be set
4289  *	      prop_list - integer list of property values
4290  *	      list_len - number of elements in the data list, including
4291  *	      the version and flags entries
4292  *
4293  * Return Code: SD_SUCCESS - Indicates the user provided data is valid
4294  *		SD_FAILURE - Indicates the user provided data is invalid
4295  */
4296 
4297 static int
4298 sd_chk_vers1_data(struct sd_lun *un, int flags, int *prop_list,
4299     int list_len, char *dataname_ptr)
4300 {
4301 	int i;
4302 	int mask = 1;
4303 	int index = 0;
4304 
4305 	ASSERT(un != NULL);
4306 
4307 	/* Check for a NULL property name and list */
4308 	if (dataname_ptr == NULL) {
4309 		scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
4310 		    "sd_chk_vers1_data: NULL data property name.");
4311 		return (SD_FAILURE);
4312 	}
4313 	if (prop_list == NULL) {
4314 		scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
4315 		    "sd_chk_vers1_data: %s NULL data property list.",
4316 		    dataname_ptr);
4317 		return (SD_FAILURE);
4318 	}
4319 
4320 	/* Display a warning if undefined bits are set in the flags */
4321 	if (flags & ~SD_CONF_BIT_MASK) {
4322 		scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
4323 		    "sd_chk_vers1_data: invalid bits 0x%x in data list %s. "
4324 		    "Properties not set.",
4325 		    (flags & ~SD_CONF_BIT_MASK), dataname_ptr);
4326 		return (SD_FAILURE);
4327 	}
4328 
4329 	/*
4330 	 * Verify the length of the list by identifying the highest bit set
4331 	 * in the flags and validating that the property list has a length
4332 	 * up to the index of this bit. (The mask must track the bit being
4333 	 * tested on each pass, so it is shifted at the end of each pass.)
4334 	 */
4335 	for (i = 0; i < SD_CONF_MAX_ITEMS; i++) {
4336 		if (flags & mask) {
4337 			index = i + 1;
4338 		}
4339 		mask <<= 1;
4340 	}
4341 	if (list_len < (index + 2)) {
4342 		scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
4343 		    "sd_chk_vers1_data: "
4344 		    "Data property list %s size is incorrect. "
4345 		    "Properties not set.", dataname_ptr);
4346 		scsi_log(SD_DEVINFO(un), sd_label, CE_CONT, "Size expected: "
4347 		    "version + 1 flagword + %d properties", SD_CONF_MAX_ITEMS);
4348 		return (SD_FAILURE);
4349 	}
4350 	return (SD_SUCCESS);
4351 }
4352 
4353 
4354 /*
4355  * Function: sd_set_vers1_properties
4356  *
4357  * Description: Set version 1 device properties based on a property list
4358  *		retrieved from the driver configuration file or static
4359  *		configuration table. Version 1 properties have the format:
4360  *
4361  *	<data-property-name>:=<version>,<flags>,<prop0>,<prop1>,.....<propN>
4362  *
4363  *		where the prop0 value will be used to set prop0 if bit0
4364  *		is set in the flags
4365  *
4366  * Arguments: un - driver soft state (unit) structure
4367  *	      flags - integer mask indicating properties to be set
4368  *	      prop_list - integer list of property values
4369  */
4370 
4371 static void
4372 sd_set_vers1_properties(struct sd_lun *un, int flags, sd_tunables *prop_list)
4373 {
4374 	ASSERT(un != NULL);
4375 
4376 	/*
4377 	 * Set the flag to indicate cache is to be disabled. An attempt
4378 	 * to disable the cache via sd_cache_control() will be made
4379 	 * later during attach once the basic initialization is complete.
4378 */ 4379 if (flags & SD_CONF_BSET_NOCACHE) { 4380 un->un_f_opt_disable_cache = TRUE; 4381 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4382 "sd_set_vers1_properties: caching disabled flag set\n"); 4383 } 4384 4385 /* CD-specific configuration parameters */ 4386 if (flags & SD_CONF_BSET_PLAYMSF_BCD) { 4387 un->un_f_cfg_playmsf_bcd = TRUE; 4388 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4389 "sd_set_vers1_properties: playmsf_bcd set\n"); 4390 } 4391 if (flags & SD_CONF_BSET_READSUB_BCD) { 4392 un->un_f_cfg_readsub_bcd = TRUE; 4393 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4394 "sd_set_vers1_properties: readsub_bcd set\n"); 4395 } 4396 if (flags & SD_CONF_BSET_READ_TOC_TRK_BCD) { 4397 un->un_f_cfg_read_toc_trk_bcd = TRUE; 4398 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4399 "sd_set_vers1_properties: read_toc_trk_bcd set\n"); 4400 } 4401 if (flags & SD_CONF_BSET_READ_TOC_ADDR_BCD) { 4402 un->un_f_cfg_read_toc_addr_bcd = TRUE; 4403 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4404 "sd_set_vers1_properties: read_toc_addr_bcd set\n"); 4405 } 4406 if (flags & SD_CONF_BSET_NO_READ_HEADER) { 4407 un->un_f_cfg_no_read_header = TRUE; 4408 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4409 "sd_set_vers1_properties: no_read_header set\n"); 4410 } 4411 if (flags & SD_CONF_BSET_READ_CD_XD4) { 4412 un->un_f_cfg_read_cd_xd4 = TRUE; 4413 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4414 "sd_set_vers1_properties: read_cd_xd4 set\n"); 4415 } 4416 4417 /* Support for devices which do not have valid/unique serial numbers */ 4418 if (flags & SD_CONF_BSET_FAB_DEVID) { 4419 un->un_f_opt_fab_devid = TRUE; 4420 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4421 "sd_set_vers1_properties: fab_devid bit set\n"); 4422 } 4423 4424 /* Support for user throttle configuration */ 4425 if (flags & SD_CONF_BSET_THROTTLE) { 4426 ASSERT(prop_list != NULL); 4427 un->un_saved_throttle = un->un_throttle = 4428 prop_list->sdt_throttle; 4429 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4430 "sd_set_vers1_properties: throttle set to %d\n", 4431 prop_list->sdt_throttle); 4432 } 4433 4434 /* Set the per disk retry count according to the conf file or table. 
*/ 4435 if (flags & SD_CONF_BSET_NRR_COUNT) { 4436 ASSERT(prop_list != NULL); 4437 if (prop_list->sdt_not_rdy_retries) { 4438 un->un_notready_retry_count = 4439 prop_list->sdt_not_rdy_retries; 4440 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4441 "sd_set_vers1_properties: not ready retry count" 4442 " set to %d\n", un->un_notready_retry_count); 4443 } 4444 } 4445 4446 /* The controller type is reported for generic disk driver ioctls */ 4447 if (flags & SD_CONF_BSET_CTYPE) { 4448 ASSERT(prop_list != NULL); 4449 switch (prop_list->sdt_ctype) { 4450 case CTYPE_CDROM: 4451 un->un_ctype = prop_list->sdt_ctype; 4452 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4453 "sd_set_vers1_properties: ctype set to " 4454 "CTYPE_CDROM\n"); 4455 break; 4456 case CTYPE_CCS: 4457 un->un_ctype = prop_list->sdt_ctype; 4458 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4459 "sd_set_vers1_properties: ctype set to " 4460 "CTYPE_CCS\n"); 4461 break; 4462 case CTYPE_ROD: /* RW optical */ 4463 un->un_ctype = prop_list->sdt_ctype; 4464 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4465 "sd_set_vers1_properties: ctype set to " 4466 "CTYPE_ROD\n"); 4467 break; 4468 default: 4469 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 4470 "sd_set_vers1_properties: Could not set " 4471 "invalid ctype value (%d)", 4472 prop_list->sdt_ctype); 4473 } 4474 } 4475 4476 /* Purple failover timeout */ 4477 if (flags & SD_CONF_BSET_BSY_RETRY_COUNT) { 4478 ASSERT(prop_list != NULL); 4479 un->un_busy_retry_count = 4480 prop_list->sdt_busy_retries; 4481 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4482 "sd_set_vers1_properties: " 4483 "busy retry count set to %d\n", 4484 un->un_busy_retry_count); 4485 } 4486 4487 /* Purple reset retry count */ 4488 if (flags & SD_CONF_BSET_RST_RETRIES) { 4489 ASSERT(prop_list != NULL); 4490 un->un_reset_retry_count = 4491 prop_list->sdt_reset_retries; 4492 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4493 "sd_set_vers1_properties: " 4494 "reset retry count set to %d\n", 4495 un->un_reset_retry_count); 4496 } 4497 4498 /* Purple reservation release timeout */ 4499 if (flags & SD_CONF_BSET_RSV_REL_TIME) { 4500 ASSERT(prop_list != NULL); 4501 un->un_reserve_release_time = 4502 prop_list->sdt_reserv_rel_time; 4503 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4504 "sd_set_vers1_properties: " 4505 "reservation release timeout set to %d\n", 4506 un->un_reserve_release_time); 4507 } 4508 4509 /* 4510 * Driver flag telling the driver to verify that no commands are pending 4511 * for a device before issuing a Test Unit Ready. This is a workaround 4512 * for a firmware bug in some Seagate eliteI drives. 4513 */ 4514 if (flags & SD_CONF_BSET_TUR_CHECK) { 4515 un->un_f_cfg_tur_check = TRUE; 4516 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4517 "sd_set_vers1_properties: tur queue check set\n"); 4518 } 4519 4520 if (flags & SD_CONF_BSET_MIN_THROTTLE) { 4521 un->un_min_throttle = prop_list->sdt_min_throttle; 4522 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4523 "sd_set_vers1_properties: min throttle set to %d\n", 4524 un->un_min_throttle); 4525 } 4526 4527 if (flags & SD_CONF_BSET_DISKSORT_DISABLED) { 4528 un->un_f_disksort_disabled = 4529 (prop_list->sdt_disk_sort_dis != 0) ? 4530 TRUE : FALSE; 4531 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4532 "sd_set_vers1_properties: disksort disabled " 4533 "flag set to %d\n", 4534 prop_list->sdt_disk_sort_dis); 4535 } 4536 4537 if (flags & SD_CONF_BSET_LUN_RESET_ENABLED) { 4538 un->un_f_lun_reset_enabled = 4539 (prop_list->sdt_lun_reset_enable != 0) ? 
4540 TRUE : FALSE; 4541 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4542 "sd_set_vers1_properties: lun reset enabled " 4543 "flag set to %d\n", 4544 prop_list->sdt_lun_reset_enable); 4545 } 4546 4547 if (flags & SD_CONF_BSET_CACHE_IS_NV) { 4548 un->un_f_suppress_cache_flush = 4549 (prop_list->sdt_suppress_cache_flush != 0) ? 4550 TRUE : FALSE; 4551 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4552 "sd_set_vers1_properties: suppress_cache_flush " 4553 "flag set to %d\n", 4554 prop_list->sdt_suppress_cache_flush); 4555 } 4556 4557 /* 4558 * Validate the throttle values. 4559 * If any of the numbers are invalid, set everything to defaults. 4560 */ 4561 if ((un->un_throttle < SD_LOWEST_VALID_THROTTLE) || 4562 (un->un_min_throttle < SD_LOWEST_VALID_THROTTLE) || 4563 (un->un_min_throttle > un->un_throttle)) { 4564 un->un_saved_throttle = un->un_throttle = sd_max_throttle; 4565 un->un_min_throttle = sd_min_throttle; 4566 } 4567 } 4568 4569 /* 4570 * Function: sd_is_lsi() 4571 * 4572 * Description: Check for lsi devices, step through the static device 4573 * table to match vid/pid. 4574 * 4575 * Args: un - ptr to sd_lun 4576 * 4577 * Notes: When creating new LSI property, need to add the new LSI property 4578 * to this function. 4579 */ 4580 static void 4581 sd_is_lsi(struct sd_lun *un) 4582 { 4583 char *id = NULL; 4584 int table_index; 4585 int idlen; 4586 void *prop; 4587 4588 ASSERT(un != NULL); 4589 for (table_index = 0; table_index < sd_disk_table_size; 4590 table_index++) { 4591 id = sd_disk_table[table_index].device_id; 4592 idlen = strlen(id); 4593 if (idlen == 0) { 4594 continue; 4595 } 4596 4597 if (sd_sdconf_id_match(un, id, idlen) == SD_SUCCESS) { 4598 prop = sd_disk_table[table_index].properties; 4599 if (prop == &lsi_properties || 4600 prop == &lsi_oem_properties || 4601 prop == &lsi_properties_scsi || 4602 prop == &symbios_properties) { 4603 un->un_f_cfg_is_lsi = TRUE; 4604 } 4605 break; 4606 } 4607 } 4608 } 4609 4610 /* 4611 * Function: sd_get_physical_geometry 4612 * 4613 * Description: Retrieve the MODE SENSE page 3 (Format Device Page) and 4614 * MODE SENSE page 4 (Rigid Disk Drive Geometry Page) from the 4615 * target, and use this information to initialize the physical 4616 * geometry cache specified by pgeom_p. 4617 * 4618 * MODE SENSE is an optional command, so failure in this case 4619 * does not necessarily denote an error. We want to use the 4620 * MODE SENSE commands to derive the physical geometry of the 4621 * device, but if either command fails, the logical geometry is 4622 * used as the fallback for disk label geometry in cmlb. 4623 * 4624 * This requires that un->un_blockcount and un->un_tgt_blocksize 4625 * have already been initialized for the current target and 4626 * that the current values be passed as args so that we don't 4627 * end up ever trying to use -1 as a valid value. This could 4628 * happen if either value is reset while we're not holding 4629 * the mutex. 4630 * 4631 * Arguments: un - driver soft state (unit) structure 4632 * path_flag - SD_PATH_DIRECT to use the USCSI "direct" chain and 4633 * the normal command waitq, or SD_PATH_DIRECT_PRIORITY 4634 * to use the USCSI "direct" chain and bypass the normal 4635 * command waitq. 4636 * 4637 * Context: Kernel thread only (can sleep). 
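 *
 *		Example (hypothetical numbers): a drive reporting 16 heads,
 *		63 sectors/track and 16383 cylinders gives spc = 16 * 63 =
 *		1008 and a MODE SENSE capacity of 1008 * 16383 = 16,514,064
 *		blocks, which is then reconciled against the READ CAPACITY
 *		based capacity passed in.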
4638 */ 4639 4640 static int 4641 sd_get_physical_geometry(struct sd_lun *un, cmlb_geom_t *pgeom_p, 4642 diskaddr_t capacity, int lbasize, int path_flag) 4643 { 4644 struct mode_format *page3p; 4645 struct mode_geometry *page4p; 4646 struct mode_header *headerp; 4647 int sector_size; 4648 int nsect; 4649 int nhead; 4650 int ncyl; 4651 int intrlv; 4652 int spc; 4653 diskaddr_t modesense_capacity; 4654 int rpm; 4655 int bd_len; 4656 int mode_header_length; 4657 uchar_t *p3bufp; 4658 uchar_t *p4bufp; 4659 int cdbsize; 4660 int ret = EIO; 4661 sd_ssc_t *ssc; 4662 int status; 4663 4664 ASSERT(un != NULL); 4665 4666 if (lbasize == 0) { 4667 if (ISCD(un)) { 4668 lbasize = 2048; 4669 } else { 4670 lbasize = un->un_sys_blocksize; 4671 } 4672 } 4673 pgeom_p->g_secsize = (unsigned short)lbasize; 4674 4675 /* 4676 * If the unit is a cd/dvd drive MODE SENSE page three 4677 * and MODE SENSE page four are reserved (see SBC spec 4678 * and MMC spec). To prevent soft errors just return 4679 * using the default LBA size. 4680 */ 4681 if (ISCD(un)) 4682 return (ret); 4683 4684 cdbsize = (un->un_f_cfg_is_atapi == TRUE) ? CDB_GROUP2 : CDB_GROUP0; 4685 4686 /* 4687 * Retrieve MODE SENSE page 3 - Format Device Page 4688 */ 4689 p3bufp = kmem_zalloc(SD_MODE_SENSE_PAGE3_LENGTH, KM_SLEEP); 4690 ssc = sd_ssc_init(un); 4691 status = sd_send_scsi_MODE_SENSE(ssc, cdbsize, p3bufp, 4692 SD_MODE_SENSE_PAGE3_LENGTH, SD_MODE_SENSE_PAGE3_CODE, path_flag); 4693 if (status != 0) { 4694 SD_ERROR(SD_LOG_COMMON, un, 4695 "sd_get_physical_geometry: mode sense page 3 failed\n"); 4696 goto page3_exit; 4697 } 4698 4699 /* 4700 * Determine size of Block Descriptors in order to locate the mode 4701 * page data. ATAPI devices return 0, SCSI devices should return 4702 * MODE_BLK_DESC_LENGTH. 4703 */ 4704 headerp = (struct mode_header *)p3bufp; 4705 if (un->un_f_cfg_is_atapi == TRUE) { 4706 struct mode_header_grp2 *mhp = 4707 (struct mode_header_grp2 *)headerp; 4708 mode_header_length = MODE_HEADER_LENGTH_GRP2; 4709 bd_len = (mhp->bdesc_length_hi << 8) | mhp->bdesc_length_lo; 4710 } else { 4711 mode_header_length = MODE_HEADER_LENGTH; 4712 bd_len = ((struct mode_header *)headerp)->bdesc_length; 4713 } 4714 4715 if (bd_len > MODE_BLK_DESC_LENGTH) { 4716 sd_ssc_set_info(ssc, SSC_FLAGS_INVALID_DATA, SD_LOG_COMMON, 4717 "sd_get_physical_geometry: received unexpected bd_len " 4718 "of %d, page3\n", bd_len); 4719 status = EIO; 4720 goto page3_exit; 4721 } 4722 4723 page3p = (struct mode_format *) 4724 ((caddr_t)headerp + mode_header_length + bd_len); 4725 4726 if (page3p->mode_page.code != SD_MODE_SENSE_PAGE3_CODE) { 4727 sd_ssc_set_info(ssc, SSC_FLAGS_INVALID_DATA, SD_LOG_COMMON, 4728 "sd_get_physical_geometry: mode sense pg3 code mismatch " 4729 "%d\n", page3p->mode_page.code); 4730 status = EIO; 4731 goto page3_exit; 4732 } 4733 4734 /* 4735 * Use this physical geometry data only if BOTH MODE SENSE commands 4736 * complete successfully; otherwise, revert to the logical geometry. 4737 * So, we need to save everything in temporary variables. 
4738 */ 4739 sector_size = BE_16(page3p->data_bytes_sect); 4740 4741 /* 4742 * 1243403: The NEC D38x7 drives do not support MODE SENSE sector size 4743 */ 4744 if (sector_size == 0) { 4745 sector_size = un->un_sys_blocksize; 4746 } else { 4747 sector_size &= ~(un->un_sys_blocksize - 1); 4748 } 4749 4750 nsect = BE_16(page3p->sect_track); 4751 intrlv = BE_16(page3p->interleave); 4752 4753 SD_INFO(SD_LOG_COMMON, un, 4754 "sd_get_physical_geometry: Format Parameters (page 3)\n"); 4755 SD_INFO(SD_LOG_COMMON, un, 4756 " mode page: %d; nsect: %d; sector size: %d;\n", 4757 page3p->mode_page.code, nsect, sector_size); 4758 SD_INFO(SD_LOG_COMMON, un, 4759 " interleave: %d; track skew: %d; cylinder skew: %d;\n", intrlv, 4760 BE_16(page3p->track_skew), 4761 BE_16(page3p->cylinder_skew)); 4762 4763 sd_ssc_assessment(ssc, SD_FMT_STANDARD); 4764 4765 /* 4766 * Retrieve MODE SENSE page 4 - Rigid Disk Drive Geometry Page 4767 */ 4768 p4bufp = kmem_zalloc(SD_MODE_SENSE_PAGE4_LENGTH, KM_SLEEP); 4769 status = sd_send_scsi_MODE_SENSE(ssc, cdbsize, p4bufp, 4770 SD_MODE_SENSE_PAGE4_LENGTH, SD_MODE_SENSE_PAGE4_CODE, path_flag); 4771 if (status != 0) { 4772 SD_ERROR(SD_LOG_COMMON, un, 4773 "sd_get_physical_geometry: mode sense page 4 failed\n"); 4774 goto page4_exit; 4775 } 4776 4777 /* 4778 * Determine size of Block Descriptors in order to locate the mode 4779 * page data. ATAPI devices return 0, SCSI devices should return 4780 * MODE_BLK_DESC_LENGTH. 4781 */ 4782 headerp = (struct mode_header *)p4bufp; 4783 if (un->un_f_cfg_is_atapi == TRUE) { 4784 struct mode_header_grp2 *mhp = 4785 (struct mode_header_grp2 *)headerp; 4786 bd_len = (mhp->bdesc_length_hi << 8) | mhp->bdesc_length_lo; 4787 } else { 4788 bd_len = ((struct mode_header *)headerp)->bdesc_length; 4789 } 4790 4791 if (bd_len > MODE_BLK_DESC_LENGTH) { 4792 sd_ssc_set_info(ssc, SSC_FLAGS_INVALID_DATA, SD_LOG_COMMON, 4793 "sd_get_physical_geometry: received unexpected bd_len of " 4794 "%d, page4\n", bd_len); 4795 status = EIO; 4796 goto page4_exit; 4797 } 4798 4799 page4p = (struct mode_geometry *) 4800 ((caddr_t)headerp + mode_header_length + bd_len); 4801 4802 if (page4p->mode_page.code != SD_MODE_SENSE_PAGE4_CODE) { 4803 sd_ssc_set_info(ssc, SSC_FLAGS_INVALID_DATA, SD_LOG_COMMON, 4804 "sd_get_physical_geometry: mode sense pg4 code mismatch " 4805 "%d\n", page4p->mode_page.code); 4806 status = EIO; 4807 goto page4_exit; 4808 } 4809 4810 /* 4811 * Stash the data now, after we know that both commands completed. 4812 */ 4813 4814 4815 nhead = (int)page4p->heads; /* uchar, so no conversion needed */ 4816 spc = nhead * nsect; 4817 ncyl = (page4p->cyl_ub << 16) + (page4p->cyl_mb << 8) + page4p->cyl_lb; 4818 rpm = BE_16(page4p->rpm); 4819 4820 modesense_capacity = spc * ncyl; 4821 4822 SD_INFO(SD_LOG_COMMON, un, 4823 "sd_get_physical_geometry: Geometry Parameters (page 4)\n"); 4824 SD_INFO(SD_LOG_COMMON, un, 4825 " cylinders: %d; heads: %d; rpm: %d;\n", ncyl, nhead, rpm); 4826 SD_INFO(SD_LOG_COMMON, un, 4827 " computed capacity(h*s*c): %d;\n", modesense_capacity); 4828 SD_INFO(SD_LOG_COMMON, un, " pgeom_p: %p; read cap: %d\n", 4829 (void *)pgeom_p, capacity); 4830 4831 /* 4832 * Compensate if the drive's geometry is not rectangular, i.e., 4833 * the product of C * H * S returned by MODE SENSE >= that returned 4834 * by read capacity. This is an idiosyncrasy of the original x86 4835 * disk subsystem. 
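	 * For example (hypothetical numbers): with spc = 1008, a MODE SENSE
	 * capacity of 16,514,064 and a READ CAPACITY value of 16,000,000,
	 * g_acyl works out to (514,064 + 1007) / 1008 = 510 alternate
	 * cylinders.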
4836 */ 4837 if (modesense_capacity >= capacity) { 4838 SD_INFO(SD_LOG_COMMON, un, 4839 "sd_get_physical_geometry: adjusting acyl; " 4840 "old: %d; new: %d\n", pgeom_p->g_acyl, 4841 (modesense_capacity - capacity + spc - 1) / spc); 4842 if (sector_size != 0) { 4843 /* 1243403: NEC D38x7 drives don't support sec size */ 4844 pgeom_p->g_secsize = (unsigned short)sector_size; 4845 } 4846 pgeom_p->g_nsect = (unsigned short)nsect; 4847 pgeom_p->g_nhead = (unsigned short)nhead; 4848 pgeom_p->g_capacity = capacity; 4849 pgeom_p->g_acyl = 4850 (modesense_capacity - pgeom_p->g_capacity + spc - 1) / spc; 4851 pgeom_p->g_ncyl = ncyl - pgeom_p->g_acyl; 4852 } 4853 4854 pgeom_p->g_rpm = (unsigned short)rpm; 4855 pgeom_p->g_intrlv = (unsigned short)intrlv; 4856 ret = 0; 4857 4858 SD_INFO(SD_LOG_COMMON, un, 4859 "sd_get_physical_geometry: mode sense geometry:\n"); 4860 SD_INFO(SD_LOG_COMMON, un, 4861 " nsect: %d; sector size: %d; interlv: %d\n", 4862 nsect, sector_size, intrlv); 4863 SD_INFO(SD_LOG_COMMON, un, 4864 " nhead: %d; ncyl: %d; rpm: %d; capacity(ms): %d\n", 4865 nhead, ncyl, rpm, modesense_capacity); 4866 SD_INFO(SD_LOG_COMMON, un, 4867 "sd_get_physical_geometry: (cached)\n"); 4868 SD_INFO(SD_LOG_COMMON, un, 4869 " ncyl: %ld; acyl: %d; nhead: %d; nsect: %d\n", 4870 pgeom_p->g_ncyl, pgeom_p->g_acyl, 4871 pgeom_p->g_nhead, pgeom_p->g_nsect); 4872 SD_INFO(SD_LOG_COMMON, un, 4873 " lbasize: %d; capacity: %ld; intrlv: %d; rpm: %d\n", 4874 pgeom_p->g_secsize, pgeom_p->g_capacity, 4875 pgeom_p->g_intrlv, pgeom_p->g_rpm); 4876 sd_ssc_assessment(ssc, SD_FMT_STANDARD); 4877 4878 page4_exit: 4879 kmem_free(p4bufp, SD_MODE_SENSE_PAGE4_LENGTH); 4880 4881 page3_exit: 4882 kmem_free(p3bufp, SD_MODE_SENSE_PAGE3_LENGTH); 4883 4884 if (status != 0) { 4885 if (status == EIO) { 4886 /* 4887 * Some disks do not support mode sense(6), we 4888 * should ignore this kind of error(sense key is 4889 * 0x5 - illegal request). 4890 */ 4891 uint8_t *sensep; 4892 int senlen; 4893 4894 sensep = (uint8_t *)ssc->ssc_uscsi_cmd->uscsi_rqbuf; 4895 senlen = (int)(ssc->ssc_uscsi_cmd->uscsi_rqlen - 4896 ssc->ssc_uscsi_cmd->uscsi_rqresid); 4897 4898 if (senlen > 0 && 4899 scsi_sense_key(sensep) == KEY_ILLEGAL_REQUEST) { 4900 sd_ssc_assessment(ssc, 4901 SD_FMT_IGNORE_COMPROMISE); 4902 } else { 4903 sd_ssc_assessment(ssc, SD_FMT_STATUS_CHECK); 4904 } 4905 } else { 4906 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 4907 } 4908 } 4909 sd_ssc_fini(ssc); 4910 return (ret); 4911 } 4912 4913 /* 4914 * Function: sd_get_virtual_geometry 4915 * 4916 * Description: Ask the controller to tell us about the target device. 4917 * 4918 * Arguments: un - pointer to softstate 4919 * capacity - disk capacity in #blocks 4920 * lbasize - disk block size in bytes 4921 * 4922 * Context: Kernel thread only 4923 */ 4924 4925 static int 4926 sd_get_virtual_geometry(struct sd_lun *un, cmlb_geom_t *lgeom_p, 4927 diskaddr_t capacity, int lbasize) 4928 { 4929 uint_t geombuf; 4930 int spc; 4931 4932 ASSERT(un != NULL); 4933 4934 /* Set sector size, and total number of sectors */ 4935 (void) scsi_ifsetcap(SD_ADDRESS(un), "sector-size", lbasize, 1); 4936 (void) scsi_ifsetcap(SD_ADDRESS(un), "total-sectors", capacity, 1); 4937 4938 /* Let the HBA tell us its geometry */ 4939 geombuf = (uint_t)scsi_ifgetcap(SD_ADDRESS(un), "geometry", 1); 4940 4941 /* A value of -1 indicates an undefined "geometry" property */ 4942 if (geombuf == (-1)) { 4943 return (EINVAL); 4944 } 4945 4946 /* Initialize the logical geometry cache. 
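	 * The "geometry" capability packs nhead into the upper 16 bits of
	 * geombuf and nsect into the lower 16 bits, as decoded below.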
*/ 4947 lgeom_p->g_nhead = (geombuf >> 16) & 0xffff; 4948 lgeom_p->g_nsect = geombuf & 0xffff; 4949 lgeom_p->g_secsize = un->un_sys_blocksize; 4950 4951 spc = lgeom_p->g_nhead * lgeom_p->g_nsect; 4952 4953 /* 4954 * Note: The driver originally converted the capacity value from 4955 * target blocks to system blocks. However, the capacity value passed 4956 * to this routine is already in terms of system blocks (this scaling 4957 * is done when the READ CAPACITY command is issued and processed). 4958 * This 'error' may have gone undetected because the usage of g_ncyl 4959 * (which is based upon g_capacity) is very limited within the driver 4960 */ 4961 lgeom_p->g_capacity = capacity; 4962 4963 /* 4964 * Set ncyl to zero if the hba returned a zero nhead or nsect value. The 4965 * hba may return zero values if the device has been removed. 4966 */ 4967 if (spc == 0) { 4968 lgeom_p->g_ncyl = 0; 4969 } else { 4970 lgeom_p->g_ncyl = lgeom_p->g_capacity / spc; 4971 } 4972 lgeom_p->g_acyl = 0; 4973 4974 SD_INFO(SD_LOG_COMMON, un, "sd_get_virtual_geometry: (cached)\n"); 4975 return (0); 4976 4977 } 4978 /* 4979 * Function: sd_update_block_info 4980 * 4981 * Description: Calculate a byte count to sector count bitshift value 4982 * from sector size. 4983 * 4984 * Arguments: un: unit struct. 4985 * lbasize: new target sector size 4986 * capacity: new target capacity, ie. block count 4987 * 4988 * Context: Kernel thread context 4989 */ 4990 4991 static void 4992 sd_update_block_info(struct sd_lun *un, uint32_t lbasize, uint64_t capacity) 4993 { 4994 if (lbasize != 0) { 4995 un->un_tgt_blocksize = lbasize; 4996 un->un_f_tgt_blocksize_is_valid = TRUE; 4997 } 4998 4999 if (capacity != 0) { 5000 un->un_blockcount = capacity; 5001 un->un_f_blockcount_is_valid = TRUE; 5002 } 5003 } 5004 5005 5006 /* 5007 * Function: sd_register_devid 5008 * 5009 * Description: This routine will obtain the device id information from the 5010 * target, obtain the serial number, and register the device 5011 * id with the ddi framework. 5012 * 5013 * Arguments: devi - the system's dev_info_t for the device. 5014 * un - driver soft state (unit) structure 5015 * reservation_flag - indicates if a reservation conflict 5016 * occurred during attach 5017 * 5018 * Context: Kernel Thread 5019 */ 5020 static void 5021 sd_register_devid(sd_ssc_t *ssc, dev_info_t *devi, int reservation_flag) 5022 { 5023 int rval = 0; 5024 uchar_t *inq80 = NULL; 5025 size_t inq80_len = MAX_INQUIRY_SIZE; 5026 size_t inq80_resid = 0; 5027 uchar_t *inq83 = NULL; 5028 size_t inq83_len = MAX_INQUIRY_SIZE; 5029 size_t inq83_resid = 0; 5030 int dlen, len; 5031 char *sn; 5032 struct sd_lun *un; 5033 5034 ASSERT(ssc != NULL); 5035 un = ssc->ssc_un; 5036 ASSERT(un != NULL); 5037 ASSERT(mutex_owned(SD_MUTEX(un))); 5038 ASSERT((SD_DEVINFO(un)) == devi); 5039 5040 5041 /* 5042 * We check the availability of the World Wide Name (0x83) and Unit 5043 * Serial Number (0x80) pages in sd_check_vpd_page_support(), and using 5044 * un_vpd_page_mask from them, we decide which way to get the WWN. If 5045 * 0x83 is available, that is the best choice. Our next choice is 5046 * 0x80. If neither are available, we munge the devid from the device 5047 * vid/pid/serial # for Sun qualified disks, or use the ddi framework 5048 * to fabricate a devid for non-Sun qualified disks. 
5049 */ 5050 if (sd_check_vpd_page_support(ssc) == 0) { 5051 /* collect page 80 data if available */ 5052 if (un->un_vpd_page_mask & SD_VPD_UNIT_SERIAL_PG) { 5053 5054 mutex_exit(SD_MUTEX(un)); 5055 inq80 = kmem_zalloc(inq80_len, KM_SLEEP); 5056 5057 rval = sd_send_scsi_INQUIRY(ssc, inq80, inq80_len, 5058 0x01, 0x80, &inq80_resid); 5059 5060 if (rval != 0) { 5061 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 5062 kmem_free(inq80, inq80_len); 5063 inq80 = NULL; 5064 inq80_len = 0; 5065 } else if (ddi_prop_exists( 5066 DDI_DEV_T_NONE, SD_DEVINFO(un), 5067 DDI_PROP_NOTPROM | DDI_PROP_DONTPASS, 5068 INQUIRY_SERIAL_NO) == 0) { 5069 /* 5070 * If we don't already have a serial number 5071 * property, do quick verify of data returned 5072 * and define property. 5073 */ 5074 dlen = inq80_len - inq80_resid; 5075 len = (size_t)inq80[3]; 5076 if ((dlen >= 4) && ((len + 4) <= dlen)) { 5077 /* 5078 * Ensure sn termination, skip leading 5079 * blanks, and create property 5080 * 'inquiry-serial-no'. 5081 */ 5082 sn = (char *)&inq80[4]; 5083 sn[len] = 0; 5084 while (*sn && (*sn == ' ')) 5085 sn++; 5086 if (*sn) { 5087 (void) ddi_prop_update_string( 5088 DDI_DEV_T_NONE, 5089 SD_DEVINFO(un), 5090 INQUIRY_SERIAL_NO, sn); 5091 } 5092 } 5093 } 5094 mutex_enter(SD_MUTEX(un)); 5095 } 5096 5097 /* collect page 83 data if available */ 5098 if (un->un_vpd_page_mask & SD_VPD_DEVID_WWN_PG) { 5099 mutex_exit(SD_MUTEX(un)); 5100 inq83 = kmem_zalloc(inq83_len, KM_SLEEP); 5101 5102 rval = sd_send_scsi_INQUIRY(ssc, inq83, inq83_len, 5103 0x01, 0x83, &inq83_resid); 5104 5105 if (rval != 0) { 5106 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 5107 kmem_free(inq83, inq83_len); 5108 inq83 = NULL; 5109 inq83_len = 0; 5110 } 5111 mutex_enter(SD_MUTEX(un)); 5112 } 5113 } 5114 5115 /* 5116 * If transport has already registered a devid for this target 5117 * then that takes precedence over the driver's determination 5118 * of the devid. 5119 * 5120 * NOTE: The reason this check is done here instead of at the beginning 5121 * of the function is to allow the code above to create the 5122 * 'inquiry-serial-no' property. 5123 */ 5124 if (ddi_devid_get(SD_DEVINFO(un), &un->un_devid) == DDI_SUCCESS) { 5125 ASSERT(un->un_devid); 5126 un->un_f_devid_transport_defined = TRUE; 5127 goto cleanup; /* use devid registered by the transport */ 5128 } 5129 5130 /* 5131 * This is the case of antiquated Sun disk drives that have the 5132 * FAB_DEVID property set in the disk_table. These drives 5133 * manage the devid's by storing them in last 2 available sectors 5134 * on the drive and have them fabricated by the ddi layer by calling 5135 * ddi_devid_init and passing the DEVID_FAB flag. 5136 */ 5137 if (un->un_f_opt_fab_devid == TRUE) { 5138 /* 5139 * Depending on EINVAL isn't reliable, since a reserved disk 5140 * may result in invalid geometry, so check to make sure a 5141 * reservation conflict did not occur during attach. 5142 */ 5143 if ((sd_get_devid(ssc) == EINVAL) && 5144 (reservation_flag != SD_TARGET_IS_RESERVED)) { 5145 /* 5146 * The devid is invalid AND there is no reservation 5147 * conflict. Fabricate a new devid. 
5148 */ 5149 (void) sd_create_devid(ssc); 5150 } 5151 5152 /* Register the devid if it exists */ 5153 if (un->un_devid != NULL) { 5154 (void) ddi_devid_register(SD_DEVINFO(un), 5155 un->un_devid); 5156 SD_INFO(SD_LOG_ATTACH_DETACH, un, 5157 "sd_register_devid: Devid Fabricated\n"); 5158 } 5159 goto cleanup; 5160 } 5161 5162 /* encode best devid possible based on data available */ 5163 if (ddi_devid_scsi_encode(DEVID_SCSI_ENCODE_VERSION_LATEST, 5164 (char *)ddi_driver_name(SD_DEVINFO(un)), 5165 (uchar_t *)SD_INQUIRY(un), sizeof (*SD_INQUIRY(un)), 5166 inq80, inq80_len - inq80_resid, inq83, inq83_len - 5167 inq83_resid, &un->un_devid) == DDI_SUCCESS) { 5168 5169 /* devid successfully encoded, register devid */ 5170 (void) ddi_devid_register(SD_DEVINFO(un), un->un_devid); 5171 5172 } else { 5173 /* 5174 * Unable to encode a devid based on data available. 5175 * This is not a Sun qualified disk. Older Sun disk 5176 * drives that have the SD_FAB_DEVID property 5177 * set in the disk_table and non Sun qualified 5178 * disks are treated in the same manner. These 5179 * drives manage the devid's by storing them in 5180 * last 2 available sectors on the drive and 5181 * have them fabricated by the ddi layer by 5182 * calling ddi_devid_init and passing the 5183 * DEVID_FAB flag. 5184 * Create a fabricate devid only if there's no 5185 * fabricate devid existed. 5186 */ 5187 if (sd_get_devid(ssc) == EINVAL) { 5188 (void) sd_create_devid(ssc); 5189 } 5190 un->un_f_opt_fab_devid = TRUE; 5191 5192 /* Register the devid if it exists */ 5193 if (un->un_devid != NULL) { 5194 (void) ddi_devid_register(SD_DEVINFO(un), 5195 un->un_devid); 5196 SD_INFO(SD_LOG_ATTACH_DETACH, un, 5197 "sd_register_devid: devid fabricated using " 5198 "ddi framework\n"); 5199 } 5200 } 5201 5202 cleanup: 5203 /* clean up resources */ 5204 if (inq80 != NULL) { 5205 kmem_free(inq80, inq80_len); 5206 } 5207 if (inq83 != NULL) { 5208 kmem_free(inq83, inq83_len); 5209 } 5210 } 5211 5212 5213 5214 /* 5215 * Function: sd_get_devid 5216 * 5217 * Description: This routine will return 0 if a valid device id has been 5218 * obtained from the target and stored in the soft state. If a 5219 * valid device id has not been previously read and stored, a 5220 * read attempt will be made. 5221 * 5222 * Arguments: un - driver soft state (unit) structure 5223 * 5224 * Return Code: 0 if we successfully get the device id 5225 * 5226 * Context: Kernel Thread 5227 */ 5228 5229 static int 5230 sd_get_devid(sd_ssc_t *ssc) 5231 { 5232 struct dk_devid *dkdevid; 5233 ddi_devid_t tmpid; 5234 uint_t *ip; 5235 size_t sz; 5236 diskaddr_t blk; 5237 int status; 5238 int chksum; 5239 int i; 5240 size_t buffer_size; 5241 struct sd_lun *un; 5242 5243 ASSERT(ssc != NULL); 5244 un = ssc->ssc_un; 5245 ASSERT(un != NULL); 5246 ASSERT(mutex_owned(SD_MUTEX(un))); 5247 5248 SD_TRACE(SD_LOG_ATTACH_DETACH, un, "sd_get_devid: entry: un: 0x%p\n", 5249 un); 5250 5251 if (un->un_devid != NULL) { 5252 return (0); 5253 } 5254 5255 mutex_exit(SD_MUTEX(un)); 5256 if (cmlb_get_devid_block(un->un_cmlbhandle, &blk, 5257 (void *)SD_PATH_DIRECT) != 0) { 5258 mutex_enter(SD_MUTEX(un)); 5259 return (EINVAL); 5260 } 5261 5262 /* 5263 * Read and verify device id, stored in the reserved cylinders at the 5264 * end of the disk. Backup label is on the odd sectors of the last 5265 * track of the last cylinder. Device id will be on track of the next 5266 * to last cylinder. 
5267 */ 5268 mutex_enter(SD_MUTEX(un)); 5269 buffer_size = SD_REQBYTES2TGTBYTES(un, sizeof (struct dk_devid)); 5270 mutex_exit(SD_MUTEX(un)); 5271 dkdevid = kmem_alloc(buffer_size, KM_SLEEP); 5272 status = sd_send_scsi_READ(ssc, dkdevid, buffer_size, blk, 5273 SD_PATH_DIRECT); 5274 5275 if (status != 0) { 5276 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 5277 goto error; 5278 } 5279 5280 /* Validate the revision */ 5281 if ((dkdevid->dkd_rev_hi != DK_DEVID_REV_MSB) || 5282 (dkdevid->dkd_rev_lo != DK_DEVID_REV_LSB)) { 5283 status = EINVAL; 5284 goto error; 5285 } 5286 5287 /* Calculate the checksum */ 5288 chksum = 0; 5289 ip = (uint_t *)dkdevid; 5290 for (i = 0; i < ((un->un_sys_blocksize - sizeof (int))/sizeof (int)); 5291 i++) { 5292 chksum ^= ip[i]; 5293 } 5294 5295 /* Compare the checksums */ 5296 if (DKD_GETCHKSUM(dkdevid) != chksum) { 5297 status = EINVAL; 5298 goto error; 5299 } 5300 5301 /* Validate the device id */ 5302 if (ddi_devid_valid((ddi_devid_t)&dkdevid->dkd_devid) != DDI_SUCCESS) { 5303 status = EINVAL; 5304 goto error; 5305 } 5306 5307 /* 5308 * Store the device id in the driver soft state 5309 */ 5310 sz = ddi_devid_sizeof((ddi_devid_t)&dkdevid->dkd_devid); 5311 tmpid = kmem_alloc(sz, KM_SLEEP); 5312 5313 mutex_enter(SD_MUTEX(un)); 5314 5315 un->un_devid = tmpid; 5316 bcopy(&dkdevid->dkd_devid, un->un_devid, sz); 5317 5318 kmem_free(dkdevid, buffer_size); 5319 5320 SD_TRACE(SD_LOG_ATTACH_DETACH, un, "sd_get_devid: exit: un:0x%p\n", un); 5321 5322 return (status); 5323 error: 5324 mutex_enter(SD_MUTEX(un)); 5325 kmem_free(dkdevid, buffer_size); 5326 return (status); 5327 } 5328 5329 5330 /* 5331 * Function: sd_create_devid 5332 * 5333 * Description: This routine will fabricate the device id and write it 5334 * to the disk. 5335 * 5336 * Arguments: un - driver soft state (unit) structure 5337 * 5338 * Return Code: value of the fabricated device id 5339 * 5340 * Context: Kernel Thread 5341 */ 5342 5343 static ddi_devid_t 5344 sd_create_devid(sd_ssc_t *ssc) 5345 { 5346 struct sd_lun *un; 5347 5348 ASSERT(ssc != NULL); 5349 un = ssc->ssc_un; 5350 ASSERT(un != NULL); 5351 5352 /* Fabricate the devid */ 5353 if (ddi_devid_init(SD_DEVINFO(un), DEVID_FAB, 0, NULL, &un->un_devid) 5354 == DDI_FAILURE) { 5355 return (NULL); 5356 } 5357 5358 /* Write the devid to disk */ 5359 if (sd_write_deviceid(ssc) != 0) { 5360 ddi_devid_free(un->un_devid); 5361 un->un_devid = NULL; 5362 } 5363 5364 return (un->un_devid); 5365 } 5366 5367 5368 /* 5369 * Function: sd_write_deviceid 5370 * 5371 * Description: This routine will write the device id to the disk 5372 * reserved sector. 
5373 * 5374 * Arguments: un - driver soft state (unit) structure 5375 * 5376 * Return Code: EINVAL 5377 * value returned by sd_send_scsi_cmd 5378 * 5379 * Context: Kernel Thread 5380 */ 5381 5382 static int 5383 sd_write_deviceid(sd_ssc_t *ssc) 5384 { 5385 struct dk_devid *dkdevid; 5386 diskaddr_t blk; 5387 uint_t *ip, chksum; 5388 int status; 5389 int i; 5390 struct sd_lun *un; 5391 5392 ASSERT(ssc != NULL); 5393 un = ssc->ssc_un; 5394 ASSERT(un != NULL); 5395 ASSERT(mutex_owned(SD_MUTEX(un))); 5396 5397 mutex_exit(SD_MUTEX(un)); 5398 if (cmlb_get_devid_block(un->un_cmlbhandle, &blk, 5399 (void *)SD_PATH_DIRECT) != 0) { 5400 mutex_enter(SD_MUTEX(un)); 5401 return (-1); 5402 } 5403 5404 5405 /* Allocate the buffer */ 5406 dkdevid = kmem_zalloc(un->un_sys_blocksize, KM_SLEEP); 5407 5408 /* Fill in the revision */ 5409 dkdevid->dkd_rev_hi = DK_DEVID_REV_MSB; 5410 dkdevid->dkd_rev_lo = DK_DEVID_REV_LSB; 5411 5412 /* Copy in the device id */ 5413 mutex_enter(SD_MUTEX(un)); 5414 bcopy(un->un_devid, &dkdevid->dkd_devid, 5415 ddi_devid_sizeof(un->un_devid)); 5416 mutex_exit(SD_MUTEX(un)); 5417 5418 /* Calculate the checksum */ 5419 chksum = 0; 5420 ip = (uint_t *)dkdevid; 5421 for (i = 0; i < ((un->un_sys_blocksize - sizeof (int))/sizeof (int)); 5422 i++) { 5423 chksum ^= ip[i]; 5424 } 5425 5426 /* Fill-in checksum */ 5427 DKD_FORMCHKSUM(chksum, dkdevid); 5428 5429 /* Write the reserved sector */ 5430 status = sd_send_scsi_WRITE(ssc, dkdevid, un->un_sys_blocksize, blk, 5431 SD_PATH_DIRECT); 5432 if (status != 0) 5433 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 5434 5435 kmem_free(dkdevid, un->un_sys_blocksize); 5436 5437 mutex_enter(SD_MUTEX(un)); 5438 return (status); 5439 } 5440 5441 5442 /* 5443 * Function: sd_check_vpd_page_support 5444 * 5445 * Description: This routine sends an inquiry command with the EVPD bit set and 5446 * a page code of 0x00 to the device. It is used to determine which 5447 * vital product pages are available to find the devid. We are 5448 * looking for pages 0x83 or 0x80. If we return a negative 1, the 5449 * device does not support that command. 5450 * 5451 * Arguments: un - driver soft state (unit) structure 5452 * 5453 * Return Code: 0 - success 5454 * 1 - check condition 5455 * 5456 * Context: This routine can sleep. 5457 */ 5458 5459 static int 5460 sd_check_vpd_page_support(sd_ssc_t *ssc) 5461 { 5462 uchar_t *page_list = NULL; 5463 uchar_t page_length = 0xff; /* Use max possible length */ 5464 uchar_t evpd = 0x01; /* Set the EVPD bit */ 5465 uchar_t page_code = 0x00; /* Supported VPD Pages */ 5466 int rval = 0; 5467 int counter; 5468 struct sd_lun *un; 5469 5470 ASSERT(ssc != NULL); 5471 un = ssc->ssc_un; 5472 ASSERT(un != NULL); 5473 ASSERT(mutex_owned(SD_MUTEX(un))); 5474 5475 mutex_exit(SD_MUTEX(un)); 5476 5477 /* 5478 * We'll set the page length to the maximum to save figuring it out 5479 * with an additional call. 5480 */ 5481 page_list = kmem_zalloc(page_length, KM_SLEEP); 5482 5483 rval = sd_send_scsi_INQUIRY(ssc, page_list, page_length, evpd, 5484 page_code, NULL); 5485 5486 if (rval != 0) 5487 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 5488 5489 mutex_enter(SD_MUTEX(un)); 5490 5491 /* 5492 * Now we must validate that the device accepted the command, as some 5493 * drives do not support it. If the drive does support it, we will 5494 * return 0, and the supported pages will be in un_vpd_page_mask. If 5495 * not, we return -1. 
5496 */ 5497 if ((rval == 0) && (page_list[VPD_MODE_PAGE] == 0x00)) { 5498 /* Loop to find one of the 2 pages we need */ 5499 counter = 4; /* Supported pages start at byte 4, with 0x00 */ 5500 5501 /* 5502 * Pages are returned in ascending order, and 0x83 is what we 5503 * are hoping for. 5504 */ 5505 while ((page_list[counter] <= 0x86) && 5506 (counter <= (page_list[VPD_PAGE_LENGTH] + 5507 VPD_HEAD_OFFSET))) { 5508 /* 5509 * Add 3 because page_list[3] is the number of 5510 * pages minus 3 5511 */ 5512 5513 switch (page_list[counter]) { 5514 case 0x00: 5515 un->un_vpd_page_mask |= SD_VPD_SUPPORTED_PG; 5516 break; 5517 case 0x80: 5518 un->un_vpd_page_mask |= SD_VPD_UNIT_SERIAL_PG; 5519 break; 5520 case 0x81: 5521 un->un_vpd_page_mask |= SD_VPD_OPERATING_PG; 5522 break; 5523 case 0x82: 5524 un->un_vpd_page_mask |= SD_VPD_ASCII_OP_PG; 5525 break; 5526 case 0x83: 5527 un->un_vpd_page_mask |= SD_VPD_DEVID_WWN_PG; 5528 break; 5529 case 0x86: 5530 un->un_vpd_page_mask |= SD_VPD_EXTENDED_DATA_PG; 5531 break; 5532 } 5533 counter++; 5534 } 5535 5536 } else { 5537 rval = -1; 5538 5539 SD_INFO(SD_LOG_ATTACH_DETACH, un, 5540 "sd_check_vpd_page_support: This drive does not implement " 5541 "VPD pages.\n"); 5542 } 5543 5544 kmem_free(page_list, page_length); 5545 5546 return (rval); 5547 } 5548 5549 5550 /* 5551 * Function: sd_setup_pm 5552 * 5553 * Description: Initialize Power Management on the device 5554 * 5555 * Context: Kernel Thread 5556 */ 5557 5558 static void 5559 sd_setup_pm(sd_ssc_t *ssc, dev_info_t *devi) 5560 { 5561 uint_t log_page_size; 5562 uchar_t *log_page_data; 5563 int rval = 0; 5564 struct sd_lun *un; 5565 5566 ASSERT(ssc != NULL); 5567 un = ssc->ssc_un; 5568 ASSERT(un != NULL); 5569 5570 /* 5571 * Since we are called from attach, holding a mutex for 5572 * un is unnecessary. Because some of the routines called 5573 * from here require SD_MUTEX to not be held, assert this 5574 * right up front. 5575 */ 5576 ASSERT(!mutex_owned(SD_MUTEX(un))); 5577 /* 5578 * Since the sd device does not have the 'reg' property, 5579 * cpr will not call its DDI_SUSPEND/DDI_RESUME entries. 5580 * The following code is to tell cpr that this device 5581 * DOES need to be suspended and resumed. 5582 */ 5583 (void) ddi_prop_update_string(DDI_DEV_T_NONE, devi, 5584 "pm-hardware-state", "needs-suspend-resume"); 5585 5586 /* 5587 * This complies with the new power management framework 5588 * for certain desktop machines. Create the pm_components 5589 * property as a string array property. 5590 */ 5591 if (un->un_f_pm_supported) { 5592 /* 5593 * not all devices have a motor, try it first. 5594 * some devices may return ILLEGAL REQUEST, some 5595 * will hang 5596 * The following START_STOP_UNIT is used to check if target 5597 * device has a motor. 
5598 */ 5599 un->un_f_start_stop_supported = TRUE; 5600 rval = sd_send_scsi_START_STOP_UNIT(ssc, SD_TARGET_START, 5601 SD_PATH_DIRECT); 5602 5603 if (rval != 0) { 5604 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 5605 un->un_f_start_stop_supported = FALSE; 5606 } 5607 5608 /* 5609 * create pm properties anyways otherwise the parent can't 5610 * go to sleep 5611 */ 5612 (void) sd_create_pm_components(devi, un); 5613 un->un_f_pm_is_enabled = TRUE; 5614 return; 5615 } 5616 5617 if (!un->un_f_log_sense_supported) { 5618 un->un_power_level = SD_SPINDLE_ON; 5619 un->un_f_pm_is_enabled = FALSE; 5620 return; 5621 } 5622 5623 rval = sd_log_page_supported(ssc, START_STOP_CYCLE_PAGE); 5624 5625 #ifdef SDDEBUG 5626 if (sd_force_pm_supported) { 5627 /* Force a successful result */ 5628 rval = 1; 5629 } 5630 #endif 5631 5632 /* 5633 * If the start-stop cycle counter log page is not supported 5634 * or if the pm-capable property is SD_PM_CAPABLE_FALSE (0) 5635 * then we should not create the pm_components property. 5636 */ 5637 if (rval == -1) { 5638 /* 5639 * Error. 5640 * Reading log sense failed, most likely this is 5641 * an older drive that does not support log sense. 5642 * If this fails auto-pm is not supported. 5643 */ 5644 un->un_power_level = SD_SPINDLE_ON; 5645 un->un_f_pm_is_enabled = FALSE; 5646 5647 } else if (rval == 0) { 5648 /* 5649 * Page not found. 5650 * The start stop cycle counter is implemented as page 5651 * START_STOP_CYCLE_PAGE_VU_PAGE (0x31) in older disks. For 5652 * newer disks it is implemented as START_STOP_CYCLE_PAGE (0xE). 5653 */ 5654 if (sd_log_page_supported(ssc, START_STOP_CYCLE_VU_PAGE) == 1) { 5655 /* 5656 * Page found, use this one. 5657 */ 5658 un->un_start_stop_cycle_page = START_STOP_CYCLE_VU_PAGE; 5659 un->un_f_pm_is_enabled = TRUE; 5660 } else { 5661 /* 5662 * Error or page not found. 5663 * auto-pm is not supported for this device. 5664 */ 5665 un->un_power_level = SD_SPINDLE_ON; 5666 un->un_f_pm_is_enabled = FALSE; 5667 } 5668 } else { 5669 /* 5670 * Page found, use it. 5671 */ 5672 un->un_start_stop_cycle_page = START_STOP_CYCLE_PAGE; 5673 un->un_f_pm_is_enabled = TRUE; 5674 } 5675 5676 5677 if (un->un_f_pm_is_enabled == TRUE) { 5678 log_page_size = START_STOP_CYCLE_COUNTER_PAGE_SIZE; 5679 log_page_data = kmem_zalloc(log_page_size, KM_SLEEP); 5680 5681 rval = sd_send_scsi_LOG_SENSE(ssc, log_page_data, 5682 log_page_size, un->un_start_stop_cycle_page, 5683 0x01, 0, SD_PATH_DIRECT); 5684 5685 if (rval != 0) { 5686 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 5687 } 5688 5689 #ifdef SDDEBUG 5690 if (sd_force_pm_supported) { 5691 /* Force a successful result */ 5692 rval = 0; 5693 } 5694 #endif 5695 5696 /* 5697 * If the Log sense for Page( Start/stop cycle counter page) 5698 * succeeds, then power management is supported and we can 5699 * enable auto-pm. 5700 */ 5701 if (rval == 0) { 5702 (void) sd_create_pm_components(devi, un); 5703 } else { 5704 un->un_power_level = SD_SPINDLE_ON; 5705 un->un_f_pm_is_enabled = FALSE; 5706 } 5707 5708 kmem_free(log_page_data, log_page_size); 5709 } 5710 } 5711 5712 5713 /* 5714 * Function: sd_create_pm_components 5715 * 5716 * Description: Initialize PM property. 
5717 * 5718 * Context: Kernel thread context 5719 */ 5720 5721 static void 5722 sd_create_pm_components(dev_info_t *devi, struct sd_lun *un) 5723 { 5724 char *pm_comp[] = { "NAME=spindle-motor", "0=off", "1=on", NULL }; 5725 5726 ASSERT(!mutex_owned(SD_MUTEX(un))); 5727 5728 if (ddi_prop_update_string_array(DDI_DEV_T_NONE, devi, 5729 "pm-components", pm_comp, 3) == DDI_PROP_SUCCESS) { 5730 /* 5731 * When components are initially created they are idle, 5732 * power up any non-removables. 5733 * Note: the return value of pm_raise_power can't be used 5734 * for determining if PM should be enabled for this device. 5735 * Even if you check the return values and remove this 5736 * property created above, the PM framework will not honor the 5737 * change after the first call to pm_raise_power. Hence, 5738 * removal of that property does not help if pm_raise_power 5739 * fails. In the case of removable media, the start/stop 5740 * will fail if the media is not present. 5741 */ 5742 if (un->un_f_attach_spinup && (pm_raise_power(SD_DEVINFO(un), 0, 5743 SD_SPINDLE_ON) == DDI_SUCCESS)) { 5744 mutex_enter(SD_MUTEX(un)); 5745 un->un_power_level = SD_SPINDLE_ON; 5746 mutex_enter(&un->un_pm_mutex); 5747 /* Set to on and not busy. */ 5748 un->un_pm_count = 0; 5749 } else { 5750 mutex_enter(SD_MUTEX(un)); 5751 un->un_power_level = SD_SPINDLE_OFF; 5752 mutex_enter(&un->un_pm_mutex); 5753 /* Set to off. */ 5754 un->un_pm_count = -1; 5755 } 5756 mutex_exit(&un->un_pm_mutex); 5757 mutex_exit(SD_MUTEX(un)); 5758 } else { 5759 un->un_power_level = SD_SPINDLE_ON; 5760 un->un_f_pm_is_enabled = FALSE; 5761 } 5762 } 5763 5764 5765 /* 5766 * Function: sd_ddi_suspend 5767 * 5768 * Description: Performs system power-down operations. This includes 5769 * setting the drive state to indicate its suspended so 5770 * that no new commands will be accepted. Also, wait for 5771 * all commands that are in transport or queued to a timer 5772 * for retry to complete. All timeout threads are cancelled. 5773 * 5774 * Return Code: DDI_FAILURE or DDI_SUCCESS 5775 * 5776 * Context: Kernel thread context 5777 */ 5778 5779 static int 5780 sd_ddi_suspend(dev_info_t *devi) 5781 { 5782 struct sd_lun *un; 5783 clock_t wait_cmds_complete; 5784 5785 un = ddi_get_soft_state(sd_state, ddi_get_instance(devi)); 5786 if (un == NULL) { 5787 return (DDI_FAILURE); 5788 } 5789 5790 SD_TRACE(SD_LOG_IO_PM, un, "sd_ddi_suspend: entry\n"); 5791 5792 mutex_enter(SD_MUTEX(un)); 5793 5794 /* Return success if the device is already suspended. */ 5795 if (un->un_state == SD_STATE_SUSPENDED) { 5796 mutex_exit(SD_MUTEX(un)); 5797 SD_TRACE(SD_LOG_IO_PM, un, "sd_ddi_suspend: " 5798 "device already suspended, exiting\n"); 5799 return (DDI_SUCCESS); 5800 } 5801 5802 /* Return failure if the device is being used by HA */ 5803 if (un->un_resvd_status & 5804 (SD_RESERVE | SD_WANT_RESERVE | SD_LOST_RESERVE)) { 5805 mutex_exit(SD_MUTEX(un)); 5806 SD_TRACE(SD_LOG_IO_PM, un, "sd_ddi_suspend: " 5807 "device in use by HA, exiting\n"); 5808 return (DDI_FAILURE); 5809 } 5810 5811 /* 5812 * Return failure if the device is in a resource wait 5813 * or power changing state. 
5814 */ 5815 if ((un->un_state == SD_STATE_RWAIT) || 5816 (un->un_state == SD_STATE_PM_CHANGING)) { 5817 mutex_exit(SD_MUTEX(un)); 5818 SD_TRACE(SD_LOG_IO_PM, un, "sd_ddi_suspend: " 5819 "device in resource wait state, exiting\n"); 5820 return (DDI_FAILURE); 5821 } 5822 5823 5824 un->un_save_state = un->un_last_state; 5825 New_state(un, SD_STATE_SUSPENDED); 5826 5827 /* 5828 * Wait for all commands that are in transport or queued to a timer 5829 * for retry to complete. 5830 * 5831 * While waiting, no new commands will be accepted or sent because of 5832 * the new state we set above. 5833 * 5834 * Wait till current operation has completed. If we are in the resource 5835 * wait state (with an intr outstanding) then we need to wait till the 5836 * intr completes and starts the next cmd. We want to wait for 5837 * SD_WAIT_CMDS_COMPLETE seconds before failing the DDI_SUSPEND. 5838 */ 5839 wait_cmds_complete = ddi_get_lbolt() + 5840 (sd_wait_cmds_complete * drv_usectohz(1000000)); 5841 5842 while (un->un_ncmds_in_transport != 0) { 5843 /* 5844 * Fail if commands do not finish in the specified time. 5845 */ 5846 if (cv_timedwait(&un->un_disk_busy_cv, SD_MUTEX(un), 5847 wait_cmds_complete) == -1) { 5848 /* 5849 * Undo the state changes made above. Everything 5850 * must go back to it's original value. 5851 */ 5852 Restore_state(un); 5853 un->un_last_state = un->un_save_state; 5854 /* Wake up any threads that might be waiting. */ 5855 cv_broadcast(&un->un_suspend_cv); 5856 mutex_exit(SD_MUTEX(un)); 5857 SD_ERROR(SD_LOG_IO_PM, un, 5858 "sd_ddi_suspend: failed due to outstanding cmds\n"); 5859 SD_TRACE(SD_LOG_IO_PM, un, "sd_ddi_suspend: exiting\n"); 5860 return (DDI_FAILURE); 5861 } 5862 } 5863 5864 /* 5865 * Cancel SCSI watch thread and timeouts, if any are active 5866 */ 5867 5868 if (SD_OK_TO_SUSPEND_SCSI_WATCHER(un)) { 5869 opaque_t temp_token = un->un_swr_token; 5870 mutex_exit(SD_MUTEX(un)); 5871 scsi_watch_suspend(temp_token); 5872 mutex_enter(SD_MUTEX(un)); 5873 } 5874 5875 if (un->un_reset_throttle_timeid != NULL) { 5876 timeout_id_t temp_id = un->un_reset_throttle_timeid; 5877 un->un_reset_throttle_timeid = NULL; 5878 mutex_exit(SD_MUTEX(un)); 5879 (void) untimeout(temp_id); 5880 mutex_enter(SD_MUTEX(un)); 5881 } 5882 5883 if (un->un_dcvb_timeid != NULL) { 5884 timeout_id_t temp_id = un->un_dcvb_timeid; 5885 un->un_dcvb_timeid = NULL; 5886 mutex_exit(SD_MUTEX(un)); 5887 (void) untimeout(temp_id); 5888 mutex_enter(SD_MUTEX(un)); 5889 } 5890 5891 mutex_enter(&un->un_pm_mutex); 5892 if (un->un_pm_timeid != NULL) { 5893 timeout_id_t temp_id = un->un_pm_timeid; 5894 un->un_pm_timeid = NULL; 5895 mutex_exit(&un->un_pm_mutex); 5896 mutex_exit(SD_MUTEX(un)); 5897 (void) untimeout(temp_id); 5898 mutex_enter(SD_MUTEX(un)); 5899 } else { 5900 mutex_exit(&un->un_pm_mutex); 5901 } 5902 5903 if (un->un_retry_timeid != NULL) { 5904 timeout_id_t temp_id = un->un_retry_timeid; 5905 un->un_retry_timeid = NULL; 5906 mutex_exit(SD_MUTEX(un)); 5907 (void) untimeout(temp_id); 5908 mutex_enter(SD_MUTEX(un)); 5909 5910 if (un->un_retry_bp != NULL) { 5911 un->un_retry_bp->av_forw = un->un_waitq_headp; 5912 un->un_waitq_headp = un->un_retry_bp; 5913 if (un->un_waitq_tailp == NULL) { 5914 un->un_waitq_tailp = un->un_retry_bp; 5915 } 5916 un->un_retry_bp = NULL; 5917 un->un_retry_statp = NULL; 5918 } 5919 } 5920 5921 if (un->un_direct_priority_timeid != NULL) { 5922 timeout_id_t temp_id = un->un_direct_priority_timeid; 5923 un->un_direct_priority_timeid = NULL; 5924 mutex_exit(SD_MUTEX(un)); 5925 (void) 
untimeout(temp_id);
		mutex_enter(SD_MUTEX(un));
	}

	if (un->un_f_is_fibre == TRUE) {
		/*
		 * Remove callbacks for insert and remove events
		 */
		if (un->un_insert_event != NULL) {
			mutex_exit(SD_MUTEX(un));
			(void) ddi_remove_event_handler(un->un_insert_cb_id);
			mutex_enter(SD_MUTEX(un));
			un->un_insert_event = NULL;
		}

		if (un->un_remove_event != NULL) {
			mutex_exit(SD_MUTEX(un));
			(void) ddi_remove_event_handler(un->un_remove_cb_id);
			mutex_enter(SD_MUTEX(un));
			un->un_remove_event = NULL;
		}
	}

	mutex_exit(SD_MUTEX(un));

	SD_TRACE(SD_LOG_IO_PM, un, "sd_ddi_suspend: exit\n");

	return (DDI_SUCCESS);
}


/*
 * Function:	sd_ddi_pm_suspend
 *
 * Description:	Set the drive state to low power.
 *		Someone else is required to actually change the drive
 *		power level.
 *
 * Arguments:	un - driver soft state (unit) structure
 *
 * Return Code:	DDI_FAILURE or DDI_SUCCESS
 *
 * Context:	Kernel thread context
 */

static int
sd_ddi_pm_suspend(struct sd_lun *un)
{
	ASSERT(un != NULL);
	SD_TRACE(SD_LOG_POWER, un, "sd_ddi_pm_suspend: entry\n");

	ASSERT(!mutex_owned(SD_MUTEX(un)));
	mutex_enter(SD_MUTEX(un));

	/*
	 * Exit if power management is not enabled for this device, or if
	 * the device is being used by HA.
	 */
	if ((un->un_f_pm_is_enabled == FALSE) || (un->un_resvd_status &
	    (SD_RESERVE | SD_WANT_RESERVE | SD_LOST_RESERVE))) {
		mutex_exit(SD_MUTEX(un));
		SD_TRACE(SD_LOG_POWER, un, "sd_ddi_pm_suspend: exiting\n");
		return (DDI_SUCCESS);
	}

	SD_INFO(SD_LOG_POWER, un, "sd_ddi_pm_suspend: un_ncmds_in_driver=%ld\n",
	    un->un_ncmds_in_driver);

	/*
	 * See if the device is not busy, i.e.:
	 *    - we have no commands in the driver for this device
	 *    - not waiting for resources
	 */
	if ((un->un_ncmds_in_driver == 0) &&
	    (un->un_state != SD_STATE_RWAIT)) {
		/*
		 * The device is not busy, so it is OK to go to low power
		 * state. Indicate low power, but rely on someone else to
		 * actually change it.
		 */
		mutex_enter(&un->un_pm_mutex);
		un->un_pm_count = -1;
		mutex_exit(&un->un_pm_mutex);
		un->un_power_level = SD_SPINDLE_OFF;
	}

	mutex_exit(SD_MUTEX(un));

	SD_TRACE(SD_LOG_POWER, un, "sd_ddi_pm_suspend: exit\n");

	return (DDI_SUCCESS);
}


/*
 * Function:	sd_ddi_resume
 *
 * Description:	Performs system power-up operations.
 *
 * Return Code:	DDI_SUCCESS
 *		DDI_FAILURE
 *
 * Context:	Kernel thread context
 */

static int
sd_ddi_resume(dev_info_t *devi)
{
	struct	sd_lun	*un;

	un = ddi_get_soft_state(sd_state, ddi_get_instance(devi));
	if (un == NULL) {
		return (DDI_FAILURE);
	}

	SD_TRACE(SD_LOG_IO_PM, un, "sd_ddi_resume: entry\n");

	mutex_enter(SD_MUTEX(un));
	Restore_state(un);

	/*
	 * Restore the state that was saved so that un_last_state
	 * holds the right value.
	 */
	un->un_last_state = un->un_save_state;

	/*
	 * Note: throttle comes back at full.
	 * Also note: this MUST be done before calling pm_raise_power,
	 * otherwise the system can get hung in biowait. The scenario where
	 * this will happen is under cpr suspend.
Writing of the system 6055 * state goes through sddump, which writes 0 to un_throttle. If 6056 * writing the system state then fails, example if the partition is 6057 * too small, then cpr attempts a resume. If throttle isn't restored 6058 * from the saved value until after calling pm_raise_power then 6059 * cmds sent in sdpower are not transported and sd_send_scsi_cmd hangs 6060 * in biowait. 6061 */ 6062 un->un_throttle = un->un_saved_throttle; 6063 6064 /* 6065 * The chance of failure is very rare as the only command done in power 6066 * entry point is START command when you transition from 0->1 or 6067 * unknown->1. Put it to SPINDLE ON state irrespective of the state at 6068 * which suspend was done. Ignore the return value as the resume should 6069 * not be failed. In the case of removable media the media need not be 6070 * inserted and hence there is a chance that raise power will fail with 6071 * media not present. 6072 */ 6073 if (un->un_f_attach_spinup) { 6074 mutex_exit(SD_MUTEX(un)); 6075 (void) pm_raise_power(SD_DEVINFO(un), 0, SD_SPINDLE_ON); 6076 mutex_enter(SD_MUTEX(un)); 6077 } 6078 6079 /* 6080 * Don't broadcast to the suspend cv and therefore possibly 6081 * start I/O until after power has been restored. 6082 */ 6083 cv_broadcast(&un->un_suspend_cv); 6084 cv_broadcast(&un->un_state_cv); 6085 6086 /* restart thread */ 6087 if (SD_OK_TO_RESUME_SCSI_WATCHER(un)) { 6088 scsi_watch_resume(un->un_swr_token); 6089 } 6090 6091 #if (defined(__fibre)) 6092 if (un->un_f_is_fibre == TRUE) { 6093 /* 6094 * Add callbacks for insert and remove events 6095 */ 6096 if (strcmp(un->un_node_type, DDI_NT_BLOCK_CHAN)) { 6097 sd_init_event_callbacks(un); 6098 } 6099 } 6100 #endif 6101 6102 /* 6103 * Transport any pending commands to the target. 6104 * 6105 * If this is a low-activity device commands in queue will have to wait 6106 * until new commands come in, which may take awhile. Also, we 6107 * specifically don't check un_ncmds_in_transport because we know that 6108 * there really are no commands in progress after the unit was 6109 * suspended and we could have reached the throttle level, been 6110 * suspended, and have no new commands coming in for awhile. Highly 6111 * unlikely, but so is the low-activity disk scenario. 6112 */ 6113 ddi_xbuf_dispatch(un->un_xbuf_attr); 6114 6115 sd_start_cmds(un, NULL); 6116 mutex_exit(SD_MUTEX(un)); 6117 6118 SD_TRACE(SD_LOG_IO_PM, un, "sd_ddi_resume: exit\n"); 6119 6120 return (DDI_SUCCESS); 6121 } 6122 6123 6124 /* 6125 * Function: sd_ddi_pm_resume 6126 * 6127 * Description: Set the drive state to powered on. 6128 * Someone else is required to actually change the drive 6129 * power level. 6130 * 6131 * Arguments: un - driver soft state (unit) structure 6132 * 6133 * Return Code: DDI_SUCCESS 6134 * 6135 * Context: Kernel thread context 6136 */ 6137 6138 static int 6139 sd_ddi_pm_resume(struct sd_lun *un) 6140 { 6141 ASSERT(un != NULL); 6142 6143 ASSERT(!mutex_owned(SD_MUTEX(un))); 6144 mutex_enter(SD_MUTEX(un)); 6145 un->un_power_level = SD_SPINDLE_ON; 6146 6147 ASSERT(!mutex_owned(&un->un_pm_mutex)); 6148 mutex_enter(&un->un_pm_mutex); 6149 if (SD_DEVICE_IS_IN_LOW_POWER(un)) { 6150 un->un_pm_count++; 6151 ASSERT(un->un_pm_count == 0); 6152 /* 6153 * Note: no longer do the cv_broadcast on un_suspend_cv. The 6154 * un_suspend_cv is for a system resume, not a power management 6155 * device resume. 
(4297749) 6156 * cv_broadcast(&un->un_suspend_cv); 6157 */ 6158 } 6159 mutex_exit(&un->un_pm_mutex); 6160 mutex_exit(SD_MUTEX(un)); 6161 6162 return (DDI_SUCCESS); 6163 } 6164 6165 6166 /* 6167 * Function: sd_pm_idletimeout_handler 6168 * 6169 * Description: A timer routine that's active only while a device is busy. 6170 * The purpose is to extend slightly the pm framework's busy 6171 * view of the device to prevent busy/idle thrashing for 6172 * back-to-back commands. Do this by comparing the current time 6173 * to the time at which the last command completed and when the 6174 * difference is greater than sd_pm_idletime, call 6175 * pm_idle_component. In addition to indicating idle to the pm 6176 * framework, update the chain type to again use the internal pm 6177 * layers of the driver. 6178 * 6179 * Arguments: arg - driver soft state (unit) structure 6180 * 6181 * Context: Executes in a timeout(9F) thread context 6182 */ 6183 6184 static void 6185 sd_pm_idletimeout_handler(void *arg) 6186 { 6187 struct sd_lun *un = arg; 6188 6189 time_t now; 6190 6191 mutex_enter(&sd_detach_mutex); 6192 if (un->un_detach_count != 0) { 6193 /* Abort if the instance is detaching */ 6194 mutex_exit(&sd_detach_mutex); 6195 return; 6196 } 6197 mutex_exit(&sd_detach_mutex); 6198 6199 now = ddi_get_time(); 6200 /* 6201 * Grab both mutexes, in the proper order, since we're accessing 6202 * both PM and softstate variables. 6203 */ 6204 mutex_enter(SD_MUTEX(un)); 6205 mutex_enter(&un->un_pm_mutex); 6206 if (((now - un->un_pm_idle_time) > sd_pm_idletime) && 6207 (un->un_ncmds_in_driver == 0) && (un->un_pm_count == 0)) { 6208 /* 6209 * Update the chain types. 6210 * This takes affect on the next new command received. 6211 */ 6212 if (un->un_f_non_devbsize_supported) { 6213 un->un_buf_chain_type = SD_CHAIN_INFO_RMMEDIA; 6214 } else { 6215 un->un_buf_chain_type = SD_CHAIN_INFO_DISK; 6216 } 6217 un->un_uscsi_chain_type = SD_CHAIN_INFO_USCSI_CMD; 6218 6219 SD_TRACE(SD_LOG_IO_PM, un, 6220 "sd_pm_idletimeout_handler: idling device\n"); 6221 (void) pm_idle_component(SD_DEVINFO(un), 0); 6222 un->un_pm_idle_timeid = NULL; 6223 } else { 6224 un->un_pm_idle_timeid = 6225 timeout(sd_pm_idletimeout_handler, un, 6226 (drv_usectohz((clock_t)300000))); /* 300 ms. */ 6227 } 6228 mutex_exit(&un->un_pm_mutex); 6229 mutex_exit(SD_MUTEX(un)); 6230 } 6231 6232 6233 /* 6234 * Function: sd_pm_timeout_handler 6235 * 6236 * Description: Callback to tell framework we are idle. 6237 * 6238 * Context: timeout(9f) thread context. 6239 */ 6240 6241 static void 6242 sd_pm_timeout_handler(void *arg) 6243 { 6244 struct sd_lun *un = arg; 6245 6246 (void) pm_idle_component(SD_DEVINFO(un), 0); 6247 mutex_enter(&un->un_pm_mutex); 6248 un->un_pm_timeid = NULL; 6249 mutex_exit(&un->un_pm_mutex); 6250 } 6251 6252 6253 /* 6254 * Function: sdpower 6255 * 6256 * Description: PM entry point. 
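 *
 *		Note: the driver exports a single power-manageable
 *		component (component 0, the spindle motor), so the only
 *		calls accepted here are, in effect:
 *
 *		    sdpower(devi, 0, SD_SPINDLE_OFF);	spin down
 *		    sdpower(devi, 0, SD_SPINDLE_ON);	spin up
 *
 *		Any other component or level is rejected with DDI_FAILURE.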
6257 * 6258 * Return Code: DDI_SUCCESS 6259 * DDI_FAILURE 6260 * 6261 * Context: Kernel thread context 6262 */ 6263 6264 static int 6265 sdpower(dev_info_t *devi, int component, int level) 6266 { 6267 struct sd_lun *un; 6268 int instance; 6269 int rval = DDI_SUCCESS; 6270 uint_t i, log_page_size, maxcycles, ncycles; 6271 uchar_t *log_page_data; 6272 int log_sense_page; 6273 int medium_present; 6274 time_t intvlp; 6275 dev_t dev; 6276 struct pm_trans_data sd_pm_tran_data; 6277 uchar_t save_state; 6278 int sval; 6279 uchar_t state_before_pm; 6280 int got_semaphore_here; 6281 sd_ssc_t *ssc; 6282 6283 instance = ddi_get_instance(devi); 6284 6285 if (((un = ddi_get_soft_state(sd_state, instance)) == NULL) || 6286 (SD_SPINDLE_OFF > level) || (level > SD_SPINDLE_ON) || 6287 component != 0) { 6288 return (DDI_FAILURE); 6289 } 6290 6291 dev = sd_make_device(SD_DEVINFO(un)); 6292 ssc = sd_ssc_init(un); 6293 6294 SD_TRACE(SD_LOG_IO_PM, un, "sdpower: entry, level = %d\n", level); 6295 6296 /* 6297 * Must synchronize power down with close. 6298 * Attempt to decrement/acquire the open/close semaphore, 6299 * but do NOT wait on it. If it's not greater than zero, 6300 * ie. it can't be decremented without waiting, then 6301 * someone else, either open or close, already has it 6302 * and the try returns 0. Use that knowledge here to determine 6303 * if it's OK to change the device power level. 6304 * Also, only increment it on exit if it was decremented, ie. gotten, 6305 * here. 6306 */ 6307 got_semaphore_here = sema_tryp(&un->un_semoclose); 6308 6309 mutex_enter(SD_MUTEX(un)); 6310 6311 SD_INFO(SD_LOG_POWER, un, "sdpower: un_ncmds_in_driver = %ld\n", 6312 un->un_ncmds_in_driver); 6313 6314 /* 6315 * If un_ncmds_in_driver is non-zero it indicates commands are 6316 * already being processed in the driver, or if the semaphore was 6317 * not gotten here it indicates an open or close is being processed. 6318 * At the same time somebody is requesting to go low power which 6319 * can't happen, therefore we need to return failure. 6320 */ 6321 if ((level == SD_SPINDLE_OFF) && 6322 ((un->un_ncmds_in_driver != 0) || (got_semaphore_here == 0))) { 6323 mutex_exit(SD_MUTEX(un)); 6324 6325 if (got_semaphore_here != 0) { 6326 sema_v(&un->un_semoclose); 6327 } 6328 SD_TRACE(SD_LOG_IO_PM, un, 6329 "sdpower: exit, device has queued cmds.\n"); 6330 6331 goto sdpower_failed; 6332 } 6333 6334 /* 6335 * if it is OFFLINE that means the disk is completely dead 6336 * in our case we have to put the disk in on or off by sending commands 6337 * Of course that will fail anyway so return back here. 6338 * 6339 * Power changes to a device that's OFFLINE or SUSPENDED 6340 * are not allowed. 6341 */ 6342 if ((un->un_state == SD_STATE_OFFLINE) || 6343 (un->un_state == SD_STATE_SUSPENDED)) { 6344 mutex_exit(SD_MUTEX(un)); 6345 6346 if (got_semaphore_here != 0) { 6347 sema_v(&un->un_semoclose); 6348 } 6349 SD_TRACE(SD_LOG_IO_PM, un, 6350 "sdpower: exit, device is off-line.\n"); 6351 6352 goto sdpower_failed; 6353 } 6354 6355 /* 6356 * Change the device's state to indicate it's power level 6357 * is being changed. Do this to prevent a power off in the 6358 * middle of commands, which is especially bad on devices 6359 * that are really powered off instead of just spun down. 
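	 *
	 * Note: the start/stop cycle counters parsed from the log page
	 * further below are stored big-endian, four bytes each, and are
	 * reassembled in the usual way, e.g.:
	 *
	 *	maxcycles = (log_page_data[0x1c] << 24) |
	 *	    (log_page_data[0x1d] << 16) |
	 *	    (log_page_data[0x1e] << 8) | log_page_data[0x1f];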
6360 */ 6361 state_before_pm = un->un_state; 6362 un->un_state = SD_STATE_PM_CHANGING; 6363 6364 mutex_exit(SD_MUTEX(un)); 6365 6366 /* 6367 * If "pm-capable" property is set to TRUE by HBA drivers, 6368 * bypass the following checking, otherwise, check the log 6369 * sense information for this device 6370 */ 6371 if ((level == SD_SPINDLE_OFF) && un->un_f_log_sense_supported) { 6372 /* 6373 * Get the log sense information to understand whether the 6374 * the powercycle counts have gone beyond the threshhold. 6375 */ 6376 log_page_size = START_STOP_CYCLE_COUNTER_PAGE_SIZE; 6377 log_page_data = kmem_zalloc(log_page_size, KM_SLEEP); 6378 6379 mutex_enter(SD_MUTEX(un)); 6380 log_sense_page = un->un_start_stop_cycle_page; 6381 mutex_exit(SD_MUTEX(un)); 6382 6383 rval = sd_send_scsi_LOG_SENSE(ssc, log_page_data, 6384 log_page_size, log_sense_page, 0x01, 0, SD_PATH_DIRECT); 6385 6386 if (rval != 0) { 6387 if (rval == EIO) 6388 sd_ssc_assessment(ssc, SD_FMT_STATUS_CHECK); 6389 else 6390 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 6391 } 6392 6393 #ifdef SDDEBUG 6394 if (sd_force_pm_supported) { 6395 /* Force a successful result */ 6396 rval = 0; 6397 } 6398 #endif 6399 if (rval != 0) { 6400 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 6401 "Log Sense Failed\n"); 6402 6403 kmem_free(log_page_data, log_page_size); 6404 /* Cannot support power management on those drives */ 6405 6406 if (got_semaphore_here != 0) { 6407 sema_v(&un->un_semoclose); 6408 } 6409 /* 6410 * On exit put the state back to it's original value 6411 * and broadcast to anyone waiting for the power 6412 * change completion. 6413 */ 6414 mutex_enter(SD_MUTEX(un)); 6415 un->un_state = state_before_pm; 6416 cv_broadcast(&un->un_suspend_cv); 6417 mutex_exit(SD_MUTEX(un)); 6418 SD_TRACE(SD_LOG_IO_PM, un, 6419 "sdpower: exit, Log Sense Failed.\n"); 6420 6421 goto sdpower_failed; 6422 } 6423 6424 /* 6425 * From the page data - Convert the essential information to 6426 * pm_trans_data 6427 */ 6428 maxcycles = 6429 (log_page_data[0x1c] << 24) | (log_page_data[0x1d] << 16) | 6430 (log_page_data[0x1E] << 8) | log_page_data[0x1F]; 6431 6432 sd_pm_tran_data.un.scsi_cycles.lifemax = maxcycles; 6433 6434 ncycles = 6435 (log_page_data[0x24] << 24) | (log_page_data[0x25] << 16) | 6436 (log_page_data[0x26] << 8) | log_page_data[0x27]; 6437 6438 sd_pm_tran_data.un.scsi_cycles.ncycles = ncycles; 6439 6440 for (i = 0; i < DC_SCSI_MFR_LEN; i++) { 6441 sd_pm_tran_data.un.scsi_cycles.svc_date[i] = 6442 log_page_data[8+i]; 6443 } 6444 6445 kmem_free(log_page_data, log_page_size); 6446 6447 /* 6448 * Call pm_trans_check routine to get the Ok from 6449 * the global policy 6450 */ 6451 6452 sd_pm_tran_data.format = DC_SCSI_FORMAT; 6453 sd_pm_tran_data.un.scsi_cycles.flag = 0; 6454 6455 rval = pm_trans_check(&sd_pm_tran_data, &intvlp); 6456 #ifdef SDDEBUG 6457 if (sd_force_pm_supported) { 6458 /* Force a successful result */ 6459 rval = 1; 6460 } 6461 #endif 6462 switch (rval) { 6463 case 0: 6464 /* 6465 * Not Ok to Power cycle or error in parameters passed 6466 * Would have given the advised time to consider power 6467 * cycle. Based on the new intvlp parameter we are 6468 * supposed to pretend we are busy so that pm framework 6469 * will never call our power entry point. Because of 6470 * that install a timeout handler and wait for the 6471 * recommended time to elapse so that power management 6472 * can be effective again. 6473 * 6474 * To effect this behavior, call pm_busy_component to 6475 * indicate to the framework this device is busy. 
6476 * By not adjusting un_pm_count the rest of PM in 6477 * the driver will function normally, and independent 6478 * of this but because the framework is told the device 6479 * is busy it won't attempt powering down until it gets 6480 * a matching idle. The timeout handler sends this. 6481 * Note: sd_pm_entry can't be called here to do this 6482 * because sdpower may have been called as a result 6483 * of a call to pm_raise_power from within sd_pm_entry. 6484 * 6485 * If a timeout handler is already active then 6486 * don't install another. 6487 */ 6488 mutex_enter(&un->un_pm_mutex); 6489 if (un->un_pm_timeid == NULL) { 6490 un->un_pm_timeid = 6491 timeout(sd_pm_timeout_handler, 6492 un, intvlp * drv_usectohz(1000000)); 6493 mutex_exit(&un->un_pm_mutex); 6494 (void) pm_busy_component(SD_DEVINFO(un), 0); 6495 } else { 6496 mutex_exit(&un->un_pm_mutex); 6497 } 6498 if (got_semaphore_here != 0) { 6499 sema_v(&un->un_semoclose); 6500 } 6501 /* 6502 * On exit put the state back to it's original value 6503 * and broadcast to anyone waiting for the power 6504 * change completion. 6505 */ 6506 mutex_enter(SD_MUTEX(un)); 6507 un->un_state = state_before_pm; 6508 cv_broadcast(&un->un_suspend_cv); 6509 mutex_exit(SD_MUTEX(un)); 6510 6511 SD_TRACE(SD_LOG_IO_PM, un, "sdpower: exit, " 6512 "trans check Failed, not ok to power cycle.\n"); 6513 6514 goto sdpower_failed; 6515 case -1: 6516 if (got_semaphore_here != 0) { 6517 sema_v(&un->un_semoclose); 6518 } 6519 /* 6520 * On exit put the state back to it's original value 6521 * and broadcast to anyone waiting for the power 6522 * change completion. 6523 */ 6524 mutex_enter(SD_MUTEX(un)); 6525 un->un_state = state_before_pm; 6526 cv_broadcast(&un->un_suspend_cv); 6527 mutex_exit(SD_MUTEX(un)); 6528 SD_TRACE(SD_LOG_IO_PM, un, 6529 "sdpower: exit, trans check command Failed.\n"); 6530 6531 goto sdpower_failed; 6532 } 6533 } 6534 6535 if (level == SD_SPINDLE_OFF) { 6536 /* 6537 * Save the last state... if the STOP FAILS we need it 6538 * for restoring 6539 */ 6540 mutex_enter(SD_MUTEX(un)); 6541 save_state = un->un_last_state; 6542 /* 6543 * There must not be any cmds. getting processed 6544 * in the driver when we get here. Power to the 6545 * device is potentially going off. 6546 */ 6547 ASSERT(un->un_ncmds_in_driver == 0); 6548 mutex_exit(SD_MUTEX(un)); 6549 6550 /* 6551 * For now suspend the device completely before spindle is 6552 * turned off 6553 */ 6554 if ((rval = sd_ddi_pm_suspend(un)) == DDI_FAILURE) { 6555 if (got_semaphore_here != 0) { 6556 sema_v(&un->un_semoclose); 6557 } 6558 /* 6559 * On exit put the state back to it's original value 6560 * and broadcast to anyone waiting for the power 6561 * change completion. 6562 */ 6563 mutex_enter(SD_MUTEX(un)); 6564 un->un_state = state_before_pm; 6565 cv_broadcast(&un->un_suspend_cv); 6566 mutex_exit(SD_MUTEX(un)); 6567 SD_TRACE(SD_LOG_IO_PM, un, 6568 "sdpower: exit, PM suspend Failed.\n"); 6569 6570 goto sdpower_failed; 6571 } 6572 } 6573 6574 /* 6575 * The transition from SPINDLE_OFF to SPINDLE_ON can happen in open, 6576 * close, or strategy. Dump no long uses this routine, it uses it's 6577 * own code so it can be done in polled mode. 6578 */ 6579 6580 medium_present = TRUE; 6581 6582 /* 6583 * When powering up, issue a TUR in case the device is at unit 6584 * attention. Don't do retries. Bypass the PM layer, otherwise 6585 * a deadlock on un_pm_busy_cv will occur. 
6586 */ 6587 if (level == SD_SPINDLE_ON) { 6588 sval = sd_send_scsi_TEST_UNIT_READY(ssc, 6589 SD_DONT_RETRY_TUR | SD_BYPASS_PM); 6590 if (sval != 0) 6591 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 6592 } 6593 6594 SD_TRACE(SD_LOG_IO_PM, un, "sdpower: sending \'%s\' unit\n", 6595 ((level == SD_SPINDLE_ON) ? "START" : "STOP")); 6596 6597 sval = sd_send_scsi_START_STOP_UNIT(ssc, 6598 ((level == SD_SPINDLE_ON) ? SD_TARGET_START : SD_TARGET_STOP), 6599 SD_PATH_DIRECT); 6600 if (sval != 0) { 6601 if (sval == EIO) 6602 sd_ssc_assessment(ssc, SD_FMT_STATUS_CHECK); 6603 else 6604 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 6605 } 6606 6607 /* Command failed, check for media present. */ 6608 if ((sval == ENXIO) && un->un_f_has_removable_media) { 6609 medium_present = FALSE; 6610 } 6611 6612 /* 6613 * The conditions of interest here are: 6614 * if a spindle off with media present fails, 6615 * then restore the state and return an error. 6616 * else if a spindle on fails, 6617 * then return an error (there's no state to restore). 6618 * In all other cases we setup for the new state 6619 * and return success. 6620 */ 6621 switch (level) { 6622 case SD_SPINDLE_OFF: 6623 if ((medium_present == TRUE) && (sval != 0)) { 6624 /* The stop command from above failed */ 6625 rval = DDI_FAILURE; 6626 /* 6627 * The stop command failed, and we have media 6628 * present. Put the level back by calling the 6629 * sd_pm_resume() and set the state back to 6630 * it's previous value. 6631 */ 6632 (void) sd_ddi_pm_resume(un); 6633 mutex_enter(SD_MUTEX(un)); 6634 un->un_last_state = save_state; 6635 mutex_exit(SD_MUTEX(un)); 6636 break; 6637 } 6638 /* 6639 * The stop command from above succeeded. 6640 */ 6641 if (un->un_f_monitor_media_state) { 6642 /* 6643 * Terminate watch thread in case of removable media 6644 * devices going into low power state. This is as per 6645 * the requirements of pm framework, otherwise commands 6646 * will be generated for the device (through watch 6647 * thread), even when the device is in low power state. 6648 */ 6649 mutex_enter(SD_MUTEX(un)); 6650 un->un_f_watcht_stopped = FALSE; 6651 if (un->un_swr_token != NULL) { 6652 opaque_t temp_token = un->un_swr_token; 6653 un->un_f_watcht_stopped = TRUE; 6654 un->un_swr_token = NULL; 6655 mutex_exit(SD_MUTEX(un)); 6656 (void) scsi_watch_request_terminate(temp_token, 6657 SCSI_WATCH_TERMINATE_ALL_WAIT); 6658 } else { 6659 mutex_exit(SD_MUTEX(un)); 6660 } 6661 } 6662 break; 6663 6664 default: /* The level requested is spindle on... */ 6665 /* 6666 * Legacy behavior: return success on a failed spinup 6667 * if there is no media in the drive. 6668 * Do this by looking at medium_present here. 6669 */ 6670 if ((sval != 0) && medium_present) { 6671 /* The start command from above failed */ 6672 rval = DDI_FAILURE; 6673 break; 6674 } 6675 /* 6676 * The start command from above succeeded 6677 * Resume the devices now that we have 6678 * started the disks 6679 */ 6680 (void) sd_ddi_pm_resume(un); 6681 6682 /* 6683 * Resume the watch thread since it was suspended 6684 * when the device went into low power mode. 
6685 */ 6686 if (un->un_f_monitor_media_state) { 6687 mutex_enter(SD_MUTEX(un)); 6688 if (un->un_f_watcht_stopped == TRUE) { 6689 opaque_t temp_token; 6690 6691 un->un_f_watcht_stopped = FALSE; 6692 mutex_exit(SD_MUTEX(un)); 6693 temp_token = scsi_watch_request_submit( 6694 SD_SCSI_DEVP(un), 6695 sd_check_media_time, 6696 SENSE_LENGTH, sd_media_watch_cb, 6697 (caddr_t)dev); 6698 mutex_enter(SD_MUTEX(un)); 6699 un->un_swr_token = temp_token; 6700 } 6701 mutex_exit(SD_MUTEX(un)); 6702 } 6703 } 6704 if (got_semaphore_here != 0) { 6705 sema_v(&un->un_semoclose); 6706 } 6707 /* 6708 * On exit put the state back to it's original value 6709 * and broadcast to anyone waiting for the power 6710 * change completion. 6711 */ 6712 mutex_enter(SD_MUTEX(un)); 6713 un->un_state = state_before_pm; 6714 cv_broadcast(&un->un_suspend_cv); 6715 mutex_exit(SD_MUTEX(un)); 6716 6717 SD_TRACE(SD_LOG_IO_PM, un, "sdpower: exit, status = 0x%x\n", rval); 6718 6719 sd_ssc_fini(ssc); 6720 return (rval); 6721 6722 sdpower_failed: 6723 6724 sd_ssc_fini(ssc); 6725 return (DDI_FAILURE); 6726 } 6727 6728 6729 6730 /* 6731 * Function: sdattach 6732 * 6733 * Description: Driver's attach(9e) entry point function. 6734 * 6735 * Arguments: devi - opaque device info handle 6736 * cmd - attach type 6737 * 6738 * Return Code: DDI_SUCCESS 6739 * DDI_FAILURE 6740 * 6741 * Context: Kernel thread context 6742 */ 6743 6744 static int 6745 sdattach(dev_info_t *devi, ddi_attach_cmd_t cmd) 6746 { 6747 switch (cmd) { 6748 case DDI_ATTACH: 6749 return (sd_unit_attach(devi)); 6750 case DDI_RESUME: 6751 return (sd_ddi_resume(devi)); 6752 default: 6753 break; 6754 } 6755 return (DDI_FAILURE); 6756 } 6757 6758 6759 /* 6760 * Function: sddetach 6761 * 6762 * Description: Driver's detach(9E) entry point function. 6763 * 6764 * Arguments: devi - opaque device info handle 6765 * cmd - detach type 6766 * 6767 * Return Code: DDI_SUCCESS 6768 * DDI_FAILURE 6769 * 6770 * Context: Kernel thread context 6771 */ 6772 6773 static int 6774 sddetach(dev_info_t *devi, ddi_detach_cmd_t cmd) 6775 { 6776 switch (cmd) { 6777 case DDI_DETACH: 6778 return (sd_unit_detach(devi)); 6779 case DDI_SUSPEND: 6780 return (sd_ddi_suspend(devi)); 6781 default: 6782 break; 6783 } 6784 return (DDI_FAILURE); 6785 } 6786 6787 6788 /* 6789 * Function: sd_sync_with_callback 6790 * 6791 * Description: Prevents sd_unit_attach or sd_unit_detach from freeing the soft 6792 * state while the callback routine is active. 6793 * 6794 * Arguments: un: softstate structure for the instance 6795 * 6796 * Context: Kernel thread context 6797 */ 6798 6799 static void 6800 sd_sync_with_callback(struct sd_lun *un) 6801 { 6802 ASSERT(un != NULL); 6803 6804 mutex_enter(SD_MUTEX(un)); 6805 6806 ASSERT(un->un_in_callback >= 0); 6807 6808 while (un->un_in_callback > 0) { 6809 mutex_exit(SD_MUTEX(un)); 6810 delay(2); 6811 mutex_enter(SD_MUTEX(un)); 6812 } 6813 6814 mutex_exit(SD_MUTEX(un)); 6815 } 6816 6817 /* 6818 * Function: sd_unit_attach 6819 * 6820 * Description: Performs DDI_ATTACH processing for sdattach(). Allocates 6821 * the soft state structure for the device and performs 6822 * all necessary structure and device initializations. 6823 * 6824 * Arguments: devi: the system's dev_info_t for the device. 6825 * 6826 * Return Code: DDI_SUCCESS if attach is successful. 6827 * DDI_FAILURE if any part of the attach fails. 6828 * 6829 * Context: Called at attach(9e) time for the DDI_ATTACH flag. 6830 * Kernel thread context only. Can sleep. 
6831 */ 6832 6833 static int 6834 sd_unit_attach(dev_info_t *devi) 6835 { 6836 struct scsi_device *devp; 6837 struct sd_lun *un; 6838 char *variantp; 6839 int reservation_flag = SD_TARGET_IS_UNRESERVED; 6840 int instance; 6841 int rval; 6842 int wc_enabled; 6843 int tgt; 6844 uint64_t capacity; 6845 uint_t lbasize = 0; 6846 dev_info_t *pdip = ddi_get_parent(devi); 6847 int offbyone = 0; 6848 int geom_label_valid = 0; 6849 sd_ssc_t *ssc; 6850 int status; 6851 struct sd_fm_internal *sfip = NULL; 6852 int max_xfer_size; 6853 6854 /* 6855 * Retrieve the target driver's private data area. This was set 6856 * up by the HBA. 6857 */ 6858 devp = ddi_get_driver_private(devi); 6859 6860 /* 6861 * Retrieve the target ID of the device. 6862 */ 6863 tgt = ddi_prop_get_int(DDI_DEV_T_ANY, devi, DDI_PROP_DONTPASS, 6864 SCSI_ADDR_PROP_TARGET, -1); 6865 6866 /* 6867 * Since we have no idea what state things were left in by the last 6868 * user of the device, set up some 'default' settings, ie. turn 'em 6869 * off. The scsi_ifsetcap calls force re-negotiations with the drive. 6870 * Do this before the scsi_probe, which sends an inquiry. 6871 * This is a fix for bug (4430280). 6872 * Of special importance is wide-xfer. The drive could have been left 6873 * in wide transfer mode by the last driver to communicate with it, 6874 * this includes us. If that's the case, and if the following is not 6875 * setup properly or we don't re-negotiate with the drive prior to 6876 * transferring data to/from the drive, it causes bus parity errors, 6877 * data overruns, and unexpected interrupts. This first occurred when 6878 * the fix for bug (4378686) was made. 6879 */ 6880 (void) scsi_ifsetcap(&devp->sd_address, "lun-reset", 0, 1); 6881 (void) scsi_ifsetcap(&devp->sd_address, "wide-xfer", 0, 1); 6882 (void) scsi_ifsetcap(&devp->sd_address, "auto-rqsense", 0, 1); 6883 6884 /* 6885 * Currently, scsi_ifsetcap sets tagged-qing capability for all LUNs 6886 * on a target. Setting it per lun instance actually sets the 6887 * capability of this target, which affects those luns already 6888 * attached on the same target. So during attach, we can only disable 6889 * this capability only when no other lun has been attached on this 6890 * target. By doing this, we assume a target has the same tagged-qing 6891 * capability for every lun. The condition can be removed when HBA 6892 * is changed to support per lun based tagged-qing capability. 6893 */ 6894 if (sd_scsi_get_target_lun_count(pdip, tgt) < 1) { 6895 (void) scsi_ifsetcap(&devp->sd_address, "tagged-qing", 0, 1); 6896 } 6897 6898 /* 6899 * Use scsi_probe() to issue an INQUIRY command to the device. 6900 * This call will allocate and fill in the scsi_inquiry structure 6901 * and point the sd_inq member of the scsi_device structure to it. 6902 * If the attach succeeds, then this memory will not be de-allocated 6903 * (via scsi_unprobe()) until the instance is detached. 6904 */ 6905 if (scsi_probe(devp, SLEEP_FUNC) != SCSIPROBE_EXISTS) { 6906 goto probe_failed; 6907 } 6908 6909 /* 6910 * Check the device type as specified in the inquiry data and 6911 * claim it if it is of a type that we support. 6912 */ 6913 switch (devp->sd_inq->inq_dtype) { 6914 case DTYPE_DIRECT: 6915 break; 6916 case DTYPE_RODIRECT: 6917 break; 6918 case DTYPE_OPTICAL: 6919 break; 6920 case DTYPE_NOTPRESENT: 6921 default: 6922 /* Unsupported device type; fail the attach. */ 6923 goto probe_failed; 6924 } 6925 6926 /* 6927 * Allocate the soft state structure for this unit. 
6928 * 6929 * We rely upon this memory being set to all zeroes by 6930 * ddi_soft_state_zalloc(). We assume that any member of the 6931 * soft state structure that is not explicitly initialized by 6932 * this routine will have a value of zero. 6933 */ 6934 instance = ddi_get_instance(devp->sd_dev); 6935 #ifndef XPV_HVM_DRIVER 6936 if (ddi_soft_state_zalloc(sd_state, instance) != DDI_SUCCESS) { 6937 goto probe_failed; 6938 } 6939 #endif /* !XPV_HVM_DRIVER */ 6940 6941 /* 6942 * Retrieve a pointer to the newly-allocated soft state. 6943 * 6944 * This should NEVER fail if the ddi_soft_state_zalloc() call above 6945 * was successful, unless something has gone horribly wrong and the 6946 * ddi's soft state internals are corrupt (in which case it is 6947 * probably better to halt here than just fail the attach....) 6948 */ 6949 if ((un = ddi_get_soft_state(sd_state, instance)) == NULL) { 6950 panic("sd_unit_attach: NULL soft state on instance:0x%x", 6951 instance); 6952 /*NOTREACHED*/ 6953 } 6954 6955 /* 6956 * Link the back ptr of the driver soft state to the scsi_device 6957 * struct for this lun. 6958 * Save a pointer to the softstate in the driver-private area of 6959 * the scsi_device struct. 6960 * Note: We cannot call SD_INFO, SD_TRACE, SD_ERROR, or SD_DIAG until 6961 * we first set un->un_sd below. 6962 */ 6963 un->un_sd = devp; 6964 devp->sd_private = (opaque_t)un; 6965 6966 /* 6967 * The following must be after devp is stored in the soft state struct. 6968 */ 6969 #ifdef SDDEBUG 6970 SD_TRACE(SD_LOG_ATTACH_DETACH, un, 6971 "%s_unit_attach: un:0x%p instance:%d\n", 6972 ddi_driver_name(devi), un, instance); 6973 #endif 6974 6975 /* 6976 * Set up the device type and node type (for the minor nodes). 6977 * By default we assume that the device can at least support the 6978 * Common Command Set. Call it a CD-ROM if it reports itself 6979 * as a RODIRECT device. 6980 */ 6981 switch (devp->sd_inq->inq_dtype) { 6982 case DTYPE_RODIRECT: 6983 un->un_node_type = DDI_NT_CD_CHAN; 6984 un->un_ctype = CTYPE_CDROM; 6985 break; 6986 case DTYPE_OPTICAL: 6987 un->un_node_type = DDI_NT_BLOCK_CHAN; 6988 un->un_ctype = CTYPE_ROD; 6989 break; 6990 default: 6991 un->un_node_type = DDI_NT_BLOCK_CHAN; 6992 un->un_ctype = CTYPE_CCS; 6993 break; 6994 } 6995 6996 /* 6997 * Try to read the interconnect type from the HBA. 6998 * 6999 * Note: This driver is currently compiled as two binaries, a parallel 7000 * scsi version (sd) and a fibre channel version (ssd). All functional 7001 * differences are determined at compile time. In the future a single 7002 * binary will be provided and the interconnect type will be used to 7003 * differentiate between fibre and parallel scsi behaviors. At that time 7004 * it will be necessary for all fibre channel HBAs to support this 7005 * property. 
7006 * 7007 * set un_f_is_fiber to TRUE ( default fiber ) 7008 */ 7009 un->un_f_is_fibre = TRUE; 7010 switch (scsi_ifgetcap(SD_ADDRESS(un), "interconnect-type", -1)) { 7011 case INTERCONNECT_SSA: 7012 un->un_interconnect_type = SD_INTERCONNECT_SSA; 7013 SD_INFO(SD_LOG_ATTACH_DETACH, un, 7014 "sd_unit_attach: un:0x%p SD_INTERCONNECT_SSA\n", un); 7015 break; 7016 case INTERCONNECT_PARALLEL: 7017 un->un_f_is_fibre = FALSE; 7018 un->un_interconnect_type = SD_INTERCONNECT_PARALLEL; 7019 SD_INFO(SD_LOG_ATTACH_DETACH, un, 7020 "sd_unit_attach: un:0x%p SD_INTERCONNECT_PARALLEL\n", un); 7021 break; 7022 case INTERCONNECT_SATA: 7023 un->un_f_is_fibre = FALSE; 7024 un->un_interconnect_type = SD_INTERCONNECT_SATA; 7025 SD_INFO(SD_LOG_ATTACH_DETACH, un, 7026 "sd_unit_attach: un:0x%p SD_INTERCONNECT_SATA\n", un); 7027 break; 7028 case INTERCONNECT_FIBRE: 7029 un->un_interconnect_type = SD_INTERCONNECT_FIBRE; 7030 SD_INFO(SD_LOG_ATTACH_DETACH, un, 7031 "sd_unit_attach: un:0x%p SD_INTERCONNECT_FIBRE\n", un); 7032 break; 7033 case INTERCONNECT_FABRIC: 7034 un->un_interconnect_type = SD_INTERCONNECT_FABRIC; 7035 un->un_node_type = DDI_NT_BLOCK_FABRIC; 7036 SD_INFO(SD_LOG_ATTACH_DETACH, un, 7037 "sd_unit_attach: un:0x%p SD_INTERCONNECT_FABRIC\n", un); 7038 break; 7039 default: 7040 #ifdef SD_DEFAULT_INTERCONNECT_TYPE 7041 /* 7042 * The HBA does not support the "interconnect-type" property 7043 * (or did not provide a recognized type). 7044 * 7045 * Note: This will be obsoleted when a single fibre channel 7046 * and parallel scsi driver is delivered. In the meantime the 7047 * interconnect type will be set to the platform default.If that 7048 * type is not parallel SCSI, it means that we should be 7049 * assuming "ssd" semantics. However, here this also means that 7050 * the FC HBA is not supporting the "interconnect-type" property 7051 * like we expect it to, so log this occurrence. 7052 */ 7053 un->un_interconnect_type = SD_DEFAULT_INTERCONNECT_TYPE; 7054 if (!SD_IS_PARALLEL_SCSI(un)) { 7055 SD_INFO(SD_LOG_ATTACH_DETACH, un, 7056 "sd_unit_attach: un:0x%p Assuming " 7057 "INTERCONNECT_FIBRE\n", un); 7058 } else { 7059 SD_INFO(SD_LOG_ATTACH_DETACH, un, 7060 "sd_unit_attach: un:0x%p Assuming " 7061 "INTERCONNECT_PARALLEL\n", un); 7062 un->un_f_is_fibre = FALSE; 7063 } 7064 #else 7065 /* 7066 * Note: This source will be implemented when a single fibre 7067 * channel and parallel scsi driver is delivered. The default 7068 * will be to assume that if a device does not support the 7069 * "interconnect-type" property it is a parallel SCSI HBA and 7070 * we will set the interconnect type for parallel scsi. 7071 */ 7072 un->un_interconnect_type = SD_INTERCONNECT_PARALLEL; 7073 un->un_f_is_fibre = FALSE; 7074 #endif 7075 break; 7076 } 7077 7078 if (un->un_f_is_fibre == TRUE) { 7079 if (scsi_ifgetcap(SD_ADDRESS(un), "scsi-version", 1) == 7080 SCSI_VERSION_3) { 7081 switch (un->un_interconnect_type) { 7082 case SD_INTERCONNECT_FIBRE: 7083 case SD_INTERCONNECT_SSA: 7084 un->un_node_type = DDI_NT_BLOCK_WWN; 7085 break; 7086 default: 7087 break; 7088 } 7089 } 7090 } 7091 7092 /* 7093 * Initialize the Request Sense command for the target 7094 */ 7095 if (sd_alloc_rqs(devp, un) != DDI_SUCCESS) { 7096 goto alloc_rqs_failed; 7097 } 7098 7099 /* 7100 * Set un_retry_count with SD_RETRY_COUNT, this is ok for Sparc 7101 * with separate binary for sd and ssd. 7102 * 7103 * x86 has 1 binary, un_retry_count is set base on connection type. 7104 * The hardcoded values will go away when Sparc uses 1 binary 7105 * for sd and ssd. 

	/*
	 * Initialize the Request Sense command for the target
	 */
	if (sd_alloc_rqs(devp, un) != DDI_SUCCESS) {
		goto alloc_rqs_failed;
	}

	/*
	 * Set un_retry_count with SD_RETRY_COUNT, this is ok for Sparc
	 * with separate binary for sd and ssd.
	 *
	 * x86 has 1 binary, un_retry_count is set based on connection type.
	 * The hardcoded values will go away when Sparc uses 1 binary
	 * for sd and ssd. These hardcoded values need to match
	 * SD_RETRY_COUNT in sddef.h.
	 * The value used is based on interconnect type:
	 * fibre = 3, parallel = 5.
	 */
#if defined(__i386) || defined(__amd64)
	un->un_retry_count = un->un_f_is_fibre ? 3 : 5;
#else
	un->un_retry_count = SD_RETRY_COUNT;
#endif

	/*
	 * Set the per disk retry count to the default number of retries
	 * for disks and CDROMs. This value can be overridden by the
	 * disk property list or an entry in sd.conf.
	 */
	un->un_notready_retry_count =
	    ISCD(un) ? CD_NOT_READY_RETRY_COUNT(un)
	    : DISK_NOT_READY_RETRY_COUNT(un);

	/*
	 * Set the busy retry count to the default value of un_retry_count.
	 * This can be overridden by entries in sd.conf or the device
	 * config table.
	 */
	un->un_busy_retry_count = un->un_retry_count;

	/*
	 * Init the reset threshold for retries. This number determines
	 * how many retries must be performed before a reset can be issued
	 * (for certain error conditions). This can be overridden by entries
	 * in sd.conf or the device config table.
	 */
	un->un_reset_retry_count = (un->un_retry_count / 2);

	/*
	 * Set the victim_retry_count to the default un_retry_count
	 */
	un->un_victim_retry_count = (2 * un->un_retry_count);

	/*
	 * Set the reservation release timeout to the default value of
	 * 5 seconds. This can be overridden by entries in ssd.conf or the
	 * device config table.
	 */
	un->un_reserve_release_time = 5;

	/*
	 * Set up the default maximum transfer size. Note that this may
	 * get updated later in the attach, when setting up default wide
	 * operations for disks.
	 */
#if defined(__i386) || defined(__amd64)
	un->un_max_xfer_size = (uint_t)SD_DEFAULT_MAX_XFER_SIZE;
	un->un_partial_dma_supported = 1;
#else
	un->un_max_xfer_size = (uint_t)maxphys;
#endif

	/*
	 * Get "allow bus device reset" property (defaults to "enabled" if
	 * the property was not defined). This is to disable bus resets for
	 * certain kinds of error recovery. Note: In the future when a
	 * run-time fibre check is available the soft state flag should
	 * default to enabled.
	 */
	if (un->un_f_is_fibre == TRUE) {
		un->un_f_allow_bus_device_reset = TRUE;
	} else {
		if (ddi_getprop(DDI_DEV_T_ANY, devi, DDI_PROP_DONTPASS,
		    "allow-bus-device-reset", 1) != 0) {
			un->un_f_allow_bus_device_reset = TRUE;
			SD_INFO(SD_LOG_ATTACH_DETACH, un,
			    "sd_unit_attach: un:0x%p Bus device reset "
			    "enabled\n", un);
		} else {
			un->un_f_allow_bus_device_reset = FALSE;
			SD_INFO(SD_LOG_ATTACH_DETACH, un,
			    "sd_unit_attach: un:0x%p Bus device reset "
			    "disabled\n", un);
		}
	}

	/*
	 * Check if this is an ATAPI device. ATAPI devices use Group 1
	 * Read/Write commands and Group 2 Mode Sense/Select commands.
	 *
	 * Note: The "obsolete" way of doing this is to check for the "atapi"
	 * property. The new "variant" property with a value of "atapi" has
	 * been introduced so that future 'variants' of standard SCSI
	 * behavior (like atapi) could be specified by the underlying HBA
	 * drivers by supplying a new value for the "variant" property,
	 * instead of having to define a new property.
	 */
	if (ddi_prop_get_int(DDI_DEV_T_ANY, devi, 0, "atapi", -1) != -1) {
		un->un_f_cfg_is_atapi = TRUE;
		SD_INFO(SD_LOG_ATTACH_DETACH, un,
		    "sd_unit_attach: un:0x%p Atapi device\n", un);
	}
	if (ddi_prop_lookup_string(DDI_DEV_T_ANY, devi, 0, "variant",
	    &variantp) == DDI_PROP_SUCCESS) {
		if (strcmp(variantp, "atapi") == 0) {
			un->un_f_cfg_is_atapi = TRUE;
			SD_INFO(SD_LOG_ATTACH_DETACH, un,
			    "sd_unit_attach: un:0x%p Atapi device\n", un);
		}
		ddi_prop_free(variantp);
	}
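
	/*
	 * Illustrative sketch (not part of the original source): the HBA
	 * side of the "variant" property checked above. An HBA nexus driver
	 * could tag an ATAPI child node during tran_tgt_init(9E) roughly as
	 * follows; the child_dip argument name is hypothetical.
	 */
#if 0
	(void) ndi_prop_update_string(DDI_DEV_T_NONE, child_dip,
	    "variant", "atapi");
#endif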

	un->un_cmd_timeout = SD_IO_TIME;

	un->un_busy_timeout = SD_BSY_TIMEOUT;

	/* Info on current states, statuses, etc. (Updated frequently) */
	un->un_state = SD_STATE_NORMAL;
	un->un_last_state = SD_STATE_NORMAL;

	/* Control & status info for command throttling */
	un->un_throttle = sd_max_throttle;
	un->un_saved_throttle = sd_max_throttle;
	un->un_min_throttle = sd_min_throttle;

	if (un->un_f_is_fibre == TRUE) {
		un->un_f_use_adaptive_throttle = TRUE;
	} else {
		un->un_f_use_adaptive_throttle = FALSE;
	}

	/* Removable media support. */
	cv_init(&un->un_state_cv, NULL, CV_DRIVER, NULL);
	un->un_mediastate = DKIO_NONE;
	un->un_specified_mediastate = DKIO_NONE;

	/* CVs for suspend/resume (PM or DR) */
	cv_init(&un->un_suspend_cv, NULL, CV_DRIVER, NULL);
	cv_init(&un->un_disk_busy_cv, NULL, CV_DRIVER, NULL);

	/* Power management support. */
	un->un_power_level = SD_SPINDLE_UNINIT;

	cv_init(&un->un_wcc_cv, NULL, CV_DRIVER, NULL);
	un->un_f_wcc_inprog = 0;

	/*
	 * The open/close semaphore is used to serialize threads executing
	 * in the driver's open & close entry point routines for a given
	 * instance.
	 */
	(void) sema_init(&un->un_semoclose, 1, NULL, SEMA_DRIVER, NULL);

	/*
	 * The conf file entry and softstate variable are forceful overrides,
	 * meaning a non-zero value must be entered to change the default.
	 */
	un->un_f_disksort_disabled = FALSE;

	/*
	 * Retrieve the properties from the static driver table or the driver
	 * configuration file (.conf) for this unit and update the soft state
	 * for the device as needed for the indicated properties.
	 * Note: the property configuration needs to occur here as some of the
	 * following routines may have dependencies on soft state flags set
	 * as part of the driver property configuration.
	 */
	sd_read_unit_properties(un);
	SD_TRACE(SD_LOG_ATTACH_DETACH, un,
	    "sd_unit_attach: un:0x%p property configuration complete.\n", un);

	/*
	 * A device is treated as hotpluggable only if it has the
	 * "hotpluggable" property; otherwise it is regarded as
	 * non-hotpluggable.
	 */
	if (ddi_prop_get_int(DDI_DEV_T_ANY, devi, 0, "hotpluggable",
	    -1) != -1) {
		un->un_f_is_hotpluggable = TRUE;
	}

	/*
	 * Set the unit's attributes (flags) according to "hotpluggable" and
	 * the RMB bit in the INQUIRY data.
	 */
	sd_set_unit_attributes(un, devi);

	/*
	 * By default, we mark the capacity, lbasize, and geometry
	 * as invalid. Only if we successfully read a valid capacity
	 * will we update the un_blockcount and un_tgt_blocksize with the
	 * valid values (the geometry will be validated later).
7294 */ 7295 un->un_f_blockcount_is_valid = FALSE; 7296 un->un_f_tgt_blocksize_is_valid = FALSE; 7297 7298 /* 7299 * Use DEV_BSIZE and DEV_BSHIFT as defaults, until we can determine 7300 * otherwise. 7301 */ 7302 un->un_tgt_blocksize = un->un_sys_blocksize = DEV_BSIZE; 7303 un->un_blockcount = 0; 7304 7305 /* 7306 * Set up the per-instance info needed to determine the correct 7307 * CDBs and other info for issuing commands to the target. 7308 */ 7309 sd_init_cdb_limits(un); 7310 7311 /* 7312 * Set up the IO chains to use, based upon the target type. 7313 */ 7314 if (un->un_f_non_devbsize_supported) { 7315 un->un_buf_chain_type = SD_CHAIN_INFO_RMMEDIA; 7316 } else { 7317 un->un_buf_chain_type = SD_CHAIN_INFO_DISK; 7318 } 7319 un->un_uscsi_chain_type = SD_CHAIN_INFO_USCSI_CMD; 7320 un->un_direct_chain_type = SD_CHAIN_INFO_DIRECT_CMD; 7321 un->un_priority_chain_type = SD_CHAIN_INFO_PRIORITY_CMD; 7322 7323 un->un_xbuf_attr = ddi_xbuf_attr_create(sizeof (struct sd_xbuf), 7324 sd_xbuf_strategy, un, sd_xbuf_active_limit, sd_xbuf_reserve_limit, 7325 ddi_driver_major(devi), DDI_XBUF_QTHREAD_DRIVER); 7326 ddi_xbuf_attr_register_devinfo(un->un_xbuf_attr, devi); 7327 7328 7329 if (ISCD(un)) { 7330 un->un_additional_codes = sd_additional_codes; 7331 } else { 7332 un->un_additional_codes = NULL; 7333 } 7334 7335 /* 7336 * Create the kstats here so they can be available for attach-time 7337 * routines that send commands to the unit (either polled or via 7338 * sd_send_scsi_cmd). 7339 * 7340 * Note: This is a critical sequence that needs to be maintained: 7341 * 1) Instantiate the kstats here, before any routines using the 7342 * iopath (i.e. sd_send_scsi_cmd). 7343 * 2) Instantiate and initialize the partition stats 7344 * (sd_set_pstats). 7345 * 3) Initialize the error stats (sd_set_errstats), following 7346 * sd_validate_geometry(),sd_register_devid(), 7347 * and sd_cache_control(). 7348 */ 7349 7350 un->un_stats = kstat_create(sd_label, instance, 7351 NULL, "disk", KSTAT_TYPE_IO, 1, KSTAT_FLAG_PERSISTENT); 7352 if (un->un_stats != NULL) { 7353 un->un_stats->ks_lock = SD_MUTEX(un); 7354 kstat_install(un->un_stats); 7355 } 7356 SD_TRACE(SD_LOG_ATTACH_DETACH, un, 7357 "sd_unit_attach: un:0x%p un_stats created\n", un); 7358 7359 sd_create_errstats(un, instance); 7360 if (un->un_errstats == NULL) { 7361 goto create_errstats_failed; 7362 } 7363 SD_TRACE(SD_LOG_ATTACH_DETACH, un, 7364 "sd_unit_attach: un:0x%p errstats created\n", un); 7365 7366 /* 7367 * The following if/else code was relocated here from below as part 7368 * of the fix for bug (4430280). However with the default setup added 7369 * on entry to this routine, it's no longer absolutely necessary for 7370 * this to be before the call to sd_spin_up_unit. 
7371 */ 7372 if (SD_IS_PARALLEL_SCSI(un) || SD_IS_SERIAL(un)) { 7373 int tq_trigger_flag = (((devp->sd_inq->inq_ansi == 4) || 7374 (devp->sd_inq->inq_ansi == 5)) && 7375 devp->sd_inq->inq_bque) || devp->sd_inq->inq_cmdque; 7376 7377 /* 7378 * If tagged queueing is supported by the target 7379 * and by the host adapter then we will enable it 7380 */ 7381 un->un_tagflags = 0; 7382 if ((devp->sd_inq->inq_rdf == RDF_SCSI2) && tq_trigger_flag && 7383 (un->un_f_arq_enabled == TRUE)) { 7384 if (scsi_ifsetcap(SD_ADDRESS(un), "tagged-qing", 7385 1, 1) == 1) { 7386 un->un_tagflags = FLAG_STAG; 7387 SD_INFO(SD_LOG_ATTACH_DETACH, un, 7388 "sd_unit_attach: un:0x%p tag queueing " 7389 "enabled\n", un); 7390 } else if (scsi_ifgetcap(SD_ADDRESS(un), 7391 "untagged-qing", 0) == 1) { 7392 un->un_f_opt_queueing = TRUE; 7393 un->un_saved_throttle = un->un_throttle = 7394 min(un->un_throttle, 3); 7395 } else { 7396 un->un_f_opt_queueing = FALSE; 7397 un->un_saved_throttle = un->un_throttle = 1; 7398 } 7399 } else if ((scsi_ifgetcap(SD_ADDRESS(un), "untagged-qing", 0) 7400 == 1) && (un->un_f_arq_enabled == TRUE)) { 7401 /* The Host Adapter supports internal queueing. */ 7402 un->un_f_opt_queueing = TRUE; 7403 un->un_saved_throttle = un->un_throttle = 7404 min(un->un_throttle, 3); 7405 } else { 7406 un->un_f_opt_queueing = FALSE; 7407 un->un_saved_throttle = un->un_throttle = 1; 7408 SD_INFO(SD_LOG_ATTACH_DETACH, un, 7409 "sd_unit_attach: un:0x%p no tag queueing\n", un); 7410 } 7411 7412 /* 7413 * Enable large transfers for SATA/SAS drives 7414 */ 7415 if (SD_IS_SERIAL(un)) { 7416 un->un_max_xfer_size = 7417 ddi_getprop(DDI_DEV_T_ANY, devi, 0, 7418 sd_max_xfer_size, SD_MAX_XFER_SIZE); 7419 SD_INFO(SD_LOG_ATTACH_DETACH, un, 7420 "sd_unit_attach: un:0x%p max transfer " 7421 "size=0x%x\n", un, un->un_max_xfer_size); 7422 7423 } 7424 7425 /* Setup or tear down default wide operations for disks */ 7426 7427 /* 7428 * Note: Legacy: it may be possible for both "sd_max_xfer_size" 7429 * and "ssd_max_xfer_size" to exist simultaneously on the same 7430 * system and be set to different values. In the future this 7431 * code may need to be updated when the ssd module is 7432 * obsoleted and removed from the system. (4299588) 7433 */ 7434 if (SD_IS_PARALLEL_SCSI(un) && 7435 (devp->sd_inq->inq_rdf == RDF_SCSI2) && 7436 (devp->sd_inq->inq_wbus16 || devp->sd_inq->inq_wbus32)) { 7437 if (scsi_ifsetcap(SD_ADDRESS(un), "wide-xfer", 7438 1, 1) == 1) { 7439 SD_INFO(SD_LOG_ATTACH_DETACH, un, 7440 "sd_unit_attach: un:0x%p Wide Transfer " 7441 "enabled\n", un); 7442 } 7443 7444 /* 7445 * If tagged queuing has also been enabled, then 7446 * enable large xfers 7447 */ 7448 if (un->un_saved_throttle == sd_max_throttle) { 7449 un->un_max_xfer_size = 7450 ddi_getprop(DDI_DEV_T_ANY, devi, 0, 7451 sd_max_xfer_size, SD_MAX_XFER_SIZE); 7452 SD_INFO(SD_LOG_ATTACH_DETACH, un, 7453 "sd_unit_attach: un:0x%p max transfer " 7454 "size=0x%x\n", un, un->un_max_xfer_size); 7455 } 7456 } else { 7457 if (scsi_ifsetcap(SD_ADDRESS(un), "wide-xfer", 7458 0, 1) == 1) { 7459 SD_INFO(SD_LOG_ATTACH_DETACH, un, 7460 "sd_unit_attach: un:0x%p " 7461 "Wide Transfer disabled\n", un); 7462 } 7463 } 7464 } else { 7465 un->un_tagflags = FLAG_STAG; 7466 un->un_max_xfer_size = ddi_getprop(DDI_DEV_T_ANY, 7467 devi, 0, sd_max_xfer_size, SD_MAX_XFER_SIZE); 7468 } 7469 7470 /* 7471 * If this target supports LUN reset, try to enable it. 
 */
	if (un->un_f_lun_reset_enabled) {
		if (scsi_ifsetcap(SD_ADDRESS(un), "lun-reset", 1, 1) == 1) {
			SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_unit_attach: "
			    "un:0x%p lun_reset capability set\n", un);
		} else {
			SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_unit_attach: "
			    "un:0x%p lun-reset capability not set\n", un);
		}
	}

	/*
	 * Adjust the maximum transfer size. This is to fix
	 * the problem of partial DMA support on SPARC. Some
	 * HBA drivers, like aac, have a very small dma_attr_maxxfer
	 * size, which requires partial DMA support on SPARC.
	 * In the future the SPARC pci nexus driver may solve
	 * the problem instead of this fix.
	 */
	max_xfer_size = scsi_ifgetcap(SD_ADDRESS(un), "dma-max", 1);
	if ((max_xfer_size > 0) && (max_xfer_size < un->un_max_xfer_size)) {
		/* We need DMA partial even on sparc to ensure sddump() works */
		un->un_max_xfer_size = max_xfer_size;
		if (un->un_partial_dma_supported == 0)
			un->un_partial_dma_supported = 1;
	}
	if (ddi_prop_get_int(DDI_DEV_T_ANY, SD_DEVINFO(un),
	    DDI_PROP_DONTPASS, "buf_break", 0) == 1) {
		if (ddi_xbuf_attr_setup_brk(un->un_xbuf_attr,
		    un->un_max_xfer_size) == 1) {
			un->un_buf_breakup_supported = 1;
			SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_unit_attach: "
			    "un:0x%p Buf breakup enabled\n", un);
		}
	}

	/*
	 * Set PKT_DMA_PARTIAL flag.
	 */
	if (un->un_partial_dma_supported == 1) {
		un->un_pkt_flags = PKT_DMA_PARTIAL;
	} else {
		un->un_pkt_flags = 0;
	}

	/* Initialize sd_ssc_t for internal uscsi commands */
	ssc = sd_ssc_init(un);
	scsi_fm_init(devp);

	/*
	 * Allocate memory for SCSI FMA stuffs.
	 */
	un->un_fm_private =
	    kmem_zalloc(sizeof (struct sd_fm_internal), KM_SLEEP);
	sfip = (struct sd_fm_internal *)un->un_fm_private;
	sfip->fm_ssc.ssc_uscsi_cmd = &sfip->fm_ucmd;
	sfip->fm_ssc.ssc_uscsi_info = &sfip->fm_uinfo;
	sfip->fm_ssc.ssc_un = un;

	if (ISCD(un) ||
	    un->un_f_has_removable_media ||
	    devp->sd_fm_capable == DDI_FM_NOT_CAPABLE) {
		/*
		 * We don't touch CDROMs or DDI_FM_NOT_CAPABLE devices;
		 * their logging is unchanged.
		 */
		sfip->fm_log_level = SD_FM_LOG_NSUP;
	} else {
		/*
		 * If we get here, this is a non-CDROM, FM-capable device,
		 * and it will not keep the old scsi_log behavior in
		 * /var/adm/messages. Instead, the property "fm-scsi-log"
		 * will control whether the FM telemetry is logged in
		 * /var/adm/messages.
		 */
		int fm_scsi_log;
		fm_scsi_log = ddi_prop_get_int(DDI_DEV_T_ANY, SD_DEVINFO(un),
		    DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, "fm-scsi-log", 0);

		if (fm_scsi_log)
			sfip->fm_log_level = SD_FM_LOG_EREPORT;
		else
			sfip->fm_log_level = SD_FM_LOG_SILENT;
	}
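
	/*
	 * Illustrative note (not part of the original source): the logging
	 * choice above can be driven administratively. For example, a line
	 * of the form
	 *
	 *	fm-scsi-log=1;
	 *
	 * in sd.conf would select SD_FM_LOG_EREPORT rather than
	 * SD_FM_LOG_SILENT for FM-capable, non-removable devices.
	 */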

	/*
	 * At this point in the attach, we have enough info in the
	 * soft state to be able to issue commands to the target.
	 *
	 * All command paths used below MUST issue their commands as
	 * SD_PATH_DIRECT. This is important as intermediate layers
	 * are not all initialized yet (such as PM).
	 */

	/*
	 * Send a TEST UNIT READY command to the device. This should clear
	 * any outstanding UNIT ATTENTION that may be present.
	 *
	 * Note: Don't check for success, just track if there is a
	 * reservation; this is a throw-away command to clear any unit
	 * attentions.
	 *
	 * Note: This MUST be the first command issued to the target during
	 * attach to ensure power on UNIT ATTENTIONS are cleared.
	 * Pass in flag SD_DONT_RETRY_TUR to prevent the long delays
	 * associated with attempts at spinning up a device with no media.
	 */
	status = sd_send_scsi_TEST_UNIT_READY(ssc, SD_DONT_RETRY_TUR);
	if (status != 0) {
		if (status == EACCES)
			reservation_flag = SD_TARGET_IS_RESERVED;
		sd_ssc_assessment(ssc, SD_FMT_IGNORE);
	}

	/*
	 * If the device is NOT a removable media device, attempt to spin
	 * it up (using the START_STOP_UNIT command) and read its capacity
	 * (using the READ CAPACITY command). Note, however, that either
	 * of these could fail and in some cases we would continue with
	 * the attach despite the failure (see below).
	 */
	if (un->un_f_descr_format_supported) {

		switch (sd_spin_up_unit(ssc)) {
		case 0:
			/*
			 * Spin-up was successful; now try to read the
			 * capacity. If successful then save the results
			 * and mark the capacity & lbasize as valid.
			 */
			SD_TRACE(SD_LOG_ATTACH_DETACH, un,
			    "sd_unit_attach: un:0x%p spin-up successful\n",
			    un);

			status = sd_send_scsi_READ_CAPACITY(ssc, &capacity,
			    &lbasize, SD_PATH_DIRECT);

			switch (status) {
			case 0: {
				if (capacity > DK_MAX_BLOCKS) {
#ifdef _LP64
					if ((capacity + 1) >
					    SD_GROUP1_MAX_ADDRESS) {
						/*
						 * Enable descriptor format
						 * sense data so that we can
						 * get 64 bit sense data
						 * fields.
						 */
						sd_enable_descr_sense(ssc);
					}
#else
					/* 32-bit kernels can't handle this */
					scsi_log(SD_DEVINFO(un),
					    sd_label, CE_WARN,
					    "disk has %llu blocks, which "
					    "is too large for a 32-bit "
					    "kernel", capacity);

#if defined(__i386) || defined(__amd64)
					/*
					 * A 1TB disk was treated as
					 * (1T - 512)B in the past, so it
					 * might have a valid VTOC and
					 * Solaris partitions; we have to
					 * allow it to continue to work.
					 */
					if (capacity - 1 > DK_MAX_BLOCKS)
#endif
						goto spinup_failed;
#endif
				}

				/*
				 * Here it's not necessary to check the case
				 * where the capacity of the device is bigger
				 * than what the max hba cdb can support,
				 * because sd_send_scsi_READ_CAPACITY
				 * retrieves the capacity by sending a USCSI
				 * command, which is constrained by the max
				 * hba cdb. Actually,
				 * sd_send_scsi_READ_CAPACITY returns EINVAL
				 * when using a bigger cdb than the required
				 * cdb length; that is handled below in
				 * "case EINVAL".
				 */

				/*
				 * The following relies on
				 * sd_send_scsi_READ_CAPACITY never
				 * returning 0 for capacity and/or lbasize.
				 */
				sd_update_block_info(un, lbasize, capacity);

				SD_INFO(SD_LOG_ATTACH_DETACH, un,
				    "sd_unit_attach: un:0x%p capacity = %ld "
				    "blocks; lbasize= %ld.\n", un,
				    un->un_blockcount, un->un_tgt_blocksize);

				break;
			}
			case EINVAL:
				/*
				 * In the case where the max-cdb-length
				 * property is smaller than the required CDB
				 * length for a SCSI device, a target driver
				 * can fail to attach to that device.
7676 */ 7677 scsi_log(SD_DEVINFO(un), 7678 sd_label, CE_WARN, 7679 "disk capacity is too large " 7680 "for current cdb length"); 7681 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 7682 7683 goto spinup_failed; 7684 case EACCES: 7685 /* 7686 * Should never get here if the spin-up 7687 * succeeded, but code it in anyway. 7688 * From here, just continue with the attach... 7689 */ 7690 SD_INFO(SD_LOG_ATTACH_DETACH, un, 7691 "sd_unit_attach: un:0x%p " 7692 "sd_send_scsi_READ_CAPACITY " 7693 "returned reservation conflict\n", un); 7694 reservation_flag = SD_TARGET_IS_RESERVED; 7695 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 7696 break; 7697 default: 7698 /* 7699 * Likewise, should never get here if the 7700 * spin-up succeeded. Just continue with 7701 * the attach... 7702 */ 7703 if (status == EIO) 7704 sd_ssc_assessment(ssc, 7705 SD_FMT_STATUS_CHECK); 7706 else 7707 sd_ssc_assessment(ssc, 7708 SD_FMT_IGNORE); 7709 break; 7710 } 7711 break; 7712 case EACCES: 7713 /* 7714 * Device is reserved by another host. In this case 7715 * we could not spin it up or read the capacity, but 7716 * we continue with the attach anyway. 7717 */ 7718 SD_INFO(SD_LOG_ATTACH_DETACH, un, 7719 "sd_unit_attach: un:0x%p spin-up reservation " 7720 "conflict.\n", un); 7721 reservation_flag = SD_TARGET_IS_RESERVED; 7722 break; 7723 default: 7724 /* Fail the attach if the spin-up failed. */ 7725 SD_INFO(SD_LOG_ATTACH_DETACH, un, 7726 "sd_unit_attach: un:0x%p spin-up failed.", un); 7727 goto spinup_failed; 7728 } 7729 7730 } 7731 7732 /* 7733 * Check to see if this is a MMC drive 7734 */ 7735 if (ISCD(un)) { 7736 sd_set_mmc_caps(ssc); 7737 } 7738 7739 7740 /* 7741 * Add a zero-length attribute to tell the world we support 7742 * kernel ioctls (for layered drivers) 7743 */ 7744 (void) ddi_prop_create(DDI_DEV_T_NONE, devi, DDI_PROP_CANSLEEP, 7745 DDI_KERNEL_IOCTL, NULL, 0); 7746 7747 /* 7748 * Add a boolean property to tell the world we support 7749 * the B_FAILFAST flag (for layered drivers) 7750 */ 7751 (void) ddi_prop_create(DDI_DEV_T_NONE, devi, DDI_PROP_CANSLEEP, 7752 "ddi-failfast-supported", NULL, 0); 7753 7754 /* 7755 * Initialize power management 7756 */ 7757 mutex_init(&un->un_pm_mutex, NULL, MUTEX_DRIVER, NULL); 7758 cv_init(&un->un_pm_busy_cv, NULL, CV_DRIVER, NULL); 7759 sd_setup_pm(ssc, devi); 7760 if (un->un_f_pm_is_enabled == FALSE) { 7761 /* 7762 * For performance, point to a jump table that does 7763 * not include pm. 7764 * The direct and priority chains don't change with PM. 7765 * 7766 * Note: this is currently done based on individual device 7767 * capabilities. When an interface for determining system 7768 * power enabled state becomes available, or when additional 7769 * layers are added to the command chain, these values will 7770 * have to be re-evaluated for correctness. 7771 */ 7772 if (un->un_f_non_devbsize_supported) { 7773 un->un_buf_chain_type = SD_CHAIN_INFO_RMMEDIA_NO_PM; 7774 } else { 7775 un->un_buf_chain_type = SD_CHAIN_INFO_DISK_NO_PM; 7776 } 7777 un->un_uscsi_chain_type = SD_CHAIN_INFO_USCSI_CMD_NO_PM; 7778 } 7779 7780 /* 7781 * This property is set to 0 by HA software to avoid retries 7782 * on a reserved disk. (The preferred property name is 7783 * "retry-on-reservation-conflict") (1189689) 7784 * 7785 * Note: The use of a global here can have unintended consequences. 
A 7786 * per instance variable is preferable to match the capabilities of 7787 * different underlying hba's (4402600) 7788 */ 7789 sd_retry_on_reservation_conflict = ddi_getprop(DDI_DEV_T_ANY, devi, 7790 DDI_PROP_DONTPASS, "retry-on-reservation-conflict", 7791 sd_retry_on_reservation_conflict); 7792 if (sd_retry_on_reservation_conflict != 0) { 7793 sd_retry_on_reservation_conflict = ddi_getprop(DDI_DEV_T_ANY, 7794 devi, DDI_PROP_DONTPASS, sd_resv_conflict_name, 7795 sd_retry_on_reservation_conflict); 7796 } 7797 7798 /* Set up options for QFULL handling. */ 7799 if ((rval = ddi_getprop(DDI_DEV_T_ANY, devi, 0, 7800 "qfull-retries", -1)) != -1) { 7801 (void) scsi_ifsetcap(SD_ADDRESS(un), "qfull-retries", 7802 rval, 1); 7803 } 7804 if ((rval = ddi_getprop(DDI_DEV_T_ANY, devi, 0, 7805 "qfull-retry-interval", -1)) != -1) { 7806 (void) scsi_ifsetcap(SD_ADDRESS(un), "qfull-retry-interval", 7807 rval, 1); 7808 } 7809 7810 /* 7811 * This just prints a message that announces the existence of the 7812 * device. The message is always printed in the system logfile, but 7813 * only appears on the console if the system is booted with the 7814 * -v (verbose) argument. 7815 */ 7816 ddi_report_dev(devi); 7817 7818 un->un_mediastate = DKIO_NONE; 7819 7820 cmlb_alloc_handle(&un->un_cmlbhandle); 7821 7822 #if defined(__i386) || defined(__amd64) 7823 /* 7824 * On x86, compensate for off-by-1 legacy error 7825 */ 7826 if (!un->un_f_has_removable_media && !un->un_f_is_hotpluggable && 7827 (lbasize == un->un_sys_blocksize)) 7828 offbyone = CMLB_OFF_BY_ONE; 7829 #endif 7830 7831 if (cmlb_attach(devi, &sd_tgops, (int)devp->sd_inq->inq_dtype, 7832 VOID2BOOLEAN(un->un_f_has_removable_media != 0), 7833 VOID2BOOLEAN(un->un_f_is_hotpluggable != 0), 7834 un->un_node_type, offbyone, un->un_cmlbhandle, 7835 (void *)SD_PATH_DIRECT) != 0) { 7836 goto cmlb_attach_failed; 7837 } 7838 7839 7840 /* 7841 * Read and validate the device's geometry (ie, disk label) 7842 * A new unformatted drive will not have a valid geometry, but 7843 * the driver needs to successfully attach to this device so 7844 * the drive can be formatted via ioctls. 7845 */ 7846 geom_label_valid = (cmlb_validate(un->un_cmlbhandle, 0, 7847 (void *)SD_PATH_DIRECT) == 0) ? 1: 0; 7848 7849 mutex_enter(SD_MUTEX(un)); 7850 7851 /* 7852 * Read and initialize the devid for the unit. 7853 */ 7854 if (un->un_f_devid_supported) { 7855 sd_register_devid(ssc, devi, reservation_flag); 7856 } 7857 mutex_exit(SD_MUTEX(un)); 7858 7859 #if (defined(__fibre)) 7860 /* 7861 * Register callbacks for fibre only. You can't do this solely 7862 * on the basis of the devid_type because this is hba specific. 7863 * We need to query our hba capabilities to find out whether to 7864 * register or not. 7865 */ 7866 if (un->un_f_is_fibre) { 7867 if (strcmp(un->un_node_type, DDI_NT_BLOCK_CHAN)) { 7868 sd_init_event_callbacks(un); 7869 SD_TRACE(SD_LOG_ATTACH_DETACH, un, 7870 "sd_unit_attach: un:0x%p event callbacks inserted", 7871 un); 7872 } 7873 } 7874 #endif 7875 7876 if (un->un_f_opt_disable_cache == TRUE) { 7877 /* 7878 * Disable both read cache and write cache. This is 7879 * the historic behavior of the keywords in the config file. 7880 */ 7881 if (sd_cache_control(ssc, SD_CACHE_DISABLE, SD_CACHE_DISABLE) != 7882 0) { 7883 SD_ERROR(SD_LOG_ATTACH_DETACH, un, 7884 "sd_unit_attach: un:0x%p Could not disable " 7885 "caching", un); 7886 goto devid_failed; 7887 } 7888 } 7889 7890 /* 7891 * Check the value of the WCE bit now and 7892 * set un_f_write_cache_enabled accordingly. 
7893 */ 7894 (void) sd_get_write_cache_enabled(ssc, &wc_enabled); 7895 mutex_enter(SD_MUTEX(un)); 7896 un->un_f_write_cache_enabled = (wc_enabled != 0); 7897 mutex_exit(SD_MUTEX(un)); 7898 7899 /* 7900 * Check the value of the NV_SUP bit and set 7901 * un_f_suppress_cache_flush accordingly. 7902 */ 7903 sd_get_nv_sup(ssc); 7904 7905 /* 7906 * Find out what type of reservation this disk supports. 7907 */ 7908 status = sd_send_scsi_PERSISTENT_RESERVE_IN(ssc, SD_READ_KEYS, 0, NULL); 7909 7910 switch (status) { 7911 case 0: 7912 /* 7913 * SCSI-3 reservations are supported. 7914 */ 7915 un->un_reservation_type = SD_SCSI3_RESERVATION; 7916 SD_INFO(SD_LOG_ATTACH_DETACH, un, 7917 "sd_unit_attach: un:0x%p SCSI-3 reservations\n", un); 7918 break; 7919 case ENOTSUP: 7920 /* 7921 * The PERSISTENT RESERVE IN command would not be recognized by 7922 * a SCSI-2 device, so assume the reservation type is SCSI-2. 7923 */ 7924 SD_INFO(SD_LOG_ATTACH_DETACH, un, 7925 "sd_unit_attach: un:0x%p SCSI-2 reservations\n", un); 7926 un->un_reservation_type = SD_SCSI2_RESERVATION; 7927 7928 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 7929 break; 7930 default: 7931 /* 7932 * default to SCSI-3 reservations 7933 */ 7934 SD_INFO(SD_LOG_ATTACH_DETACH, un, 7935 "sd_unit_attach: un:0x%p default SCSI3 reservations\n", un); 7936 un->un_reservation_type = SD_SCSI3_RESERVATION; 7937 7938 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 7939 break; 7940 } 7941 7942 /* 7943 * Set the pstat and error stat values here, so data obtained during the 7944 * previous attach-time routines is available. 7945 * 7946 * Note: This is a critical sequence that needs to be maintained: 7947 * 1) Instantiate the kstats before any routines using the iopath 7948 * (i.e. sd_send_scsi_cmd). 7949 * 2) Initialize the error stats (sd_set_errstats) and partition 7950 * stats (sd_set_pstats)here, following 7951 * cmlb_validate_geometry(), sd_register_devid(), and 7952 * sd_cache_control(). 7953 */ 7954 7955 if (un->un_f_pkstats_enabled && geom_label_valid) { 7956 sd_set_pstats(un); 7957 SD_TRACE(SD_LOG_IO_PARTITION, un, 7958 "sd_unit_attach: un:0x%p pstats created and set\n", un); 7959 } 7960 7961 sd_set_errstats(un); 7962 SD_TRACE(SD_LOG_ATTACH_DETACH, un, 7963 "sd_unit_attach: un:0x%p errstats set\n", un); 7964 7965 7966 /* 7967 * After successfully attaching an instance, we record the information 7968 * of how many luns have been attached on the relative target and 7969 * controller for parallel SCSI. This information is used when sd tries 7970 * to set the tagged queuing capability in HBA. 7971 */ 7972 if (SD_IS_PARALLEL_SCSI(un) && (tgt >= 0) && (tgt < NTARGETS_WIDE)) { 7973 sd_scsi_update_lun_on_target(pdip, tgt, SD_SCSI_LUN_ATTACH); 7974 } 7975 7976 SD_TRACE(SD_LOG_ATTACH_DETACH, un, 7977 "sd_unit_attach: un:0x%p exit success\n", un); 7978 7979 /* Uninitialize sd_ssc_t pointer */ 7980 sd_ssc_fini(ssc); 7981 7982 return (DDI_SUCCESS); 7983 7984 /* 7985 * An error occurred during the attach; clean up & return failure. 7986 */ 7987 7988 devid_failed: 7989 7990 setup_pm_failed: 7991 ddi_remove_minor_node(devi, NULL); 7992 7993 cmlb_attach_failed: 7994 /* 7995 * Cleanup from the scsi_ifsetcap() calls (437868) 7996 */ 7997 (void) scsi_ifsetcap(SD_ADDRESS(un), "lun-reset", 0, 1); 7998 (void) scsi_ifsetcap(SD_ADDRESS(un), "wide-xfer", 0, 1); 7999 8000 /* 8001 * Refer to the comments of setting tagged-qing in the beginning of 8002 * sd_unit_attach. We can only disable tagged queuing when there is 8003 * no lun attached on the target. 
8004 */ 8005 if (sd_scsi_get_target_lun_count(pdip, tgt) < 1) { 8006 (void) scsi_ifsetcap(SD_ADDRESS(un), "tagged-qing", 0, 1); 8007 } 8008 8009 if (un->un_f_is_fibre == FALSE) { 8010 (void) scsi_ifsetcap(SD_ADDRESS(un), "auto-rqsense", 0, 1); 8011 } 8012 8013 spinup_failed: 8014 8015 /* Uninitialize sd_ssc_t pointer */ 8016 sd_ssc_fini(ssc); 8017 8018 mutex_enter(SD_MUTEX(un)); 8019 8020 /* Deallocate SCSI FMA memory spaces */ 8021 kmem_free(un->un_fm_private, sizeof (struct sd_fm_internal)); 8022 8023 /* Cancel callback for SD_PATH_DIRECT_PRIORITY cmd. restart */ 8024 if (un->un_direct_priority_timeid != NULL) { 8025 timeout_id_t temp_id = un->un_direct_priority_timeid; 8026 un->un_direct_priority_timeid = NULL; 8027 mutex_exit(SD_MUTEX(un)); 8028 (void) untimeout(temp_id); 8029 mutex_enter(SD_MUTEX(un)); 8030 } 8031 8032 /* Cancel any pending start/stop timeouts */ 8033 if (un->un_startstop_timeid != NULL) { 8034 timeout_id_t temp_id = un->un_startstop_timeid; 8035 un->un_startstop_timeid = NULL; 8036 mutex_exit(SD_MUTEX(un)); 8037 (void) untimeout(temp_id); 8038 mutex_enter(SD_MUTEX(un)); 8039 } 8040 8041 /* Cancel any pending reset-throttle timeouts */ 8042 if (un->un_reset_throttle_timeid != NULL) { 8043 timeout_id_t temp_id = un->un_reset_throttle_timeid; 8044 un->un_reset_throttle_timeid = NULL; 8045 mutex_exit(SD_MUTEX(un)); 8046 (void) untimeout(temp_id); 8047 mutex_enter(SD_MUTEX(un)); 8048 } 8049 8050 /* Cancel any pending retry timeouts */ 8051 if (un->un_retry_timeid != NULL) { 8052 timeout_id_t temp_id = un->un_retry_timeid; 8053 un->un_retry_timeid = NULL; 8054 mutex_exit(SD_MUTEX(un)); 8055 (void) untimeout(temp_id); 8056 mutex_enter(SD_MUTEX(un)); 8057 } 8058 8059 /* Cancel any pending delayed cv broadcast timeouts */ 8060 if (un->un_dcvb_timeid != NULL) { 8061 timeout_id_t temp_id = un->un_dcvb_timeid; 8062 un->un_dcvb_timeid = NULL; 8063 mutex_exit(SD_MUTEX(un)); 8064 (void) untimeout(temp_id); 8065 mutex_enter(SD_MUTEX(un)); 8066 } 8067 8068 mutex_exit(SD_MUTEX(un)); 8069 8070 /* There should not be any in-progress I/O so ASSERT this check */ 8071 ASSERT(un->un_ncmds_in_transport == 0); 8072 ASSERT(un->un_ncmds_in_driver == 0); 8073 8074 /* Do not free the softstate if the callback routine is active */ 8075 sd_sync_with_callback(un); 8076 8077 /* 8078 * Partition stats apparently are not used with removables. These would 8079 * not have been created during attach, so no need to clean them up... 8080 */ 8081 if (un->un_errstats != NULL) { 8082 kstat_delete(un->un_errstats); 8083 un->un_errstats = NULL; 8084 } 8085 8086 create_errstats_failed: 8087 8088 if (un->un_stats != NULL) { 8089 kstat_delete(un->un_stats); 8090 un->un_stats = NULL; 8091 } 8092 8093 ddi_xbuf_attr_unregister_devinfo(un->un_xbuf_attr, devi); 8094 ddi_xbuf_attr_destroy(un->un_xbuf_attr); 8095 8096 ddi_prop_remove_all(devi); 8097 sema_destroy(&un->un_semoclose); 8098 cv_destroy(&un->un_state_cv); 8099 8100 getrbuf_failed: 8101 8102 sd_free_rqs(un); 8103 8104 alloc_rqs_failed: 8105 8106 devp->sd_private = NULL; 8107 bzero(un, sizeof (struct sd_lun)); /* Clear any stale data! */ 8108 8109 get_softstate_failed: 8110 /* 8111 * Note: the man pages are unclear as to whether or not doing a 8112 * ddi_soft_state_free(sd_state, instance) is the right way to 8113 * clean up after the ddi_soft_state_zalloc() if the subsequent 8114 * ddi_get_soft_state() fails. The implication seems to be 8115 * that the get_soft_state cannot fail if the zalloc succeeds. 
8116 */ 8117 #ifndef XPV_HVM_DRIVER 8118 ddi_soft_state_free(sd_state, instance); 8119 #endif /* !XPV_HVM_DRIVER */ 8120 8121 probe_failed: 8122 scsi_unprobe(devp); 8123 8124 return (DDI_FAILURE); 8125 } 8126 8127 8128 /* 8129 * Function: sd_unit_detach 8130 * 8131 * Description: Performs DDI_DETACH processing for sddetach(). 8132 * 8133 * Return Code: DDI_SUCCESS 8134 * DDI_FAILURE 8135 * 8136 * Context: Kernel thread context 8137 */ 8138 8139 static int 8140 sd_unit_detach(dev_info_t *devi) 8141 { 8142 struct scsi_device *devp; 8143 struct sd_lun *un; 8144 int i; 8145 int tgt; 8146 dev_t dev; 8147 dev_info_t *pdip = ddi_get_parent(devi); 8148 #ifndef XPV_HVM_DRIVER 8149 int instance = ddi_get_instance(devi); 8150 #endif /* !XPV_HVM_DRIVER */ 8151 8152 mutex_enter(&sd_detach_mutex); 8153 8154 /* 8155 * Fail the detach for any of the following: 8156 * - Unable to get the sd_lun struct for the instance 8157 * - A layered driver has an outstanding open on the instance 8158 * - Another thread is already detaching this instance 8159 * - Another thread is currently performing an open 8160 */ 8161 devp = ddi_get_driver_private(devi); 8162 if ((devp == NULL) || 8163 ((un = (struct sd_lun *)devp->sd_private) == NULL) || 8164 (un->un_ncmds_in_driver != 0) || (un->un_layer_count != 0) || 8165 (un->un_detach_count != 0) || (un->un_opens_in_progress != 0)) { 8166 mutex_exit(&sd_detach_mutex); 8167 return (DDI_FAILURE); 8168 } 8169 8170 SD_TRACE(SD_LOG_ATTACH_DETACH, un, "sd_unit_detach: entry 0x%p\n", un); 8171 8172 /* 8173 * Mark this instance as currently in a detach, to inhibit any 8174 * opens from a layered driver. 8175 */ 8176 un->un_detach_count++; 8177 mutex_exit(&sd_detach_mutex); 8178 8179 tgt = ddi_prop_get_int(DDI_DEV_T_ANY, devi, DDI_PROP_DONTPASS, 8180 SCSI_ADDR_PROP_TARGET, -1); 8181 8182 dev = sd_make_device(SD_DEVINFO(un)); 8183 8184 #ifndef lint 8185 _NOTE(COMPETING_THREADS_NOW); 8186 #endif 8187 8188 mutex_enter(SD_MUTEX(un)); 8189 8190 /* 8191 * Fail the detach if there are any outstanding layered 8192 * opens on this device. 8193 */ 8194 for (i = 0; i < NDKMAP; i++) { 8195 if (un->un_ocmap.lyropen[i] != 0) { 8196 goto err_notclosed; 8197 } 8198 } 8199 8200 /* 8201 * Verify there are NO outstanding commands issued to this device. 8202 * ie, un_ncmds_in_transport == 0. 8203 * It's possible to have outstanding commands through the physio 8204 * code path, even though everything's closed. 8205 */ 8206 if ((un->un_ncmds_in_transport != 0) || (un->un_retry_timeid != NULL) || 8207 (un->un_direct_priority_timeid != NULL) || 8208 (un->un_state == SD_STATE_RWAIT)) { 8209 mutex_exit(SD_MUTEX(un)); 8210 SD_ERROR(SD_LOG_ATTACH_DETACH, un, 8211 "sd_dr_detach: Detach failure due to outstanding cmds\n"); 8212 goto err_stillbusy; 8213 } 8214 8215 /* 8216 * If we have the device reserved, release the reservation. 8217 */ 8218 if ((un->un_resvd_status & SD_RESERVE) && 8219 !(un->un_resvd_status & SD_LOST_RESERVE)) { 8220 mutex_exit(SD_MUTEX(un)); 8221 /* 8222 * Note: sd_reserve_release sends a command to the device 8223 * via the sd_ioctlcmd() path, and can sleep. 8224 */ 8225 if (sd_reserve_release(dev, SD_RELEASE) != 0) { 8226 SD_ERROR(SD_LOG_ATTACH_DETACH, un, 8227 "sd_dr_detach: Cannot release reservation \n"); 8228 } 8229 } else { 8230 mutex_exit(SD_MUTEX(un)); 8231 } 8232 8233 /* 8234 * Untimeout any reserve recover, throttle reset, restart unit 8235 * and delayed broadcast timeout threads. Protect the timeout pointer 8236 * from getting nulled by their callback functions. 
8237 */ 8238 mutex_enter(SD_MUTEX(un)); 8239 if (un->un_resvd_timeid != NULL) { 8240 timeout_id_t temp_id = un->un_resvd_timeid; 8241 un->un_resvd_timeid = NULL; 8242 mutex_exit(SD_MUTEX(un)); 8243 (void) untimeout(temp_id); 8244 mutex_enter(SD_MUTEX(un)); 8245 } 8246 8247 if (un->un_reset_throttle_timeid != NULL) { 8248 timeout_id_t temp_id = un->un_reset_throttle_timeid; 8249 un->un_reset_throttle_timeid = NULL; 8250 mutex_exit(SD_MUTEX(un)); 8251 (void) untimeout(temp_id); 8252 mutex_enter(SD_MUTEX(un)); 8253 } 8254 8255 if (un->un_startstop_timeid != NULL) { 8256 timeout_id_t temp_id = un->un_startstop_timeid; 8257 un->un_startstop_timeid = NULL; 8258 mutex_exit(SD_MUTEX(un)); 8259 (void) untimeout(temp_id); 8260 mutex_enter(SD_MUTEX(un)); 8261 } 8262 8263 if (un->un_dcvb_timeid != NULL) { 8264 timeout_id_t temp_id = un->un_dcvb_timeid; 8265 un->un_dcvb_timeid = NULL; 8266 mutex_exit(SD_MUTEX(un)); 8267 (void) untimeout(temp_id); 8268 } else { 8269 mutex_exit(SD_MUTEX(un)); 8270 } 8271 8272 /* Remove any pending reservation reclaim requests for this device */ 8273 sd_rmv_resv_reclaim_req(dev); 8274 8275 mutex_enter(SD_MUTEX(un)); 8276 8277 /* Cancel any pending callbacks for SD_PATH_DIRECT_PRIORITY cmd. */ 8278 if (un->un_direct_priority_timeid != NULL) { 8279 timeout_id_t temp_id = un->un_direct_priority_timeid; 8280 un->un_direct_priority_timeid = NULL; 8281 mutex_exit(SD_MUTEX(un)); 8282 (void) untimeout(temp_id); 8283 mutex_enter(SD_MUTEX(un)); 8284 } 8285 8286 /* Cancel any active multi-host disk watch thread requests */ 8287 if (un->un_mhd_token != NULL) { 8288 mutex_exit(SD_MUTEX(un)); 8289 _NOTE(DATA_READABLE_WITHOUT_LOCK(sd_lun::un_mhd_token)); 8290 if (scsi_watch_request_terminate(un->un_mhd_token, 8291 SCSI_WATCH_TERMINATE_NOWAIT)) { 8292 SD_ERROR(SD_LOG_ATTACH_DETACH, un, 8293 "sd_dr_detach: Cannot cancel mhd watch request\n"); 8294 /* 8295 * Note: We are returning here after having removed 8296 * some driver timeouts above. This is consistent with 8297 * the legacy implementation but perhaps the watch 8298 * terminate call should be made with the wait flag set. 8299 */ 8300 goto err_stillbusy; 8301 } 8302 mutex_enter(SD_MUTEX(un)); 8303 un->un_mhd_token = NULL; 8304 } 8305 8306 if (un->un_swr_token != NULL) { 8307 mutex_exit(SD_MUTEX(un)); 8308 _NOTE(DATA_READABLE_WITHOUT_LOCK(sd_lun::un_swr_token)); 8309 if (scsi_watch_request_terminate(un->un_swr_token, 8310 SCSI_WATCH_TERMINATE_NOWAIT)) { 8311 SD_ERROR(SD_LOG_ATTACH_DETACH, un, 8312 "sd_dr_detach: Cannot cancel swr watch request\n"); 8313 /* 8314 * Note: We are returning here after having removed 8315 * some driver timeouts above. This is consistent with 8316 * the legacy implementation but perhaps the watch 8317 * terminate call should be made with the wait flag set. 8318 */ 8319 goto err_stillbusy; 8320 } 8321 mutex_enter(SD_MUTEX(un)); 8322 un->un_swr_token = NULL; 8323 } 8324 8325 mutex_exit(SD_MUTEX(un)); 8326 8327 /* 8328 * Clear any scsi_reset_notifies. We clear the reset notifies 8329 * if we have not registered one. 8330 * Note: The sd_mhd_reset_notify_cb() fn tries to acquire SD_MUTEX! 8331 */ 8332 (void) scsi_reset_notify(SD_ADDRESS(un), SCSI_RESET_CANCEL, 8333 sd_mhd_reset_notify_cb, (caddr_t)un); 8334 8335 /* 8336 * protect the timeout pointers from getting nulled by 8337 * their callback functions during the cancellation process. 8338 * In such a scenario untimeout can be invoked with a null value. 
 */
	_NOTE(NO_COMPETING_THREADS_NOW);

	mutex_enter(&un->un_pm_mutex);
	if (un->un_pm_idle_timeid != NULL) {
		timeout_id_t temp_id = un->un_pm_idle_timeid;
		un->un_pm_idle_timeid = NULL;
		mutex_exit(&un->un_pm_mutex);

		/*
		 * Timeout is active; cancel it.
		 * Note that it'll never be active on a device
		 * that does not support PM therefore we don't
		 * have to check before calling pm_idle_component.
		 */
		(void) untimeout(temp_id);
		(void) pm_idle_component(SD_DEVINFO(un), 0);
		mutex_enter(&un->un_pm_mutex);
	}

	/*
	 * Check whether there is already a timeout scheduled for power
	 * management. If yes then don't lower the power here, that's
	 * the timeout handler's job.
	 */
	if (un->un_pm_timeid != NULL) {
		timeout_id_t temp_id = un->un_pm_timeid;
		un->un_pm_timeid = NULL;
		mutex_exit(&un->un_pm_mutex);
		/*
		 * Timeout is active; cancel it.
		 * Note that it'll never be active on a device
		 * that does not support PM therefore we don't
		 * have to check before calling pm_idle_component.
		 */
		(void) untimeout(temp_id);
		(void) pm_idle_component(SD_DEVINFO(un), 0);

	} else {
		mutex_exit(&un->un_pm_mutex);
		if ((un->un_f_pm_is_enabled == TRUE) &&
		    (pm_lower_power(SD_DEVINFO(un), 0, SD_SPINDLE_OFF) !=
		    DDI_SUCCESS)) {
			SD_ERROR(SD_LOG_ATTACH_DETACH, un,
			    "sd_dr_detach: Lower power request failed, "
			    "ignoring.\n");
			/*
			 * Fix for bug: 4297749, item # 13
			 * The above test now includes a check to see if PM is
			 * supported by this device before calling
			 * pm_lower_power().
			 * Note, the following is not dead code. The call to
			 * pm_lower_power above will generate a call back into
			 * our sdpower routine which might result in a timeout
			 * handler getting activated. Therefore the following
			 * code is valid and necessary.
			 */
			mutex_enter(&un->un_pm_mutex);
			if (un->un_pm_timeid != NULL) {
				timeout_id_t temp_id = un->un_pm_timeid;
				un->un_pm_timeid = NULL;
				mutex_exit(&un->un_pm_mutex);
				(void) untimeout(temp_id);
				(void) pm_idle_component(SD_DEVINFO(un), 0);
			} else {
				mutex_exit(&un->un_pm_mutex);
			}
		}
	}
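
	/*
	 * Illustrative sketch (not part of the original source): the timeout
	 * cancellation idiom used above and throughout this routine. The id
	 * is snapshotted and cleared under the mutex so the callback cannot
	 * be cancelled twice, and the mutex is dropped across untimeout(9F)
	 * because the pending callback may itself block on the same mutex.
	 * The un_example_timeid field below is hypothetical.
	 */
#if 0
	mutex_enter(SD_MUTEX(un));
	if (un->un_example_timeid != NULL) {
		timeout_id_t temp_id = un->un_example_timeid;
		un->un_example_timeid = NULL;
		mutex_exit(SD_MUTEX(un));
		(void) untimeout(temp_id);
		mutex_enter(SD_MUTEX(un));
	}
	mutex_exit(SD_MUTEX(un));
#endif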

	/*
	 * Cleanup from the scsi_ifsetcap() calls (437868)
	 * Relocated here from above to be after the call to
	 * pm_lower_power, which was getting errors.
	 */
	(void) scsi_ifsetcap(SD_ADDRESS(un), "lun-reset", 0, 1);
	(void) scsi_ifsetcap(SD_ADDRESS(un), "wide-xfer", 0, 1);

	/*
	 * Currently, tagged queuing is supported per target by the HBA.
	 * Setting this per lun instance actually sets the capability of
	 * this target in the HBA, which affects those luns already attached
	 * on the same target. So during detach, we can only disable this
	 * capability when there is no other lun left on the target. By
	 * doing this, we assume a target has the same tagged queuing
	 * capability for every lun. The condition can be removed when the
	 * HBA is changed to support per-lun tagged queuing capability.
	 */
	if (sd_scsi_get_target_lun_count(pdip, tgt) <= 1) {
		(void) scsi_ifsetcap(SD_ADDRESS(un), "tagged-qing", 0, 1);
	}

	if (un->un_f_is_fibre == FALSE) {
		(void) scsi_ifsetcap(SD_ADDRESS(un), "auto-rqsense", 0, 1);
	}

	/*
	 * Remove any event callbacks, fibre only
	 */
	if (un->un_f_is_fibre == TRUE) {
		if ((un->un_insert_event != NULL) &&
		    (ddi_remove_event_handler(un->un_insert_cb_id) !=
		    DDI_SUCCESS)) {
			/*
			 * Note: We are returning here after having done
			 * substantial cleanup above. This is consistent
			 * with the legacy implementation but this may not
			 * be the right thing to do.
			 */
			SD_ERROR(SD_LOG_ATTACH_DETACH, un,
			    "sd_dr_detach: Cannot cancel insert event\n");
			goto err_remove_event;
		}
		un->un_insert_event = NULL;

		if ((un->un_remove_event != NULL) &&
		    (ddi_remove_event_handler(un->un_remove_cb_id) !=
		    DDI_SUCCESS)) {
			/*
			 * Note: We are returning here after having done
			 * substantial cleanup above. This is consistent
			 * with the legacy implementation but this may not
			 * be the right thing to do.
			 */
			SD_ERROR(SD_LOG_ATTACH_DETACH, un,
			    "sd_dr_detach: Cannot cancel remove event\n");
			goto err_remove_event;
		}
		un->un_remove_event = NULL;
	}

	/* Do not free the softstate if the callback routine is active */
	sd_sync_with_callback(un);

	cmlb_detach(un->un_cmlbhandle, (void *)SD_PATH_DIRECT);
	cmlb_free_handle(&un->un_cmlbhandle);

	/*
	 * Hold the detach mutex here, to make sure that no other threads ever
	 * can access a (partially) freed soft state structure.
	 */
	mutex_enter(&sd_detach_mutex);

	/*
	 * Clean up the soft state struct.
	 * Cleanup is done in reverse order of allocs/inits.
	 * At this point there should be no competing threads anymore.
	 */

	scsi_fm_fini(devp);

	/*
	 * Deallocate memory for SCSI FMA.
	 */
	kmem_free(un->un_fm_private, sizeof (struct sd_fm_internal));

	/*
	 * Unregister and free device id if it was not registered
	 * by the transport.
	 */
	if (un->un_f_devid_transport_defined == FALSE) {
		ddi_devid_unregister(devi);
		if (un->un_devid) {
			ddi_devid_free(un->un_devid);
			un->un_devid = NULL;
		}
	}

	/*
	 * Destroy wmap cache if it exists.
	 */
	if (un->un_wm_cache != NULL) {
		kmem_cache_destroy(un->un_wm_cache);
		un->un_wm_cache = NULL;
	}

	/*
	 * kstat cleanup is done in detach for all device types (4363169).
	 * We do not want to fail detach if the device kstats are not deleted
	 * since there is confusion about the devo_refcnt for the device.
	 * We just delete the kstats and let detach complete successfully.
8519 */ 8520 if (un->un_stats != NULL) { 8521 kstat_delete(un->un_stats); 8522 un->un_stats = NULL; 8523 } 8524 if (un->un_errstats != NULL) { 8525 kstat_delete(un->un_errstats); 8526 un->un_errstats = NULL; 8527 } 8528 8529 /* Remove partition stats */ 8530 if (un->un_f_pkstats_enabled) { 8531 for (i = 0; i < NSDMAP; i++) { 8532 if (un->un_pstats[i] != NULL) { 8533 kstat_delete(un->un_pstats[i]); 8534 un->un_pstats[i] = NULL; 8535 } 8536 } 8537 } 8538 8539 /* Remove xbuf registration */ 8540 ddi_xbuf_attr_unregister_devinfo(un->un_xbuf_attr, devi); 8541 ddi_xbuf_attr_destroy(un->un_xbuf_attr); 8542 8543 /* Remove driver properties */ 8544 ddi_prop_remove_all(devi); 8545 8546 mutex_destroy(&un->un_pm_mutex); 8547 cv_destroy(&un->un_pm_busy_cv); 8548 8549 cv_destroy(&un->un_wcc_cv); 8550 8551 /* Open/close semaphore */ 8552 sema_destroy(&un->un_semoclose); 8553 8554 /* Removable media condvar. */ 8555 cv_destroy(&un->un_state_cv); 8556 8557 /* Suspend/resume condvar. */ 8558 cv_destroy(&un->un_suspend_cv); 8559 cv_destroy(&un->un_disk_busy_cv); 8560 8561 sd_free_rqs(un); 8562 8563 /* Free up soft state */ 8564 devp->sd_private = NULL; 8565 8566 bzero(un, sizeof (struct sd_lun)); 8567 #ifndef XPV_HVM_DRIVER 8568 ddi_soft_state_free(sd_state, instance); 8569 #endif /* !XPV_HVM_DRIVER */ 8570 8571 mutex_exit(&sd_detach_mutex); 8572 8573 /* This frees up the INQUIRY data associated with the device. */ 8574 scsi_unprobe(devp); 8575 8576 /* 8577 * After successfully detaching an instance, we update the information 8578 * of how many luns have been attached in the relative target and 8579 * controller for parallel SCSI. This information is used when sd tries 8580 * to set the tagged queuing capability in HBA. 8581 * Since un has been released, we can't use SD_IS_PARALLEL_SCSI(un) to 8582 * check if the device is parallel SCSI. However, we don't need to 8583 * check here because we've already checked during attach. No device 8584 * that is not parallel SCSI is in the chain. 8585 */ 8586 if ((tgt >= 0) && (tgt < NTARGETS_WIDE)) { 8587 sd_scsi_update_lun_on_target(pdip, tgt, SD_SCSI_LUN_DETACH); 8588 } 8589 8590 return (DDI_SUCCESS); 8591 8592 err_notclosed: 8593 mutex_exit(SD_MUTEX(un)); 8594 8595 err_stillbusy: 8596 _NOTE(NO_COMPETING_THREADS_NOW); 8597 8598 err_remove_event: 8599 mutex_enter(&sd_detach_mutex); 8600 un->un_detach_count--; 8601 mutex_exit(&sd_detach_mutex); 8602 8603 SD_TRACE(SD_LOG_ATTACH_DETACH, un, "sd_unit_detach: exit failure\n"); 8604 return (DDI_FAILURE); 8605 } 8606 8607 8608 /* 8609 * Function: sd_create_errstats 8610 * 8611 * Description: This routine instantiates the device error stats. 8612 * 8613 * Note: During attach the stats are instantiated first so they are 8614 * available for attach-time routines that utilize the driver 8615 * iopath to send commands to the device. The stats are initialized 8616 * separately so data obtained during some attach-time routines is 8617 * available. 
(4362483) 8618 * 8619 * Arguments: un - driver soft state (unit) structure 8620 * instance - driver instance 8621 * 8622 * Context: Kernel thread context 8623 */ 8624 8625 static void 8626 sd_create_errstats(struct sd_lun *un, int instance) 8627 { 8628 struct sd_errstats *stp; 8629 char kstatmodule_err[KSTAT_STRLEN]; 8630 char kstatname[KSTAT_STRLEN]; 8631 int ndata = (sizeof (struct sd_errstats) / sizeof (kstat_named_t)); 8632 8633 ASSERT(un != NULL); 8634 8635 if (un->un_errstats != NULL) { 8636 return; 8637 } 8638 8639 (void) snprintf(kstatmodule_err, sizeof (kstatmodule_err), 8640 "%serr", sd_label); 8641 (void) snprintf(kstatname, sizeof (kstatname), 8642 "%s%d,err", sd_label, instance); 8643 8644 un->un_errstats = kstat_create(kstatmodule_err, instance, kstatname, 8645 "device_error", KSTAT_TYPE_NAMED, ndata, KSTAT_FLAG_PERSISTENT); 8646 8647 if (un->un_errstats == NULL) { 8648 SD_ERROR(SD_LOG_ATTACH_DETACH, un, 8649 "sd_create_errstats: Failed kstat_create\n"); 8650 return; 8651 } 8652 8653 stp = (struct sd_errstats *)un->un_errstats->ks_data; 8654 kstat_named_init(&stp->sd_softerrs, "Soft Errors", 8655 KSTAT_DATA_UINT32); 8656 kstat_named_init(&stp->sd_harderrs, "Hard Errors", 8657 KSTAT_DATA_UINT32); 8658 kstat_named_init(&stp->sd_transerrs, "Transport Errors", 8659 KSTAT_DATA_UINT32); 8660 kstat_named_init(&stp->sd_vid, "Vendor", 8661 KSTAT_DATA_CHAR); 8662 kstat_named_init(&stp->sd_pid, "Product", 8663 KSTAT_DATA_CHAR); 8664 kstat_named_init(&stp->sd_revision, "Revision", 8665 KSTAT_DATA_CHAR); 8666 kstat_named_init(&stp->sd_serial, "Serial No", 8667 KSTAT_DATA_CHAR); 8668 kstat_named_init(&stp->sd_capacity, "Size", 8669 KSTAT_DATA_ULONGLONG); 8670 kstat_named_init(&stp->sd_rq_media_err, "Media Error", 8671 KSTAT_DATA_UINT32); 8672 kstat_named_init(&stp->sd_rq_ntrdy_err, "Device Not Ready", 8673 KSTAT_DATA_UINT32); 8674 kstat_named_init(&stp->sd_rq_nodev_err, "No Device", 8675 KSTAT_DATA_UINT32); 8676 kstat_named_init(&stp->sd_rq_recov_err, "Recoverable", 8677 KSTAT_DATA_UINT32); 8678 kstat_named_init(&stp->sd_rq_illrq_err, "Illegal Request", 8679 KSTAT_DATA_UINT32); 8680 kstat_named_init(&stp->sd_rq_pfa_err, "Predictive Failure Analysis", 8681 KSTAT_DATA_UINT32); 8682 8683 un->un_errstats->ks_private = un; 8684 un->un_errstats->ks_update = nulldev; 8685 8686 kstat_install(un->un_errstats); 8687 } 8688 8689 8690 /* 8691 * Function: sd_set_errstats 8692 * 8693 * Description: This routine sets the value of the vendor id, product id, 8694 * revision, serial number, and capacity device error stats. 8695 * 8696 * Note: During attach the stats are instantiated first so they are 8697 * available for attach-time routines that utilize the driver 8698 * iopath to send commands to the device. The stats are initialized 8699 * separately so data obtained during some attach-time routines is 8700 * available. 
 *		(4362483)
 *
 * Arguments:	un - driver soft state (unit) structure
 *
 * Context:	Kernel thread context
 */

static void
sd_set_errstats(struct sd_lun *un)
{
	struct sd_errstats	*stp;

	ASSERT(un != NULL);
	ASSERT(un->un_errstats != NULL);
	stp = (struct sd_errstats *)un->un_errstats->ks_data;
	ASSERT(stp != NULL);
	(void) strncpy(stp->sd_vid.value.c, un->un_sd->sd_inq->inq_vid, 8);
	(void) strncpy(stp->sd_pid.value.c, un->un_sd->sd_inq->inq_pid, 16);
	(void) strncpy(stp->sd_revision.value.c,
	    un->un_sd->sd_inq->inq_revision, 4);

	/*
	 * All the errstats are persistent across detach/attach, so reset
	 * them all here in case of the hot replacement of disk drives,
	 * except for unchanged Sun-qualified drives.
	 */
	if ((bcmp(&SD_INQUIRY(un)->inq_pid[9], "SUN", 3) != 0) ||
	    (bcmp(&SD_INQUIRY(un)->inq_serial, stp->sd_serial.value.c,
	    sizeof (SD_INQUIRY(un)->inq_serial)) != 0)) {
		stp->sd_softerrs.value.ui32 = 0;
		stp->sd_harderrs.value.ui32 = 0;
		stp->sd_transerrs.value.ui32 = 0;
		stp->sd_rq_media_err.value.ui32 = 0;
		stp->sd_rq_ntrdy_err.value.ui32 = 0;
		stp->sd_rq_nodev_err.value.ui32 = 0;
		stp->sd_rq_recov_err.value.ui32 = 0;
		stp->sd_rq_illrq_err.value.ui32 = 0;
		stp->sd_rq_pfa_err.value.ui32 = 0;
	}

	/*
	 * Set the "Serial No" kstat for Sun qualified drives (indicated by
	 * "SUN" in bytes 25-27 of the inquiry data (bytes 9-11 of the pid)
	 * (4376302))
	 */
	if (bcmp(&SD_INQUIRY(un)->inq_pid[9], "SUN", 3) == 0) {
		bcopy(&SD_INQUIRY(un)->inq_serial, stp->sd_serial.value.c,
		    sizeof (SD_INQUIRY(un)->inq_serial));
	}

	if (un->un_f_blockcount_is_valid != TRUE) {
		/*
		 * Set capacity error stat to 0 for no media. This ensures
		 * a valid capacity is displayed in response to 'iostat -E'
		 * when no media is present in the device.
		 */
		stp->sd_capacity.value.ui64 = 0;
	} else {
		/*
		 * Multiply un_blockcount by un->un_sys_blocksize to get
		 * capacity.
		 *
		 * Note: for non-512 blocksize devices "un_blockcount" has
		 * been "scaled" in sd_send_scsi_READ_CAPACITY by multiplying
		 * by (un_tgt_blocksize / un->un_sys_blocksize).
		 */
		stp->sd_capacity.value.ui64 = (uint64_t)
		    ((uint64_t)un->un_blockcount * un->un_sys_blocksize);
	}
}


/*
 * Function: sd_set_pstats
 *
 * Description: This routine instantiates and initializes the partition
 *		stats for each partition with more than zero blocks.
 *		(4363169)
 *
 * Arguments:	un - driver soft state (unit) structure
 *
 * Context:	Kernel thread context
 */

static void
sd_set_pstats(struct sd_lun *un)
{
	char	kstatname[KSTAT_STRLEN];
	int	instance;
	int	i;
	diskaddr_t	nblks = 0;
	char	*partname = NULL;

	ASSERT(un != NULL);

	instance = ddi_get_instance(SD_DEVINFO(un));
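
	/*
	 * Illustrative note (not part of the original source): with the
	 * "%s%d,%s" format used below, instance 0 and partition name "a"
	 * yield a partition kstat named "sd0,a" (or "ssd0,a" for the fibre
	 * binary), which is the per-partition name reported by iostat(1M).
	 */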
	/* Note: x86: is this a VTOC8/VTOC16 difference? */
	for (i = 0; i < NSDMAP; i++) {

		if (cmlb_partinfo(un->un_cmlbhandle, i,
		    &nblks, NULL, &partname, NULL, (void *)SD_PATH_DIRECT) != 0)
			continue;
		mutex_enter(SD_MUTEX(un));

		if ((un->un_pstats[i] == NULL) &&
		    (nblks != 0)) {

			(void) snprintf(kstatname, sizeof (kstatname),
			    "%s%d,%s", sd_label, instance,
			    partname);

			un->un_pstats[i] = kstat_create(sd_label,
			    instance, kstatname, "partition", KSTAT_TYPE_IO,
			    1, KSTAT_FLAG_PERSISTENT);
			if (un->un_pstats[i] != NULL) {
				un->un_pstats[i]->ks_lock = SD_MUTEX(un);
				kstat_install(un->un_pstats[i]);
			}
		}
		mutex_exit(SD_MUTEX(un));
	}
}


#if (defined(__fibre))
/*
 * Function: sd_init_event_callbacks
 *
 * Description: This routine initializes the insertion and removal event
 *		callbacks. (fibre only)
 *
 * Arguments:	un - driver soft state (unit) structure
 *
 * Context:	Kernel thread context
 */

static void
sd_init_event_callbacks(struct sd_lun *un)
{
	ASSERT(un != NULL);

	if ((un->un_insert_event == NULL) &&
	    (ddi_get_eventcookie(SD_DEVINFO(un), FCAL_INSERT_EVENT,
	    &un->un_insert_event) == DDI_SUCCESS)) {
		/*
		 * Add the callback for an insertion event
		 */
		(void) ddi_add_event_handler(SD_DEVINFO(un),
		    un->un_insert_event, sd_event_callback, (void *)un,
		    &(un->un_insert_cb_id));
	}

	if ((un->un_remove_event == NULL) &&
	    (ddi_get_eventcookie(SD_DEVINFO(un), FCAL_REMOVE_EVENT,
	    &un->un_remove_event) == DDI_SUCCESS)) {
		/*
		 * Add the callback for a removal event
		 */
		(void) ddi_add_event_handler(SD_DEVINFO(un),
		    un->un_remove_event, sd_event_callback, (void *)un,
		    &(un->un_remove_cb_id));
	}
}


/*
 * Function: sd_event_callback
 *
 * Description: This routine handles insert/remove events (photon). The
 *		state is changed to OFFLINE, which can be used to suppress
 *		error messages. (fibre only)
 *
 * Arguments:	un - driver soft state (unit) structure
 *
 * Context:	Callout thread context
 */
/* ARGSUSED */
static void
sd_event_callback(dev_info_t *dip, ddi_eventcookie_t event, void *arg,
    void *bus_impldata)
{
	struct sd_lun *un = (struct sd_lun *)arg;

	_NOTE(DATA_READABLE_WITHOUT_LOCK(sd_lun::un_insert_event));
	if (event == un->un_insert_event) {
		SD_TRACE(SD_LOG_COMMON, un, "sd_event_callback: insert event");
		mutex_enter(SD_MUTEX(un));
		if (un->un_state == SD_STATE_OFFLINE) {
			if (un->un_last_state != SD_STATE_SUSPENDED) {
				un->un_state = un->un_last_state;
			} else {
				/*
				 * We have gone through SUSPEND/RESUME while
				 * we were offline. Restore the last state
				 */
				un->un_state = un->un_save_state;
			}
		}
		mutex_exit(SD_MUTEX(un));

		_NOTE(DATA_READABLE_WITHOUT_LOCK(sd_lun::un_remove_event));
	} else if (event == un->un_remove_event) {
		SD_TRACE(SD_LOG_COMMON, un, "sd_event_callback: remove event");
		mutex_enter(SD_MUTEX(un));
		/*
		 * We need to handle an event callback that occurs during
		 * the suspend operation, since we don't prevent it.
8909 */ 8910 if (un->un_state != SD_STATE_OFFLINE) { 8911 if (un->un_state != SD_STATE_SUSPENDED) { 8912 New_state(un, SD_STATE_OFFLINE); 8913 } else { 8914 un->un_last_state = SD_STATE_OFFLINE; 8915 } 8916 } 8917 mutex_exit(SD_MUTEX(un)); 8918 } else { 8919 scsi_log(SD_DEVINFO(un), sd_label, CE_NOTE, 8920 "!Unknown event\n"); 8921 } 8922 8923 } 8924 #endif 8925 8926 /* 8927 * Function: sd_cache_control() 8928 * 8929 * Description: This routine is the driver entry point for setting 8930 * read and write caching by modifying the WCE (write cache 8931 * enable) and RCD (read cache disable) bits of mode 8932 * page 8 (MODEPAGE_CACHING). 8933 * 8934 * Arguments: ssc - ssc contains pointer to driver soft state (unit) 8935 * structure for this target. 8936 * rcd_flag - flag for controlling the read cache 8937 * wce_flag - flag for controlling the write cache 8938 * 8939 * Return Code: EIO 8940 * code returned by sd_send_scsi_MODE_SENSE and 8941 * sd_send_scsi_MODE_SELECT 8942 * 8943 * Context: Kernel Thread 8944 */ 8945 8946 static int 8947 sd_cache_control(sd_ssc_t *ssc, int rcd_flag, int wce_flag) 8948 { 8949 struct mode_caching *mode_caching_page; 8950 uchar_t *header; 8951 size_t buflen; 8952 int hdrlen; 8953 int bd_len; 8954 int rval = 0; 8955 struct mode_header_grp2 *mhp; 8956 struct sd_lun *un; 8957 int status; 8958 8959 ASSERT(ssc != NULL); 8960 un = ssc->ssc_un; 8961 ASSERT(un != NULL); 8962 8963 /* 8964 * Do a test unit ready, otherwise a mode sense may not work if this 8965 * is the first command sent to the device after boot. 8966 */ 8967 status = sd_send_scsi_TEST_UNIT_READY(ssc, 0); 8968 if (status != 0) 8969 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 8970 8971 if (un->un_f_cfg_is_atapi == TRUE) { 8972 hdrlen = MODE_HEADER_LENGTH_GRP2; 8973 } else { 8974 hdrlen = MODE_HEADER_LENGTH; 8975 } 8976 8977 /* 8978 * Allocate memory for the retrieved mode page and its headers. Set 8979 * a pointer to the page itself. Use mode_cache_scsi3 to insure 8980 * we get all of the mode sense data otherwise, the mode select 8981 * will fail. mode_cache_scsi3 is a superset of mode_caching. 8982 */ 8983 buflen = hdrlen + MODE_BLK_DESC_LENGTH + 8984 sizeof (struct mode_cache_scsi3); 8985 8986 header = kmem_zalloc(buflen, KM_SLEEP); 8987 8988 /* Get the information from the device. */ 8989 if (un->un_f_cfg_is_atapi == TRUE) { 8990 rval = sd_send_scsi_MODE_SENSE(ssc, CDB_GROUP1, header, buflen, 8991 MODEPAGE_CACHING, SD_PATH_DIRECT); 8992 } else { 8993 rval = sd_send_scsi_MODE_SENSE(ssc, CDB_GROUP0, header, buflen, 8994 MODEPAGE_CACHING, SD_PATH_DIRECT); 8995 } 8996 8997 if (rval != 0) { 8998 SD_ERROR(SD_LOG_IOCTL_RMMEDIA, un, 8999 "sd_cache_control: Mode Sense Failed\n"); 9000 goto mode_sense_failed; 9001 } 9002 9003 /* 9004 * Determine size of Block Descriptors in order to locate 9005 * the mode page data. ATAPI devices return 0, SCSI devices 9006 * should return MODE_BLK_DESC_LENGTH. 
9007 */ 9008 if (un->un_f_cfg_is_atapi == TRUE) { 9009 mhp = (struct mode_header_grp2 *)header; 9010 bd_len = (mhp->bdesc_length_hi << 8) | mhp->bdesc_length_lo; 9011 } else { 9012 bd_len = ((struct mode_header *)header)->bdesc_length; 9013 } 9014 9015 if (bd_len > MODE_BLK_DESC_LENGTH) { 9016 sd_ssc_set_info(ssc, SSC_FLAGS_INVALID_DATA, 0, 9017 "sd_cache_control: Mode Sense returned invalid block " 9018 "descriptor length\n"); 9019 rval = EIO; 9020 goto mode_sense_failed; 9021 } 9022 9023 mode_caching_page = (struct mode_caching *)(header + hdrlen + bd_len); 9024 if (mode_caching_page->mode_page.code != MODEPAGE_CACHING) { 9025 sd_ssc_set_info(ssc, SSC_FLAGS_INVALID_DATA, SD_LOG_COMMON, 9026 "sd_cache_control: Mode Sense caching page code mismatch " 9027 "%d\n", mode_caching_page->mode_page.code); 9028 rval = EIO; 9029 goto mode_sense_failed; 9030 } 9031 9032 /* Check the relevant bits on successful mode sense. */ 9033 if ((mode_caching_page->rcd && rcd_flag == SD_CACHE_ENABLE) || 9034 (!mode_caching_page->rcd && rcd_flag == SD_CACHE_DISABLE) || 9035 (mode_caching_page->wce && wce_flag == SD_CACHE_DISABLE) || 9036 (!mode_caching_page->wce && wce_flag == SD_CACHE_ENABLE)) { 9037 9038 size_t sbuflen; 9039 uchar_t save_pg; 9040 9041 /* 9042 * Construct select buffer length based on the 9043 * length of the sense data returned. 9044 */ 9045 sbuflen = hdrlen + MODE_BLK_DESC_LENGTH + 9046 sizeof (struct mode_page) + 9047 (int)mode_caching_page->mode_page.length; 9048 9049 /* 9050 * Set the caching bits as requested. 9051 */ 9052 if (rcd_flag == SD_CACHE_ENABLE) 9053 mode_caching_page->rcd = 0; 9054 else if (rcd_flag == SD_CACHE_DISABLE) 9055 mode_caching_page->rcd = 1; 9056 9057 if (wce_flag == SD_CACHE_ENABLE) 9058 mode_caching_page->wce = 1; 9059 else if (wce_flag == SD_CACHE_DISABLE) 9060 mode_caching_page->wce = 0; 9061 9062 /* 9063 * Save the page if the mode sense says the 9064 * drive supports it. 9065 */ 9066 save_pg = mode_caching_page->mode_page.ps ? 9067 SD_SAVE_PAGE : SD_DONTSAVE_PAGE; 9068 9069 /* Clear reserved bits before mode select. */ 9070 mode_caching_page->mode_page.ps = 0; 9071 9072 /* 9073 * Clear out mode header for mode select. 9074 * The rest of the retrieved page will be reused. 9075 */ 9076 bzero(header, hdrlen); 9077 9078 if (un->un_f_cfg_is_atapi == TRUE) { 9079 mhp = (struct mode_header_grp2 *)header; 9080 mhp->bdesc_length_hi = bd_len >> 8; 9081 mhp->bdesc_length_lo = (uchar_t)bd_len & 0xff; 9082 } else { 9083 ((struct mode_header *)header)->bdesc_length = bd_len; 9084 } 9085 9086 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 9087 9088 /* Issue mode select to change the cache settings */ 9089 if (un->un_f_cfg_is_atapi == TRUE) { 9090 rval = sd_send_scsi_MODE_SELECT(ssc, CDB_GROUP1, header, 9091 sbuflen, save_pg, SD_PATH_DIRECT); 9092 } else { 9093 rval = sd_send_scsi_MODE_SELECT(ssc, CDB_GROUP0, header, 9094 sbuflen, save_pg, SD_PATH_DIRECT); 9095 } 9096 9097 } 9098 9099 9100 mode_sense_failed: 9101 9102 kmem_free(header, buflen); 9103 9104 if (rval != 0) { 9105 if (rval == EIO) 9106 sd_ssc_assessment(ssc, SD_FMT_STATUS_CHECK); 9107 else 9108 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 9109 } 9110 return (rval); 9111 } 9112 9113 9114 /* 9115 * Function: sd_get_write_cache_enabled() 9116 * 9117 * Description: This routine is the driver entry point for determining if 9118 * write caching is enabled. It examines the WCE (write cache 9119 * enable) bits of mode page 8 (MODEPAGE_CACHING). 
9120 * 9121 * Arguments: ssc - ssc contains pointer to driver soft state (unit) 9122 * structure for this target. 9123 * is_enabled - pointer to int where write cache enabled state 9124 * is returned (non-zero -> write cache enabled) 9125 * 9126 * 9127 * Return Code: EIO 9128 * code returned by sd_send_scsi_MODE_SENSE 9129 * 9130 * Context: Kernel Thread 9131 * 9132 * NOTE: If ioctl is added to disable write cache, this sequence should 9133 * be followed so that no locking is required for accesses to 9134 * un->un_f_write_cache_enabled: 9135 * do mode select to clear wce 9136 * do synchronize cache to flush cache 9137 * set un->un_f_write_cache_enabled = FALSE 9138 * 9139 * Conversely, an ioctl to enable the write cache should be done 9140 * in this order: 9141 * set un->un_f_write_cache_enabled = TRUE 9142 * do mode select to set wce 9143 */ 9144 9145 static int 9146 sd_get_write_cache_enabled(sd_ssc_t *ssc, int *is_enabled) 9147 { 9148 struct mode_caching *mode_caching_page; 9149 uchar_t *header; 9150 size_t buflen; 9151 int hdrlen; 9152 int bd_len; 9153 int rval = 0; 9154 struct sd_lun *un; 9155 int status; 9156 9157 ASSERT(ssc != NULL); 9158 un = ssc->ssc_un; 9159 ASSERT(un != NULL); 9160 ASSERT(is_enabled != NULL); 9161 9162 /* in case of error, flag as enabled */ 9163 *is_enabled = TRUE; 9164 9165 /* 9166 * Do a test unit ready, otherwise a mode sense may not work if this 9167 * is the first command sent to the device after boot. 9168 */ 9169 status = sd_send_scsi_TEST_UNIT_READY(ssc, 0); 9170 9171 if (status != 0) 9172 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 9173 9174 if (un->un_f_cfg_is_atapi == TRUE) { 9175 hdrlen = MODE_HEADER_LENGTH_GRP2; 9176 } else { 9177 hdrlen = MODE_HEADER_LENGTH; 9178 } 9179 9180 /* 9181 * Allocate memory for the retrieved mode page and its headers. Set 9182 * a pointer to the page itself. 9183 */ 9184 buflen = hdrlen + MODE_BLK_DESC_LENGTH + sizeof (struct mode_caching); 9185 header = kmem_zalloc(buflen, KM_SLEEP); 9186 9187 /* Get the information from the device. */ 9188 if (un->un_f_cfg_is_atapi == TRUE) { 9189 rval = sd_send_scsi_MODE_SENSE(ssc, CDB_GROUP1, header, buflen, 9190 MODEPAGE_CACHING, SD_PATH_DIRECT); 9191 } else { 9192 rval = sd_send_scsi_MODE_SENSE(ssc, CDB_GROUP0, header, buflen, 9193 MODEPAGE_CACHING, SD_PATH_DIRECT); 9194 } 9195 9196 if (rval != 0) { 9197 SD_ERROR(SD_LOG_IOCTL_RMMEDIA, un, 9198 "sd_get_write_cache_enabled: Mode Sense Failed\n"); 9199 goto mode_sense_failed; 9200 } 9201 9202 /* 9203 * Determine size of Block Descriptors in order to locate 9204 * the mode page data. ATAPI devices return 0, SCSI devices 9205 * should return MODE_BLK_DESC_LENGTH. 
9206 */ 9207 if (un->un_f_cfg_is_atapi == TRUE) { 9208 struct mode_header_grp2 *mhp; 9209 mhp = (struct mode_header_grp2 *)header; 9210 bd_len = (mhp->bdesc_length_hi << 8) | mhp->bdesc_length_lo; 9211 } else { 9212 bd_len = ((struct mode_header *)header)->bdesc_length; 9213 } 9214 9215 if (bd_len > MODE_BLK_DESC_LENGTH) { 9216 /* FMA should make upset complain here */ 9217 sd_ssc_set_info(ssc, SSC_FLAGS_INVALID_DATA, 0, 9218 "sd_get_write_cache_enabled: Mode Sense returned invalid " 9219 "block descriptor length\n"); 9220 rval = EIO; 9221 goto mode_sense_failed; 9222 } 9223 9224 mode_caching_page = (struct mode_caching *)(header + hdrlen + bd_len); 9225 if (mode_caching_page->mode_page.code != MODEPAGE_CACHING) { 9226 /* FMA could make upset complain here */ 9227 sd_ssc_set_info(ssc, SSC_FLAGS_INVALID_DATA, SD_LOG_COMMON, 9228 "sd_get_write_cache_enabled: Mode Sense caching page " 9229 "code mismatch %d\n", mode_caching_page->mode_page.code); 9230 rval = EIO; 9231 goto mode_sense_failed; 9232 } 9233 *is_enabled = mode_caching_page->wce; 9234 9235 mode_sense_failed: 9236 if (rval == 0) { 9237 sd_ssc_assessment(ssc, SD_FMT_STANDARD); 9238 } else if (rval == EIO) { 9239 /* 9240 * Some disks do not support mode sense(6), we 9241 * should ignore this kind of error(sense key is 9242 * 0x5 - illegal request). 9243 */ 9244 uint8_t *sensep; 9245 int senlen; 9246 9247 sensep = (uint8_t *)ssc->ssc_uscsi_cmd->uscsi_rqbuf; 9248 senlen = (int)(ssc->ssc_uscsi_cmd->uscsi_rqlen - 9249 ssc->ssc_uscsi_cmd->uscsi_rqresid); 9250 9251 if (senlen > 0 && 9252 scsi_sense_key(sensep) == KEY_ILLEGAL_REQUEST) { 9253 sd_ssc_assessment(ssc, SD_FMT_IGNORE_COMPROMISE); 9254 } else { 9255 sd_ssc_assessment(ssc, SD_FMT_STATUS_CHECK); 9256 } 9257 } else { 9258 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 9259 } 9260 kmem_free(header, buflen); 9261 return (rval); 9262 } 9263 9264 /* 9265 * Function: sd_get_nv_sup() 9266 * 9267 * Description: This routine is the driver entry point for 9268 * determining whether non-volatile cache is supported. This 9269 * determination process works as follows: 9270 * 9271 * 1. sd first queries sd.conf on whether 9272 * suppress_cache_flush bit is set for this device. 9273 * 9274 * 2. if not there, then queries the internal disk table. 9275 * 9276 * 3. if either sd.conf or internal disk table specifies 9277 * cache flush be suppressed, we don't bother checking 9278 * NV_SUP bit. 9279 * 9280 * If SUPPRESS_CACHE_FLUSH bit is not set to 1, sd queries 9281 * the optional INQUIRY VPD page 0x86. If the device 9282 * supports VPD page 0x86, sd examines the NV_SUP 9283 * (non-volatile cache support) bit in the INQUIRY VPD page 9284 * 0x86: 9285 * o If NV_SUP bit is set, sd assumes the device has a 9286 * non-volatile cache and set the 9287 * un_f_sync_nv_supported to TRUE. 9288 * o Otherwise cache is not non-volatile, 9289 * un_f_sync_nv_supported is set to FALSE. 9290 * 9291 * Arguments: un - driver soft state (unit) structure 9292 * 9293 * Return Code: 9294 * 9295 * Context: Kernel Thread 9296 */ 9297 9298 static void 9299 sd_get_nv_sup(sd_ssc_t *ssc) 9300 { 9301 int rval = 0; 9302 uchar_t *inq86 = NULL; 9303 size_t inq86_len = MAX_INQUIRY_SIZE; 9304 size_t inq86_resid = 0; 9305 struct dk_callback *dkc; 9306 struct sd_lun *un; 9307 9308 ASSERT(ssc != NULL); 9309 un = ssc->ssc_un; 9310 ASSERT(un != NULL); 9311 9312 mutex_enter(SD_MUTEX(un)); 9313 9314 /* 9315 * Be conservative on the device's support of 9316 * SYNC_NV bit: un_f_sync_nv_supported is 9317 * initialized to be false. 
9318 */ 9319 un->un_f_sync_nv_supported = FALSE; 9320 9321 /* 9322 * If either sd.conf or internal disk table 9323 * specifies cache flush be suppressed, then 9324 * we don't bother checking NV_SUP bit. 9325 */ 9326 if (un->un_f_suppress_cache_flush == TRUE) { 9327 mutex_exit(SD_MUTEX(un)); 9328 return; 9329 } 9330 9331 if (sd_check_vpd_page_support(ssc) == 0 && 9332 un->un_vpd_page_mask & SD_VPD_EXTENDED_DATA_PG) { 9333 mutex_exit(SD_MUTEX(un)); 9334 /* collect page 86 data if available */ 9335 inq86 = kmem_zalloc(inq86_len, KM_SLEEP); 9336 9337 rval = sd_send_scsi_INQUIRY(ssc, inq86, inq86_len, 9338 0x01, 0x86, &inq86_resid); 9339 9340 if (rval == 0 && (inq86_len - inq86_resid > 6)) { 9341 SD_TRACE(SD_LOG_COMMON, un, 9342 "sd_get_nv_sup: \ 9343 successfully get VPD page: %x \ 9344 PAGE LENGTH: %x BYTE 6: %x\n", 9345 inq86[1], inq86[3], inq86[6]); 9346 9347 mutex_enter(SD_MUTEX(un)); 9348 /* 9349 * check the value of NV_SUP bit: only if the device 9350 * reports NV_SUP bit to be 1, the 9351 * un_f_sync_nv_supported bit will be set to true. 9352 */ 9353 if (inq86[6] & SD_VPD_NV_SUP) { 9354 un->un_f_sync_nv_supported = TRUE; 9355 } 9356 mutex_exit(SD_MUTEX(un)); 9357 } else if (rval != 0) { 9358 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 9359 } 9360 9361 kmem_free(inq86, inq86_len); 9362 } else { 9363 mutex_exit(SD_MUTEX(un)); 9364 } 9365 9366 /* 9367 * Send a SYNC CACHE command to check whether 9368 * SYNC_NV bit is supported. This command should have 9369 * un_f_sync_nv_supported set to correct value. 9370 */ 9371 mutex_enter(SD_MUTEX(un)); 9372 if (un->un_f_sync_nv_supported) { 9373 mutex_exit(SD_MUTEX(un)); 9374 dkc = kmem_zalloc(sizeof (struct dk_callback), KM_SLEEP); 9375 dkc->dkc_flag = FLUSH_VOLATILE; 9376 (void) sd_send_scsi_SYNCHRONIZE_CACHE(un, dkc); 9377 9378 /* 9379 * Send a TEST UNIT READY command to the device. This should 9380 * clear any outstanding UNIT ATTENTION that may be present. 9381 */ 9382 rval = sd_send_scsi_TEST_UNIT_READY(ssc, SD_DONT_RETRY_TUR); 9383 if (rval != 0) 9384 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 9385 9386 kmem_free(dkc, sizeof (struct dk_callback)); 9387 } else { 9388 mutex_exit(SD_MUTEX(un)); 9389 } 9390 9391 SD_TRACE(SD_LOG_COMMON, un, "sd_get_nv_sup: \ 9392 un_f_suppress_cache_flush is set to %d\n", 9393 un->un_f_suppress_cache_flush); 9394 } 9395 9396 /* 9397 * Function: sd_make_device 9398 * 9399 * Description: Utility routine to return the Solaris device number from 9400 * the data in the device's dev_info structure. 9401 * 9402 * Return Code: The Solaris device number 9403 * 9404 * Context: Any 9405 */ 9406 9407 static dev_t 9408 sd_make_device(dev_info_t *devi) 9409 { 9410 return (makedevice(ddi_driver_major(devi), 9411 ddi_get_instance(devi) << SDUNIT_SHIFT)); 9412 } 9413 9414 9415 /* 9416 * Function: sd_pm_entry 9417 * 9418 * Description: Called at the start of a new command to manage power 9419 * and busy status of a device. This includes determining whether 9420 * the current power state of the device is sufficient for 9421 * performing the command or whether it must be changed. 9422 * The PM framework is notified appropriately. 9423 * Only with a return status of DDI_SUCCESS will the 9424 * component be busy to the framework. 9425 * 9426 * All callers of sd_pm_entry must check the return status 9427 * and only call sd_pm_exit it it was DDI_SUCCESS. A status 9428 * of DDI_FAILURE indicates the device failed to power up. 9429 * In this case un_pm_count has been adjusted so the result 9430 * on exit is still powered down, ie. 
count is less than 0. 9431 * Calling sd_pm_exit with this count value hits an ASSERT. 9432 * 9433 * Return Code: DDI_SUCCESS or DDI_FAILURE 9434 * 9435 * Context: Kernel thread context. 9436 */ 9437 9438 static int 9439 sd_pm_entry(struct sd_lun *un) 9440 { 9441 int return_status = DDI_SUCCESS; 9442 9443 ASSERT(!mutex_owned(SD_MUTEX(un))); 9444 ASSERT(!mutex_owned(&un->un_pm_mutex)); 9445 9446 SD_TRACE(SD_LOG_IO_PM, un, "sd_pm_entry: entry\n"); 9447 9448 if (un->un_f_pm_is_enabled == FALSE) { 9449 SD_TRACE(SD_LOG_IO_PM, un, 9450 "sd_pm_entry: exiting, PM not enabled\n"); 9451 return (return_status); 9452 } 9453 9454 /* 9455 * Just increment a counter if PM is enabled. On the transition from 9456 * 0 ==> 1, mark the device as busy. The iodone side will decrement 9457 * the count with each IO and mark the device as idle when the count 9458 * hits 0. 9459 * 9460 * If the count is less than 0 the device is powered down. If a powered 9461 * down device is successfully powered up then the count must be 9462 * incremented to reflect the power up. Note that it'll get incremented 9463 * a second time to become busy. 9464 * 9465 * Because the following has the potential to change the device state 9466 * and must release the un_pm_mutex to do so, only one thread can be 9467 * allowed through at a time. 9468 */ 9469 9470 mutex_enter(&un->un_pm_mutex); 9471 while (un->un_pm_busy == TRUE) { 9472 cv_wait(&un->un_pm_busy_cv, &un->un_pm_mutex); 9473 } 9474 un->un_pm_busy = TRUE; 9475 9476 if (un->un_pm_count < 1) { 9477 9478 SD_TRACE(SD_LOG_IO_PM, un, "sd_pm_entry: busy component\n"); 9479 9480 /* 9481 * Indicate we are now busy so the framework won't attempt to 9482 * power down the device. This call will only fail if either 9483 * we passed a bad component number or the device has no 9484 * components. Neither of these should ever happen. 9485 */ 9486 mutex_exit(&un->un_pm_mutex); 9487 return_status = pm_busy_component(SD_DEVINFO(un), 0); 9488 ASSERT(return_status == DDI_SUCCESS); 9489 9490 mutex_enter(&un->un_pm_mutex); 9491 9492 if (un->un_pm_count < 0) { 9493 mutex_exit(&un->un_pm_mutex); 9494 9495 SD_TRACE(SD_LOG_IO_PM, un, 9496 "sd_pm_entry: power up component\n"); 9497 9498 /* 9499 * pm_raise_power will cause sdpower to be called 9500 * which brings the device power level to the 9501 * desired state, ON in this case. If successful, 9502 * un_pm_count and un_power_level will be updated 9503 * appropriately. 9504 */ 9505 return_status = pm_raise_power(SD_DEVINFO(un), 0, 9506 SD_SPINDLE_ON); 9507 9508 mutex_enter(&un->un_pm_mutex); 9509 9510 if (return_status != DDI_SUCCESS) { 9511 /* 9512 * Power up failed. 9513 * Idle the device and adjust the count 9514 * so the result on exit is that we're 9515 * still powered down, ie. count is less than 0. 9516 */ 9517 SD_TRACE(SD_LOG_IO_PM, un, 9518 "sd_pm_entry: power up failed," 9519 " idle the component\n"); 9520 9521 (void) pm_idle_component(SD_DEVINFO(un), 0); 9522 un->un_pm_count--; 9523 } else { 9524 /* 9525 * Device is powered up, verify the 9526 * count is non-negative. 9527 * This is debug only. 9528 */ 9529 ASSERT(un->un_pm_count == 0); 9530 } 9531 } 9532 9533 if (return_status == DDI_SUCCESS) { 9534 /* 9535 * For performance, now that the device has been tagged 9536 * as busy, and it's known to be powered up, update the 9537 * chain types to use jump tables that do not include 9538 * pm. This significantly lowers the overhead and 9539 * therefore improves performance. 
9540 */ 9541 9542 mutex_exit(&un->un_pm_mutex); 9543 mutex_enter(SD_MUTEX(un)); 9544 SD_TRACE(SD_LOG_IO_PM, un, 9545 "sd_pm_entry: changing uscsi_chain_type from %d\n", 9546 un->un_uscsi_chain_type); 9547 9548 if (un->un_f_non_devbsize_supported) { 9549 un->un_buf_chain_type = 9550 SD_CHAIN_INFO_RMMEDIA_NO_PM; 9551 } else { 9552 un->un_buf_chain_type = 9553 SD_CHAIN_INFO_DISK_NO_PM; 9554 } 9555 un->un_uscsi_chain_type = SD_CHAIN_INFO_USCSI_CMD_NO_PM; 9556 9557 SD_TRACE(SD_LOG_IO_PM, un, 9558 " changed uscsi_chain_type to %d\n", 9559 un->un_uscsi_chain_type); 9560 mutex_exit(SD_MUTEX(un)); 9561 mutex_enter(&un->un_pm_mutex); 9562 9563 if (un->un_pm_idle_timeid == NULL) { 9564 /* 300 ms. */ 9565 un->un_pm_idle_timeid = 9566 timeout(sd_pm_idletimeout_handler, un, 9567 (drv_usectohz((clock_t)300000))); 9568 /* 9569 * Include an extra call to busy which keeps the 9570 * device busy with-respect-to the PM layer 9571 * until the timer fires, at which time it'll 9572 * get the extra idle call. 9573 */ 9574 (void) pm_busy_component(SD_DEVINFO(un), 0); 9575 } 9576 } 9577 } 9578 un->un_pm_busy = FALSE; 9579 /* Next... */ 9580 cv_signal(&un->un_pm_busy_cv); 9581 9582 un->un_pm_count++; 9583 9584 SD_TRACE(SD_LOG_IO_PM, un, 9585 "sd_pm_entry: exiting, un_pm_count = %d\n", un->un_pm_count); 9586 9587 mutex_exit(&un->un_pm_mutex); 9588 9589 return (return_status); 9590 } 9591 9592 9593 /* 9594 * Function: sd_pm_exit 9595 * 9596 * Description: Called at the completion of a command to manage busy 9597 * status for the device. If the device becomes idle the 9598 * PM framework is notified. 9599 * 9600 * Context: Kernel thread context 9601 */ 9602 9603 static void 9604 sd_pm_exit(struct sd_lun *un) 9605 { 9606 ASSERT(!mutex_owned(SD_MUTEX(un))); 9607 ASSERT(!mutex_owned(&un->un_pm_mutex)); 9608 9609 SD_TRACE(SD_LOG_IO_PM, un, "sd_pm_exit: entry\n"); 9610 9611 /* 9612 * After attach the following flag is only read, so don't 9613 * take the penalty of acquiring a mutex for it. 9614 */ 9615 if (un->un_f_pm_is_enabled == TRUE) { 9616 9617 mutex_enter(&un->un_pm_mutex); 9618 un->un_pm_count--; 9619 9620 SD_TRACE(SD_LOG_IO_PM, un, 9621 "sd_pm_exit: un_pm_count = %d\n", un->un_pm_count); 9622 9623 ASSERT(un->un_pm_count >= 0); 9624 if (un->un_pm_count == 0) { 9625 mutex_exit(&un->un_pm_mutex); 9626 9627 SD_TRACE(SD_LOG_IO_PM, un, 9628 "sd_pm_exit: idle component\n"); 9629 9630 (void) pm_idle_component(SD_DEVINFO(un), 0); 9631 9632 } else { 9633 mutex_exit(&un->un_pm_mutex); 9634 } 9635 } 9636 9637 SD_TRACE(SD_LOG_IO_PM, un, "sd_pm_exit: exiting\n"); 9638 } 9639 9640 9641 /* 9642 * Function: sdopen 9643 * 9644 * Description: Driver's open(9e) entry point function. 
9645 * 9646 * Arguments: dev_i - pointer to device number 9647 * flag - how to open file (FEXCL, FNDELAY, FREAD, FWRITE) 9648 * otyp - open type (OTYP_BLK, OTYP_CHR, OTYP_LYR) 9649 * cred_p - user credential pointer 9650 * 9651 * Return Code: EINVAL 9652 * ENXIO 9653 * EIO 9654 * EROFS 9655 * EBUSY 9656 * 9657 * Context: Kernel thread context 9658 */ 9659 /* ARGSUSED */ 9660 static int 9661 sdopen(dev_t *dev_p, int flag, int otyp, cred_t *cred_p) 9662 { 9663 struct sd_lun *un; 9664 int nodelay; 9665 int part; 9666 uint64_t partmask; 9667 int instance; 9668 dev_t dev; 9669 int rval = EIO; 9670 diskaddr_t nblks = 0; 9671 diskaddr_t label_cap; 9672 9673 /* Validate the open type */ 9674 if (otyp >= OTYPCNT) { 9675 return (EINVAL); 9676 } 9677 9678 dev = *dev_p; 9679 instance = SDUNIT(dev); 9680 mutex_enter(&sd_detach_mutex); 9681 9682 /* 9683 * Fail the open if there is no softstate for the instance, or 9684 * if another thread somewhere is trying to detach the instance. 9685 */ 9686 if (((un = ddi_get_soft_state(sd_state, instance)) == NULL) || 9687 (un->un_detach_count != 0)) { 9688 mutex_exit(&sd_detach_mutex); 9689 /* 9690 * The probe cache only needs to be cleared when open (9e) fails 9691 * with ENXIO (4238046). 9692 */ 9693 /* 9694 * un-conditionally clearing probe cache is ok with 9695 * separate sd/ssd binaries 9696 * x86 platform can be an issue with both parallel 9697 * and fibre in 1 binary 9698 */ 9699 sd_scsi_clear_probe_cache(); 9700 return (ENXIO); 9701 } 9702 9703 /* 9704 * The un_layer_count is to prevent another thread in specfs from 9705 * trying to detach the instance, which can happen when we are 9706 * called from a higher-layer driver instead of thru specfs. 9707 * This will not be needed when DDI provides a layered driver 9708 * interface that allows specfs to know that an instance is in 9709 * use by a layered driver & should not be detached. 9710 * 9711 * Note: the semantics for layered driver opens are exactly one 9712 * close for every open. 9713 */ 9714 if (otyp == OTYP_LYR) { 9715 un->un_layer_count++; 9716 } 9717 9718 /* 9719 * Keep a count of the current # of opens in progress. This is because 9720 * some layered drivers try to call us as a regular open. This can 9721 * cause problems that we cannot prevent, however by keeping this count 9722 * we can at least keep our open and detach routines from racing against 9723 * each other under such conditions. 9724 */ 9725 un->un_opens_in_progress++; 9726 mutex_exit(&sd_detach_mutex); 9727 9728 nodelay = (flag & (FNDELAY | FNONBLOCK)); 9729 part = SDPART(dev); 9730 partmask = 1 << part; 9731 9732 /* 9733 * We use a semaphore here in order to serialize 9734 * open and close requests on the device. 9735 */ 9736 sema_p(&un->un_semoclose); 9737 9738 mutex_enter(SD_MUTEX(un)); 9739 9740 /* 9741 * All device accesses go thru sdstrategy() where we check 9742 * on suspend status but there could be a scsi_poll command, 9743 * which bypasses sdstrategy(), so we need to check pm 9744 * status. 
9745 */ 9746 9747 if (!nodelay) { 9748 while ((un->un_state == SD_STATE_SUSPENDED) || 9749 (un->un_state == SD_STATE_PM_CHANGING)) { 9750 cv_wait(&un->un_suspend_cv, SD_MUTEX(un)); 9751 } 9752 9753 mutex_exit(SD_MUTEX(un)); 9754 if (sd_pm_entry(un) != DDI_SUCCESS) { 9755 rval = EIO; 9756 SD_ERROR(SD_LOG_OPEN_CLOSE, un, 9757 "sdopen: sd_pm_entry failed\n"); 9758 goto open_failed_with_pm; 9759 } 9760 mutex_enter(SD_MUTEX(un)); 9761 } 9762 9763 /* check for previous exclusive open */ 9764 SD_TRACE(SD_LOG_OPEN_CLOSE, un, "sdopen: un=%p\n", (void *)un); 9765 SD_TRACE(SD_LOG_OPEN_CLOSE, un, 9766 "sdopen: exclopen=%x, flag=%x, regopen=%x\n", 9767 un->un_exclopen, flag, un->un_ocmap.regopen[otyp]); 9768 9769 if (un->un_exclopen & (partmask)) { 9770 goto excl_open_fail; 9771 } 9772 9773 if (flag & FEXCL) { 9774 int i; 9775 if (un->un_ocmap.lyropen[part]) { 9776 goto excl_open_fail; 9777 } 9778 for (i = 0; i < (OTYPCNT - 1); i++) { 9779 if (un->un_ocmap.regopen[i] & (partmask)) { 9780 goto excl_open_fail; 9781 } 9782 } 9783 } 9784 9785 /* 9786 * Check the write permission if this is a removable media device, 9787 * NDELAY has not been set, and writable permission is requested. 9788 * 9789 * Note: If NDELAY was set and this is write-protected media the WRITE 9790 * attempt will fail with EIO as part of the I/O processing. This is a 9791 * more permissive implementation that allows the open to succeed and 9792 * WRITE attempts to fail when appropriate. 9793 */ 9794 if (un->un_f_chk_wp_open) { 9795 if ((flag & FWRITE) && (!nodelay)) { 9796 mutex_exit(SD_MUTEX(un)); 9797 /* 9798 * Defer the check for write permission on writable 9799 * DVD drive till sdstrategy and will not fail open even 9800 * if FWRITE is set as the device can be writable 9801 * depending upon the media and the media can change 9802 * after the call to open(). 9803 */ 9804 if (un->un_f_dvdram_writable_device == FALSE) { 9805 if (ISCD(un) || sr_check_wp(dev)) { 9806 rval = EROFS; 9807 mutex_enter(SD_MUTEX(un)); 9808 SD_ERROR(SD_LOG_OPEN_CLOSE, un, "sdopen: " 9809 "write to cd or write protected media\n"); 9810 goto open_fail; 9811 } 9812 } 9813 mutex_enter(SD_MUTEX(un)); 9814 } 9815 } 9816 9817 /* 9818 * If opening in NDELAY/NONBLOCK mode, just return. 9819 * Check if disk is ready and has a valid geometry later. 9820 */ 9821 if (!nodelay) { 9822 sd_ssc_t *ssc; 9823 9824 mutex_exit(SD_MUTEX(un)); 9825 ssc = sd_ssc_init(un); 9826 rval = sd_ready_and_valid(ssc, part); 9827 sd_ssc_fini(ssc); 9828 mutex_enter(SD_MUTEX(un)); 9829 /* 9830 * Fail if device is not ready or if the number of disk 9831 * blocks is zero or negative for non CD devices. 9832 */ 9833 9834 nblks = 0; 9835 9836 if (rval == SD_READY_VALID && (!ISCD(un))) { 9837 /* if cmlb_partinfo fails, nblks remains 0 */ 9838 mutex_exit(SD_MUTEX(un)); 9839 (void) cmlb_partinfo(un->un_cmlbhandle, part, &nblks, 9840 NULL, NULL, NULL, (void *)SD_PATH_DIRECT); 9841 mutex_enter(SD_MUTEX(un)); 9842 } 9843 9844 if ((rval != SD_READY_VALID) || 9845 (!ISCD(un) && nblks <= 0)) { 9846 rval = un->un_f_has_removable_media ? ENXIO : EIO; 9847 SD_ERROR(SD_LOG_OPEN_CLOSE, un, "sdopen: " 9848 "device not ready or invalid disk block value\n"); 9849 goto open_fail; 9850 } 9851 #if defined(__i386) || defined(__amd64) 9852 } else { 9853 uchar_t *cp; 9854 /* 9855 * x86 requires special nodelay handling, so that p0 is 9856 * always defined and accessible. 9857 * Invalidate geometry only if device is not already open. 
9858 */ 9859 cp = &un->un_ocmap.chkd[0]; 9860 while (cp < &un->un_ocmap.chkd[OCSIZE]) { 9861 if (*cp != (uchar_t)0) { 9862 break; 9863 } 9864 cp++; 9865 } 9866 if (cp == &un->un_ocmap.chkd[OCSIZE]) { 9867 mutex_exit(SD_MUTEX(un)); 9868 cmlb_invalidate(un->un_cmlbhandle, 9869 (void *)SD_PATH_DIRECT); 9870 mutex_enter(SD_MUTEX(un)); 9871 } 9872 9873 #endif 9874 } 9875 9876 if (otyp == OTYP_LYR) { 9877 un->un_ocmap.lyropen[part]++; 9878 } else { 9879 un->un_ocmap.regopen[otyp] |= partmask; 9880 } 9881 9882 /* Set up open and exclusive open flags */ 9883 if (flag & FEXCL) { 9884 un->un_exclopen |= (partmask); 9885 } 9886 9887 /* 9888 * If the lun is EFI labeled and lun capacity is greater than the 9889 * capacity contained in the label, log a sys-event to notify the 9890 * interested module. 9891 * To avoid an infinite loop of logging sys-event, we only log the 9892 * event when the lun is not opened in NDELAY mode. The event handler 9893 * should open the lun in NDELAY mode. 9894 */ 9895 if (!(flag & FNDELAY)) { 9896 mutex_exit(SD_MUTEX(un)); 9897 if (cmlb_efi_label_capacity(un->un_cmlbhandle, &label_cap, 9898 (void*)SD_PATH_DIRECT) == 0) { 9899 mutex_enter(SD_MUTEX(un)); 9900 if (un->un_f_blockcount_is_valid && 9901 un->un_blockcount > label_cap) { 9902 mutex_exit(SD_MUTEX(un)); 9903 sd_log_lun_expansion_event(un, 9904 (nodelay ? KM_NOSLEEP : KM_SLEEP)); 9905 mutex_enter(SD_MUTEX(un)); 9906 } 9907 } else { 9908 mutex_enter(SD_MUTEX(un)); 9909 } 9910 } 9911 9912 SD_TRACE(SD_LOG_OPEN_CLOSE, un, "sdopen: " 9913 "open of part %d type %d\n", part, otyp); 9914 9915 mutex_exit(SD_MUTEX(un)); 9916 if (!nodelay) { 9917 sd_pm_exit(un); 9918 } 9919 9920 sema_v(&un->un_semoclose); 9921 9922 mutex_enter(&sd_detach_mutex); 9923 un->un_opens_in_progress--; 9924 mutex_exit(&sd_detach_mutex); 9925 9926 SD_TRACE(SD_LOG_OPEN_CLOSE, un, "sdopen: exit success\n"); 9927 return (DDI_SUCCESS); 9928 9929 excl_open_fail: 9930 SD_ERROR(SD_LOG_OPEN_CLOSE, un, "sdopen: fail exclusive open\n"); 9931 rval = EBUSY; 9932 9933 open_fail: 9934 mutex_exit(SD_MUTEX(un)); 9935 9936 /* 9937 * On a failed open we must exit the pm management. 9938 */ 9939 if (!nodelay) { 9940 sd_pm_exit(un); 9941 } 9942 open_failed_with_pm: 9943 sema_v(&un->un_semoclose); 9944 9945 mutex_enter(&sd_detach_mutex); 9946 un->un_opens_in_progress--; 9947 if (otyp == OTYP_LYR) { 9948 un->un_layer_count--; 9949 } 9950 mutex_exit(&sd_detach_mutex); 9951 9952 return (rval); 9953 } 9954 9955 9956 /* 9957 * Function: sdclose 9958 * 9959 * Description: Driver's close(9e) entry point function. 9960 * 9961 * Arguments: dev - device number 9962 * flag - file status flag, informational only 9963 * otyp - close type (OTYP_BLK, OTYP_CHR, OTYP_LYR) 9964 * cred_p - user credential pointer 9965 * 9966 * Return Code: ENXIO 9967 * 9968 * Context: Kernel thread context 9969 */ 9970 /* ARGSUSED */ 9971 static int 9972 sdclose(dev_t dev, int flag, int otyp, cred_t *cred_p) 9973 { 9974 struct sd_lun *un; 9975 uchar_t *cp; 9976 int part; 9977 int nodelay; 9978 int rval = 0; 9979 9980 /* Validate the open type */ 9981 if (otyp >= OTYPCNT) { 9982 return (ENXIO); 9983 } 9984 9985 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 9986 return (ENXIO); 9987 } 9988 9989 part = SDPART(dev); 9990 nodelay = flag & (FNDELAY | FNONBLOCK); 9991 9992 SD_TRACE(SD_LOG_OPEN_CLOSE, un, 9993 "sdclose: close of part %d type %d\n", part, otyp); 9994 9995 /* 9996 * We use a semaphore here in order to serialize 9997 * open and close requests on the device. 
9998 */ 9999 sema_p(&un->un_semoclose); 10000 10001 mutex_enter(SD_MUTEX(un)); 10002 10003 /* Don't proceed if power is being changed. */ 10004 while (un->un_state == SD_STATE_PM_CHANGING) { 10005 cv_wait(&un->un_suspend_cv, SD_MUTEX(un)); 10006 } 10007 10008 if (un->un_exclopen & (1 << part)) { 10009 un->un_exclopen &= ~(1 << part); 10010 } 10011 10012 /* Update the open partition map */ 10013 if (otyp == OTYP_LYR) { 10014 un->un_ocmap.lyropen[part] -= 1; 10015 } else { 10016 un->un_ocmap.regopen[otyp] &= ~(1 << part); 10017 } 10018 10019 cp = &un->un_ocmap.chkd[0]; 10020 while (cp < &un->un_ocmap.chkd[OCSIZE]) { 10021 if (*cp != NULL) { 10022 break; 10023 } 10024 cp++; 10025 } 10026 10027 if (cp == &un->un_ocmap.chkd[OCSIZE]) { 10028 SD_TRACE(SD_LOG_OPEN_CLOSE, un, "sdclose: last close\n"); 10029 10030 /* 10031 * We avoid persistance upon the last close, and set 10032 * the throttle back to the maximum. 10033 */ 10034 un->un_throttle = un->un_saved_throttle; 10035 10036 if (un->un_state == SD_STATE_OFFLINE) { 10037 if (un->un_f_is_fibre == FALSE) { 10038 scsi_log(SD_DEVINFO(un), sd_label, 10039 CE_WARN, "offline\n"); 10040 } 10041 mutex_exit(SD_MUTEX(un)); 10042 cmlb_invalidate(un->un_cmlbhandle, 10043 (void *)SD_PATH_DIRECT); 10044 mutex_enter(SD_MUTEX(un)); 10045 10046 } else { 10047 /* 10048 * Flush any outstanding writes in NVRAM cache. 10049 * Note: SYNCHRONIZE CACHE is an optional SCSI-2 10050 * cmd, it may not work for non-Pluto devices. 10051 * SYNCHRONIZE CACHE is not required for removables, 10052 * except DVD-RAM drives. 10053 * 10054 * Also note: because SYNCHRONIZE CACHE is currently 10055 * the only command issued here that requires the 10056 * drive be powered up, only do the power up before 10057 * sending the Sync Cache command. If additional 10058 * commands are added which require a powered up 10059 * drive, the following sequence may have to change. 10060 * 10061 * And finally, note that parallel SCSI on SPARC 10062 * only issues a Sync Cache to DVD-RAM, a newly 10063 * supported device. 10064 */ 10065 #if defined(__i386) || defined(__amd64) 10066 if ((un->un_f_sync_cache_supported && 10067 un->un_f_sync_cache_required) || 10068 un->un_f_dvdram_writable_device == TRUE) { 10069 #else 10070 if (un->un_f_dvdram_writable_device == TRUE) { 10071 #endif 10072 mutex_exit(SD_MUTEX(un)); 10073 if (sd_pm_entry(un) == DDI_SUCCESS) { 10074 rval = 10075 sd_send_scsi_SYNCHRONIZE_CACHE(un, 10076 NULL); 10077 /* ignore error if not supported */ 10078 if (rval == ENOTSUP) { 10079 rval = 0; 10080 } else if (rval != 0) { 10081 rval = EIO; 10082 } 10083 sd_pm_exit(un); 10084 } else { 10085 rval = EIO; 10086 } 10087 mutex_enter(SD_MUTEX(un)); 10088 } 10089 10090 /* 10091 * For devices which supports DOOR_LOCK, send an ALLOW 10092 * MEDIA REMOVAL command, but don't get upset if it 10093 * fails. 
We need to raise the power of the drive before 10094 * we can call sd_send_scsi_DOORLOCK() 10095 */ 10096 if (un->un_f_doorlock_supported) { 10097 mutex_exit(SD_MUTEX(un)); 10098 if (sd_pm_entry(un) == DDI_SUCCESS) { 10099 sd_ssc_t *ssc; 10100 10101 ssc = sd_ssc_init(un); 10102 rval = sd_send_scsi_DOORLOCK(ssc, 10103 SD_REMOVAL_ALLOW, SD_PATH_DIRECT); 10104 if (rval != 0) 10105 sd_ssc_assessment(ssc, 10106 SD_FMT_IGNORE); 10107 sd_ssc_fini(ssc); 10108 10109 sd_pm_exit(un); 10110 if (ISCD(un) && (rval != 0) && 10111 (nodelay != 0)) { 10112 rval = ENXIO; 10113 } 10114 } else { 10115 rval = EIO; 10116 } 10117 mutex_enter(SD_MUTEX(un)); 10118 } 10119 10120 /* 10121 * If a device has removable media, invalidate all 10122 * parameters related to media, such as geometry, 10123 * blocksize, and blockcount. 10124 */ 10125 if (un->un_f_has_removable_media) { 10126 sr_ejected(un); 10127 } 10128 10129 /* 10130 * Destroy the cache (if it exists) which was 10131 * allocated for the write maps since this is 10132 * the last close for this media. 10133 */ 10134 if (un->un_wm_cache) { 10135 /* 10136 * Check if there are pending commands. 10137 * and if there are give a warning and 10138 * do not destroy the cache. 10139 */ 10140 if (un->un_ncmds_in_driver > 0) { 10141 scsi_log(SD_DEVINFO(un), 10142 sd_label, CE_WARN, 10143 "Unable to clean up memory " 10144 "because of pending I/O\n"); 10145 } else { 10146 kmem_cache_destroy( 10147 un->un_wm_cache); 10148 un->un_wm_cache = NULL; 10149 } 10150 } 10151 } 10152 } 10153 10154 mutex_exit(SD_MUTEX(un)); 10155 sema_v(&un->un_semoclose); 10156 10157 if (otyp == OTYP_LYR) { 10158 mutex_enter(&sd_detach_mutex); 10159 /* 10160 * The detach routine may run when the layer count 10161 * drops to zero. 10162 */ 10163 un->un_layer_count--; 10164 mutex_exit(&sd_detach_mutex); 10165 } 10166 10167 return (rval); 10168 } 10169 10170 10171 /* 10172 * Function: sd_ready_and_valid 10173 * 10174 * Description: Test if device is ready and has a valid geometry. 10175 * 10176 * Arguments: ssc - sd_ssc_t will contain un 10177 * un - driver soft state (unit) structure 10178 * 10179 * Return Code: SD_READY_VALID ready and valid label 10180 * SD_NOT_READY_VALID not ready, no label 10181 * SD_RESERVED_BY_OTHERS reservation conflict 10182 * 10183 * Context: Never called at interrupt context. 10184 */ 10185 10186 static int 10187 sd_ready_and_valid(sd_ssc_t *ssc, int part) 10188 { 10189 struct sd_errstats *stp; 10190 uint64_t capacity; 10191 uint_t lbasize; 10192 int rval = SD_READY_VALID; 10193 char name_str[48]; 10194 boolean_t is_valid; 10195 struct sd_lun *un; 10196 int status; 10197 10198 ASSERT(ssc != NULL); 10199 un = ssc->ssc_un; 10200 ASSERT(un != NULL); 10201 ASSERT(!mutex_owned(SD_MUTEX(un))); 10202 10203 mutex_enter(SD_MUTEX(un)); 10204 /* 10205 * If a device has removable media, we must check if media is 10206 * ready when checking if this device is ready and valid. 
10207 */ 10208 if (un->un_f_has_removable_media) { 10209 mutex_exit(SD_MUTEX(un)); 10210 status = sd_send_scsi_TEST_UNIT_READY(ssc, 0); 10211 10212 if (status != 0) { 10213 rval = SD_NOT_READY_VALID; 10214 mutex_enter(SD_MUTEX(un)); 10215 10216 /* Ignore all failed status for removalbe media */ 10217 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 10218 10219 goto done; 10220 } 10221 10222 is_valid = SD_IS_VALID_LABEL(un); 10223 mutex_enter(SD_MUTEX(un)); 10224 if (!is_valid || 10225 (un->un_f_blockcount_is_valid == FALSE) || 10226 (un->un_f_tgt_blocksize_is_valid == FALSE)) { 10227 10228 /* capacity has to be read every open. */ 10229 mutex_exit(SD_MUTEX(un)); 10230 status = sd_send_scsi_READ_CAPACITY(ssc, &capacity, 10231 &lbasize, SD_PATH_DIRECT); 10232 10233 if (status != 0) { 10234 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 10235 10236 cmlb_invalidate(un->un_cmlbhandle, 10237 (void *)SD_PATH_DIRECT); 10238 mutex_enter(SD_MUTEX(un)); 10239 rval = SD_NOT_READY_VALID; 10240 10241 goto done; 10242 } else { 10243 mutex_enter(SD_MUTEX(un)); 10244 sd_update_block_info(un, lbasize, capacity); 10245 } 10246 } 10247 10248 /* 10249 * Check if the media in the device is writable or not. 10250 */ 10251 if (!is_valid && ISCD(un)) { 10252 sd_check_for_writable_cd(ssc, SD_PATH_DIRECT); 10253 } 10254 10255 } else { 10256 /* 10257 * Do a test unit ready to clear any unit attention from non-cd 10258 * devices. 10259 */ 10260 mutex_exit(SD_MUTEX(un)); 10261 10262 status = sd_send_scsi_TEST_UNIT_READY(ssc, 0); 10263 if (status != 0) { 10264 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 10265 } 10266 10267 mutex_enter(SD_MUTEX(un)); 10268 } 10269 10270 10271 /* 10272 * If this is a non 512 block device, allocate space for 10273 * the wmap cache. This is being done here since every time 10274 * a media is changed this routine will be called and the 10275 * block size is a function of media rather than device. 10276 */ 10277 if (un->un_f_non_devbsize_supported && NOT_DEVBSIZE(un)) { 10278 if (!(un->un_wm_cache)) { 10279 (void) snprintf(name_str, sizeof (name_str), 10280 "%s%d_cache", 10281 ddi_driver_name(SD_DEVINFO(un)), 10282 ddi_get_instance(SD_DEVINFO(un))); 10283 un->un_wm_cache = kmem_cache_create( 10284 name_str, sizeof (struct sd_w_map), 10285 8, sd_wm_cache_constructor, 10286 sd_wm_cache_destructor, NULL, 10287 (void *)un, NULL, 0); 10288 if (!(un->un_wm_cache)) { 10289 rval = ENOMEM; 10290 goto done; 10291 } 10292 } 10293 } 10294 10295 if (un->un_state == SD_STATE_NORMAL) { 10296 /* 10297 * If the target is not yet ready here (defined by a TUR 10298 * failure), invalidate the geometry and print an 'offline' 10299 * message. This is a legacy message, as the state of the 10300 * target is not actually changed to SD_STATE_OFFLINE. 10301 * 10302 * If the TUR fails for EACCES (Reservation Conflict), 10303 * SD_RESERVED_BY_OTHERS will be returned to indicate 10304 * reservation conflict. If the TUR fails for other 10305 * reasons, SD_NOT_READY_VALID will be returned. 
10306 */ 10307 int err; 10308 10309 mutex_exit(SD_MUTEX(un)); 10310 err = sd_send_scsi_TEST_UNIT_READY(ssc, 0); 10311 mutex_enter(SD_MUTEX(un)); 10312 10313 if (err != 0) { 10314 mutex_exit(SD_MUTEX(un)); 10315 cmlb_invalidate(un->un_cmlbhandle, 10316 (void *)SD_PATH_DIRECT); 10317 mutex_enter(SD_MUTEX(un)); 10318 if (err == EACCES) { 10319 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 10320 "reservation conflict\n"); 10321 rval = SD_RESERVED_BY_OTHERS; 10322 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 10323 } else { 10324 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 10325 "drive offline\n"); 10326 rval = SD_NOT_READY_VALID; 10327 sd_ssc_assessment(ssc, SD_FMT_STATUS_CHECK); 10328 } 10329 goto done; 10330 } 10331 } 10332 10333 if (un->un_f_format_in_progress == FALSE) { 10334 mutex_exit(SD_MUTEX(un)); 10335 10336 (void) cmlb_validate(un->un_cmlbhandle, 0, 10337 (void *)SD_PATH_DIRECT); 10338 if (cmlb_partinfo(un->un_cmlbhandle, part, NULL, NULL, NULL, 10339 NULL, (void *) SD_PATH_DIRECT) != 0) { 10340 rval = SD_NOT_READY_VALID; 10341 mutex_enter(SD_MUTEX(un)); 10342 10343 goto done; 10344 } 10345 if (un->un_f_pkstats_enabled) { 10346 sd_set_pstats(un); 10347 SD_TRACE(SD_LOG_IO_PARTITION, un, 10348 "sd_ready_and_valid: un:0x%p pstats created and " 10349 "set\n", un); 10350 } 10351 mutex_enter(SD_MUTEX(un)); 10352 } 10353 10354 /* 10355 * If this device supports DOOR_LOCK command, try and send 10356 * this command to PREVENT MEDIA REMOVAL, but don't get upset 10357 * if it fails. For a CD, however, it is an error 10358 */ 10359 if (un->un_f_doorlock_supported) { 10360 mutex_exit(SD_MUTEX(un)); 10361 status = sd_send_scsi_DOORLOCK(ssc, SD_REMOVAL_PREVENT, 10362 SD_PATH_DIRECT); 10363 10364 if ((status != 0) && ISCD(un)) { 10365 rval = SD_NOT_READY_VALID; 10366 mutex_enter(SD_MUTEX(un)); 10367 10368 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 10369 10370 goto done; 10371 } else if (status != 0) 10372 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 10373 mutex_enter(SD_MUTEX(un)); 10374 } 10375 10376 /* The state has changed, inform the media watch routines */ 10377 un->un_mediastate = DKIO_INSERTED; 10378 cv_broadcast(&un->un_state_cv); 10379 rval = SD_READY_VALID; 10380 10381 done: 10382 10383 /* 10384 * Initialize the capacity kstat value, if no media previously 10385 * (capacity kstat is 0) and a media has been inserted 10386 * (un_blockcount > 0). 10387 */ 10388 if (un->un_errstats != NULL) { 10389 stp = (struct sd_errstats *)un->un_errstats->ks_data; 10390 if ((stp->sd_capacity.value.ui64 == 0) && 10391 (un->un_f_blockcount_is_valid == TRUE)) { 10392 stp->sd_capacity.value.ui64 = 10393 (uint64_t)((uint64_t)un->un_blockcount * 10394 un->un_sys_blocksize); 10395 } 10396 } 10397 10398 mutex_exit(SD_MUTEX(un)); 10399 return (rval); 10400 } 10401 10402 10403 /* 10404 * Function: sdmin 10405 * 10406 * Description: Routine to limit the size of a data transfer. Used in 10407 * conjunction with physio(9F). 10408 * 10409 * Arguments: bp - pointer to the indicated buf(9S) struct. 10410 * 10411 * Context: Kernel thread context. 10412 */ 10413 10414 static void 10415 sdmin(struct buf *bp) 10416 { 10417 struct sd_lun *un; 10418 int instance; 10419 10420 instance = SDUNIT(bp->b_edev); 10421 10422 un = ddi_get_soft_state(sd_state, instance); 10423 ASSERT(un != NULL); 10424 10425 /* 10426 * We depend on DMA partial or buf breakup to restrict 10427 * IO size if any of them enabled. 
10428 */ 10429 if (un->un_partial_dma_supported || 10430 un->un_buf_breakup_supported) { 10431 return; 10432 } 10433 10434 if (bp->b_bcount > un->un_max_xfer_size) { 10435 bp->b_bcount = un->un_max_xfer_size; 10436 } 10437 } 10438 10439 10440 /* 10441 * Function: sdread 10442 * 10443 * Description: Driver's read(9e) entry point function. 10444 * 10445 * Arguments: dev - device number 10446 * uio - structure pointer describing where data is to be stored 10447 * in user's space 10448 * cred_p - user credential pointer 10449 * 10450 * Return Code: ENXIO 10451 * EIO 10452 * EINVAL 10453 * value returned by physio 10454 * 10455 * Context: Kernel thread context. 10456 */ 10457 /* ARGSUSED */ 10458 static int 10459 sdread(dev_t dev, struct uio *uio, cred_t *cred_p) 10460 { 10461 struct sd_lun *un = NULL; 10462 int secmask; 10463 int err = 0; 10464 sd_ssc_t *ssc; 10465 10466 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 10467 return (ENXIO); 10468 } 10469 10470 ASSERT(!mutex_owned(SD_MUTEX(un))); 10471 10472 10473 if (!SD_IS_VALID_LABEL(un) && !ISCD(un)) { 10474 mutex_enter(SD_MUTEX(un)); 10475 /* 10476 * Because the call to sd_ready_and_valid will issue I/O we 10477 * must wait here if either the device is suspended or 10478 * if it's power level is changing. 10479 */ 10480 while ((un->un_state == SD_STATE_SUSPENDED) || 10481 (un->un_state == SD_STATE_PM_CHANGING)) { 10482 cv_wait(&un->un_suspend_cv, SD_MUTEX(un)); 10483 } 10484 un->un_ncmds_in_driver++; 10485 mutex_exit(SD_MUTEX(un)); 10486 10487 /* Initialize sd_ssc_t for internal uscsi commands */ 10488 ssc = sd_ssc_init(un); 10489 if ((sd_ready_and_valid(ssc, SDPART(dev))) != SD_READY_VALID) { 10490 err = EIO; 10491 } else { 10492 err = 0; 10493 } 10494 sd_ssc_fini(ssc); 10495 10496 mutex_enter(SD_MUTEX(un)); 10497 un->un_ncmds_in_driver--; 10498 ASSERT(un->un_ncmds_in_driver >= 0); 10499 mutex_exit(SD_MUTEX(un)); 10500 if (err != 0) 10501 return (err); 10502 } 10503 10504 /* 10505 * Read requests are restricted to multiples of the system block size. 10506 */ 10507 secmask = un->un_sys_blocksize - 1; 10508 10509 if (uio->uio_loffset & ((offset_t)(secmask))) { 10510 SD_ERROR(SD_LOG_READ_WRITE, un, 10511 "sdread: file offset not modulo %d\n", 10512 un->un_sys_blocksize); 10513 err = EINVAL; 10514 } else if (uio->uio_iov->iov_len & (secmask)) { 10515 SD_ERROR(SD_LOG_READ_WRITE, un, 10516 "sdread: transfer length not modulo %d\n", 10517 un->un_sys_blocksize); 10518 err = EINVAL; 10519 } else { 10520 err = physio(sdstrategy, NULL, dev, B_READ, sdmin, uio); 10521 } 10522 10523 return (err); 10524 } 10525 10526 10527 /* 10528 * Function: sdwrite 10529 * 10530 * Description: Driver's write(9e) entry point function. 10531 * 10532 * Arguments: dev - device number 10533 * uio - structure pointer describing where data is stored in 10534 * user's space 10535 * cred_p - user credential pointer 10536 * 10537 * Return Code: ENXIO 10538 * EIO 10539 * EINVAL 10540 * value returned by physio 10541 * 10542 * Context: Kernel thread context. 
10543 */ 10544 /* ARGSUSED */ 10545 static int 10546 sdwrite(dev_t dev, struct uio *uio, cred_t *cred_p) 10547 { 10548 struct sd_lun *un = NULL; 10549 int secmask; 10550 int err = 0; 10551 sd_ssc_t *ssc; 10552 10553 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 10554 return (ENXIO); 10555 } 10556 10557 ASSERT(!mutex_owned(SD_MUTEX(un))); 10558 10559 if (!SD_IS_VALID_LABEL(un) && !ISCD(un)) { 10560 mutex_enter(SD_MUTEX(un)); 10561 /* 10562 * Because the call to sd_ready_and_valid will issue I/O we 10563 * must wait here if either the device is suspended or 10564 * if it's power level is changing. 10565 */ 10566 while ((un->un_state == SD_STATE_SUSPENDED) || 10567 (un->un_state == SD_STATE_PM_CHANGING)) { 10568 cv_wait(&un->un_suspend_cv, SD_MUTEX(un)); 10569 } 10570 un->un_ncmds_in_driver++; 10571 mutex_exit(SD_MUTEX(un)); 10572 10573 /* Initialize sd_ssc_t for internal uscsi commands */ 10574 ssc = sd_ssc_init(un); 10575 if ((sd_ready_and_valid(ssc, SDPART(dev))) != SD_READY_VALID) { 10576 err = EIO; 10577 } else { 10578 err = 0; 10579 } 10580 sd_ssc_fini(ssc); 10581 10582 mutex_enter(SD_MUTEX(un)); 10583 un->un_ncmds_in_driver--; 10584 ASSERT(un->un_ncmds_in_driver >= 0); 10585 mutex_exit(SD_MUTEX(un)); 10586 if (err != 0) 10587 return (err); 10588 } 10589 10590 /* 10591 * Write requests are restricted to multiples of the system block size. 10592 */ 10593 secmask = un->un_sys_blocksize - 1; 10594 10595 if (uio->uio_loffset & ((offset_t)(secmask))) { 10596 SD_ERROR(SD_LOG_READ_WRITE, un, 10597 "sdwrite: file offset not modulo %d\n", 10598 un->un_sys_blocksize); 10599 err = EINVAL; 10600 } else if (uio->uio_iov->iov_len & (secmask)) { 10601 SD_ERROR(SD_LOG_READ_WRITE, un, 10602 "sdwrite: transfer length not modulo %d\n", 10603 un->un_sys_blocksize); 10604 err = EINVAL; 10605 } else { 10606 err = physio(sdstrategy, NULL, dev, B_WRITE, sdmin, uio); 10607 } 10608 10609 return (err); 10610 } 10611 10612 10613 /* 10614 * Function: sdaread 10615 * 10616 * Description: Driver's aread(9e) entry point function. 10617 * 10618 * Arguments: dev - device number 10619 * aio - structure pointer describing where data is to be stored 10620 * cred_p - user credential pointer 10621 * 10622 * Return Code: ENXIO 10623 * EIO 10624 * EINVAL 10625 * value returned by aphysio 10626 * 10627 * Context: Kernel thread context. 10628 */ 10629 /* ARGSUSED */ 10630 static int 10631 sdaread(dev_t dev, struct aio_req *aio, cred_t *cred_p) 10632 { 10633 struct sd_lun *un = NULL; 10634 struct uio *uio = aio->aio_uio; 10635 int secmask; 10636 int err = 0; 10637 sd_ssc_t *ssc; 10638 10639 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 10640 return (ENXIO); 10641 } 10642 10643 ASSERT(!mutex_owned(SD_MUTEX(un))); 10644 10645 if (!SD_IS_VALID_LABEL(un) && !ISCD(un)) { 10646 mutex_enter(SD_MUTEX(un)); 10647 /* 10648 * Because the call to sd_ready_and_valid will issue I/O we 10649 * must wait here if either the device is suspended or 10650 * if it's power level is changing. 
10651 */ 10652 while ((un->un_state == SD_STATE_SUSPENDED) || 10653 (un->un_state == SD_STATE_PM_CHANGING)) { 10654 cv_wait(&un->un_suspend_cv, SD_MUTEX(un)); 10655 } 10656 un->un_ncmds_in_driver++; 10657 mutex_exit(SD_MUTEX(un)); 10658 10659 /* Initialize sd_ssc_t for internal uscsi commands */ 10660 ssc = sd_ssc_init(un); 10661 if ((sd_ready_and_valid(ssc, SDPART(dev))) != SD_READY_VALID) { 10662 err = EIO; 10663 } else { 10664 err = 0; 10665 } 10666 sd_ssc_fini(ssc); 10667 10668 mutex_enter(SD_MUTEX(un)); 10669 un->un_ncmds_in_driver--; 10670 ASSERT(un->un_ncmds_in_driver >= 0); 10671 mutex_exit(SD_MUTEX(un)); 10672 if (err != 0) 10673 return (err); 10674 } 10675 10676 /* 10677 * Read requests are restricted to multiples of the system block size. 10678 */ 10679 secmask = un->un_sys_blocksize - 1; 10680 10681 if (uio->uio_loffset & ((offset_t)(secmask))) { 10682 SD_ERROR(SD_LOG_READ_WRITE, un, 10683 "sdaread: file offset not modulo %d\n", 10684 un->un_sys_blocksize); 10685 err = EINVAL; 10686 } else if (uio->uio_iov->iov_len & (secmask)) { 10687 SD_ERROR(SD_LOG_READ_WRITE, un, 10688 "sdaread: transfer length not modulo %d\n", 10689 un->un_sys_blocksize); 10690 err = EINVAL; 10691 } else { 10692 err = aphysio(sdstrategy, anocancel, dev, B_READ, sdmin, aio); 10693 } 10694 10695 return (err); 10696 } 10697 10698 10699 /* 10700 * Function: sdawrite 10701 * 10702 * Description: Driver's awrite(9e) entry point function. 10703 * 10704 * Arguments: dev - device number 10705 * aio - structure pointer describing where data is stored 10706 * cred_p - user credential pointer 10707 * 10708 * Return Code: ENXIO 10709 * EIO 10710 * EINVAL 10711 * value returned by aphysio 10712 * 10713 * Context: Kernel thread context. 10714 */ 10715 /* ARGSUSED */ 10716 static int 10717 sdawrite(dev_t dev, struct aio_req *aio, cred_t *cred_p) 10718 { 10719 struct sd_lun *un = NULL; 10720 struct uio *uio = aio->aio_uio; 10721 int secmask; 10722 int err = 0; 10723 sd_ssc_t *ssc; 10724 10725 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 10726 return (ENXIO); 10727 } 10728 10729 ASSERT(!mutex_owned(SD_MUTEX(un))); 10730 10731 if (!SD_IS_VALID_LABEL(un) && !ISCD(un)) { 10732 mutex_enter(SD_MUTEX(un)); 10733 /* 10734 * Because the call to sd_ready_and_valid will issue I/O we 10735 * must wait here if either the device is suspended or 10736 * if it's power level is changing. 10737 */ 10738 while ((un->un_state == SD_STATE_SUSPENDED) || 10739 (un->un_state == SD_STATE_PM_CHANGING)) { 10740 cv_wait(&un->un_suspend_cv, SD_MUTEX(un)); 10741 } 10742 un->un_ncmds_in_driver++; 10743 mutex_exit(SD_MUTEX(un)); 10744 10745 /* Initialize sd_ssc_t for internal uscsi commands */ 10746 ssc = sd_ssc_init(un); 10747 if ((sd_ready_and_valid(ssc, SDPART(dev))) != SD_READY_VALID) { 10748 err = EIO; 10749 } else { 10750 err = 0; 10751 } 10752 sd_ssc_fini(ssc); 10753 10754 mutex_enter(SD_MUTEX(un)); 10755 un->un_ncmds_in_driver--; 10756 ASSERT(un->un_ncmds_in_driver >= 0); 10757 mutex_exit(SD_MUTEX(un)); 10758 if (err != 0) 10759 return (err); 10760 } 10761 10762 /* 10763 * Write requests are restricted to multiples of the system block size. 
10764 */ 10765 secmask = un->un_sys_blocksize - 1; 10766 10767 if (uio->uio_loffset & ((offset_t)(secmask))) { 10768 SD_ERROR(SD_LOG_READ_WRITE, un, 10769 "sdawrite: file offset not modulo %d\n", 10770 un->un_sys_blocksize); 10771 err = EINVAL; 10772 } else if (uio->uio_iov->iov_len & (secmask)) { 10773 SD_ERROR(SD_LOG_READ_WRITE, un, 10774 "sdawrite: transfer length not modulo %d\n", 10775 un->un_sys_blocksize); 10776 err = EINVAL; 10777 } else { 10778 err = aphysio(sdstrategy, anocancel, dev, B_WRITE, sdmin, aio); 10779 } 10780 10781 return (err); 10782 } 10783 10784 10785 10786 10787 10788 /* 10789 * Driver IO processing follows the following sequence: 10790 * 10791 * sdioctl(9E) sdstrategy(9E) biodone(9F) 10792 * | | ^ 10793 * v v | 10794 * sd_send_scsi_cmd() ddi_xbuf_qstrategy() +-------------------+ 10795 * | | | | 10796 * v | | | 10797 * sd_uscsi_strategy() sd_xbuf_strategy() sd_buf_iodone() sd_uscsi_iodone() 10798 * | | ^ ^ 10799 * v v | | 10800 * SD_BEGIN_IOSTART() SD_BEGIN_IOSTART() | | 10801 * | | | | 10802 * +---+ | +------------+ +-------+ 10803 * | | | | 10804 * | SD_NEXT_IOSTART()| SD_NEXT_IODONE()| | 10805 * | v | | 10806 * | sd_mapblockaddr_iostart() sd_mapblockaddr_iodone() | 10807 * | | ^ | 10808 * | SD_NEXT_IOSTART()| SD_NEXT_IODONE()| | 10809 * | v | | 10810 * | sd_mapblocksize_iostart() sd_mapblocksize_iodone() | 10811 * | | ^ | 10812 * | SD_NEXT_IOSTART()| SD_NEXT_IODONE()| | 10813 * | v | | 10814 * | sd_checksum_iostart() sd_checksum_iodone() | 10815 * | | ^ | 10816 * +-> SD_NEXT_IOSTART()| SD_NEXT_IODONE()+------------->+ 10817 * | v | | 10818 * | sd_pm_iostart() sd_pm_iodone() | 10819 * | | ^ | 10820 * | | | | 10821 * +-> SD_NEXT_IOSTART()| SD_BEGIN_IODONE()--+--------------+ 10822 * | ^ 10823 * v | 10824 * sd_core_iostart() | 10825 * | | 10826 * | +------>(*destroypkt)() 10827 * +-> sd_start_cmds() <-+ | | 10828 * | | | v 10829 * | | | scsi_destroy_pkt(9F) 10830 * | | | 10831 * +->(*initpkt)() +- sdintr() 10832 * | | | | 10833 * | +-> scsi_init_pkt(9F) | +-> sd_handle_xxx() 10834 * | +-> scsi_setup_cdb(9F) | 10835 * | | 10836 * +--> scsi_transport(9F) | 10837 * | | 10838 * +----> SCSA ---->+ 10839 * 10840 * 10841 * This code is based upon the following presumptions: 10842 * 10843 * - iostart and iodone functions operate on buf(9S) structures. These 10844 * functions perform the necessary operations on the buf(9S) and pass 10845 * them along to the next function in the chain by using the macros 10846 * SD_NEXT_IOSTART() (for iostart side functions) and SD_NEXT_IODONE() 10847 * (for iodone side functions). 10848 * 10849 * - The iostart side functions may sleep. The iodone side functions 10850 * are called under interrupt context and may NOT sleep. Therefore 10851 * iodone side functions also may not call iostart side functions. 10852 * (NOTE: iostart side functions should NOT sleep for memory, as 10853 * this could result in deadlock.) 10854 * 10855 * - An iostart side function may call its corresponding iodone side 10856 * function directly (if necessary). 10857 * 10858 * - In the event of an error, an iostart side function can return a buf(9S) 10859 * to its caller by calling SD_BEGIN_IODONE() (after setting B_ERROR and 10860 * b_error in the usual way of course). 10861 * 10862 * - The taskq mechanism may be used by the iodone side functions to dispatch 10863 * requests to the iostart side functions. The iostart side functions in 10864 * this case would be called under the context of a taskq thread, so it's 10865 * OK for them to block/sleep/spin in this case. 
10866 * 10867 * - iostart side functions may allocate "shadow" buf(9S) structs and 10868 * pass them along to the next function in the chain. The corresponding 10869 * iodone side functions must coalesce the "shadow" bufs and return 10870 * the "original" buf to the next higher layer. 10871 * 10872 * - The b_private field of the buf(9S) struct holds a pointer to 10873 * an sd_xbuf struct, which contains information needed to 10874 * construct the scsi_pkt for the command. 10875 * 10876 * - The SD_MUTEX(un) is NOT held across calls to the next layer. Each 10877 * layer must acquire & release the SD_MUTEX(un) as needed. 10878 */ 10879 10880 10881 /* 10882 * Create taskq for all targets in the system. This is created at 10883 * _init(9E) and destroyed at _fini(9E). 10884 * 10885 * Note: here we set the minalloc to a reasonably high number to ensure that 10886 * we will have an adequate supply of task entries available at interrupt time. 10887 * This is used in conjunction with the TASKQ_PREPOPULATE flag in 10888 * sd_taskq_create(). Since we do not want to sleep for allocations at 10889 * interrupt time, set maxalloc equal to minalloc. That way we will just fail 10890 * the command if we ever try to dispatch more than SD_TASKQ_MAXALLOC taskq 10891 * requests at any one instant in time. 10892 */ 10893 #define SD_TASKQ_NUMTHREADS 8 10894 #define SD_TASKQ_MINALLOC 256 10895 #define SD_TASKQ_MAXALLOC 256 10896 10897 static taskq_t *sd_tq = NULL; 10898 _NOTE(SCHEME_PROTECTS_DATA("stable data", sd_tq)) 10899 10900 static int sd_taskq_minalloc = SD_TASKQ_MINALLOC; 10901 static int sd_taskq_maxalloc = SD_TASKQ_MAXALLOC; 10902 10903 /* 10904 * The following task queue is created for the write part of the 10905 * read-modify-write handling for non-512 block size devices. 10906 * Limit the number of threads to 1 for now. This number was chosen 10907 * considering the fact that it currently applies only to DVD-RAM/MO 10908 * drives, for which performance is not the main criterion at this stage. 10909 * Note: it remains to be explored whether a single taskq could be used in 10910 * the future. 10911 */ #define SD_WMR_TASKQ_NUMTHREADS 1 10912 static taskq_t *sd_wmr_tq = NULL; 10913 _NOTE(SCHEME_PROTECTS_DATA("stable data", sd_wmr_tq)) 10914 10915 /* 10916 * Function: sd_taskq_create 10917 * 10918 * Description: Create taskq thread(s) and preallocate task entries 10919 * 10920 * Return Code: None (the allocated taskqs are stored in the sd_tq and 10921 * sd_wmr_tq globals). 10922 * Context: Can sleep. Requires blockable context. 10923 * 10924 * Notes: - The taskq() facility currently is NOT part of the DDI. 10925 * (definitely NOT recommended for 3rd-party drivers!) :-) 10926 * - taskq_create() will block for memory; it will also panic 10927 * if it cannot create the requested number of threads. 10928 * - Currently taskq_create() creates threads that cannot be 10929 * swapped.
10930 * - We use TASKQ_PREPOPULATE to ensure we have an adequate 10931 * supply of taskq entries at interrupt time (ie, so that we 10932 * do not have to sleep for memory) 10933 */ 10934 10935 static void 10936 sd_taskq_create(void) 10937 { 10938 char taskq_name[TASKQ_NAMELEN]; 10939 10940 ASSERT(sd_tq == NULL); 10941 ASSERT(sd_wmr_tq == NULL); 10942 10943 (void) snprintf(taskq_name, sizeof (taskq_name), 10944 "%s_drv_taskq", sd_label); 10945 sd_tq = (taskq_create(taskq_name, SD_TASKQ_NUMTHREADS, 10946 (v.v_maxsyspri - 2), sd_taskq_minalloc, sd_taskq_maxalloc, 10947 TASKQ_PREPOPULATE)); 10948 10949 (void) snprintf(taskq_name, sizeof (taskq_name), 10950 "%s_rmw_taskq", sd_label); 10951 sd_wmr_tq = (taskq_create(taskq_name, SD_WMR_TASKQ_NUMTHREADS, 10952 (v.v_maxsyspri - 2), sd_taskq_minalloc, sd_taskq_maxalloc, 10953 TASKQ_PREPOPULATE)); 10954 } 10955 10956 10957 /* 10958 * Function: sd_taskq_delete 10959 * 10960 * Description: Complementary cleanup routine for sd_taskq_create(). 10961 * 10962 * Context: Kernel thread context. 10963 */ 10964 10965 static void 10966 sd_taskq_delete(void) 10967 { 10968 ASSERT(sd_tq != NULL); 10969 ASSERT(sd_wmr_tq != NULL); 10970 taskq_destroy(sd_tq); 10971 taskq_destroy(sd_wmr_tq); 10972 sd_tq = NULL; 10973 sd_wmr_tq = NULL; 10974 } 10975 10976 10977 /* 10978 * Function: sdstrategy 10979 * 10980 * Description: Driver's strategy (9E) entry point function. 10981 * 10982 * Arguments: bp - pointer to buf(9S) 10983 * 10984 * Return Code: Always returns zero 10985 * 10986 * Context: Kernel thread context. 10987 */ 10988 10989 static int 10990 sdstrategy(struct buf *bp) 10991 { 10992 struct sd_lun *un; 10993 10994 un = ddi_get_soft_state(sd_state, SD_GET_INSTANCE_FROM_BUF(bp)); 10995 if (un == NULL) { 10996 bioerror(bp, EIO); 10997 bp->b_resid = bp->b_bcount; 10998 biodone(bp); 10999 return (0); 11000 } 11001 /* As was done in the past, fail new cmds if state is dumping. */ 11002 if (un->un_state == SD_STATE_DUMPING) { 11003 bioerror(bp, ENXIO); 11004 bp->b_resid = bp->b_bcount; 11005 biodone(bp); 11006 return (0); 11007 } 11008 11009 ASSERT(!mutex_owned(SD_MUTEX(un))); 11010 11011 /* 11012 * Commands may sneak in while we released the mutex in 11013 * DDI_SUSPEND, so we should block new commands. However, old 11014 * commands that are still in the driver at this point should 11015 * still be allowed to drain. 11016 */ 11017 mutex_enter(SD_MUTEX(un)); 11018 /* 11019 * Must wait here if either the device is suspended or 11020 * if its power level is changing. 11021 */ 11022 while ((un->un_state == SD_STATE_SUSPENDED) || 11023 (un->un_state == SD_STATE_PM_CHANGING)) { 11024 cv_wait(&un->un_suspend_cv, SD_MUTEX(un)); 11025 } 11026 11027 un->un_ncmds_in_driver++; 11028 11029 /* 11030 * atapi: Since we are running the CD for now in PIO mode we need to 11031 * call bp_mapin here to avoid bp_mapin being called in interrupt 11032 * context from the HBA's init_pkt routine. 11033 */ 11034 if (un->un_f_cfg_is_atapi == TRUE) { 11035 mutex_exit(SD_MUTEX(un)); 11036 bp_mapin(bp); 11037 mutex_enter(SD_MUTEX(un)); 11038 } 11039 SD_INFO(SD_LOG_IO, un, "sdstrategy: un_ncmds_in_driver = %ld\n", 11040 un->un_ncmds_in_driver); 11041 11042 if (bp->b_flags & B_WRITE) 11043 un->un_f_sync_cache_required = TRUE; 11044 11045 mutex_exit(SD_MUTEX(un)); 11046 11047 /* 11048 * This will (eventually) allocate the sd_xbuf area and 11049 * call sd_xbuf_strategy(). We just want to return the 11050 * result of ddi_xbuf_qstrategy so that we have an 11051 * optimized tail call, which saves us a stack frame.
11052 */ 11053 return (ddi_xbuf_qstrategy(bp, un->un_xbuf_attr)); 11054 } 11055 11056 11057 /* 11058 * Function: sd_xbuf_strategy 11059 * 11060 * Description: Function for initiating IO operations via the 11061 * ddi_xbuf_qstrategy() mechanism. 11062 * 11063 * Context: Kernel thread context. 11064 */ 11065 11066 static void 11067 sd_xbuf_strategy(struct buf *bp, ddi_xbuf_t xp, void *arg) 11068 { 11069 struct sd_lun *un = arg; 11070 11071 ASSERT(bp != NULL); 11072 ASSERT(xp != NULL); 11073 ASSERT(un != NULL); 11074 ASSERT(!mutex_owned(SD_MUTEX(un))); 11075 11076 /* 11077 * Initialize the fields in the xbuf and save a pointer to the 11078 * xbuf in bp->b_private. 11079 */ 11080 sd_xbuf_init(un, bp, xp, SD_CHAIN_BUFIO, NULL); 11081 11082 /* Send the buf down the iostart chain */ 11083 SD_BEGIN_IOSTART(((struct sd_xbuf *)xp)->xb_chain_iostart, un, bp); 11084 } 11085 11086 11087 /* 11088 * Function: sd_xbuf_init 11089 * 11090 * Description: Prepare the given sd_xbuf struct for use. 11091 * 11092 * Arguments: un - ptr to softstate 11093 * bp - ptr to associated buf(9S) 11094 * xp - ptr to associated sd_xbuf 11095 * chain_type - IO chain type to use: 11096 * SD_CHAIN_NULL 11097 * SD_CHAIN_BUFIO 11098 * SD_CHAIN_USCSI 11099 * SD_CHAIN_DIRECT 11100 * SD_CHAIN_DIRECT_PRIORITY 11101 * pktinfop - ptr to private data struct for scsi_pkt(9S) 11102 * initialization; may be NULL if none. 11103 * 11104 * Context: Kernel thread context 11105 */ 11106 11107 static void 11108 sd_xbuf_init(struct sd_lun *un, struct buf *bp, struct sd_xbuf *xp, 11109 uchar_t chain_type, void *pktinfop) 11110 { 11111 int index; 11112 11113 ASSERT(un != NULL); 11114 ASSERT(bp != NULL); 11115 ASSERT(xp != NULL); 11116 11117 SD_INFO(SD_LOG_IO, un, "sd_xbuf_init: buf:0x%p chain type:0x%x\n", 11118 bp, chain_type); 11119 11120 xp->xb_un = un; 11121 xp->xb_pktp = NULL; 11122 xp->xb_pktinfo = pktinfop; 11123 xp->xb_private = bp->b_private; 11124 xp->xb_blkno = (daddr_t)bp->b_blkno; 11125 11126 /* 11127 * Set up the iostart and iodone chain indexes in the xbuf, based 11128 * upon the specified chain type to use. 11129 */ 11130 switch (chain_type) { 11131 case SD_CHAIN_NULL: 11132 /* 11133 * Fall through to just use the values for the buf type, even 11134 * though for the NULL chain these values will never be used. 11135 */ 11136 /* FALLTHRU */ 11137 case SD_CHAIN_BUFIO: 11138 index = un->un_buf_chain_type; 11139 break; 11140 case SD_CHAIN_USCSI: 11141 index = un->un_uscsi_chain_type; 11142 break; 11143 case SD_CHAIN_DIRECT: 11144 index = un->un_direct_chain_type; 11145 break; 11146 case SD_CHAIN_DIRECT_PRIORITY: 11147 index = un->un_priority_chain_type; 11148 break; 11149 default: 11150 /* We're really broken if we ever get here... */ 11151 panic("sd_xbuf_init: illegal chain type!"); 11152 /*NOTREACHED*/ 11153 } 11154 11155 xp->xb_chain_iostart = sd_chain_index_map[index].sci_iostart_index; 11156 xp->xb_chain_iodone = sd_chain_index_map[index].sci_iodone_index; 11157 11158 /* 11159 * It might be a bit easier to simply bzero the entire xbuf above, 11160 * but it turns out that since we init a fair number of members anyway, 11161 * we save a fair number of cycles by explicitly assigning zero.
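 * (Maintenance note: if a member is added to struct sd_xbuf, it
 * presumably needs explicit initialization here as well, since the
 * xbuf is not zeroed on this path.)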
11162 */ 11163 xp->xb_pkt_flags = 0; 11164 xp->xb_dma_resid = 0; 11165 xp->xb_retry_count = 0; 11166 xp->xb_victim_retry_count = 0; 11167 xp->xb_ua_retry_count = 0; 11168 xp->xb_nr_retry_count = 0; 11169 xp->xb_sense_bp = NULL; 11170 xp->xb_sense_status = 0; 11171 xp->xb_sense_state = 0; 11172 xp->xb_sense_resid = 0; 11173 xp->xb_ena = 0; 11174 11175 bp->b_private = xp; 11176 bp->b_flags &= ~(B_DONE | B_ERROR); 11177 bp->b_resid = 0; 11178 bp->av_forw = NULL; 11179 bp->av_back = NULL; 11180 bioerror(bp, 0); 11181 11182 SD_INFO(SD_LOG_IO, un, "sd_xbuf_init: done.\n"); 11183 } 11184 11185 11186 /* 11187 * Function: sd_uscsi_strategy 11188 * 11189 * Description: Wrapper for calling into the USCSI chain via physio(9F) 11190 * 11191 * Arguments: bp - buf struct ptr 11192 * 11193 * Return Code: Always returns 0 11194 * 11195 * Context: Kernel thread context 11196 */ 11197 11198 static int 11199 sd_uscsi_strategy(struct buf *bp) 11200 { 11201 struct sd_lun *un; 11202 struct sd_uscsi_info *uip; 11203 struct sd_xbuf *xp; 11204 uchar_t chain_type; 11205 uchar_t cmd; 11206 11207 ASSERT(bp != NULL); 11208 11209 un = ddi_get_soft_state(sd_state, SD_GET_INSTANCE_FROM_BUF(bp)); 11210 if (un == NULL) { 11211 bioerror(bp, EIO); 11212 bp->b_resid = bp->b_bcount; 11213 biodone(bp); 11214 return (0); 11215 } 11216 11217 ASSERT(!mutex_owned(SD_MUTEX(un))); 11218 11219 SD_TRACE(SD_LOG_IO, un, "sd_uscsi_strategy: entry: buf:0x%p\n", bp); 11220 11221 /* 11222 * A pointer to a struct sd_uscsi_info is expected in bp->b_private 11223 */ 11224 ASSERT(bp->b_private != NULL); 11225 uip = (struct sd_uscsi_info *)bp->b_private; 11226 cmd = ((struct uscsi_cmd *)(uip->ui_cmdp))->uscsi_cdb[0]; 11227 11228 mutex_enter(SD_MUTEX(un)); 11229 /* 11230 * atapi: Since we are running the CD for now in PIO mode we need to 11231 * call bp_mapin here to avoid bp_mapin being called in interrupt 11232 * context from the HBA's init_pkt routine. 11233 */ 11234 if (un->un_f_cfg_is_atapi == TRUE) { 11235 mutex_exit(SD_MUTEX(un)); 11236 bp_mapin(bp); 11237 mutex_enter(SD_MUTEX(un)); 11238 } 11239 un->un_ncmds_in_driver++; 11240 SD_INFO(SD_LOG_IO, un, "sd_uscsi_strategy: un_ncmds_in_driver = %ld\n", 11241 un->un_ncmds_in_driver); 11242 11243 if ((bp->b_flags & B_WRITE) && (bp->b_bcount != 0) && 11244 (cmd != SCMD_MODE_SELECT) && (cmd != SCMD_MODE_SELECT_G1)) 11245 un->un_f_sync_cache_required = TRUE; 11246 11247 mutex_exit(SD_MUTEX(un)); 11248 11249 switch (uip->ui_flags) { 11250 case SD_PATH_DIRECT: 11251 chain_type = SD_CHAIN_DIRECT; 11252 break; 11253 case SD_PATH_DIRECT_PRIORITY: 11254 chain_type = SD_CHAIN_DIRECT_PRIORITY; 11255 break; 11256 default: 11257 chain_type = SD_CHAIN_USCSI; 11258 break; 11259 } 11260 11261 /* 11262 * We may allocate an extra buf for external USCSI commands. If the 11263 * application asks for more than 20 bytes of sense data via USCSI, 11264 * the SCSA layer will allocate a 252-byte sense buf for that command.
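 * A sizing note on the allocation below: the xbuf is over-allocated by
 * (MAX_SENSE_LENGTH - SENSE_LENGTH) bytes so that the larger sense data
 * can be kept with the command; sd_uscsi_iodone() performs the matching
 * kmem_free() using the same uscsi_rqlen test.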
11265 */ 11266 if (((struct uscsi_cmd *)(uip->ui_cmdp))->uscsi_rqlen > 11267 SENSE_LENGTH) { 11268 xp = kmem_zalloc(sizeof (struct sd_xbuf) - SENSE_LENGTH + 11269 MAX_SENSE_LENGTH, KM_SLEEP); 11270 } else { 11271 xp = kmem_zalloc(sizeof (struct sd_xbuf), KM_SLEEP); 11272 } 11273 11274 sd_xbuf_init(un, bp, xp, chain_type, uip->ui_cmdp); 11275 11276 /* Use the index obtained within xbuf_init */ 11277 SD_BEGIN_IOSTART(xp->xb_chain_iostart, un, bp); 11278 11279 SD_TRACE(SD_LOG_IO, un, "sd_uscsi_strategy: exit: buf:0x%p\n", bp); 11280 11281 return (0); 11282 } 11283 11284 /* 11285 * Function: sd_send_scsi_cmd 11286 * 11287 * Description: Runs a USCSI command for user (when called through sdioctl), 11288 * or for the driver 11289 * 11290 * Arguments: dev - the dev_t for the device 11291 * incmd - ptr to a valid uscsi_cmd struct 11292 * flag - bit flag, indicating open settings, 32/64 bit type 11293 * dataspace - UIO_USERSPACE or UIO_SYSSPACE 11294 * path_flag - SD_PATH_DIRECT to use the USCSI "direct" chain and 11295 * the normal command waitq, or SD_PATH_DIRECT_PRIORITY 11296 * to use the USCSI "direct" chain and bypass the normal 11297 * command waitq. 11298 * 11299 * Return Code: 0 - successful completion of the given command 11300 * EIO - scsi_uscsi_handle_command() failed 11301 * ENXIO - soft state not found for specified dev 11302 * EINVAL 11303 * EFAULT - copyin/copyout error 11304 * return code of scsi_uscsi_handle_command(): 11305 * EIO 11306 * ENXIO 11307 * EACCES 11308 * 11309 * Context: Waits for command to complete. Can sleep. 11310 */ 11311 11312 static int 11313 sd_send_scsi_cmd(dev_t dev, struct uscsi_cmd *incmd, int flag, 11314 enum uio_seg dataspace, int path_flag) 11315 { 11316 struct sd_lun *un; 11317 sd_ssc_t *ssc; 11318 int rval; 11319 11320 un = ddi_get_soft_state(sd_state, SDUNIT(dev)); 11321 if (un == NULL) { 11322 return (ENXIO); 11323 } 11324 11325 /* 11326 * Using sd_ssc_send to handle uscsi cmd 11327 */ 11328 ssc = sd_ssc_init(un); 11329 rval = sd_ssc_send(ssc, incmd, flag, dataspace, path_flag); 11330 sd_ssc_fini(ssc); 11331 11332 return (rval); 11333 } 11334 11335 /* 11336 * Function: sd_ssc_init 11337 * 11338 * Description: Uscsi end-users call this function to initialize the 11339 * necessary fields, such as the uscsi_cmd and sd_uscsi_info structs. 11340 * 11341 * The return value of sd_send_scsi_cmd will be treated as a 11342 * fault in various conditions. Even when it is not zero, some 11343 * callers may ignore the return value. That is to say, we 11344 * cannot make an accurate assessment in sdintr, since a 11345 * command failing in sdintr does not mean the caller of 11346 * sd_send_scsi_cmd will treat it as a real failure. 11347 * 11348 * To avoid printing too many error logs for a failed uscsi 11349 * packet that the caller may not treat as a failure, sd 11350 * keeps silent when handling all uscsi commands. 11351 * 11352 * During detach->attach and attach-open, for some types of 11353 * problems, the driver should provide information about 11354 * the problem encountered. Devices use USCSI_SILENT, which 11355 * suppresses all driver information. The result is that no 11356 * information about the problem is available. Being 11357 * completely silent during this time is inappropriate. The 11358 * driver needs a more selective filter than USCSI_SILENT, so 11359 * that information related to faults is provided.
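 *
 *		As an illustrative sketch only (the uscsi_cmd setup, and
 *		the flag/dataspace/path values shown, are assumptions of
 *		this example rather than requirements), a caller would
 *		pair these routines as:
 *
 *			ssc = sd_ssc_init(un);
 *			rval = sd_ssc_send(ssc, &ucmd, FKIOCTL,
 *			    UIO_SYSSPACE, SD_PATH_DIRECT);
 *			sd_ssc_assessment(ssc, (rval == 0) ?
 *			    SD_FMT_STANDARD : SD_FMT_STATUS_CHECK);
 *			sd_ssc_fini(ssc);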
11360 * 11361 * To make an accurate assessment, the caller of 11362 * sd_send_scsi_USCSI_CMD should take ownership and 11363 * gather the necessary information to print error messages. 11364 * 11365 * If we want to print the necessary info for a uscsi command, we 11366 * need to keep the uscsi_cmd and sd_uscsi_info until we can make 11367 * the assessment. We use sd_ssc_init to allocate the necessary 11368 * structs for sending a uscsi command, and we are also 11369 * responsible for freeing the memory by calling 11370 * sd_ssc_fini. 11371 * 11372 * The calling sequence will look like: 11373 * sd_ssc_init-> 11374 * 11375 * ... 11376 * 11377 * sd_send_scsi_USCSI_CMD-> 11378 * sd_ssc_send-> - - - sdintr 11379 * ... 11380 * 11381 * if we think the return value should be treated as a 11382 * failure, we make the assessment here and print out what is 11383 * necessary by retrieving the uscsi_cmd and sd_uscsi_info 11384 * 11385 * ... 11386 * 11387 * sd_ssc_fini 11388 * 11389 * 11390 * Arguments: un - pointer to driver soft state (unit) structure for this 11391 * target. 11392 * 11393 * Return code: sd_ssc_t - pointer to allocated sd_ssc_t struct; it contains 11394 * the uscsi_cmd and sd_uscsi_info. 11395 * NULL - if memory for the sd_ssc_t struct cannot be allocated 11396 * 11397 * Context: Kernel Thread. 11398 */ 11399 static sd_ssc_t * 11400 sd_ssc_init(struct sd_lun *un) 11401 { 11402 sd_ssc_t *ssc; 11403 struct uscsi_cmd *ucmdp; 11404 struct sd_uscsi_info *uip; 11405 11406 ASSERT(un != NULL); 11407 ASSERT(!mutex_owned(SD_MUTEX(un))); 11408 11409 /* 11410 * Allocate sd_ssc_t structure 11411 */ 11412 ssc = kmem_zalloc(sizeof (sd_ssc_t), KM_SLEEP); 11413 11414 /* 11415 * Allocate uscsi_cmd by calling scsi_uscsi_alloc common routine 11416 */ 11417 ucmdp = scsi_uscsi_alloc(); 11418 11419 /* 11420 * Allocate sd_uscsi_info structure 11421 */ 11422 uip = kmem_zalloc(sizeof (struct sd_uscsi_info), KM_SLEEP); 11423 11424 ssc->ssc_uscsi_cmd = ucmdp; 11425 ssc->ssc_uscsi_info = uip; 11426 ssc->ssc_un = un; 11427 11428 return (ssc); 11429 } 11430 11431 /* 11432 * Function: sd_ssc_fini 11433 * 11434 * Description: Free the sd_ssc_t struct and the structures hanging off it. 11435 * 11436 * Arguments: ssc - struct pointer of sd_ssc_t. 11437 */ 11438 static void 11439 sd_ssc_fini(sd_ssc_t *ssc) 11440 { 11441 scsi_uscsi_free(ssc->ssc_uscsi_cmd); 11442 11443 if (ssc->ssc_uscsi_info != NULL) { 11444 kmem_free(ssc->ssc_uscsi_info, sizeof (struct sd_uscsi_info)); 11445 ssc->ssc_uscsi_info = NULL; 11446 } 11447 11448 kmem_free(ssc, sizeof (sd_ssc_t)); 11449 ssc = NULL; 11450 } 11451 11452 /* 11453 * Function: sd_ssc_send 11454 * 11455 * Description: Runs a USCSI command for user when called through sdioctl, 11456 * or for the driver. 11457 * 11458 * Arguments: ssc - pointer to the sd_ssc_t struct that carries the 11459 * uscsi_cmd and sd_uscsi_info. 11460 * incmd - ptr to a valid uscsi_cmd struct 11461 * flag - bit flag, indicating open settings, 32/64 bit type 11462 * dataspace - UIO_USERSPACE or UIO_SYSSPACE 11463 * path_flag - SD_PATH_DIRECT to use the USCSI "direct" chain and 11464 * the normal command waitq, or SD_PATH_DIRECT_PRIORITY 11465 * to use the USCSI "direct" chain and bypass the normal 11466 * command waitq.
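 *			(Note: SD_PATH_DIRECT_PRIORITY also causes
 *			USCSI_DIAGNOSE to be set below, so such commands
 *			are not retried.)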
11467 * 11468 * Return Code: 0 - successful completion of the given command 11469 * EIO - scsi_uscsi_handle_command() failed 11470 * ENXIO - soft state not found for specified dev 11471 * EINVAL 11472 * EFAULT - copyin/copyout error 11473 * return code of scsi_uscsi_handle_command(): 11474 * EIO 11475 * ENXIO 11476 * EACCES 11477 * 11478 * Context: Kernel Thread; 11479 * Waits for command to complete. Can sleep. 11480 */ 11481 static int 11482 sd_ssc_send(sd_ssc_t *ssc, struct uscsi_cmd *incmd, int flag, 11483 enum uio_seg dataspace, int path_flag) 11484 { 11485 struct sd_uscsi_info *uip; 11486 struct uscsi_cmd *uscmd; 11487 struct sd_lun *un; 11488 dev_t dev; 11489 11490 int format = 0; 11491 int rval; 11492 11493 ASSERT(ssc != NULL); 11494 un = ssc->ssc_un; 11495 ASSERT(un != NULL); 11496 uscmd = ssc->ssc_uscsi_cmd; 11497 ASSERT(uscmd != NULL); 11498 ASSERT(!mutex_owned(SD_MUTEX(un))); 11499 if (ssc->ssc_flags & SSC_FLAGS_NEED_ASSESSMENT) { 11500 /* 11501 * If we enter here, it indicates that the previous uscsi 11502 * command has not been processed by sd_ssc_assessment. 11503 * This violates our rules for FMA telemetry processing. 11504 * We should print out this message along with the last 11505 * undisposed uscsi command. 11506 */ 11507 if (uscmd->uscsi_cdb != NULL) { 11508 SD_INFO(SD_LOG_SDTEST, un, 11509 "sd_ssc_send is missing the corresponding " 11510 "sd_ssc_assessment when running command 0x%x.\n", 11511 uscmd->uscsi_cdb[0]); 11512 } 11513 /* 11514 * Set the ssc_flags to SSC_FLAGS_UNKNOWN, which should be 11515 * the initial status. 11516 */ 11517 ssc->ssc_flags = SSC_FLAGS_UNKNOWN; 11518 } 11519 11520 /* 11521 * We need to make sure each sd_ssc_send is followed by an 11522 * sd_ssc_assessment, to avoid missing FMA telemetry. 11523 */ 11524 ssc->ssc_flags |= SSC_FLAGS_NEED_ASSESSMENT; 11525 11526 #ifdef SDDEBUG 11527 switch (dataspace) { 11528 case UIO_USERSPACE: 11529 SD_TRACE(SD_LOG_IO, un, 11530 "sd_ssc_send: entry: un:0x%p UIO_USERSPACE\n", un); 11531 break; 11532 case UIO_SYSSPACE: 11533 SD_TRACE(SD_LOG_IO, un, 11534 "sd_ssc_send: entry: un:0x%p UIO_SYSSPACE\n", un); 11535 break; 11536 default: 11537 SD_TRACE(SD_LOG_IO, un, 11538 "sd_ssc_send: entry: un:0x%p UNEXPECTED SPACE\n", un); 11539 break; 11540 } 11541 #endif 11542 11543 rval = scsi_uscsi_copyin((intptr_t)incmd, flag, 11544 SD_ADDRESS(un), &uscmd); 11545 if (rval != 0) { 11546 SD_TRACE(SD_LOG_IO, un, "sd_ssc_send: " 11547 "scsi_uscsi_copyin failed\n"); 11548 return (rval); 11549 } 11550 11551 if ((uscmd->uscsi_cdb != NULL) && 11552 (uscmd->uscsi_cdb[0] == SCMD_FORMAT)) { 11553 mutex_enter(SD_MUTEX(un)); 11554 un->un_f_format_in_progress = TRUE; 11555 mutex_exit(SD_MUTEX(un)); 11556 format = 1; 11557 } 11558 11559 /* 11560 * Fill the sd_uscsi_info struct (allocated by sd_ssc_init) with 11561 * the info needed by sd_initpkt_for_uscsi(). Then put the pointer 11562 * into b_private in the buf for sd_initpkt_for_uscsi(). Note that 11563 * since the buf is allocated later on this path, we do not 11564 * need to preserve the prior contents of b_private. 11565 * The sd_uscsi_info struct is also used by sd_uscsi_strategy(). 11566 */ 11567 uip = ssc->ssc_uscsi_info; 11568 uip->ui_flags = path_flag; 11569 uip->ui_cmdp = uscmd; 11570 11571 /* 11572 * Commands sent with priority are intended for error recovery 11573 * situations, and do not have retries performed.
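 * Setting USCSI_DIAGNOSE below is what requests that behavior: a failed
 * priority command comes straight back to the caller instead of being
 * retried.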
11574 */ 11575 if (path_flag == SD_PATH_DIRECT_PRIORITY) { 11576 uscmd->uscsi_flags |= USCSI_DIAGNOSE; 11577 } 11578 uscmd->uscsi_flags &= ~USCSI_NOINTR; 11579 11580 dev = SD_GET_DEV(un); 11581 rval = scsi_uscsi_handle_cmd(dev, dataspace, uscmd, 11582 sd_uscsi_strategy, NULL, uip); 11583 11584 /* 11585 * Mark ssc_flags right after handle_cmd to make sure 11586 * the uscsi command has been sent. 11587 */ 11588 ssc->ssc_flags |= SSC_FLAGS_CMD_ISSUED; 11589 11590 #ifdef SDDEBUG 11591 SD_INFO(SD_LOG_IO, un, "sd_ssc_send: " 11592 "uscsi_status: 0x%02x uscsi_resid:0x%x\n", 11593 uscmd->uscsi_status, uscmd->uscsi_resid); 11594 if (uscmd->uscsi_bufaddr != NULL) { 11595 SD_INFO(SD_LOG_IO, un, "sd_ssc_send: " 11596 "uscmd->uscsi_bufaddr: 0x%p uscmd->uscsi_buflen:%d\n", 11597 uscmd->uscsi_bufaddr, uscmd->uscsi_buflen); 11598 if (dataspace == UIO_SYSSPACE) { 11599 SD_DUMP_MEMORY(un, SD_LOG_IO, 11600 "data", (uchar_t *)uscmd->uscsi_bufaddr, 11601 uscmd->uscsi_buflen, SD_LOG_HEX); 11602 } 11603 } 11604 #endif 11605 11606 if (format == 1) { 11607 mutex_enter(SD_MUTEX(un)); 11608 un->un_f_format_in_progress = FALSE; 11609 mutex_exit(SD_MUTEX(un)); 11610 } 11611 11612 (void) scsi_uscsi_copyout((intptr_t)incmd, uscmd); 11613 11614 return (rval); 11615 } 11616 11617 /* 11618 * Function: sd_ssc_print 11619 * 11620 * Description: Print information available to the console. 11621 * 11622 * Arguments: ssc - pointer to the sd_ssc_t struct that carries the 11623 * uscsi_cmd and sd_uscsi_info. 11624 * sd_severity - log level. 11625 * Context: Kernel thread or interrupt context. 11626 */ 11627 static void 11628 sd_ssc_print(sd_ssc_t *ssc, int sd_severity) 11629 { 11630 struct uscsi_cmd *ucmdp; 11631 struct scsi_device *devp; 11632 dev_info_t *devinfo; 11633 uchar_t *sensep; 11634 int senlen; 11635 union scsi_cdb *cdbp; 11636 uchar_t com; 11637 extern struct scsi_key_strings scsi_cmds[]; 11638 11639 ASSERT(ssc != NULL); 11640 ASSERT(ssc->ssc_un != NULL); 11641 11642 if (SD_FM_LOG(ssc->ssc_un) != SD_FM_LOG_EREPORT) 11643 return; 11644 ucmdp = ssc->ssc_uscsi_cmd; 11645 devp = SD_SCSI_DEVP(ssc->ssc_un); 11646 devinfo = SD_DEVINFO(ssc->ssc_un); 11647 ASSERT(ucmdp != NULL); 11648 ASSERT(devp != NULL); 11649 ASSERT(devinfo != NULL); 11650 sensep = (uint8_t *)ucmdp->uscsi_rqbuf; 11651 senlen = ucmdp->uscsi_rqlen - ucmdp->uscsi_rqresid; 11652 cdbp = (union scsi_cdb *)ucmdp->uscsi_cdb; 11653 11654 /* In certain cases (like DOORLOCK), the cdb could be NULL. */ 11655 if (cdbp == NULL) 11656 return; 11657 /* We don't print a log if no sense data is available. */ 11658 if (senlen == 0) 11659 sensep = NULL; 11660 com = cdbp->scc_cmd; 11661 scsi_generic_errmsg(devp, sd_label, sd_severity, 0, 0, com, 11662 scsi_cmds, sensep, ssc->ssc_un->un_additional_codes, NULL); 11663 } 11664 11665 /* 11666 * Function: sd_ssc_assessment 11667 * 11668 * Description: We use this function to make an assessment at the point 11669 * where the SD driver may encounter a potential error. 11670 * 11671 * Arguments: ssc - pointer to the sd_ssc_t struct that carries the 11672 * uscsi_cmd and sd_uscsi_info. 11673 * tp_assess - a hint of strategy for ereport posting. 11674 * Possible values of tp_assess include: 11675 * SD_FMT_IGNORE - we don't post any ereport because we're 11676 * sure that it is ok to ignore the underlying problems. 11677 * SD_FMT_IGNORE_COMPROMISE - we don't post any ereport for now 11678 * but it might not be correct to ignore the underlying hardware 11679 * error.
11680 * SD_FMT_STATUS_CHECK - we will post an ereport with the 11681 * payload driver-assessment of value "fail" or 11682 * "fatal" (depending on what information we have here). This 11683 * assessment value is usually set when the SD driver thinks 11684 * a potential error has occurred (typically, when the return 11685 * value of the SCSI command is EIO). 11686 * SD_FMT_STANDARD - we will post an ereport with the payload 11687 * driver-assessment of value "info". This assessment value is 11688 * set when the SCSI command returned successfully and with 11689 * sense data sent back. 11690 * 11691 * Context: Kernel thread. 11692 */ 11693 static void 11694 sd_ssc_assessment(sd_ssc_t *ssc, enum sd_type_assessment tp_assess) 11695 { 11696 int senlen = 0; 11697 struct uscsi_cmd *ucmdp = NULL; 11698 struct sd_lun *un; 11699 11700 ASSERT(ssc != NULL); 11701 un = ssc->ssc_un; 11702 ASSERT(un != NULL); 11703 ucmdp = ssc->ssc_uscsi_cmd; 11704 ASSERT(ucmdp != NULL); 11705 11706 if (ssc->ssc_flags & SSC_FLAGS_NEED_ASSESSMENT) { 11707 ssc->ssc_flags &= ~SSC_FLAGS_NEED_ASSESSMENT; 11708 } else { 11709 /* 11710 * If we enter here, it indicates a wrong calling 11711 * sequence of sd_ssc_send and sd_ssc_assessment; 11712 * the two should be called in pairs so that no 11713 * FMA telemetry is lost. 11714 */ 11715 if (ucmdp->uscsi_cdb != NULL) { 11716 SD_INFO(SD_LOG_SDTEST, un, 11717 "sd_ssc_assessment is missing the " 11718 "corresponding sd_ssc_send when running 0x%x, " 11719 "or there are superfluous sd_ssc_assessment calls " 11720 "for the same sd_ssc_send.\n", 11721 ucmdp->uscsi_cdb[0]); 11722 } 11723 /* 11724 * Set the ssc_flags to the initial value to avoid passing 11725 * down dirty flags to the following sd_ssc_send function. 11726 */ 11727 ssc->ssc_flags = SSC_FLAGS_UNKNOWN; 11728 return; 11729 } 11730 11731 /* 11732 * Only handle an issued command which is waiting for assessment. 11733 * A command which is not issued will not have 11734 * SSC_FLAGS_INVALID_DATA set, so it's OK to just return here. 11735 */ 11736 if (!(ssc->ssc_flags & SSC_FLAGS_CMD_ISSUED)) { 11737 sd_ssc_print(ssc, SCSI_ERR_INFO); 11738 return; 11739 } else { 11740 /* 11741 * For an issued command, we should clear this flag so 11742 * that the sd_ssc_t structure can be reused for 11743 * multiple uscsi commands. 11744 */ 11745 ssc->ssc_flags &= ~SSC_FLAGS_CMD_ISSUED; 11746 } 11747 11748 /* 11749 * We will not deal with non-retryable (flag USCSI_DIAGNOSE set) 11750 * commands here; we just clear the ssc_flags before returning. 11751 */ 11752 if (ucmdp->uscsi_flags & USCSI_DIAGNOSE) { 11753 ssc->ssc_flags = SSC_FLAGS_UNKNOWN; 11754 return; 11755 } 11756 11757 switch (tp_assess) { 11758 case SD_FMT_IGNORE: 11759 case SD_FMT_IGNORE_COMPROMISE: 11760 break; 11761 case SD_FMT_STATUS_CHECK: 11762 /* 11763 * For a failed command (including a succeeded command 11764 * with invalid data sent back). 11765 */ 11766 sd_ssc_post(ssc, SD_FM_DRV_FATAL); 11767 break; 11768 case SD_FMT_STANDARD: 11769 /* 11770 * Always for succeeded commands, probably with sense 11771 * data sent back. 11772 * Limitation: 11773 * We can only handle a succeeded command with sense 11774 * data sent back when auto-request-sense is enabled.
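 * The amount of valid sense data is computed below as
 * (uscsi_rqlen - uscsi_rqresid); the ereport is posted only when that
 * is positive, ARQ actually completed (STATE_ARQ_DONE), and a sense
 * buffer is present.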
11775 */ 11776 senlen = ssc->ssc_uscsi_cmd->uscsi_rqlen - 11777 ssc->ssc_uscsi_cmd->uscsi_rqresid; 11778 if ((ssc->ssc_uscsi_info->ui_pkt_state & STATE_ARQ_DONE) && 11779 (un->un_f_arq_enabled == TRUE) && 11780 senlen > 0 && 11781 ssc->ssc_uscsi_cmd->uscsi_rqbuf != NULL) { 11782 sd_ssc_post(ssc, SD_FM_DRV_NOTICE); 11783 } 11784 break; 11785 default: 11786 /* 11787 * We should not see any other type of assessment. 11788 */ 11789 scsi_log(SD_DEVINFO(un), sd_label, CE_CONT, 11790 "sd_ssc_assessment got wrong " 11791 "sd_type_assessment %d.\n", tp_assess); 11792 break; 11793 } 11794 /* 11795 * Clear the ssc_flags before returning. 11796 */ 11797 ssc->ssc_flags = SSC_FLAGS_UNKNOWN; 11798 } 11799 11800 /* 11801 * Function: sd_ssc_post 11802 * 11803 * Description: 1. Read the driver property to get the fm-scsi-log flag. 11804 * 2. Print a log if fm_log_capable is non-zero. 11805 * 3. Call sd_ssc_ereport_post to post an ereport if possible. 11806 * 11807 * Context: May be called from kernel thread or interrupt context. 11808 */ 11809 static void 11810 sd_ssc_post(sd_ssc_t *ssc, enum sd_driver_assessment sd_assess) 11811 { 11812 struct sd_lun *un; 11813 int sd_severity; 11814 11815 ASSERT(ssc != NULL); 11816 un = ssc->ssc_un; 11817 ASSERT(un != NULL); 11818 11819 /* 11820 * We may enter here from sd_ssc_assessment (for a USCSI command) 11821 * or directly from sdintr context. 11822 * We don't handle non-disk drives (CD-ROM, removable media). 11823 * Clear the ssc_flags before returning in case we've set 11824 * SSC_FLAGS_INVALID_XXX, which should be skipped for a non-disk 11825 * device. 11826 */ 11827 if (ISCD(un) || un->un_f_has_removable_media) { 11828 ssc->ssc_flags = SSC_FLAGS_UNKNOWN; 11829 return; 11830 } 11831 11832 switch (sd_assess) { 11833 case SD_FM_DRV_FATAL: 11834 sd_severity = SCSI_ERR_FATAL; 11835 break; 11836 case SD_FM_DRV_RECOVERY: 11837 sd_severity = SCSI_ERR_RECOVERED; 11838 break; 11839 case SD_FM_DRV_RETRY: 11840 sd_severity = SCSI_ERR_RETRYABLE; 11841 break; 11842 case SD_FM_DRV_NOTICE: 11843 sd_severity = SCSI_ERR_INFO; 11844 break; 11845 default: 11846 sd_severity = SCSI_ERR_UNKNOWN; 11847 } 11848 /* print log */ 11849 sd_ssc_print(ssc, sd_severity); 11850 11851 /* always post ereport */ 11852 sd_ssc_ereport_post(ssc, sd_assess); 11853 } 11854 11855 /* 11856 * Function: sd_ssc_set_info 11857 * 11858 * Description: Mark ssc_flags and set ssc_info, which will be the 11859 * payload of an uderr ereport. This function causes 11860 * sd_ssc_ereport_post to post an uderr ereport only. 11861 * Additionally, when ssc_flags == SSC_FLAGS_INVALID_DATA (USCSI), 11862 * the function will also call SD_ERROR or scsi_log for a 11863 * CDROM/removable-media/DDI_FM_NOT_CAPABLE device. 11864 * 11865 * Arguments: ssc - pointer to the sd_ssc_t struct that carries the 11866 * uscsi_cmd and sd_uscsi_info. 11867 * ssc_flags - indicate the sub-category of a uderr. 11868 * comp - this argument is meaningful only when 11869 * ssc_flags == SSC_FLAGS_INVALID_DATA, and its possible 11870 * values include: 11871 * > 0, SD_ERROR is used with comp as the driver logging 11872 * component; 11873 * = 0, scsi_log is used to log error telemetry; 11874 * < 0, no log is available for this telemetry. 11875 * 11876 * Context: Kernel thread or interrupt context 11877 */ 11878 static void 11879 sd_ssc_set_info(sd_ssc_t *ssc, int ssc_flags, uint_t comp, const char *fmt, ...)
11880 { 11881 va_list ap; 11882 11883 ASSERT(ssc != NULL); 11884 ASSERT(ssc->ssc_un != NULL); 11885 11886 ssc->ssc_flags |= ssc_flags; 11887 va_start(ap, fmt); 11888 (void) vsnprintf(ssc->ssc_info, sizeof (ssc->ssc_info), fmt, ap); 11889 va_end(ap); 11890 11891 /* 11892 * If SSC_FLAGS_INVALID_DATA is set, it should be a uscsi command 11893 * with invalid data sent back. For non-uscsi commands, the 11894 * following code will be bypassed. 11895 */ 11896 if (ssc_flags & SSC_FLAGS_INVALID_DATA) { 11897 if (SD_FM_LOG(ssc->ssc_un) == SD_FM_LOG_NSUP) { 11898 /* 11899 * If the error belongs to a certain component and we 11900 * do not want it to show up on the console, we 11901 * will use SD_ERROR; otherwise scsi_log is 11902 * preferred. 11903 */ 11904 if (comp > 0) { 11905 SD_ERROR(comp, ssc->ssc_un, ssc->ssc_info); 11906 } else if (comp == 0) { 11907 scsi_log(SD_DEVINFO(ssc->ssc_un), sd_label, 11908 CE_WARN, ssc->ssc_info); 11909 } 11910 } 11911 } 11912 } 11913 11914 /* 11915 * Function: sd_buf_iodone 11916 * 11917 * Description: Frees the sd_xbuf & returns the buf to its originator. 11918 * 11919 * Context: May be called from interrupt context. 11920 */ 11921 /* ARGSUSED */ 11922 static void 11923 sd_buf_iodone(int index, struct sd_lun *un, struct buf *bp) 11924 { 11925 struct sd_xbuf *xp; 11926 11927 ASSERT(un != NULL); 11928 ASSERT(bp != NULL); 11929 ASSERT(!mutex_owned(SD_MUTEX(un))); 11930 11931 SD_TRACE(SD_LOG_IO_CORE, un, "sd_buf_iodone: entry.\n"); 11932 11933 xp = SD_GET_XBUF(bp); 11934 ASSERT(xp != NULL); 11935 11936 /* xbuf is gone after this */ 11937 if (ddi_xbuf_done(bp, un->un_xbuf_attr)) { 11938 mutex_enter(SD_MUTEX(un)); 11939 11940 /* 11941 * Grab the time when the cmd completed. 11942 * This is used for determining if the system has been 11943 * idle long enough to be considered idle by the PM 11944 * framework. This lowers the overhead, and therefore 11945 * improves performance per I/O operation. 11946 */ 11947 un->un_pm_idle_time = ddi_get_time(); 11948 11949 un->un_ncmds_in_driver--; 11950 ASSERT(un->un_ncmds_in_driver >= 0); 11951 SD_INFO(SD_LOG_IO, un, 11952 "sd_buf_iodone: un_ncmds_in_driver = %ld\n", 11953 un->un_ncmds_in_driver); 11954 11955 mutex_exit(SD_MUTEX(un)); 11956 } 11957 11958 biodone(bp); /* bp is gone after this */ 11959 11960 SD_TRACE(SD_LOG_IO_CORE, un, "sd_buf_iodone: exit.\n"); 11961 } 11962 11963 11964 /* 11965 * Function: sd_uscsi_iodone 11966 * 11967 * Description: Frees the sd_xbuf & returns the buf to its originator. 11968 * 11969 * Context: May be called from interrupt context. 11970 */ 11971 /* ARGSUSED */ 11972 static void 11973 sd_uscsi_iodone(int index, struct sd_lun *un, struct buf *bp) 11974 { 11975 struct sd_xbuf *xp; 11976 11977 ASSERT(un != NULL); 11978 ASSERT(bp != NULL); 11979 11980 xp = SD_GET_XBUF(bp); 11981 ASSERT(xp != NULL); 11982 ASSERT(!mutex_owned(SD_MUTEX(un))); 11983 11984 SD_INFO(SD_LOG_IO, un, "sd_uscsi_iodone: entry.\n"); 11985 11986 bp->b_private = xp->xb_private; 11987 11988 mutex_enter(SD_MUTEX(un)); 11989 11990 /* 11991 * Grab the time when the cmd completed. 11992 * This is used for determining if the system has been 11993 * idle long enough to be considered idle by the PM 11994 * framework. This lowers the overhead, and therefore 11995 * improves performance per I/O operation.
11996 */ 11997 un->un_pm_idle_time = ddi_get_time(); 11998 11999 un->un_ncmds_in_driver--; 12000 ASSERT(un->un_ncmds_in_driver >= 0); 12001 SD_INFO(SD_LOG_IO, un, "sd_uscsi_iodone: un_ncmds_in_driver = %ld\n", 12002 un->un_ncmds_in_driver); 12003 12004 mutex_exit(SD_MUTEX(un)); 12005 12006 if (((struct uscsi_cmd *)(xp->xb_pktinfo))->uscsi_rqlen > 12007 SENSE_LENGTH) { 12008 kmem_free(xp, sizeof (struct sd_xbuf) - SENSE_LENGTH + 12009 MAX_SENSE_LENGTH); 12010 } else { 12011 kmem_free(xp, sizeof (struct sd_xbuf)); 12012 } 12013 12014 biodone(bp); 12015 12016 SD_INFO(SD_LOG_IO, un, "sd_uscsi_iodone: exit.\n"); 12017 } 12018 12019 12020 /* 12021 * Function: sd_mapblockaddr_iostart 12022 * 12023 * Description: Verify request lies within the partition limits for 12024 * the indicated minor device. Issue "overrun" buf if 12025 * request would exceed partition range. Converts 12026 * partition-relative block address to absolute. 12027 * 12028 * Context: Can sleep 12029 * 12030 * Issues: This follows what the old code did, in terms of accessing 12031 * some of the partition info in the unit struct without holding 12032 * the mutex. This is a general issue: if the partition info 12033 * can be altered while IO is in progress, then as soon as we send 12034 * a buf, its partitioning can be invalid before it gets to the 12035 * device. Probably the right fix is to move partitioning out 12036 * of the driver entirely. 12037 */ 12038 12039 static void 12040 sd_mapblockaddr_iostart(int index, struct sd_lun *un, struct buf *bp) 12041 { 12042 diskaddr_t nblocks; /* #blocks in the given partition */ 12043 daddr_t blocknum; /* Block number specified by the buf */ 12044 size_t requested_nblocks; 12045 size_t available_nblocks; 12046 int partition; 12047 diskaddr_t partition_offset; 12048 struct sd_xbuf *xp; 12049 12050 ASSERT(un != NULL); 12051 ASSERT(bp != NULL); 12052 ASSERT(!mutex_owned(SD_MUTEX(un))); 12053 12054 SD_TRACE(SD_LOG_IO_PARTITION, un, 12055 "sd_mapblockaddr_iostart: entry: buf:0x%p\n", bp); 12056 12057 xp = SD_GET_XBUF(bp); 12058 ASSERT(xp != NULL); 12059 12060 /* 12061 * If the geometry is not indicated as valid, attempt to access 12062 * the unit & verify the geometry/label. This can be the case for 12063 * removable-media devices, or if the device was opened in 12064 * NDELAY/NONBLOCK mode. 12065 */ 12066 partition = SDPART(bp->b_edev); 12067 12068 if (!SD_IS_VALID_LABEL(un)) { 12069 sd_ssc_t *ssc; 12070 /* 12071 * Initialize sd_ssc_t for internal uscsi commands. 12072 * To avoid a potential performance issue, we allocate 12073 * this memory only when the label is invalid. 12074 */ 12075 ssc = sd_ssc_init(un); 12076 12077 if (sd_ready_and_valid(ssc, partition) != SD_READY_VALID) { 12078 /* 12079 * For removable devices it is possible to start an 12080 * I/O without media by opening the device in nodelay 12081 * mode. Also for writable CDs there can be many 12082 * scenarios where there is no geometry yet but the 12083 * volume manager is trying to issue a read() just 12084 * because it can see a TOC on the CD. So do not print 12085 * a message for removables.
12086 */ 12087 if (!un->un_f_has_removable_media) { 12088 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 12089 "i/o to invalid geometry\n"); 12090 } 12091 bioerror(bp, EIO); 12092 bp->b_resid = bp->b_bcount; 12093 SD_BEGIN_IODONE(index, un, bp); 12094 12095 sd_ssc_fini(ssc); 12096 return; 12097 } 12098 sd_ssc_fini(ssc); 12099 } 12100 12101 nblocks = 0; 12102 (void) cmlb_partinfo(un->un_cmlbhandle, partition, 12103 &nblocks, &partition_offset, NULL, NULL, (void *)SD_PATH_DIRECT); 12104 12105 /* 12106 * blocknum is the starting block number of the request. At this 12107 * point it is still relative to the start of the minor device. 12108 */ 12109 blocknum = xp->xb_blkno; 12110 12111 /* 12112 * Legacy: If the starting block number is one past the last block 12113 * in the partition, do not set B_ERROR in the buf. 12114 */ 12115 if (blocknum == nblocks) { 12116 goto error_exit; 12117 } 12118 12119 /* 12120 * Confirm that the first block of the request lies within the 12121 * partition limits. Also the requested number of bytes must be 12122 * a multiple of the system block size. 12123 */ 12124 if ((blocknum < 0) || (blocknum >= nblocks) || 12125 ((bp->b_bcount & (un->un_sys_blocksize - 1)) != 0)) { 12126 bp->b_flags |= B_ERROR; 12127 goto error_exit; 12128 } 12129 12130 /* 12131 * If the requested # blocks exceeds the available # blocks, that 12132 * is an overrun of the partition. 12133 */ 12134 requested_nblocks = SD_BYTES2SYSBLOCKS(un, bp->b_bcount); 12135 available_nblocks = (size_t)(nblocks - blocknum); 12136 ASSERT(nblocks >= blocknum); 12137 12138 if (requested_nblocks > available_nblocks) { 12139 /* 12140 * Allocate an "overrun" buf to allow the request to proceed 12141 * for the amount of space available in the partition. The 12142 * amount not transferred will be added into the b_resid 12143 * when the operation is complete. The overrun buf 12144 * replaces the original buf here, and the original buf 12145 * is saved inside the overrun buf, for later use. 12146 */ 12147 size_t resid = SD_SYSBLOCKS2BYTES(un, 12148 (offset_t)(requested_nblocks - available_nblocks)); 12149 size_t count = bp->b_bcount - resid; 12150 /* 12151 * Note: count is an unsigned entity, thus it'll NEVER 12152 * be less than 0, so ASSERT that the original values are 12153 * correct. 12154 */ 12155 ASSERT(bp->b_bcount >= resid); 12156 12157 bp = sd_bioclone_alloc(bp, count, blocknum, 12158 (int (*)(struct buf *)) sd_mapblockaddr_iodone); 12159 xp = SD_GET_XBUF(bp); /* Update for 'new' bp! */ 12160 ASSERT(xp != NULL); 12161 } 12162 12163 /* At this point there should be no residual for this buf. */ 12164 ASSERT(bp->b_resid == 0); 12165 12166 /* Convert the block number to an absolute address. */ 12167 xp->xb_blkno += partition_offset; 12168 12169 SD_NEXT_IOSTART(index, un, bp); 12170 12171 SD_TRACE(SD_LOG_IO_PARTITION, un, 12172 "sd_mapblockaddr_iostart: exit 0: buf:0x%p\n", bp); 12173 12174 return; 12175 12176 error_exit: 12177 bp->b_resid = bp->b_bcount; 12178 SD_BEGIN_IODONE(index, un, bp); 12179 SD_TRACE(SD_LOG_IO_PARTITION, un, 12180 "sd_mapblockaddr_iostart: exit 1: buf:0x%p\n", bp); 12181 } 12182 12183 12184 /* 12185 * Function: sd_mapblockaddr_iodone 12186 * 12187 * Description: Completion-side processing for partition management. 12188 * 12189 * Context: May be called under interrupt context 12190 */ 12191 12192 static void 12193 sd_mapblockaddr_iodone(int index, struct sd_lun *un, struct buf *bp) 12194 { 12195 /* int partition; */ /* Not used, see below.
*/ 12196 ASSERT(un != NULL); 12197 ASSERT(bp != NULL); 12198 ASSERT(!mutex_owned(SD_MUTEX(un))); 12199 12200 SD_TRACE(SD_LOG_IO_PARTITION, un, 12201 "sd_mapblockaddr_iodone: entry: buf:0x%p\n", bp); 12202 12203 if (bp->b_iodone == (int (*)(struct buf *)) sd_mapblockaddr_iodone) { 12204 /* 12205 * We have an "overrun" buf to deal with... 12206 */ 12207 struct sd_xbuf *xp; 12208 struct buf *obp; /* ptr to the original buf */ 12209 12210 xp = SD_GET_XBUF(bp); 12211 ASSERT(xp != NULL); 12212 12213 /* Retrieve the pointer to the original buf */ 12214 obp = (struct buf *)xp->xb_private; 12215 ASSERT(obp != NULL); 12216 12217 obp->b_resid = obp->b_bcount - (bp->b_bcount - bp->b_resid); 12218 bioerror(obp, bp->b_error); 12219 12220 sd_bioclone_free(bp); 12221 12222 /* 12223 * Get back the original buf. 12224 * Note that since the restoration of xb_blkno below 12225 * was removed, the sd_xbuf is not needed. 12226 */ 12227 bp = obp; 12228 /* 12229 * xp = SD_GET_XBUF(bp); 12230 * ASSERT(xp != NULL); 12231 */ 12232 } 12233 12234 /* 12235 * Convert xp->xb_blkno back to a minor-device relative value. 12236 * Note: this has been commented out, as it is not needed in the 12237 * current implementation of the driver (ie, since this function 12238 * is at the top of the layering chains, so the info will be 12239 * discarded) and it is in the "hot" IO path. 12240 * 12241 * partition = getminor(bp->b_edev) & SDPART_MASK; 12242 * xp->xb_blkno -= un->un_offset[partition]; 12243 */ 12244 12245 SD_NEXT_IODONE(index, un, bp); 12246 12247 SD_TRACE(SD_LOG_IO_PARTITION, un, 12248 "sd_mapblockaddr_iodone: exit: buf:0x%p\n", bp); 12249 } 12250 12251 12252 /* 12253 * Function: sd_mapblocksize_iostart 12254 * 12255 * Description: Convert between system block size (un->un_sys_blocksize) 12256 * and target block size (un->un_tgt_blocksize). 12257 * 12258 * Context: Can sleep to allocate resources. 12259 * 12260 * Assumptions: A higher layer has already performed any partition validation, 12261 * and converted the xp->xb_blkno to an absolute value relative 12262 * to the start of the device. 12263 * 12264 * It is also assumed that the higher layer has implemented 12265 * an "overrun" mechanism for the case where the request would 12266 * read/write beyond the end of a partition. In this case we 12267 * assume (and ASSERT) that bp->b_resid == 0. 12268 * 12269 * Note: The implementation for this routine assumes the target 12270 * block size remains constant between allocation and transport. 12271 */ 12272 12273 static void 12274 sd_mapblocksize_iostart(int index, struct sd_lun *un, struct buf *bp) 12275 { 12276 struct sd_mapblocksize_info *bsp; 12277 struct sd_xbuf *xp; 12278 offset_t first_byte; 12279 daddr_t start_block, end_block; 12280 daddr_t request_bytes; 12281 ushort_t is_aligned = FALSE; 12282 12283 ASSERT(un != NULL); 12284 ASSERT(bp != NULL); 12285 ASSERT(!mutex_owned(SD_MUTEX(un))); 12286 ASSERT(bp->b_resid == 0); 12287 12288 SD_TRACE(SD_LOG_IO_RMMEDIA, un, 12289 "sd_mapblocksize_iostart: entry: buf:0x%p\n", bp); 12290 12291 /* 12292 * For a non-writable CD, a write request is an error 12293 */ 12294 if (ISCD(un) && ((bp->b_flags & B_READ) == 0) && 12295 (un->un_f_mmc_writable_media == FALSE)) { 12296 bioerror(bp, EIO); 12297 bp->b_resid = bp->b_bcount; 12298 SD_BEGIN_IODONE(index, un, bp); 12299 return; 12300 } 12301 12302 /* 12303 * We do not need a shadow buf if the device is using 12304 * un->un_sys_blocksize as its block size or if bcount == 0.
In this case there is no layer-private data block allocated. 12306 */ 12307 if ((un->un_tgt_blocksize == un->un_sys_blocksize) || 12308 (bp->b_bcount == 0)) { 12309 goto done; 12310 } 12311 12312 #if defined(__i386) || defined(__amd64) 12313 /* We do not support non-block-aligned transfers for ROD devices */ 12314 ASSERT(!ISROD(un)); 12315 #endif 12316 12317 xp = SD_GET_XBUF(bp); 12318 ASSERT(xp != NULL); 12319 12320 SD_INFO(SD_LOG_IO_RMMEDIA, un, "sd_mapblocksize_iostart: " 12321 "tgt_blocksize:0x%x sys_blocksize: 0x%x\n", 12322 un->un_tgt_blocksize, un->un_sys_blocksize); 12323 SD_INFO(SD_LOG_IO_RMMEDIA, un, "sd_mapblocksize_iostart: " 12324 "request start block:0x%x\n", xp->xb_blkno); 12325 SD_INFO(SD_LOG_IO_RMMEDIA, un, "sd_mapblocksize_iostart: " 12326 "request len:0x%x\n", bp->b_bcount); 12327 12328 /* 12329 * Allocate the layer-private data area for the mapblocksize layer. 12330 * Layers are allowed to use the xb_private member of the sd_xbuf 12331 * struct to store the pointer to their layer-private data block, but 12332 * each layer also has the responsibility of restoring the prior 12333 * contents of xb_private before returning the buf/xbuf to the 12334 * higher layer that sent it. 12335 * 12336 * Here we save the prior contents of xp->xb_private into the 12337 * bsp->mbs_oprivate field of our layer-private data area. This value 12338 * is restored by sd_mapblocksize_iodone() just prior to freeing up 12339 * the layer-private area and returning the buf/xbuf to the layer 12340 * that sent it. 12341 * 12342 * Note that here we use kmem_zalloc for the allocation as there are 12343 * parts of the mapblocksize code that expect certain fields to be 12344 * zero unless explicitly set to a required value. 12345 */ 12346 bsp = kmem_zalloc(sizeof (struct sd_mapblocksize_info), KM_SLEEP); 12347 bsp->mbs_oprivate = xp->xb_private; 12348 xp->xb_private = bsp; 12349 12350 /* 12351 * This treats the data on the disk (target) as an array of bytes. 12352 * first_byte is the byte offset, from the beginning of the device, 12353 * to the location of the request. This is converted from a 12354 * un->un_sys_blocksize block address to a byte offset, and then back 12355 * to a block address based upon a un->un_tgt_blocksize block size. 12356 * 12357 * xp->xb_blkno should be absolute upon entry into this function, 12358 * but it is based upon partitions that use the "system" 12359 * block size. It must be adjusted to reflect the block size of 12360 * the target. 12361 * 12362 * Note that end_block is actually the block that follows the last 12363 * block of the request, but that's what is needed for the computation. 12364 */ 12365 first_byte = SD_SYSBLOCKS2BYTES(un, (offset_t)xp->xb_blkno); 12366 start_block = xp->xb_blkno = first_byte / un->un_tgt_blocksize; 12367 end_block = (first_byte + bp->b_bcount + un->un_tgt_blocksize - 1) / 12368 un->un_tgt_blocksize; 12369 12370 /* request_bytes is rounded up to a multiple of the target block size */ 12371 request_bytes = (end_block - start_block) * un->un_tgt_blocksize; 12372 12373 /* 12374 * See if the starting address of the request and the request 12375 * length are aligned on a un->un_tgt_blocksize boundary. If aligned 12376 * then we do not need to allocate a shadow buf to handle the request. 12377 */ 12378 if (((first_byte % un->un_tgt_blocksize) == 0) && 12379 ((bp->b_bcount % un->un_tgt_blocksize) == 0)) { 12380 is_aligned = TRUE; 12381 } 12382 12383 if ((bp->b_flags & B_READ) == 0) { 12384 /* 12385 * Lock the range for a write operation.
An aligned request is 12386 * considered a simple write; otherwise the request must be a 12387 * read-modify-write. 12388 */ 12389 bsp->mbs_wmp = sd_range_lock(un, start_block, end_block - 1, 12390 (is_aligned == TRUE) ? SD_WTYPE_SIMPLE : SD_WTYPE_RMW); 12391 } 12392 12393 /* 12394 * Alloc a shadow buf if the request is not aligned. Also, this is 12395 * where the READ command is generated for a read-modify-write. (The 12396 * write phase is deferred until after the read completes.) 12397 */ 12398 if (is_aligned == FALSE) { 12399 12400 struct sd_mapblocksize_info *shadow_bsp; 12401 struct sd_xbuf *shadow_xp; 12402 struct buf *shadow_bp; 12403 12404 /* 12405 * Allocate the shadow buf and its associated xbuf. Note that 12406 * after this call the xb_blkno value in both the original 12407 * buf's sd_xbuf _and_ the shadow buf's sd_xbuf will be the 12408 * same: absolute, relative to the start of the device, and 12409 * adjusted for the target block size. The b_blkno in the 12410 * shadow buf will also be set to this value. We should never 12411 * change b_blkno in the original bp however. 12412 * 12413 * Note also that the shadow buf will always need to be a 12414 * READ command, regardless of whether the incoming command 12415 * is a READ or a WRITE. 12416 */ 12417 shadow_bp = sd_shadow_buf_alloc(bp, request_bytes, B_READ, 12418 xp->xb_blkno, 12419 (int (*)(struct buf *)) sd_mapblocksize_iodone); 12420 12421 shadow_xp = SD_GET_XBUF(shadow_bp); 12422 12423 /* 12424 * Allocate the layer-private data for the shadow buf. 12425 * (No need to preserve xb_private in the shadow xbuf.) 12426 */ 12427 shadow_xp->xb_private = shadow_bsp = 12428 kmem_zalloc(sizeof (struct sd_mapblocksize_info), KM_SLEEP); 12429 12430 /* 12431 * bsp->mbs_copy_offset is used later by sd_mapblocksize_iodone 12432 * to figure out where the start of the user data is (based upon 12433 * the system block size) in the data returned by the READ 12434 * command (which will be based upon the target blocksize). Note 12435 * that this is only really used if the request is unaligned. 12436 */ 12437 bsp->mbs_copy_offset = (ssize_t)(first_byte - 12438 ((offset_t)xp->xb_blkno * un->un_tgt_blocksize)); 12439 ASSERT((bsp->mbs_copy_offset >= 0) && 12440 (bsp->mbs_copy_offset < un->un_tgt_blocksize)); 12441 12442 shadow_bsp->mbs_copy_offset = bsp->mbs_copy_offset; 12443 12444 shadow_bsp->mbs_layer_index = bsp->mbs_layer_index = index; 12445 12446 /* Transfer the wmap (if any) to the shadow buf */ 12447 shadow_bsp->mbs_wmp = bsp->mbs_wmp; 12448 bsp->mbs_wmp = NULL; 12449 12450 /* 12451 * The shadow buf goes on from here in place of the 12452 * original buf. 12453 */ 12454 shadow_bsp->mbs_orig_bp = bp; 12455 bp = shadow_bp; 12456 } 12457 12458 SD_INFO(SD_LOG_IO_RMMEDIA, un, 12459 "sd_mapblocksize_iostart: tgt start block:0x%x\n", xp->xb_blkno); 12460 SD_INFO(SD_LOG_IO_RMMEDIA, un, 12461 "sd_mapblocksize_iostart: tgt request len:0x%x\n", 12462 request_bytes); 12463 SD_INFO(SD_LOG_IO_RMMEDIA, un, 12464 "sd_mapblocksize_iostart: shadow buf:0x%x\n", bp); 12465 12466 done: 12467 SD_NEXT_IOSTART(index, un, bp); 12468 12469 SD_TRACE(SD_LOG_IO_RMMEDIA, un, 12470 "sd_mapblocksize_iostart: exit: buf:0x%p\n", bp); 12471 } 12472 12473 12474 /* 12475 * Function: sd_mapblocksize_iodone 12476 * 12477 * Description: Completion side processing for block-size mapping.
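 *		For an aligned request this is a pass-through. For an
 *		unaligned request the data is coalesced from the shadow
 *		buf back into the original buf; for the READ phase of a
 *		read-modify-write, the command is converted to a WRITE
 *		and re-dispatched via the sd_wmr_tq taskq.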
12478 * 12479 * Context: May be called under interrupt context 12480 */ 12481 12482 static void 12483 sd_mapblocksize_iodone(int index, struct sd_lun *un, struct buf *bp) 12484 { 12485 struct sd_mapblocksize_info *bsp; 12486 struct sd_xbuf *xp; 12487 struct sd_xbuf *orig_xp; /* sd_xbuf for the original buf */ 12488 struct buf *orig_bp; /* ptr to the original buf */ 12489 offset_t shadow_end; 12490 offset_t request_end; 12491 offset_t shadow_start; 12492 ssize_t copy_offset; 12493 size_t copy_length; 12494 size_t shortfall; 12495 uint_t is_write; /* TRUE if this bp is a WRITE */ 12496 uint_t has_wmap; /* TRUE if this bp has a wmap */ 12497 12498 ASSERT(un != NULL); 12499 ASSERT(bp != NULL); 12500 12501 SD_TRACE(SD_LOG_IO_RMMEDIA, un, 12502 "sd_mapblocksize_iodone: entry: buf:0x%p\n", bp); 12503 12504 /* 12505 * There is no shadow buf or layer-private data if the target is 12506 * using un->un_sys_blocksize as its block size or if bcount == 0. 12507 */ 12508 if ((un->un_tgt_blocksize == un->un_sys_blocksize) || 12509 (bp->b_bcount == 0)) { 12510 goto exit; 12511 } 12512 12513 xp = SD_GET_XBUF(bp); 12514 ASSERT(xp != NULL); 12515 12516 /* Retrieve the pointer to the layer-private data area from the xbuf. */ 12517 bsp = xp->xb_private; 12518 12519 is_write = ((bp->b_flags & B_READ) == 0) ? TRUE : FALSE; 12520 has_wmap = (bsp->mbs_wmp != NULL) ? TRUE : FALSE; 12521 12522 if (is_write) { 12523 /* 12524 * For a WRITE request we must free up the block range that 12525 * we have locked up. This holds regardless of whether this is 12526 * an aligned write request or a read-modify-write request. 12527 */ 12528 sd_range_unlock(un, bsp->mbs_wmp); 12529 bsp->mbs_wmp = NULL; 12530 } 12531 12532 if ((bp->b_iodone != (int(*)(struct buf *))sd_mapblocksize_iodone)) { 12533 /* 12534 * An aligned read or write command will have no shadow buf; 12535 * there is not much else to do with it. 12536 */ 12537 goto done; 12538 } 12539 12540 orig_bp = bsp->mbs_orig_bp; 12541 ASSERT(orig_bp != NULL); 12542 orig_xp = SD_GET_XBUF(orig_bp); 12543 ASSERT(orig_xp != NULL); 12544 ASSERT(!mutex_owned(SD_MUTEX(un))); 12545 12546 if (!is_write && has_wmap) { 12547 /* 12548 * A READ with a wmap means this is the READ phase of a 12549 * read-modify-write. If an error occurred on the READ then 12550 * we do not proceed with the WRITE phase or copy any data. 12551 * Just release the write maps and return with an error. 12552 */ 12553 if ((bp->b_resid != 0) || (bp->b_error != 0)) { 12554 orig_bp->b_resid = orig_bp->b_bcount; 12555 bioerror(orig_bp, bp->b_error); 12556 sd_range_unlock(un, bsp->mbs_wmp); 12557 goto freebuf_done; 12558 } 12559 } 12560 12561 /* 12562 * Here is where we set up to copy the data from the shadow buf 12563 * into the space associated with the original buf. 12564 * 12565 * To deal with the conversion between block sizes, these 12566 * computations treat the data as an array of bytes, with the 12567 * first byte (byte 0) corresponding to the first byte in the 12568 * first block on the disk. 12569 */ 12570 12571 /* 12572 * shadow_start and shadow_end indicate the byte range of the 12573 * data returned with the shadow IO request. 12574 */ 12575 shadow_start = SD_TGTBLOCKS2BYTES(un, (offset_t)xp->xb_blkno); 12576 shadow_end = shadow_start + bp->b_bcount - bp->b_resid; 12577 12578 /* 12579 * copy_offset gives the offset (in bytes) from the start of the first 12580 * block of the READ request to the beginning of the data.
We retrieve 12581 * this value from the mbs_copy_offset field of the layer-private 12582 * data area, where it was saved by sd_mapblocksize_iostart(). 12583 * copy_length gives the amount of data to be copied (in bytes). 12584 */ 12585 copy_offset = bsp->mbs_copy_offset; 12586 ASSERT((copy_offset >= 0) && (copy_offset < un->un_tgt_blocksize)); 12587 copy_length = orig_bp->b_bcount; 12588 request_end = shadow_start + copy_offset + orig_bp->b_bcount; 12589 12590 /* 12591 * Set up the resid and error fields of orig_bp as appropriate. 12592 */ 12593 if (shadow_end >= request_end) { 12594 /* We got all the requested data; set resid to zero */ 12595 orig_bp->b_resid = 0; 12596 } else { 12597 /* 12598 * We failed to get enough data to fully satisfy the original 12599 * request. Just copy back whatever data we got and set 12600 * up the residual and error code as required. 12601 * 12602 * 'shortfall' is the amount by which the data received with the 12603 * shadow buf has "fallen short" of the requested amount. 12604 */ 12605 shortfall = (size_t)(request_end - shadow_end); 12606 12607 if (shortfall > orig_bp->b_bcount) { 12608 /* 12609 * We did not get enough data to even partially 12610 * fulfill the original request. The residual is 12611 * equal to the amount requested. 12612 */ 12613 orig_bp->b_resid = orig_bp->b_bcount; 12614 } else { 12615 /* 12616 * We did not get all the data that we requested 12617 * from the device, but we will try to return what 12618 * portion we did get. 12619 */ 12620 orig_bp->b_resid = shortfall; 12621 } 12622 ASSERT(copy_length >= orig_bp->b_resid); 12623 copy_length -= orig_bp->b_resid; 12624 } 12625 12626 /* Propagate the error code from the shadow buf to the original buf */ 12627 bioerror(orig_bp, bp->b_error); 12628 12629 if (is_write) { 12630 goto freebuf_done; /* No data copying for a WRITE */ 12631 } 12632 12633 if (has_wmap) { 12634 /* 12635 * This is a READ command from the READ phase of a 12636 * read-modify-write request. We have to copy the data given 12637 * by the user OVER the data returned by the READ command, 12638 * then convert the command from a READ to a WRITE and send 12639 * it back to the target. 12640 */ 12641 bcopy(orig_bp->b_un.b_addr, bp->b_un.b_addr + copy_offset, 12642 copy_length); 12643 12644 bp->b_flags &= ~((int)B_READ); /* Convert to a WRITE */ 12645 12646 /* 12647 * Dispatch the WRITE command to the taskq thread, which 12648 * will in turn send the command to the target. When the 12649 * WRITE command completes, we (sd_mapblocksize_iodone()) 12650 * will get called again as part of the iodone chain 12651 * processing for it. Note that we will still be dealing 12652 * with the shadow buf at that point. 12653 */ 12654 if (taskq_dispatch(sd_wmr_tq, sd_read_modify_write_task, bp, 12655 KM_NOSLEEP) != 0) { 12656 /* 12657 * Dispatch was successful so we are done. Return 12658 * without going any higher up the iodone chain. Do 12659 * not free up any layer-private data until after the 12660 * WRITE completes. 12661 */ 12662 return; 12663 } 12664 12665 /* 12666 * Dispatch of the WRITE command failed; set up the error 12667 * condition and send this IO back up the iodone chain. 12668 */ 12669 bioerror(orig_bp, EIO); 12670 orig_bp->b_resid = orig_bp->b_bcount; 12671 12672 } else { 12673 /* 12674 * This is a regular READ request (ie, not a RMW). Copy the 12675 * data from the shadow buf into the original buf.
The 12676 * copy_offset compensates for any "misalignment" between the 12677 * shadow buf (with its un->un_tgt_blocksize blocks) and the 12678 * original buf (with its un->un_sys_blocksize blocks). 12679 */ 12680 bcopy(bp->b_un.b_addr + copy_offset, orig_bp->b_un.b_addr, 12681 copy_length); 12682 } 12683 12684 freebuf_done: 12685 12686 /* 12687 * At this point we still have both the shadow buf AND the original 12688 * buf to deal with, as well as the layer-private data area in each. 12689 * Local variables are as follows: 12690 * 12691 * bp -- points to shadow buf 12692 * xp -- points to xbuf of shadow buf 12693 * bsp -- points to layer-private data area of shadow buf 12694 * orig_bp -- points to original buf 12695 * 12696 * First free the shadow buf and its associated xbuf, then free the 12697 * layer-private data area from the shadow buf. There is no need to 12698 * restore xb_private in the shadow xbuf. 12699 */ 12700 sd_shadow_buf_free(bp); 12701 kmem_free(bsp, sizeof (struct sd_mapblocksize_info)); 12702 12703 /* 12704 * Now update the local variables to point to the original buf, xbuf, 12705 * and layer-private area. 12706 */ 12707 bp = orig_bp; 12708 xp = SD_GET_XBUF(bp); 12709 ASSERT(xp != NULL); 12710 ASSERT(xp == orig_xp); 12711 bsp = xp->xb_private; 12712 ASSERT(bsp != NULL); 12713 12714 done: 12715 /* 12716 * Restore xb_private to whatever it was set to by the next higher 12717 * layer in the chain, then free the layer-private data area. 12718 */ 12719 xp->xb_private = bsp->mbs_oprivate; 12720 kmem_free(bsp, sizeof (struct sd_mapblocksize_info)); 12721 12722 exit: 12723 SD_TRACE(SD_LOG_IO_RMMEDIA, SD_GET_UN(bp), 12724 "sd_mapblocksize_iodone: calling SD_NEXT_IODONE: buf:0x%p\n", bp); 12725 12726 SD_NEXT_IODONE(index, un, bp); 12727 } 12728 12729 12730 /* 12731 * Function: sd_checksum_iostart 12732 * 12733 * Description: A stub function for a layer that's currently not used. 12734 * For now just a placeholder. 12735 * 12736 * Context: Kernel thread context 12737 */ 12738 12739 static void 12740 sd_checksum_iostart(int index, struct sd_lun *un, struct buf *bp) 12741 { 12742 ASSERT(un != NULL); 12743 ASSERT(bp != NULL); 12744 ASSERT(!mutex_owned(SD_MUTEX(un))); 12745 SD_NEXT_IOSTART(index, un, bp); 12746 } 12747 12748 12749 /* 12750 * Function: sd_checksum_iodone 12751 * 12752 * Description: A stub function for a layer that's currently not used. 12753 * For now just a placeholder. 12754 * 12755 * Context: May be called under interrupt context 12756 */ 12757 12758 static void 12759 sd_checksum_iodone(int index, struct sd_lun *un, struct buf *bp) 12760 { 12761 ASSERT(un != NULL); 12762 ASSERT(bp != NULL); 12763 ASSERT(!mutex_owned(SD_MUTEX(un))); 12764 SD_NEXT_IODONE(index, un, bp); 12765 } 12766 12767 12768 /* 12769 * Function: sd_checksum_uscsi_iostart 12770 * 12771 * Description: A stub function for a layer that's currently not used. 12772 * For now just a placeholder. 12773 * 12774 * Context: Kernel thread context 12775 */ 12776 12777 static void 12778 sd_checksum_uscsi_iostart(int index, struct sd_lun *un, struct buf *bp) 12779 { 12780 ASSERT(un != NULL); 12781 ASSERT(bp != NULL); 12782 ASSERT(!mutex_owned(SD_MUTEX(un))); 12783 SD_NEXT_IOSTART(index, un, bp); 12784 } 12785 12786 12787 /* 12788 * Function: sd_checksum_uscsi_iodone 12789 * 12790 * Description: A stub function for a layer that's currently not used. 12791 * For now just a placeholder. 
 *
 * Context: May be called under interrupt context
 */

static void
sd_checksum_uscsi_iodone(int index, struct sd_lun *un, struct buf *bp)
{
	ASSERT(un != NULL);
	ASSERT(bp != NULL);
	ASSERT(!mutex_owned(SD_MUTEX(un)));
	SD_NEXT_IODONE(index, un, bp);
}


/*
 * Function: sd_pm_iostart
 *
 * Description: iostart-side routine for power management.
 *
 * Context: Kernel thread context
 */

static void
sd_pm_iostart(int index, struct sd_lun *un, struct buf *bp)
{
	ASSERT(un != NULL);
	ASSERT(bp != NULL);
	ASSERT(!mutex_owned(SD_MUTEX(un)));
	ASSERT(!mutex_owned(&un->un_pm_mutex));

	SD_TRACE(SD_LOG_IO_PM, un, "sd_pm_iostart: entry\n");

	if (sd_pm_entry(un) != DDI_SUCCESS) {
		/*
		 * Set up to return the failed buf back up the 'iodone'
		 * side of the calling chain.
		 */
		bioerror(bp, EIO);
		bp->b_resid = bp->b_bcount;

		SD_BEGIN_IODONE(index, un, bp);

		SD_TRACE(SD_LOG_IO_PM, un, "sd_pm_iostart: exit\n");
		return;
	}

	SD_NEXT_IOSTART(index, un, bp);

	SD_TRACE(SD_LOG_IO_PM, un, "sd_pm_iostart: exit\n");
}


/*
 * Function: sd_pm_iodone
 *
 * Description: iodone-side routine for power management.
 *
 * Context: may be called from interrupt context
 */

static void
sd_pm_iodone(int index, struct sd_lun *un, struct buf *bp)
{
	ASSERT(un != NULL);
	ASSERT(bp != NULL);
	ASSERT(!mutex_owned(&un->un_pm_mutex));

	SD_TRACE(SD_LOG_IO_PM, un, "sd_pm_iodone: entry\n");

	/*
	 * After attach the following flag is only read, so don't
	 * take the penalty of acquiring a mutex for it.
	 */
	if (un->un_f_pm_is_enabled == TRUE) {
		sd_pm_exit(un);
	}

	SD_NEXT_IODONE(index, un, bp);

	SD_TRACE(SD_LOG_IO_PM, un, "sd_pm_iodone: exit\n");
}


/*
 * Function: sd_core_iostart
 *
 * Description: Primary driver function for enqueuing buf(9S) structs from
 *		the system and initiating IO to the target device.
 *
 * Context: Kernel thread context. Can sleep.
 *
 * Assumptions: - The given xp->xb_blkno is absolute
 *		  (ie, relative to the start of the device).
 *		- The IO is to be done using the native blocksize of
 *		  the device, as specified in un->un_tgt_blocksize.
 */
/* ARGSUSED */
static void
sd_core_iostart(int index, struct sd_lun *un, struct buf *bp)
{
	struct sd_xbuf *xp;

	ASSERT(un != NULL);
	ASSERT(bp != NULL);
	ASSERT(!mutex_owned(SD_MUTEX(un)));
	ASSERT(bp->b_resid == 0);

	SD_TRACE(SD_LOG_IO_CORE, un, "sd_core_iostart: entry: bp:0x%p\n", bp);

	xp = SD_GET_XBUF(bp);
	ASSERT(xp != NULL);

	mutex_enter(SD_MUTEX(un));

	/*
	 * If we are currently in the failfast state, fail any new IO
	 * that has B_FAILFAST set, then return.
	 */
	if ((bp->b_flags & B_FAILFAST) &&
	    (un->un_failfast_state == SD_FAILFAST_ACTIVE)) {
		mutex_exit(SD_MUTEX(un));
		bioerror(bp, EIO);
		bp->b_resid = bp->b_bcount;
		SD_BEGIN_IODONE(index, un, bp);
		return;
	}

	if (SD_IS_DIRECT_PRIORITY(xp)) {
		/*
		 * Priority command -- transport it immediately.
12922 * 12923 * Note: We may want to assert that USCSI_DIAGNOSE is set, 12924 * because all direct priority commands should be associated 12925 * with error recovery actions which we don't want to retry. 12926 */ 12927 sd_start_cmds(un, bp); 12928 } else { 12929 /* 12930 * Normal command -- add it to the wait queue, then start 12931 * transporting commands from the wait queue. 12932 */ 12933 sd_add_buf_to_waitq(un, bp); 12934 SD_UPDATE_KSTATS(un, kstat_waitq_enter, bp); 12935 sd_start_cmds(un, NULL); 12936 } 12937 12938 mutex_exit(SD_MUTEX(un)); 12939 12940 SD_TRACE(SD_LOG_IO_CORE, un, "sd_core_iostart: exit: bp:0x%p\n", bp); 12941 } 12942 12943 12944 /* 12945 * Function: sd_init_cdb_limits 12946 * 12947 * Description: This is to handle scsi_pkt initialization differences 12948 * between the driver platforms. 12949 * 12950 * Legacy behaviors: 12951 * 12952 * If the block number or the sector count exceeds the 12953 * capabilities of a Group 0 command, shift over to a 12954 * Group 1 command. We don't blindly use Group 1 12955 * commands because a) some drives (CDC Wren IVs) get a 12956 * bit confused, and b) there is probably a fair amount 12957 * of speed difference for a target to receive and decode 12958 * a 10 byte command instead of a 6 byte command. 12959 * 12960 * The xfer time difference of 6 vs 10 byte CDBs is 12961 * still significant so this code is still worthwhile. 12962 * 10 byte CDBs are very inefficient with the fas HBA driver 12963 * and older disks. Each CDB byte took 1 usec with some 12964 * popular disks. 12965 * 12966 * Context: Must be called at attach time 12967 */ 12968 12969 static void 12970 sd_init_cdb_limits(struct sd_lun *un) 12971 { 12972 int hba_cdb_limit; 12973 12974 /* 12975 * Use CDB_GROUP1 commands for most devices except for 12976 * parallel SCSI fixed drives in which case we get better 12977 * performance using CDB_GROUP0 commands (where applicable). 12978 */ 12979 un->un_mincdb = SD_CDB_GROUP1; 12980 #if !defined(__fibre) 12981 if (!un->un_f_is_fibre && !un->un_f_cfg_is_atapi && !ISROD(un) && 12982 !un->un_f_has_removable_media) { 12983 un->un_mincdb = SD_CDB_GROUP0; 12984 } 12985 #endif 12986 12987 /* 12988 * Try to read the max-cdb-length supported by HBA. 12989 */ 12990 un->un_max_hba_cdb = scsi_ifgetcap(SD_ADDRESS(un), "max-cdb-length", 1); 12991 if (0 >= un->un_max_hba_cdb) { 12992 un->un_max_hba_cdb = CDB_GROUP4; 12993 hba_cdb_limit = SD_CDB_GROUP4; 12994 } else if (0 < un->un_max_hba_cdb && 12995 un->un_max_hba_cdb < CDB_GROUP1) { 12996 hba_cdb_limit = SD_CDB_GROUP0; 12997 } else if (CDB_GROUP1 <= un->un_max_hba_cdb && 12998 un->un_max_hba_cdb < CDB_GROUP5) { 12999 hba_cdb_limit = SD_CDB_GROUP1; 13000 } else if (CDB_GROUP5 <= un->un_max_hba_cdb && 13001 un->un_max_hba_cdb < CDB_GROUP4) { 13002 hba_cdb_limit = SD_CDB_GROUP5; 13003 } else { 13004 hba_cdb_limit = SD_CDB_GROUP4; 13005 } 13006 13007 /* 13008 * Use CDB_GROUP5 commands for removable devices. Use CDB_GROUP4 13009 * commands for fixed disks unless we are building for a 32 bit 13010 * kernel. 13011 */ 13012 #ifdef _LP64 13013 un->un_maxcdb = (un->un_f_has_removable_media) ? SD_CDB_GROUP5 : 13014 min(hba_cdb_limit, SD_CDB_GROUP4); 13015 #else 13016 un->un_maxcdb = (un->un_f_has_removable_media) ? SD_CDB_GROUP5 : 13017 min(hba_cdb_limit, SD_CDB_GROUP1); 13018 #endif 13019 13020 un->un_status_len = (int)((un->un_f_arq_enabled == TRUE) 13021 ? sizeof (struct scsi_arq_status) : 1); 13022 un->un_cmd_timeout = (ushort_t)sd_io_time; 13023 un->un_uscsi_timeout = ((ISCD(un)) ? 
	    2 : 1) * un->un_cmd_timeout;
}


/*
 * Function: sd_initpkt_for_buf
 *
 * Description: Allocate and initialize for transport a scsi_pkt struct,
 *		based upon the info specified in the given buf struct.
 *
 *		Assumes the xb_blkno in the request is absolute (ie,
 *		relative to the start of the device, NOT the partition!).
 *		Also assumes that the request is using the native block
 *		size of the device (as returned by the READ CAPACITY
 *		command).
 *
 * Return Code: SD_PKT_ALLOC_SUCCESS
 *		SD_PKT_ALLOC_FAILURE
 *		SD_PKT_ALLOC_FAILURE_NO_DMA
 *		SD_PKT_ALLOC_FAILURE_CDB_TOO_SMALL
 *
 * Context: Kernel thread and may be called from software interrupt context
 *		as part of a sdrunout callback. This function may not block
 *		or call routines that block.
 */

static int
sd_initpkt_for_buf(struct buf *bp, struct scsi_pkt **pktpp)
{
	struct sd_xbuf	*xp;
	struct scsi_pkt	*pktp = NULL;
	struct sd_lun	*un;
	size_t		blockcount;
	daddr_t		startblock;
	int		rval;
	int		cmd_flags;

	ASSERT(bp != NULL);
	ASSERT(pktpp != NULL);
	xp = SD_GET_XBUF(bp);
	ASSERT(xp != NULL);
	un = SD_GET_UN(bp);
	ASSERT(un != NULL);
	ASSERT(mutex_owned(SD_MUTEX(un)));
	ASSERT(bp->b_resid == 0);

	SD_TRACE(SD_LOG_IO_CORE, un,
	    "sd_initpkt_for_buf: entry: buf:0x%p\n", bp);

	mutex_exit(SD_MUTEX(un));

#if defined(__i386) || defined(__amd64)	/* DMAFREE for x86 only */
	if (xp->xb_pkt_flags & SD_XB_DMA_FREED) {
		/*
		 * Already have a scsi_pkt -- just need DMA resources.
		 * We must recompute the CDB in case the mapping returns
		 * a nonzero pkt_resid.
		 * Note: if this is a portion of a PKT_DMA_PARTIAL transfer
		 * that is being retried, the unmap/remap of the DMA resources
		 * will result in the entire transfer starting over again
		 * from the very first block.
		 */
		ASSERT(xp->xb_pktp != NULL);
		pktp = xp->xb_pktp;
	} else {
		pktp = NULL;
	}
#endif /* __i386 || __amd64 */

	startblock = xp->xb_blkno;	/* Absolute block num. */
	blockcount = SD_BYTES2TGTBLOCKS(un, bp->b_bcount);

	cmd_flags = un->un_pkt_flags | (xp->xb_pkt_flags & SD_XB_INITPKT_MASK);

	/*
	 * sd_setup_rw_pkt will determine the appropriate CDB group to use,
	 * call scsi_init_pkt, and build the CDB.
	 */
	rval = sd_setup_rw_pkt(un, &pktp, bp,
	    cmd_flags, sdrunout, (caddr_t)un,
	    startblock, blockcount);

	if (rval == 0) {
		/*
		 * Success.
		 *
		 * If partial DMA is being used and required for this
		 * transfer, set it up here.
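		 *
		 * As a worked example (hypothetical sizes): for a 1 MB
		 * request of which the HBA could map only 256 KB, pkt_resid
		 * would come back as 768 KB. The code below saves that
		 * value in xp->xb_dma_resid and rezeroes pkt_resid, so the
		 * first 256 KB can be transported now and the remainder
		 * set up later via sd_setup_next_rw_pkt().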
13111 */ 13112 if ((un->un_pkt_flags & PKT_DMA_PARTIAL) != 0 && 13113 (pktp->pkt_resid != 0)) { 13114 13115 /* 13116 * Save the CDB length and pkt_resid for the 13117 * next xfer 13118 */ 13119 xp->xb_dma_resid = pktp->pkt_resid; 13120 13121 /* rezero resid */ 13122 pktp->pkt_resid = 0; 13123 13124 } else { 13125 xp->xb_dma_resid = 0; 13126 } 13127 13128 pktp->pkt_flags = un->un_tagflags; 13129 pktp->pkt_time = un->un_cmd_timeout; 13130 pktp->pkt_comp = sdintr; 13131 13132 pktp->pkt_private = bp; 13133 *pktpp = pktp; 13134 13135 SD_TRACE(SD_LOG_IO_CORE, un, 13136 "sd_initpkt_for_buf: exit: buf:0x%p\n", bp); 13137 13138 #if defined(__i386) || defined(__amd64) /* DMAFREE for x86 only */ 13139 xp->xb_pkt_flags &= ~SD_XB_DMA_FREED; 13140 #endif 13141 13142 mutex_enter(SD_MUTEX(un)); 13143 return (SD_PKT_ALLOC_SUCCESS); 13144 13145 } 13146 13147 /* 13148 * SD_PKT_ALLOC_FAILURE is the only expected failure code 13149 * from sd_setup_rw_pkt. 13150 */ 13151 ASSERT(rval == SD_PKT_ALLOC_FAILURE); 13152 13153 if (rval == SD_PKT_ALLOC_FAILURE) { 13154 *pktpp = NULL; 13155 /* 13156 * Set the driver state to RWAIT to indicate the driver 13157 * is waiting on resource allocations. The driver will not 13158 * suspend, pm_suspend, or detatch while the state is RWAIT. 13159 */ 13160 mutex_enter(SD_MUTEX(un)); 13161 New_state(un, SD_STATE_RWAIT); 13162 13163 SD_ERROR(SD_LOG_IO_CORE, un, 13164 "sd_initpkt_for_buf: No pktp. exit bp:0x%p\n", bp); 13165 13166 if ((bp->b_flags & B_ERROR) != 0) { 13167 return (SD_PKT_ALLOC_FAILURE_NO_DMA); 13168 } 13169 return (SD_PKT_ALLOC_FAILURE); 13170 } else { 13171 /* 13172 * PKT_ALLOC_FAILURE_CDB_TOO_SMALL 13173 * 13174 * This should never happen. Maybe someone messed with the 13175 * kernel's minphys? 13176 */ 13177 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 13178 "Request rejected: too large for CDB: " 13179 "lba:0x%08lx len:0x%08lx\n", startblock, blockcount); 13180 SD_ERROR(SD_LOG_IO_CORE, un, 13181 "sd_initpkt_for_buf: No cp. exit bp:0x%p\n", bp); 13182 mutex_enter(SD_MUTEX(un)); 13183 return (SD_PKT_ALLOC_FAILURE_CDB_TOO_SMALL); 13184 13185 } 13186 } 13187 13188 13189 /* 13190 * Function: sd_destroypkt_for_buf 13191 * 13192 * Description: Free the scsi_pkt(9S) for the given bp (buf IO processing). 13193 * 13194 * Context: Kernel thread or interrupt context 13195 */ 13196 13197 static void 13198 sd_destroypkt_for_buf(struct buf *bp) 13199 { 13200 ASSERT(bp != NULL); 13201 ASSERT(SD_GET_UN(bp) != NULL); 13202 13203 SD_TRACE(SD_LOG_IO_CORE, SD_GET_UN(bp), 13204 "sd_destroypkt_for_buf: entry: buf:0x%p\n", bp); 13205 13206 ASSERT(SD_GET_PKTP(bp) != NULL); 13207 scsi_destroy_pkt(SD_GET_PKTP(bp)); 13208 13209 SD_TRACE(SD_LOG_IO_CORE, SD_GET_UN(bp), 13210 "sd_destroypkt_for_buf: exit: buf:0x%p\n", bp); 13211 } 13212 13213 /* 13214 * Function: sd_setup_rw_pkt 13215 * 13216 * Description: Determines appropriate CDB group for the requested LBA 13217 * and transfer length, calls scsi_init_pkt, and builds 13218 * the CDB. Do not use for partial DMA transfers except 13219 * for the initial transfer since the CDB size must 13220 * remain constant. 13221 * 13222 * Context: Kernel thread and may be called from software interrupt 13223 * context as part of a sdrunout callback. 
This function may not 13224 * block or call routines that block 13225 */ 13226 13227 13228 int 13229 sd_setup_rw_pkt(struct sd_lun *un, 13230 struct scsi_pkt **pktpp, struct buf *bp, int flags, 13231 int (*callback)(caddr_t), caddr_t callback_arg, 13232 diskaddr_t lba, uint32_t blockcount) 13233 { 13234 struct scsi_pkt *return_pktp; 13235 union scsi_cdb *cdbp; 13236 struct sd_cdbinfo *cp = NULL; 13237 int i; 13238 13239 /* 13240 * See which size CDB to use, based upon the request. 13241 */ 13242 for (i = un->un_mincdb; i <= un->un_maxcdb; i++) { 13243 13244 /* 13245 * Check lba and block count against sd_cdbtab limits. 13246 * In the partial DMA case, we have to use the same size 13247 * CDB for all the transfers. Check lba + blockcount 13248 * against the max LBA so we know that segment of the 13249 * transfer can use the CDB we select. 13250 */ 13251 if ((lba + blockcount - 1 <= sd_cdbtab[i].sc_maxlba) && 13252 (blockcount <= sd_cdbtab[i].sc_maxlen)) { 13253 13254 /* 13255 * The command will fit into the CDB type 13256 * specified by sd_cdbtab[i]. 13257 */ 13258 cp = sd_cdbtab + i; 13259 13260 /* 13261 * Call scsi_init_pkt so we can fill in the 13262 * CDB. 13263 */ 13264 return_pktp = scsi_init_pkt(SD_ADDRESS(un), *pktpp, 13265 bp, cp->sc_grpcode, un->un_status_len, 0, 13266 flags, callback, callback_arg); 13267 13268 if (return_pktp != NULL) { 13269 13270 /* 13271 * Return new value of pkt 13272 */ 13273 *pktpp = return_pktp; 13274 13275 /* 13276 * To be safe, zero the CDB insuring there is 13277 * no leftover data from a previous command. 13278 */ 13279 bzero(return_pktp->pkt_cdbp, cp->sc_grpcode); 13280 13281 /* 13282 * Handle partial DMA mapping 13283 */ 13284 if (return_pktp->pkt_resid != 0) { 13285 13286 /* 13287 * Not going to xfer as many blocks as 13288 * originally expected 13289 */ 13290 blockcount -= 13291 SD_BYTES2TGTBLOCKS(un, 13292 return_pktp->pkt_resid); 13293 } 13294 13295 cdbp = (union scsi_cdb *)return_pktp->pkt_cdbp; 13296 13297 /* 13298 * Set command byte based on the CDB 13299 * type we matched. 13300 */ 13301 cdbp->scc_cmd = cp->sc_grpmask | 13302 ((bp->b_flags & B_READ) ? 13303 SCMD_READ : SCMD_WRITE); 13304 13305 SD_FILL_SCSI1_LUN(un, return_pktp); 13306 13307 /* 13308 * Fill in LBA and length 13309 */ 13310 ASSERT((cp->sc_grpcode == CDB_GROUP1) || 13311 (cp->sc_grpcode == CDB_GROUP4) || 13312 (cp->sc_grpcode == CDB_GROUP0) || 13313 (cp->sc_grpcode == CDB_GROUP5)); 13314 13315 if (cp->sc_grpcode == CDB_GROUP1) { 13316 FORMG1ADDR(cdbp, lba); 13317 FORMG1COUNT(cdbp, blockcount); 13318 return (0); 13319 } else if (cp->sc_grpcode == CDB_GROUP4) { 13320 FORMG4LONGADDR(cdbp, lba); 13321 FORMG4COUNT(cdbp, blockcount); 13322 return (0); 13323 } else if (cp->sc_grpcode == CDB_GROUP0) { 13324 FORMG0ADDR(cdbp, lba); 13325 FORMG0COUNT(cdbp, blockcount); 13326 return (0); 13327 } else if (cp->sc_grpcode == CDB_GROUP5) { 13328 FORMG5ADDR(cdbp, lba); 13329 FORMG5COUNT(cdbp, blockcount); 13330 return (0); 13331 } 13332 13333 /* 13334 * It should be impossible to not match one 13335 * of the CDB types above, so we should never 13336 * reach this point. Set the CDB command byte 13337 * to test-unit-ready to avoid writing 13338 * to somewhere we don't intend. 13339 */ 13340 cdbp->scc_cmd = SCMD_TEST_UNIT_READY; 13341 return (SD_PKT_ALLOC_FAILURE_CDB_TOO_SMALL); 13342 } else { 13343 /* 13344 * Couldn't get scsi_pkt 13345 */ 13346 return (SD_PKT_ALLOC_FAILURE); 13347 } 13348 } 13349 } 13350 13351 /* 13352 * None of the available CDB types were suitable. 
This really 13353 * should never happen: on a 64 bit system we support 13354 * READ16/WRITE16 which will hold an entire 64 bit disk address 13355 * and on a 32 bit system we will refuse to bind to a device 13356 * larger than 2TB so addresses will never be larger than 32 bits. 13357 */ 13358 return (SD_PKT_ALLOC_FAILURE_CDB_TOO_SMALL); 13359 } 13360 13361 /* 13362 * Function: sd_setup_next_rw_pkt 13363 * 13364 * Description: Setup packet for partial DMA transfers, except for the 13365 * initial transfer. sd_setup_rw_pkt should be used for 13366 * the initial transfer. 13367 * 13368 * Context: Kernel thread and may be called from interrupt context. 13369 */ 13370 13371 int 13372 sd_setup_next_rw_pkt(struct sd_lun *un, 13373 struct scsi_pkt *pktp, struct buf *bp, 13374 diskaddr_t lba, uint32_t blockcount) 13375 { 13376 uchar_t com; 13377 union scsi_cdb *cdbp; 13378 uchar_t cdb_group_id; 13379 13380 ASSERT(pktp != NULL); 13381 ASSERT(pktp->pkt_cdbp != NULL); 13382 13383 cdbp = (union scsi_cdb *)pktp->pkt_cdbp; 13384 com = cdbp->scc_cmd; 13385 cdb_group_id = CDB_GROUPID(com); 13386 13387 ASSERT((cdb_group_id == CDB_GROUPID_0) || 13388 (cdb_group_id == CDB_GROUPID_1) || 13389 (cdb_group_id == CDB_GROUPID_4) || 13390 (cdb_group_id == CDB_GROUPID_5)); 13391 13392 /* 13393 * Move pkt to the next portion of the xfer. 13394 * func is NULL_FUNC so we do not have to release 13395 * the disk mutex here. 13396 */ 13397 if (scsi_init_pkt(SD_ADDRESS(un), pktp, bp, 0, 0, 0, 0, 13398 NULL_FUNC, NULL) == pktp) { 13399 /* Success. Handle partial DMA */ 13400 if (pktp->pkt_resid != 0) { 13401 blockcount -= 13402 SD_BYTES2TGTBLOCKS(un, pktp->pkt_resid); 13403 } 13404 13405 cdbp->scc_cmd = com; 13406 SD_FILL_SCSI1_LUN(un, pktp); 13407 if (cdb_group_id == CDB_GROUPID_1) { 13408 FORMG1ADDR(cdbp, lba); 13409 FORMG1COUNT(cdbp, blockcount); 13410 return (0); 13411 } else if (cdb_group_id == CDB_GROUPID_4) { 13412 FORMG4LONGADDR(cdbp, lba); 13413 FORMG4COUNT(cdbp, blockcount); 13414 return (0); 13415 } else if (cdb_group_id == CDB_GROUPID_0) { 13416 FORMG0ADDR(cdbp, lba); 13417 FORMG0COUNT(cdbp, blockcount); 13418 return (0); 13419 } else if (cdb_group_id == CDB_GROUPID_5) { 13420 FORMG5ADDR(cdbp, lba); 13421 FORMG5COUNT(cdbp, blockcount); 13422 return (0); 13423 } 13424 13425 /* Unreachable */ 13426 return (SD_PKT_ALLOC_FAILURE_CDB_TOO_SMALL); 13427 } 13428 13429 /* 13430 * Error setting up next portion of cmd transfer. 13431 * Something is definitely very wrong and this 13432 * should not happen. 13433 */ 13434 return (SD_PKT_ALLOC_FAILURE); 13435 } 13436 13437 /* 13438 * Function: sd_initpkt_for_uscsi 13439 * 13440 * Description: Allocate and initialize for transport a scsi_pkt struct, 13441 * based upon the info specified in the given uscsi_cmd struct. 13442 * 13443 * Return Code: SD_PKT_ALLOC_SUCCESS 13444 * SD_PKT_ALLOC_FAILURE 13445 * SD_PKT_ALLOC_FAILURE_NO_DMA 13446 * SD_PKT_ALLOC_FAILURE_CDB_TOO_SMALL 13447 * 13448 * Context: Kernel thread and may be called from software interrupt context 13449 * as part of a sdrunout callback. 
This function may not block
 *		or call routines that block.
 */

static int
sd_initpkt_for_uscsi(struct buf *bp, struct scsi_pkt **pktpp)
{
	struct uscsi_cmd *uscmd;
	struct sd_xbuf	*xp;
	struct scsi_pkt	*pktp;
	struct sd_lun	*un;
	uint32_t	flags = 0;

	ASSERT(bp != NULL);
	ASSERT(pktpp != NULL);
	xp = SD_GET_XBUF(bp);
	ASSERT(xp != NULL);
	un = SD_GET_UN(bp);
	ASSERT(un != NULL);
	ASSERT(mutex_owned(SD_MUTEX(un)));

	/* The pointer to the uscsi_cmd struct is expected in xb_pktinfo */
	uscmd = (struct uscsi_cmd *)xp->xb_pktinfo;
	ASSERT(uscmd != NULL);

	SD_TRACE(SD_LOG_IO_CORE, un,
	    "sd_initpkt_for_uscsi: entry: buf:0x%p\n", bp);

	/*
	 * Allocate the scsi_pkt for the command.
	 * Note: If PKT_DMA_PARTIAL flag is set, scsi_vhci binds a path
	 *	 during scsi_init_pkt time and will continue to use the
	 *	 same path as long as the same scsi_pkt is used without
	 *	 intervening scsi_dmafree(). Since a uscsi command does
	 *	 not call scsi_dmafree() before retrying a failed command,
	 *	 it is necessary to make sure the PKT_DMA_PARTIAL flag is
	 *	 NOT set, so that scsi_vhci can use another available path
	 *	 for the retry. Besides, a uscsi command does not allow DMA
	 *	 breakup, so there is no need to set PKT_DMA_PARTIAL.
	 */
	if (uscmd->uscsi_rqlen > SENSE_LENGTH) {
		pktp = scsi_init_pkt(SD_ADDRESS(un), NULL,
		    ((bp->b_bcount != 0) ? bp : NULL), uscmd->uscsi_cdblen,
		    ((int)(uscmd->uscsi_rqlen) + sizeof (struct scsi_arq_status)
		    - sizeof (struct scsi_extended_sense)), 0,
		    (un->un_pkt_flags & ~PKT_DMA_PARTIAL) | PKT_XARQ,
		    sdrunout, (caddr_t)un);
	} else {
		pktp = scsi_init_pkt(SD_ADDRESS(un), NULL,
		    ((bp->b_bcount != 0) ? bp : NULL), uscmd->uscsi_cdblen,
		    sizeof (struct scsi_arq_status), 0,
		    (un->un_pkt_flags & ~PKT_DMA_PARTIAL),
		    sdrunout, (caddr_t)un);
	}

	if (pktp == NULL) {
		*pktpp = NULL;
		/*
		 * Set the driver state to RWAIT to indicate the driver
		 * is waiting on resource allocations. The driver will not
		 * suspend, pm_suspend, or detach while the state is RWAIT.
		 */
		New_state(un, SD_STATE_RWAIT);

		SD_ERROR(SD_LOG_IO_CORE, un,
		    "sd_initpkt_for_uscsi: No pktp. exit bp:0x%p\n", bp);

		if ((bp->b_flags & B_ERROR) != 0) {
			return (SD_PKT_ALLOC_FAILURE_NO_DMA);
		}
		return (SD_PKT_ALLOC_FAILURE);
	}

	/*
	 * We do not do DMA breakup for USCSI commands, so return failure
	 * here if all the needed DMA resources were not allocated.
	 */
	if ((un->un_pkt_flags & PKT_DMA_PARTIAL) &&
	    (bp->b_bcount != 0) && (pktp->pkt_resid != 0)) {
		scsi_destroy_pkt(pktp);
		SD_ERROR(SD_LOG_IO_CORE, un, "sd_initpkt_for_uscsi: "
		    "No partial DMA for USCSI. exit: buf:0x%p\n", bp);
		return (SD_PKT_ALLOC_FAILURE_PKT_TOO_SMALL);
	}

	/* Init the cdb from the given uscsi struct */
	(void) scsi_setup_cdb((union scsi_cdb *)pktp->pkt_cdbp,
	    uscmd->uscsi_cdb[0], 0, 0, 0);

	SD_FILL_SCSI1_LUN(un, pktp);

	/*
	 * Set up the optional USCSI flags. See the uscsi(7I) man page
	 * for a listing of the supported flags.
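	 *
	 * As an illustrative (hypothetical) example, a caller issuing a
	 * quiet, head-of-queue-tagged command might build its request as:
	 *
	 *	struct uscsi_cmd ucmd;
	 *	bzero(&ucmd, sizeof (ucmd));
	 *	ucmd.uscsi_flags = USCSI_SILENT | USCSI_HTAG;
	 *
	 * which the code below maps to FLAG_SILENT and FLAG_HTAG in
	 * pkt_flags.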
13543 */ 13544 13545 if (uscmd->uscsi_flags & USCSI_SILENT) { 13546 flags |= FLAG_SILENT; 13547 } 13548 13549 if (uscmd->uscsi_flags & USCSI_DIAGNOSE) { 13550 flags |= FLAG_DIAGNOSE; 13551 } 13552 13553 if (uscmd->uscsi_flags & USCSI_ISOLATE) { 13554 flags |= FLAG_ISOLATE; 13555 } 13556 13557 if (un->un_f_is_fibre == FALSE) { 13558 if (uscmd->uscsi_flags & USCSI_RENEGOT) { 13559 flags |= FLAG_RENEGOTIATE_WIDE_SYNC; 13560 } 13561 } 13562 13563 /* 13564 * Set the pkt flags here so we save time later. 13565 * Note: These flags are NOT in the uscsi man page!!! 13566 */ 13567 if (uscmd->uscsi_flags & USCSI_HEAD) { 13568 flags |= FLAG_HEAD; 13569 } 13570 13571 if (uscmd->uscsi_flags & USCSI_NOINTR) { 13572 flags |= FLAG_NOINTR; 13573 } 13574 13575 /* 13576 * For tagged queueing, things get a bit complicated. 13577 * Check first for head of queue and last for ordered queue. 13578 * If neither head nor order, use the default driver tag flags. 13579 */ 13580 if ((uscmd->uscsi_flags & USCSI_NOTAG) == 0) { 13581 if (uscmd->uscsi_flags & USCSI_HTAG) { 13582 flags |= FLAG_HTAG; 13583 } else if (uscmd->uscsi_flags & USCSI_OTAG) { 13584 flags |= FLAG_OTAG; 13585 } else { 13586 flags |= un->un_tagflags & FLAG_TAGMASK; 13587 } 13588 } 13589 13590 if (uscmd->uscsi_flags & USCSI_NODISCON) { 13591 flags = (flags & ~FLAG_TAGMASK) | FLAG_NODISCON; 13592 } 13593 13594 pktp->pkt_flags = flags; 13595 13596 /* Transfer uscsi information to scsi_pkt */ 13597 (void) scsi_uscsi_pktinit(uscmd, pktp); 13598 13599 /* Copy the caller's CDB into the pkt... */ 13600 bcopy(uscmd->uscsi_cdb, pktp->pkt_cdbp, uscmd->uscsi_cdblen); 13601 13602 if (uscmd->uscsi_timeout == 0) { 13603 pktp->pkt_time = un->un_uscsi_timeout; 13604 } else { 13605 pktp->pkt_time = uscmd->uscsi_timeout; 13606 } 13607 13608 /* need it later to identify USCSI request in sdintr */ 13609 xp->xb_pkt_flags |= SD_XB_USCSICMD; 13610 13611 xp->xb_sense_resid = uscmd->uscsi_rqresid; 13612 13613 pktp->pkt_private = bp; 13614 pktp->pkt_comp = sdintr; 13615 *pktpp = pktp; 13616 13617 SD_TRACE(SD_LOG_IO_CORE, un, 13618 "sd_initpkt_for_uscsi: exit: buf:0x%p\n", bp); 13619 13620 return (SD_PKT_ALLOC_SUCCESS); 13621 } 13622 13623 13624 /* 13625 * Function: sd_destroypkt_for_uscsi 13626 * 13627 * Description: Free the scsi_pkt(9S) struct for the given bp, for uscsi 13628 * IOs.. Also saves relevant info into the associated uscsi_cmd 13629 * struct. 
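 *		(the status, the residual, and any saved sense data).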
 *
 * Context: May be called under interrupt context
 */

static void
sd_destroypkt_for_uscsi(struct buf *bp)
{
	struct uscsi_cmd *uscmd;
	struct sd_xbuf	*xp;
	struct scsi_pkt	*pktp;
	struct sd_lun	*un;
	struct sd_uscsi_info *suip;

	ASSERT(bp != NULL);
	xp = SD_GET_XBUF(bp);
	ASSERT(xp != NULL);
	un = SD_GET_UN(bp);
	ASSERT(un != NULL);
	ASSERT(!mutex_owned(SD_MUTEX(un)));
	pktp = SD_GET_PKTP(bp);
	ASSERT(pktp != NULL);

	SD_TRACE(SD_LOG_IO_CORE, un,
	    "sd_destroypkt_for_uscsi: entry: buf:0x%p\n", bp);

	/* The pointer to the uscsi_cmd struct is expected in xb_pktinfo */
	uscmd = (struct uscsi_cmd *)xp->xb_pktinfo;
	ASSERT(uscmd != NULL);

	/* Save the status and the residual into the uscsi_cmd struct */
	uscmd->uscsi_status = ((*(pktp)->pkt_scbp) & STATUS_MASK);
	uscmd->uscsi_resid = bp->b_resid;

	/* Transfer scsi_pkt information to uscsi */
	(void) scsi_uscsi_pktfini(pktp, uscmd);

	/*
	 * If enabled, copy any saved sense data into the area specified
	 * by the uscsi command.
	 */
	if (((uscmd->uscsi_flags & USCSI_RQENABLE) != 0) &&
	    (uscmd->uscsi_rqlen != 0) && (uscmd->uscsi_rqbuf != NULL)) {
		/*
		 * Note: uscmd->uscsi_rqbuf should always point to a buffer
		 * at least SENSE_LENGTH bytes in size (see sd_send_scsi_cmd())
		 */
		uscmd->uscsi_rqstatus = xp->xb_sense_status;
		uscmd->uscsi_rqresid = xp->xb_sense_resid;
		if (uscmd->uscsi_rqlen > SENSE_LENGTH) {
			bcopy(xp->xb_sense_data, uscmd->uscsi_rqbuf,
			    MAX_SENSE_LENGTH);
		} else {
			bcopy(xp->xb_sense_data, uscmd->uscsi_rqbuf,
			    SENSE_LENGTH);
		}
	}
	/*
	 * The following assignments are for SCSI FMA.
	 */
	ASSERT(xp->xb_private != NULL);
	suip = (struct sd_uscsi_info *)xp->xb_private;
	suip->ui_pkt_reason = pktp->pkt_reason;
	suip->ui_pkt_state = pktp->pkt_state;
	suip->ui_pkt_statistics = pktp->pkt_statistics;
	suip->ui_lba = (uint64_t)SD_GET_BLKNO(bp);

	/* We are done with the scsi_pkt; free it now */
	ASSERT(SD_GET_PKTP(bp) != NULL);
	scsi_destroy_pkt(SD_GET_PKTP(bp));

	SD_TRACE(SD_LOG_IO_CORE, un,
	    "sd_destroypkt_for_uscsi: exit: buf:0x%p\n", bp);
}


/*
 * Function: sd_bioclone_alloc
 *
 * Description: Allocate a buf(9S) and init it as per the given buf
 *		and the various arguments. The associated sd_xbuf
 *		struct is (nearly) duplicated. The struct buf *bp
 *		argument is saved in new_xp->xb_private.
 *
 * Arguments: bp - ptr to the buf(9S) to be "shadowed"
 *		datalen - size of data area for the shadow bp
 *		blkno - starting LBA
 *		func - function pointer for b_iodone in the shadow buf. (May
 *		be NULL if none.)
 *
 * Return Code: Pointer to the allocated buf(9S) struct
 *
 * Context: Can sleep.
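 *
 *		A minimal usage sketch (hypothetical caller and iodone
 *		routine, not driver code):
 *
 *			struct buf *new_bp;
 *
 *			new_bp = sd_bioclone_alloc(bp, bp->b_bcount,
 *			    start_blkno, sd_example_iodone);
 *
 *		The caller then transports new_bp; when the hypothetical
 *		sd_example_iodone() runs, it can recover the original bp
 *		via SD_GET_XBUF(new_bp)->xb_private.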
 */

static struct buf *
sd_bioclone_alloc(struct buf *bp, size_t datalen,
    daddr_t blkno, int (*func)(struct buf *))
{
	struct sd_lun	*un;
	struct sd_xbuf	*xp;
	struct sd_xbuf	*new_xp;
	struct buf	*new_bp;

	ASSERT(bp != NULL);
	xp = SD_GET_XBUF(bp);
	ASSERT(xp != NULL);
	un = SD_GET_UN(bp);
	ASSERT(un != NULL);
	ASSERT(!mutex_owned(SD_MUTEX(un)));

	new_bp = bioclone(bp, 0, datalen, SD_GET_DEV(un), blkno, func,
	    NULL, KM_SLEEP);

	new_bp->b_lblkno = blkno;

	/*
	 * Allocate an xbuf for the shadow bp and copy the contents of the
	 * original xbuf into it.
	 */
	new_xp = kmem_alloc(sizeof (struct sd_xbuf), KM_SLEEP);
	bcopy(xp, new_xp, sizeof (struct sd_xbuf));

	/*
	 * The given bp is automatically saved in the xb_private member
	 * of the new xbuf. Callers are allowed to depend on this.
	 */
	new_xp->xb_private = bp;

	new_bp->b_private = new_xp;

	return (new_bp);
}

/*
 * Function: sd_shadow_buf_alloc
 *
 * Description: Allocate a buf(9S) and init it as per the given buf
 *		and the various arguments. The associated sd_xbuf
 *		struct is (nearly) duplicated. The struct buf *bp
 *		argument is saved in new_xp->xb_private.
 *
 * Arguments: bp - ptr to the buf(9S) to be "shadowed"
 *		datalen - size of data area for the shadow bp
 *		bflags - B_READ or B_WRITE (pseudo flag)
 *		blkno - starting LBA
 *		func - function pointer for b_iodone in the shadow buf. (May
 *		be NULL if none.)
 *
 * Return Code: Pointer to the allocated buf(9S) struct
 *
 * Context: Can sleep.
 */

static struct buf *
sd_shadow_buf_alloc(struct buf *bp, size_t datalen, uint_t bflags,
    daddr_t blkno, int (*func)(struct buf *))
{
	struct sd_lun	*un;
	struct sd_xbuf	*xp;
	struct sd_xbuf	*new_xp;
	struct buf	*new_bp;

	ASSERT(bp != NULL);
	xp = SD_GET_XBUF(bp);
	ASSERT(xp != NULL);
	un = SD_GET_UN(bp);
	ASSERT(un != NULL);
	ASSERT(!mutex_owned(SD_MUTEX(un)));

	if (bp->b_flags & (B_PAGEIO | B_PHYS)) {
		bp_mapin(bp);
	}

	bflags &= (B_READ | B_WRITE);
#if defined(__i386) || defined(__amd64)
	new_bp = getrbuf(KM_SLEEP);
	new_bp->b_un.b_addr = kmem_zalloc(datalen, KM_SLEEP);
	new_bp->b_bcount = datalen;
	new_bp->b_flags = bflags |
	    (bp->b_flags & ~(B_PAGEIO | B_PHYS | B_REMAPPED | B_SHADOW));
#else
	new_bp = scsi_alloc_consistent_buf(SD_ADDRESS(un), NULL,
	    datalen, bflags, SLEEP_FUNC, NULL);
#endif
	new_bp->av_forw = NULL;
	new_bp->av_back = NULL;
	new_bp->b_dev = bp->b_dev;
	new_bp->b_blkno = blkno;
	new_bp->b_iodone = func;
	new_bp->b_edev = bp->b_edev;
	new_bp->b_resid = 0;

	/* We need to preserve the B_FAILFAST flag */
	if (bp->b_flags & B_FAILFAST) {
		new_bp->b_flags |= B_FAILFAST;
	}

	/*
	 * Allocate an xbuf for the shadow bp and copy the contents of the
	 * original xbuf into it.
	 */
	new_xp = kmem_alloc(sizeof (struct sd_xbuf), KM_SLEEP);
	bcopy(xp, new_xp, sizeof (struct sd_xbuf));

	/* Need later to copy data between the shadow buf & original buf!
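	 * The iodone side performs that copy with bcopy(), so the shadow
	 * transfer is requested with a consistent DMA mapping
	 * (PKT_CONSISTENT, below) to keep the CPU's view of the shadow
	 * buffer coherent with the device's.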
*/ 13835 new_xp->xb_pkt_flags |= PKT_CONSISTENT; 13836 13837 /* 13838 * The given bp is automatically saved in the xb_private member 13839 * of the new xbuf. Callers are allowed to depend on this. 13840 */ 13841 new_xp->xb_private = bp; 13842 13843 new_bp->b_private = new_xp; 13844 13845 return (new_bp); 13846 } 13847 13848 /* 13849 * Function: sd_bioclone_free 13850 * 13851 * Description: Deallocate a buf(9S) that was used for 'shadow' IO operations 13852 * in the larger than partition operation. 13853 * 13854 * Context: May be called under interrupt context 13855 */ 13856 13857 static void 13858 sd_bioclone_free(struct buf *bp) 13859 { 13860 struct sd_xbuf *xp; 13861 13862 ASSERT(bp != NULL); 13863 xp = SD_GET_XBUF(bp); 13864 ASSERT(xp != NULL); 13865 13866 /* 13867 * Call bp_mapout() before freeing the buf, in case a lower 13868 * layer or HBA had done a bp_mapin(). we must do this here 13869 * as we are the "originator" of the shadow buf. 13870 */ 13871 bp_mapout(bp); 13872 13873 /* 13874 * Null out b_iodone before freeing the bp, to ensure that the driver 13875 * never gets confused by a stale value in this field. (Just a little 13876 * extra defensiveness here.) 13877 */ 13878 bp->b_iodone = NULL; 13879 13880 freerbuf(bp); 13881 13882 kmem_free(xp, sizeof (struct sd_xbuf)); 13883 } 13884 13885 /* 13886 * Function: sd_shadow_buf_free 13887 * 13888 * Description: Deallocate a buf(9S) that was used for 'shadow' IO operations. 13889 * 13890 * Context: May be called under interrupt context 13891 */ 13892 13893 static void 13894 sd_shadow_buf_free(struct buf *bp) 13895 { 13896 struct sd_xbuf *xp; 13897 13898 ASSERT(bp != NULL); 13899 xp = SD_GET_XBUF(bp); 13900 ASSERT(xp != NULL); 13901 13902 #if defined(__sparc) 13903 /* 13904 * Call bp_mapout() before freeing the buf, in case a lower 13905 * layer or HBA had done a bp_mapin(). we must do this here 13906 * as we are the "originator" of the shadow buf. 13907 */ 13908 bp_mapout(bp); 13909 #endif 13910 13911 /* 13912 * Null out b_iodone before freeing the bp, to ensure that the driver 13913 * never gets confused by a stale value in this field. (Just a little 13914 * extra defensiveness here.) 13915 */ 13916 bp->b_iodone = NULL; 13917 13918 #if defined(__i386) || defined(__amd64) 13919 kmem_free(bp->b_un.b_addr, bp->b_bcount); 13920 freerbuf(bp); 13921 #else 13922 scsi_free_consistent_buf(bp); 13923 #endif 13924 13925 kmem_free(xp, sizeof (struct sd_xbuf)); 13926 } 13927 13928 13929 /* 13930 * Function: sd_print_transport_rejected_message 13931 * 13932 * Description: This implements the ludicrously complex rules for printing 13933 * a "transport rejected" message. This is to address the 13934 * specific problem of having a flood of this error message 13935 * produced when a failover occurs. 13936 * 13937 * Context: Any. 13938 */ 13939 13940 static void 13941 sd_print_transport_rejected_message(struct sd_lun *un, struct sd_xbuf *xp, 13942 int code) 13943 { 13944 ASSERT(un != NULL); 13945 ASSERT(mutex_owned(SD_MUTEX(un))); 13946 ASSERT(xp != NULL); 13947 13948 /* 13949 * Print the "transport rejected" message under the following 13950 * conditions: 13951 * 13952 * - Whenever the SD_LOGMASK_DIAG bit of sd_level_mask is set 13953 * - The error code from scsi_transport() is NOT a TRAN_FATAL_ERROR. 13954 * - If the error code IS a TRAN_FATAL_ERROR, then the message is 13955 * printed the FIRST time a TRAN_FATAL_ERROR is returned from 13956 * scsi_transport(9F) (which indicates that the target might have 13957 * gone off-line). 
This uses the
	 *   un->un_tran_fatal_count counter, which is incremented whenever
	 *   a TRAN_FATAL_ERROR is received, and reset to zero whenever a
	 *   TRAN_ACCEPT is returned from scsi_transport().
	 *
	 * The FLAG_SILENT in the scsi_pkt must be CLEARED in ALL of
	 * the preceding cases in order for the message to be printed.
	 */
	if (((xp->xb_pktp->pkt_flags & FLAG_SILENT) == 0) &&
	    (SD_FM_LOG(un) == SD_FM_LOG_NSUP)) {
		if ((sd_level_mask & SD_LOGMASK_DIAG) ||
		    (code != TRAN_FATAL_ERROR) ||
		    (un->un_tran_fatal_count == 1)) {
			switch (code) {
			case TRAN_BADPKT:
				scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
				    "transport rejected bad packet\n");
				break;
			case TRAN_FATAL_ERROR:
				scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
				    "transport rejected fatal error\n");
				break;
			default:
				scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
				    "transport rejected (%d)\n", code);
				break;
			}
		}
	}
}


/*
 * Function: sd_add_buf_to_waitq
 *
 * Description: Add the given buf(9S) struct to the wait queue for the
 *		instance. If sorting is enabled, then the buf is added
 *		to the queue via an elevator sort algorithm (a la
 *		disksort(9F)). The SD_GET_BLKNO(bp) is used as the sort key.
 *		If sorting is not enabled, then the buf is just added
 *		to the end of the wait queue.
 *
 * Return Code: void
 *
 * Context: Does not sleep/block, therefore technically can be called
 *		from any context. However if sorting is enabled then the
 *		execution time is indeterminate, and may take long if
 *		the wait queue grows large.
 */

static void
sd_add_buf_to_waitq(struct sd_lun *un, struct buf *bp)
{
	struct buf *ap;

	ASSERT(bp != NULL);
	ASSERT(un != NULL);
	ASSERT(mutex_owned(SD_MUTEX(un)));

	/* If the queue is empty, add the buf as the only entry & return. */
	if (un->un_waitq_headp == NULL) {
		ASSERT(un->un_waitq_tailp == NULL);
		un->un_waitq_headp = un->un_waitq_tailp = bp;
		bp->av_forw = NULL;
		return;
	}

	ASSERT(un->un_waitq_tailp != NULL);

	/*
	 * If sorting is disabled, just add the buf to the tail end of
	 * the wait queue and return.
	 */
	if (un->un_f_disksort_disabled) {
		un->un_waitq_tailp->av_forw = bp;
		un->un_waitq_tailp = bp;
		bp->av_forw = NULL;
		return;
	}

	/*
	 * Sort thru the list of requests currently on the wait queue
	 * and add the new buf request at the appropriate position.
	 *
	 * The un->un_waitq_headp is an activity chain pointer on which
	 * we keep two queues, sorted in ascending SD_GET_BLKNO() order. The
	 * first queue holds those requests which are positioned after
	 * the current SD_GET_BLKNO() (in the first request); the second holds
	 * requests which came in after their SD_GET_BLKNO() number was passed.
	 * Thus we implement a one way scan, retracting after reaching
	 * the end of the drive to the first request on the second
	 * queue, at which time it becomes the first queue.
	 * A one-way scan is natural because of the way UNIX read-ahead
	 * blocks are allocated.
	 *
	 * If we lie after the first request, then we must locate the
	 * second request list and add ourselves to it.
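	 *
	 * A small worked example (hypothetical block numbers): suppose
	 * the queue currently holds requests for blocks 40, 70, 90, 15,
	 * 25 -- i.e. a first list of {40, 70, 90} and a second list of
	 * {15, 25}. A new request for block 50 sorts into the first list
	 * between 40 and 70; a new request for block 20 falls below the
	 * head (40), so it sorts into the second list between 15 and 25.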
14054 */ 14055 ap = un->un_waitq_headp; 14056 if (SD_GET_BLKNO(bp) < SD_GET_BLKNO(ap)) { 14057 while (ap->av_forw != NULL) { 14058 /* 14059 * Look for an "inversion" in the (normally 14060 * ascending) block numbers. This indicates 14061 * the start of the second request list. 14062 */ 14063 if (SD_GET_BLKNO(ap->av_forw) < SD_GET_BLKNO(ap)) { 14064 /* 14065 * Search the second request list for the 14066 * first request at a larger block number. 14067 * We go before that; however if there is 14068 * no such request, we go at the end. 14069 */ 14070 do { 14071 if (SD_GET_BLKNO(bp) < 14072 SD_GET_BLKNO(ap->av_forw)) { 14073 goto insert; 14074 } 14075 ap = ap->av_forw; 14076 } while (ap->av_forw != NULL); 14077 goto insert; /* after last */ 14078 } 14079 ap = ap->av_forw; 14080 } 14081 14082 /* 14083 * No inversions... we will go after the last, and 14084 * be the first request in the second request list. 14085 */ 14086 goto insert; 14087 } 14088 14089 /* 14090 * Request is at/after the current request... 14091 * sort in the first request list. 14092 */ 14093 while (ap->av_forw != NULL) { 14094 /* 14095 * We want to go after the current request (1) if 14096 * there is an inversion after it (i.e. it is the end 14097 * of the first request list), or (2) if the next 14098 * request is a larger block no. than our request. 14099 */ 14100 if ((SD_GET_BLKNO(ap->av_forw) < SD_GET_BLKNO(ap)) || 14101 (SD_GET_BLKNO(bp) < SD_GET_BLKNO(ap->av_forw))) { 14102 goto insert; 14103 } 14104 ap = ap->av_forw; 14105 } 14106 14107 /* 14108 * Neither a second list nor a larger request, therefore 14109 * we go at the end of the first list (which is the same 14110 * as the end of the whole schebang). 14111 */ 14112 insert: 14113 bp->av_forw = ap->av_forw; 14114 ap->av_forw = bp; 14115 14116 /* 14117 * If we inserted onto the tail end of the waitq, make sure the 14118 * tail pointer is updated. 14119 */ 14120 if (ap == un->un_waitq_tailp) { 14121 un->un_waitq_tailp = bp; 14122 } 14123 } 14124 14125 14126 /* 14127 * Function: sd_start_cmds 14128 * 14129 * Description: Remove and transport cmds from the driver queues. 14130 * 14131 * Arguments: un - pointer to the unit (soft state) struct for the target. 14132 * 14133 * immed_bp - ptr to a buf to be transported immediately. Only 14134 * the immed_bp is transported; bufs on the waitq are not 14135 * processed and the un_retry_bp is not checked. If immed_bp is 14136 * NULL, then normal queue processing is performed. 14137 * 14138 * Context: May be called from kernel thread context, interrupt context, 14139 * or runout callback context. This function may not block or 14140 * call routines that block. 14141 */ 14142 14143 static void 14144 sd_start_cmds(struct sd_lun *un, struct buf *immed_bp) 14145 { 14146 struct sd_xbuf *xp; 14147 struct buf *bp; 14148 void (*statp)(kstat_io_t *); 14149 #if defined(__i386) || defined(__amd64) /* DMAFREE for x86 only */ 14150 void (*saved_statp)(kstat_io_t *); 14151 #endif 14152 int rval; 14153 struct sd_fm_internal *sfip = NULL; 14154 14155 ASSERT(un != NULL); 14156 ASSERT(mutex_owned(SD_MUTEX(un))); 14157 ASSERT(un->un_ncmds_in_transport >= 0); 14158 ASSERT(un->un_throttle >= 0); 14159 14160 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sd_start_cmds: entry\n"); 14161 14162 do { 14163 #if defined(__i386) || defined(__amd64) /* DMAFREE for x86 only */ 14164 saved_statp = NULL; 14165 #endif 14166 14167 /* 14168 * If we are syncing or dumping, fail the command to 14169 * avoid recursively calling back into scsi_transport(). 
14170 * The dump I/O itself uses a separate code path so this 14171 * only prevents non-dump I/O from being sent while dumping. 14172 * File system sync takes place before dumping begins. 14173 * During panic, filesystem I/O is allowed provided 14174 * un_in_callback is <= 1. This is to prevent recursion 14175 * such as sd_start_cmds -> scsi_transport -> sdintr -> 14176 * sd_start_cmds and so on. See panic.c for more information 14177 * about the states the system can be in during panic. 14178 */ 14179 if ((un->un_state == SD_STATE_DUMPING) || 14180 (ddi_in_panic() && (un->un_in_callback > 1))) { 14181 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14182 "sd_start_cmds: panicking\n"); 14183 goto exit; 14184 } 14185 14186 if ((bp = immed_bp) != NULL) { 14187 /* 14188 * We have a bp that must be transported immediately. 14189 * It's OK to transport the immed_bp here without doing 14190 * the throttle limit check because the immed_bp is 14191 * always used in a retry/recovery case. This means 14192 * that we know we are not at the throttle limit by 14193 * virtue of the fact that to get here we must have 14194 * already gotten a command back via sdintr(). This also 14195 * relies on (1) the command on un_retry_bp preventing 14196 * further commands from the waitq from being issued; 14197 * and (2) the code in sd_retry_command checking the 14198 * throttle limit before issuing a delayed or immediate 14199 * retry. This holds even if the throttle limit is 14200 * currently ratcheted down from its maximum value. 14201 */ 14202 statp = kstat_runq_enter; 14203 if (bp == un->un_retry_bp) { 14204 ASSERT((un->un_retry_statp == NULL) || 14205 (un->un_retry_statp == kstat_waitq_enter) || 14206 (un->un_retry_statp == 14207 kstat_runq_back_to_waitq)); 14208 /* 14209 * If the waitq kstat was incremented when 14210 * sd_set_retry_bp() queued this bp for a retry, 14211 * then we must set up statp so that the waitq 14212 * count will get decremented correctly below. 14213 * Also we must clear un->un_retry_statp to 14214 * ensure that we do not act on a stale value 14215 * in this field. 14216 */ 14217 if ((un->un_retry_statp == kstat_waitq_enter) || 14218 (un->un_retry_statp == 14219 kstat_runq_back_to_waitq)) { 14220 statp = kstat_waitq_to_runq; 14221 } 14222 #if defined(__i386) || defined(__amd64) /* DMAFREE for x86 only */ 14223 saved_statp = un->un_retry_statp; 14224 #endif 14225 un->un_retry_statp = NULL; 14226 14227 SD_TRACE(SD_LOG_IO | SD_LOG_ERROR, un, 14228 "sd_start_cmds: un:0x%p: GOT retry_bp:0x%p " 14229 "un_throttle:%d un_ncmds_in_transport:%d\n", 14230 un, un->un_retry_bp, un->un_throttle, 14231 un->un_ncmds_in_transport); 14232 } else { 14233 SD_TRACE(SD_LOG_IO_CORE, un, "sd_start_cmds: " 14234 "processing priority bp:0x%p\n", bp); 14235 } 14236 14237 } else if ((bp = un->un_waitq_headp) != NULL) { 14238 /* 14239 * A command on the waitq is ready to go, but do not 14240 * send it if: 14241 * 14242 * (1) the throttle limit has been reached, or 14243 * (2) a retry is pending, or 14244 * (3) a START_STOP_UNIT callback pending, or 14245 * (4) a callback for a SD_PATH_DIRECT_PRIORITY 14246 * command is pending. 14247 * 14248 * For all of these conditions, IO processing will 14249 * restart after the condition is cleared. 
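			 * (The four tests below check these conditions in
			 * this same order.)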
14250 */ 14251 if (un->un_ncmds_in_transport >= un->un_throttle) { 14252 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14253 "sd_start_cmds: exiting, " 14254 "throttle limit reached!\n"); 14255 goto exit; 14256 } 14257 if (un->un_retry_bp != NULL) { 14258 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14259 "sd_start_cmds: exiting, retry pending!\n"); 14260 goto exit; 14261 } 14262 if (un->un_startstop_timeid != NULL) { 14263 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14264 "sd_start_cmds: exiting, " 14265 "START_STOP pending!\n"); 14266 goto exit; 14267 } 14268 if (un->un_direct_priority_timeid != NULL) { 14269 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14270 "sd_start_cmds: exiting, " 14271 "SD_PATH_DIRECT_PRIORITY cmd. pending!\n"); 14272 goto exit; 14273 } 14274 14275 /* Dequeue the command */ 14276 un->un_waitq_headp = bp->av_forw; 14277 if (un->un_waitq_headp == NULL) { 14278 un->un_waitq_tailp = NULL; 14279 } 14280 bp->av_forw = NULL; 14281 statp = kstat_waitq_to_runq; 14282 SD_TRACE(SD_LOG_IO_CORE, un, 14283 "sd_start_cmds: processing waitq bp:0x%p\n", bp); 14284 14285 } else { 14286 /* No work to do so bail out now */ 14287 SD_TRACE(SD_LOG_IO_CORE, un, 14288 "sd_start_cmds: no more work, exiting!\n"); 14289 goto exit; 14290 } 14291 14292 /* 14293 * Reset the state to normal. This is the mechanism by which 14294 * the state transitions from either SD_STATE_RWAIT or 14295 * SD_STATE_OFFLINE to SD_STATE_NORMAL. 14296 * If state is SD_STATE_PM_CHANGING then this command is 14297 * part of the device power control and the state must 14298 * not be put back to normal. Doing so would would 14299 * allow new commands to proceed when they shouldn't, 14300 * the device may be going off. 14301 */ 14302 if ((un->un_state != SD_STATE_SUSPENDED) && 14303 (un->un_state != SD_STATE_PM_CHANGING)) { 14304 New_state(un, SD_STATE_NORMAL); 14305 } 14306 14307 xp = SD_GET_XBUF(bp); 14308 ASSERT(xp != NULL); 14309 14310 #if defined(__i386) || defined(__amd64) /* DMAFREE for x86 only */ 14311 /* 14312 * Allocate the scsi_pkt if we need one, or attach DMA 14313 * resources if we have a scsi_pkt that needs them. The 14314 * latter should only occur for commands that are being 14315 * retried. 14316 */ 14317 if ((xp->xb_pktp == NULL) || 14318 ((xp->xb_pkt_flags & SD_XB_DMA_FREED) != 0)) { 14319 #else 14320 if (xp->xb_pktp == NULL) { 14321 #endif 14322 /* 14323 * There is no scsi_pkt allocated for this buf. Call 14324 * the initpkt function to allocate & init one. 14325 * 14326 * The scsi_init_pkt runout callback functionality is 14327 * implemented as follows: 14328 * 14329 * 1) The initpkt function always calls 14330 * scsi_init_pkt(9F) with sdrunout specified as the 14331 * callback routine. 14332 * 2) A successful packet allocation is initialized and 14333 * the I/O is transported. 14334 * 3) The I/O associated with an allocation resource 14335 * failure is left on its queue to be retried via 14336 * runout or the next I/O. 14337 * 4) The I/O associated with a DMA error is removed 14338 * from the queue and failed with EIO. Processing of 14339 * the transport queues is also halted to be 14340 * restarted via runout or the next I/O. 14341 * 5) The I/O associated with a CDB size or packet 14342 * size error is removed from the queue and failed 14343 * with EIO. Processing of the transport queues is 14344 * continued. 14345 * 14346 * Note: there is no interface for canceling a runout 14347 * callback. 
To prevent the driver from detaching or 14348 * suspending while a runout is pending the driver 14349 * state is set to SD_STATE_RWAIT 14350 * 14351 * Note: using the scsi_init_pkt callback facility can 14352 * result in an I/O request persisting at the head of 14353 * the list which cannot be satisfied even after 14354 * multiple retries. In the future the driver may 14355 * implement some kind of maximum runout count before 14356 * failing an I/O. 14357 * 14358 * Note: the use of funcp below may seem superfluous, 14359 * but it helps warlock figure out the correct 14360 * initpkt function calls (see [s]sd.wlcmd). 14361 */ 14362 struct scsi_pkt *pktp; 14363 int (*funcp)(struct buf *bp, struct scsi_pkt **pktp); 14364 14365 ASSERT(bp != un->un_rqs_bp); 14366 14367 funcp = sd_initpkt_map[xp->xb_chain_iostart]; 14368 switch ((*funcp)(bp, &pktp)) { 14369 case SD_PKT_ALLOC_SUCCESS: 14370 xp->xb_pktp = pktp; 14371 SD_TRACE(SD_LOG_IO_CORE, un, 14372 "sd_start_cmd: SD_PKT_ALLOC_SUCCESS 0x%p\n", 14373 pktp); 14374 goto got_pkt; 14375 14376 case SD_PKT_ALLOC_FAILURE: 14377 /* 14378 * Temporary (hopefully) resource depletion. 14379 * Since retries and RQS commands always have a 14380 * scsi_pkt allocated, these cases should never 14381 * get here. So the only cases this needs to 14382 * handle is a bp from the waitq (which we put 14383 * back onto the waitq for sdrunout), or a bp 14384 * sent as an immed_bp (which we just fail). 14385 */ 14386 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14387 "sd_start_cmds: SD_PKT_ALLOC_FAILURE\n"); 14388 14389 #if defined(__i386) || defined(__amd64) /* DMAFREE for x86 only */ 14390 14391 if (bp == immed_bp) { 14392 /* 14393 * If SD_XB_DMA_FREED is clear, then 14394 * this is a failure to allocate a 14395 * scsi_pkt, and we must fail the 14396 * command. 14397 */ 14398 if ((xp->xb_pkt_flags & 14399 SD_XB_DMA_FREED) == 0) { 14400 break; 14401 } 14402 14403 /* 14404 * If this immediate command is NOT our 14405 * un_retry_bp, then we must fail it. 14406 */ 14407 if (bp != un->un_retry_bp) { 14408 break; 14409 } 14410 14411 /* 14412 * We get here if this cmd is our 14413 * un_retry_bp that was DMAFREED, but 14414 * scsi_init_pkt() failed to reallocate 14415 * DMA resources when we attempted to 14416 * retry it. This can happen when an 14417 * mpxio failover is in progress, but 14418 * we don't want to just fail the 14419 * command in this case. 14420 * 14421 * Use timeout(9F) to restart it after 14422 * a 100ms delay. We don't want to 14423 * let sdrunout() restart it, because 14424 * sdrunout() is just supposed to start 14425 * commands that are sitting on the 14426 * wait queue. The un_retry_bp stays 14427 * set until the command completes, but 14428 * sdrunout can be called many times 14429 * before that happens. Since sdrunout 14430 * cannot tell if the un_retry_bp is 14431 * already in the transport, it could 14432 * end up calling scsi_transport() for 14433 * the un_retry_bp multiple times. 14434 * 14435 * Also: don't schedule the callback 14436 * if some other callback is already 14437 * pending. 14438 */ 14439 if (un->un_retry_statp == NULL) { 14440 /* 14441 * restore the kstat pointer to 14442 * keep kstat counts coherent 14443 * when we do retry the command. 
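					 * (saved_statp captured the value
					 * of un_retry_statp before it was
					 * cleared above.)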
14444 */ 14445 un->un_retry_statp = 14446 saved_statp; 14447 } 14448 14449 if ((un->un_startstop_timeid == NULL) && 14450 (un->un_retry_timeid == NULL) && 14451 (un->un_direct_priority_timeid == 14452 NULL)) { 14453 14454 un->un_retry_timeid = 14455 timeout( 14456 sd_start_retry_command, 14457 un, SD_RESTART_TIMEOUT); 14458 } 14459 goto exit; 14460 } 14461 14462 #else 14463 if (bp == immed_bp) { 14464 break; /* Just fail the command */ 14465 } 14466 #endif 14467 14468 /* Add the buf back to the head of the waitq */ 14469 bp->av_forw = un->un_waitq_headp; 14470 un->un_waitq_headp = bp; 14471 if (un->un_waitq_tailp == NULL) { 14472 un->un_waitq_tailp = bp; 14473 } 14474 goto exit; 14475 14476 case SD_PKT_ALLOC_FAILURE_NO_DMA: 14477 /* 14478 * HBA DMA resource failure. Fail the command 14479 * and continue processing of the queues. 14480 */ 14481 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14482 "sd_start_cmds: " 14483 "SD_PKT_ALLOC_FAILURE_NO_DMA\n"); 14484 break; 14485 14486 case SD_PKT_ALLOC_FAILURE_PKT_TOO_SMALL: 14487 /* 14488 * Note:x86: Partial DMA mapping not supported 14489 * for USCSI commands, and all the needed DMA 14490 * resources were not allocated. 14491 */ 14492 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14493 "sd_start_cmds: " 14494 "SD_PKT_ALLOC_FAILURE_PKT_TOO_SMALL\n"); 14495 break; 14496 14497 case SD_PKT_ALLOC_FAILURE_CDB_TOO_SMALL: 14498 /* 14499 * Note:x86: Request cannot fit into CDB based 14500 * on lba and len. 14501 */ 14502 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14503 "sd_start_cmds: " 14504 "SD_PKT_ALLOC_FAILURE_CDB_TOO_SMALL\n"); 14505 break; 14506 14507 default: 14508 /* Should NEVER get here! */ 14509 panic("scsi_initpkt error"); 14510 /*NOTREACHED*/ 14511 } 14512 14513 /* 14514 * Fatal error in allocating a scsi_pkt for this buf. 14515 * Update kstats & return the buf with an error code. 14516 * We must use sd_return_failed_command_no_restart() to 14517 * avoid a recursive call back into sd_start_cmds(). 14518 * However this also means that we must keep processing 14519 * the waitq here in order to avoid stalling. 14520 */ 14521 if (statp == kstat_waitq_to_runq) { 14522 SD_UPDATE_KSTATS(un, kstat_waitq_exit, bp); 14523 } 14524 sd_return_failed_command_no_restart(un, bp, EIO); 14525 if (bp == immed_bp) { 14526 /* immed_bp is gone by now, so clear this */ 14527 immed_bp = NULL; 14528 } 14529 continue; 14530 } 14531 got_pkt: 14532 if (bp == immed_bp) { 14533 /* goto the head of the class.... */ 14534 xp->xb_pktp->pkt_flags |= FLAG_HEAD; 14535 } 14536 14537 un->un_ncmds_in_transport++; 14538 SD_UPDATE_KSTATS(un, statp, bp); 14539 14540 /* 14541 * Call scsi_transport() to send the command to the target. 14542 * According to SCSA architecture, we must drop the mutex here 14543 * before calling scsi_transport() in order to avoid deadlock. 14544 * Note that the scsi_pkt's completion routine can be executed 14545 * (from interrupt context) even before the call to 14546 * scsi_transport() returns. 
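		 * The pattern below is therefore: drop the mutex, call
		 * scsi_transport(), re-acquire the mutex, and only then
		 * act on the return value, re-checking any softstate that
		 * an interrupt-context completion may have changed.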
14547 */ 14548 SD_TRACE(SD_LOG_IO_CORE, un, 14549 "sd_start_cmds: calling scsi_transport()\n"); 14550 DTRACE_PROBE1(scsi__transport__dispatch, struct buf *, bp); 14551 14552 mutex_exit(SD_MUTEX(un)); 14553 rval = scsi_transport(xp->xb_pktp); 14554 mutex_enter(SD_MUTEX(un)); 14555 14556 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14557 "sd_start_cmds: scsi_transport() returned %d\n", rval); 14558 14559 switch (rval) { 14560 case TRAN_ACCEPT: 14561 /* Clear this with every pkt accepted by the HBA */ 14562 un->un_tran_fatal_count = 0; 14563 break; /* Success; try the next cmd (if any) */ 14564 14565 case TRAN_BUSY: 14566 un->un_ncmds_in_transport--; 14567 ASSERT(un->un_ncmds_in_transport >= 0); 14568 14569 /* 14570 * Don't retry request sense, the sense data 14571 * is lost when another request is sent. 14572 * Free up the rqs buf and retry 14573 * the original failed cmd. Update kstat. 14574 */ 14575 if (bp == un->un_rqs_bp) { 14576 SD_UPDATE_KSTATS(un, kstat_runq_exit, bp); 14577 bp = sd_mark_rqs_idle(un, xp); 14578 sd_retry_command(un, bp, SD_RETRIES_STANDARD, 14579 NULL, NULL, EIO, un->un_busy_timeout / 500, 14580 kstat_waitq_enter); 14581 goto exit; 14582 } 14583 14584 #if defined(__i386) || defined(__amd64) /* DMAFREE for x86 only */ 14585 /* 14586 * Free the DMA resources for the scsi_pkt. This will 14587 * allow mpxio to select another path the next time 14588 * we call scsi_transport() with this scsi_pkt. 14589 * See sdintr() for the rationalization behind this. 14590 */ 14591 if ((un->un_f_is_fibre == TRUE) && 14592 ((xp->xb_pkt_flags & SD_XB_USCSICMD) == 0) && 14593 ((xp->xb_pktp->pkt_flags & FLAG_SENSING) == 0)) { 14594 scsi_dmafree(xp->xb_pktp); 14595 xp->xb_pkt_flags |= SD_XB_DMA_FREED; 14596 } 14597 #endif 14598 14599 if (SD_IS_DIRECT_PRIORITY(SD_GET_XBUF(bp))) { 14600 /* 14601 * Commands that are SD_PATH_DIRECT_PRIORITY 14602 * are for error recovery situations. These do 14603 * not use the normal command waitq, so if they 14604 * get a TRAN_BUSY we cannot put them back onto 14605 * the waitq for later retry. One possible 14606 * problem is that there could already be some 14607 * other command on un_retry_bp that is waiting 14608 * for this one to complete, so we would be 14609 * deadlocked if we put this command back onto 14610 * the waitq for later retry (since un_retry_bp 14611 * must complete before the driver gets back to 14612 * commands on the waitq). 14613 * 14614 * To avoid deadlock we must schedule a callback 14615 * that will restart this command after a set 14616 * interval. This should keep retrying for as 14617 * long as the underlying transport keeps 14618 * returning TRAN_BUSY (just like for other 14619 * commands). Use the same timeout interval as 14620 * for the ordinary TRAN_BUSY retry. 14621 */ 14622 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14623 "sd_start_cmds: scsi_transport() returned " 14624 "TRAN_BUSY for DIRECT_PRIORITY cmd!\n"); 14625 14626 SD_UPDATE_KSTATS(un, kstat_runq_exit, bp); 14627 un->un_direct_priority_timeid = 14628 timeout(sd_start_direct_priority_command, 14629 bp, un->un_busy_timeout / 500); 14630 14631 goto exit; 14632 } 14633 14634 /* 14635 * For TRAN_BUSY, we want to reduce the throttle value, 14636 * unless we are retrying a command. 14637 */ 14638 if (bp != un->un_retry_bp) { 14639 sd_reduce_throttle(un, SD_THROTTLE_TRAN_BUSY); 14640 } 14641 14642 /* 14643 * Set up the bp to be tried again 10 ms later. 14644 * Note:x86: Is there a timeout value in the sd_lun 14645 * for this condition? 
14646 */ 14647 sd_set_retry_bp(un, bp, un->un_busy_timeout / 500, 14648 kstat_runq_back_to_waitq); 14649 goto exit; 14650 14651 case TRAN_FATAL_ERROR: 14652 un->un_tran_fatal_count++; 14653 /* FALLTHRU */ 14654 14655 case TRAN_BADPKT: 14656 default: 14657 un->un_ncmds_in_transport--; 14658 ASSERT(un->un_ncmds_in_transport >= 0); 14659 14660 /* 14661 * If this is our REQUEST SENSE command with a 14662 * transport error, we must get back the pointers 14663 * to the original buf, and mark the REQUEST 14664 * SENSE command as "available". 14665 */ 14666 if (bp == un->un_rqs_bp) { 14667 bp = sd_mark_rqs_idle(un, xp); 14668 xp = SD_GET_XBUF(bp); 14669 } else { 14670 /* 14671 * Legacy behavior: do not update transport 14672 * error count for request sense commands. 14673 */ 14674 SD_UPDATE_ERRSTATS(un, sd_transerrs); 14675 } 14676 14677 SD_UPDATE_KSTATS(un, kstat_runq_exit, bp); 14678 sd_print_transport_rejected_message(un, xp, rval); 14679 14680 /* 14681 * This command will be terminated by SD driver due 14682 * to a fatal transport error. We should post 14683 * ereport.io.scsi.cmd.disk.tran with driver-assessment 14684 * of "fail" for any command to indicate this 14685 * situation. 14686 */ 14687 if (xp->xb_ena > 0) { 14688 ASSERT(un->un_fm_private != NULL); 14689 sfip = un->un_fm_private; 14690 sfip->fm_ssc.ssc_flags |= SSC_FLAGS_TRAN_ABORT; 14691 sd_ssc_extract_info(&sfip->fm_ssc, un, 14692 xp->xb_pktp, bp, xp); 14693 sd_ssc_post(&sfip->fm_ssc, SD_FM_DRV_FATAL); 14694 } 14695 14696 /* 14697 * We must use sd_return_failed_command_no_restart() to 14698 * avoid a recursive call back into sd_start_cmds(). 14699 * However this also means that we must keep processing 14700 * the waitq here in order to avoid stalling. 14701 */ 14702 sd_return_failed_command_no_restart(un, bp, EIO); 14703 14704 /* 14705 * Notify any threads waiting in sd_ddi_suspend() that 14706 * a command completion has occurred. 14707 */ 14708 if (un->un_state == SD_STATE_SUSPENDED) { 14709 cv_broadcast(&un->un_disk_busy_cv); 14710 } 14711 14712 if (bp == immed_bp) { 14713 /* immed_bp is gone by now, so clear this */ 14714 immed_bp = NULL; 14715 } 14716 break; 14717 } 14718 14719 } while (immed_bp == NULL); 14720 14721 exit: 14722 ASSERT(mutex_owned(SD_MUTEX(un))); 14723 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sd_start_cmds: exit\n"); 14724 } 14725 14726 14727 /* 14728 * Function: sd_return_command 14729 * 14730 * Description: Returns a command to its originator (with or without an 14731 * error). Also starts commands waiting to be transported 14732 * to the target. 14733 * 14734 * Context: May be called from interrupt, kernel, or timeout context 14735 */ 14736 14737 static void 14738 sd_return_command(struct sd_lun *un, struct buf *bp) 14739 { 14740 struct sd_xbuf *xp; 14741 struct scsi_pkt *pktp; 14742 struct sd_fm_internal *sfip; 14743 14744 ASSERT(bp != NULL); 14745 ASSERT(un != NULL); 14746 ASSERT(mutex_owned(SD_MUTEX(un))); 14747 ASSERT(bp != un->un_rqs_bp); 14748 xp = SD_GET_XBUF(bp); 14749 ASSERT(xp != NULL); 14750 14751 pktp = SD_GET_PKTP(bp); 14752 sfip = (struct sd_fm_internal *)un->un_fm_private; 14753 ASSERT(sfip != NULL); 14754 14755 SD_TRACE(SD_LOG_IO_CORE, un, "sd_return_command: entry\n"); 14756 14757 /* 14758 * Note: check for the "sdrestart failed" case. 
14759 */ 14760 if ((un->un_partial_dma_supported == 1) && 14761 ((xp->xb_pkt_flags & SD_XB_USCSICMD) != SD_XB_USCSICMD) && 14762 (geterror(bp) == 0) && (xp->xb_dma_resid != 0) && 14763 (xp->xb_pktp->pkt_resid == 0)) { 14764 14765 if (sd_setup_next_xfer(un, bp, pktp, xp) != 0) { 14766 /* 14767 * Successfully set up next portion of cmd 14768 * transfer, try sending it 14769 */ 14770 sd_retry_command(un, bp, SD_RETRIES_NOCHECK, 14771 NULL, NULL, 0, (clock_t)0, NULL); 14772 sd_start_cmds(un, NULL); 14773 return; /* Note:x86: need a return here? */ 14774 } 14775 } 14776 14777 /* 14778 * If this is the failfast bp, clear it from un_failfast_bp. This 14779 * can happen if upon being re-tried the failfast bp either 14780 * succeeded or encountered another error (possibly even a different 14781 * error than the one that precipitated the failfast state, but in 14782 * that case it would have had to exhaust retries as well). Regardless, 14783 * this should not occur whenever the instance is in the active 14784 * failfast state. 14785 */ 14786 if (bp == un->un_failfast_bp) { 14787 ASSERT(un->un_failfast_state == SD_FAILFAST_INACTIVE); 14788 un->un_failfast_bp = NULL; 14789 } 14790 14791 /* 14792 * Clear the failfast state upon successful completion of ANY cmd. 14793 */ 14794 if (bp->b_error == 0) { 14795 un->un_failfast_state = SD_FAILFAST_INACTIVE; 14796 /* 14797 * If this is a successful command, but used to be retried, 14798 * we will take it as a recovered command and post an 14799 * ereport with driver-assessment of "recovered". 14800 */ 14801 if (xp->xb_ena > 0) { 14802 sd_ssc_extract_info(&sfip->fm_ssc, un, pktp, bp, xp); 14803 sd_ssc_post(&sfip->fm_ssc, SD_FM_DRV_RECOVERY); 14804 } 14805 } else { 14806 /* 14807 * If this is a failed non-USCSI command we will post an 14808 * ereport with driver-assessment set accordingly("fail" or 14809 * "fatal"). 14810 */ 14811 if (!(xp->xb_pkt_flags & SD_XB_USCSICMD)) { 14812 sd_ssc_extract_info(&sfip->fm_ssc, un, pktp, bp, xp); 14813 sd_ssc_post(&sfip->fm_ssc, SD_FM_DRV_FATAL); 14814 } 14815 } 14816 14817 /* 14818 * This is used if the command was retried one or more times. Show that 14819 * we are done with it, and allow processing of the waitq to resume. 14820 */ 14821 if (bp == un->un_retry_bp) { 14822 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14823 "sd_return_command: un:0x%p: " 14824 "RETURNING retry_bp:0x%p\n", un, un->un_retry_bp); 14825 un->un_retry_bp = NULL; 14826 un->un_retry_statp = NULL; 14827 } 14828 14829 SD_UPDATE_RDWR_STATS(un, bp); 14830 SD_UPDATE_PARTITION_STATS(un, bp); 14831 14832 switch (un->un_state) { 14833 case SD_STATE_SUSPENDED: 14834 /* 14835 * Notify any threads waiting in sd_ddi_suspend() that 14836 * a command completion has occurred. 14837 */ 14838 cv_broadcast(&un->un_disk_busy_cv); 14839 break; 14840 default: 14841 sd_start_cmds(un, NULL); 14842 break; 14843 } 14844 14845 /* Return this command up the iodone chain to its originator. */ 14846 mutex_exit(SD_MUTEX(un)); 14847 14848 (*(sd_destroypkt_map[xp->xb_chain_iodone]))(bp); 14849 xp->xb_pktp = NULL; 14850 14851 SD_BEGIN_IODONE(xp->xb_chain_iodone, un, bp); 14852 14853 ASSERT(!mutex_owned(SD_MUTEX(un))); 14854 mutex_enter(SD_MUTEX(un)); 14855 14856 SD_TRACE(SD_LOG_IO_CORE, un, "sd_return_command: exit\n"); 14857 } 14858 14859 14860 /* 14861 * Function: sd_return_failed_command 14862 * 14863 * Description: Command completion when an error occurred. 
14864 * 14865 * Context: May be called from interrupt context 14866 */ 14867 14868 static void 14869 sd_return_failed_command(struct sd_lun *un, struct buf *bp, int errcode) 14870 { 14871 ASSERT(bp != NULL); 14872 ASSERT(un != NULL); 14873 ASSERT(mutex_owned(SD_MUTEX(un))); 14874 14875 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14876 "sd_return_failed_command: entry\n"); 14877 14878 /* 14879 * b_resid could already be nonzero due to a partial data 14880 * transfer, so do not change it here. 14881 */ 14882 SD_BIOERROR(bp, errcode); 14883 14884 sd_return_command(un, bp); 14885 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14886 "sd_return_failed_command: exit\n"); 14887 } 14888 14889 14890 /* 14891 * Function: sd_return_failed_command_no_restart 14892 * 14893 * Description: Same as sd_return_failed_command, but ensures that no 14894 * call back into sd_start_cmds will be issued. 14895 * 14896 * Context: May be called from interrupt context 14897 */ 14898 14899 static void 14900 sd_return_failed_command_no_restart(struct sd_lun *un, struct buf *bp, 14901 int errcode) 14902 { 14903 struct sd_xbuf *xp; 14904 14905 ASSERT(bp != NULL); 14906 ASSERT(un != NULL); 14907 ASSERT(mutex_owned(SD_MUTEX(un))); 14908 xp = SD_GET_XBUF(bp); 14909 ASSERT(xp != NULL); 14910 ASSERT(errcode != 0); 14911 14912 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14913 "sd_return_failed_command_no_restart: entry\n"); 14914 14915 /* 14916 * b_resid could already be nonzero due to a partial data 14917 * transfer, so do not change it here. 14918 */ 14919 SD_BIOERROR(bp, errcode); 14920 14921 /* 14922 * If this is the failfast bp, clear it. This can happen if the 14923 * failfast bp encounterd a fatal error when we attempted to 14924 * re-try it (such as a scsi_transport(9F) failure). However 14925 * we should NOT be in an active failfast state if the failfast 14926 * bp is not NULL. 14927 */ 14928 if (bp == un->un_failfast_bp) { 14929 ASSERT(un->un_failfast_state == SD_FAILFAST_INACTIVE); 14930 un->un_failfast_bp = NULL; 14931 } 14932 14933 if (bp == un->un_retry_bp) { 14934 /* 14935 * This command was retried one or more times. Show that we are 14936 * done with it, and allow processing of the waitq to resume. 14937 */ 14938 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14939 "sd_return_failed_command_no_restart: " 14940 " un:0x%p: RETURNING retry_bp:0x%p\n", un, un->un_retry_bp); 14941 un->un_retry_bp = NULL; 14942 un->un_retry_statp = NULL; 14943 } 14944 14945 SD_UPDATE_RDWR_STATS(un, bp); 14946 SD_UPDATE_PARTITION_STATS(un, bp); 14947 14948 mutex_exit(SD_MUTEX(un)); 14949 14950 if (xp->xb_pktp != NULL) { 14951 (*(sd_destroypkt_map[xp->xb_chain_iodone]))(bp); 14952 xp->xb_pktp = NULL; 14953 } 14954 14955 SD_BEGIN_IODONE(xp->xb_chain_iodone, un, bp); 14956 14957 mutex_enter(SD_MUTEX(un)); 14958 14959 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14960 "sd_return_failed_command_no_restart: exit\n"); 14961 } 14962 14963 14964 /* 14965 * Function: sd_retry_command 14966 * 14967 * Description: queue up a command for retry, or (optionally) fail it 14968 * if retry counts are exhausted. 14969 * 14970 * Arguments: un - Pointer to the sd_lun struct for the target. 14971 * 14972 * bp - Pointer to the buf for the command to be retried. 14973 * 14974 * retry_check_flag - Flag to see which (if any) of the retry 14975 * counts should be decremented/checked. If the indicated 14976 * retry count is exhausted, then the command will not be 14977 * retried; it will be failed instead. 
This should use a 14978 * value equal to one of the following: 14979 * 14980 * SD_RETRIES_NOCHECK 14981 * SD_RESD_RETRIES_STANDARD 14982 * SD_RETRIES_VICTIM 14983 * 14984 * Optionally may be bitwise-OR'ed with SD_RETRIES_ISOLATE 14985 * if the check should be made to see of FLAG_ISOLATE is set 14986 * in the pkt. If FLAG_ISOLATE is set, then the command is 14987 * not retried, it is simply failed. 14988 * 14989 * user_funcp - Ptr to function to call before dispatching the 14990 * command. May be NULL if no action needs to be performed. 14991 * (Primarily intended for printing messages.) 14992 * 14993 * user_arg - Optional argument to be passed along to 14994 * the user_funcp call. 14995 * 14996 * failure_code - errno return code to set in the bp if the 14997 * command is going to be failed. 14998 * 14999 * retry_delay - Retry delay interval in (clock_t) units. May 15000 * be zero which indicates that the retry should be retried 15001 * immediately (ie, without an intervening delay). 15002 * 15003 * statp - Ptr to kstat function to be updated if the command 15004 * is queued for a delayed retry. May be NULL if no kstat 15005 * update is desired. 15006 * 15007 * Context: May be called from interrupt context. 15008 */ 15009 15010 static void 15011 sd_retry_command(struct sd_lun *un, struct buf *bp, int retry_check_flag, 15012 void (*user_funcp)(struct sd_lun *un, struct buf *bp, void *argp, int 15013 code), void *user_arg, int failure_code, clock_t retry_delay, 15014 void (*statp)(kstat_io_t *)) 15015 { 15016 struct sd_xbuf *xp; 15017 struct scsi_pkt *pktp; 15018 struct sd_fm_internal *sfip; 15019 15020 ASSERT(un != NULL); 15021 ASSERT(mutex_owned(SD_MUTEX(un))); 15022 ASSERT(bp != NULL); 15023 xp = SD_GET_XBUF(bp); 15024 ASSERT(xp != NULL); 15025 pktp = SD_GET_PKTP(bp); 15026 ASSERT(pktp != NULL); 15027 15028 sfip = (struct sd_fm_internal *)un->un_fm_private; 15029 ASSERT(sfip != NULL); 15030 15031 SD_TRACE(SD_LOG_IO | SD_LOG_ERROR, un, 15032 "sd_retry_command: entry: bp:0x%p xp:0x%p\n", bp, xp); 15033 15034 /* 15035 * If we are syncing or dumping, fail the command to avoid 15036 * recursively calling back into scsi_transport(). 15037 */ 15038 if (ddi_in_panic()) { 15039 goto fail_command_no_log; 15040 } 15041 15042 /* 15043 * We should never be be retrying a command with FLAG_DIAGNOSE set, so 15044 * log an error and fail the command. 15045 */ 15046 if ((pktp->pkt_flags & FLAG_DIAGNOSE) != 0) { 15047 scsi_log(SD_DEVINFO(un), sd_label, CE_NOTE, 15048 "ERROR, retrying FLAG_DIAGNOSE command.\n"); 15049 sd_dump_memory(un, SD_LOG_IO, "CDB", 15050 (uchar_t *)pktp->pkt_cdbp, CDB_SIZE, SD_LOG_HEX); 15051 sd_dump_memory(un, SD_LOG_IO, "Sense Data", 15052 (uchar_t *)xp->xb_sense_data, SENSE_LENGTH, SD_LOG_HEX); 15053 goto fail_command; 15054 } 15055 15056 /* 15057 * If we are suspended, then put the command onto head of the 15058 * wait queue since we don't want to start more commands, and 15059 * clear the un_retry_bp. Next time when we are resumed, will 15060 * handle the command in the wait queue. 
15061 */ 15062 switch (un->un_state) { 15063 case SD_STATE_SUSPENDED: 15064 case SD_STATE_DUMPING: 15065 bp->av_forw = un->un_waitq_headp; 15066 un->un_waitq_headp = bp; 15067 if (un->un_waitq_tailp == NULL) { 15068 un->un_waitq_tailp = bp; 15069 } 15070 if (bp == un->un_retry_bp) { 15071 un->un_retry_bp = NULL; 15072 un->un_retry_statp = NULL; 15073 } 15074 SD_UPDATE_KSTATS(un, kstat_waitq_enter, bp); 15075 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sd_retry_command: " 15076 "exiting; cmd bp:0x%p requeued for SUSPEND/DUMP\n", bp); 15077 return; 15078 default: 15079 break; 15080 } 15081 15082 /* 15083 * If the caller wants us to check FLAG_ISOLATE, then see if that 15084 * is set; if it is then we do not want to retry the command. 15085 * Normally, FLAG_ISOLATE is only used with USCSI cmds. 15086 */ 15087 if ((retry_check_flag & SD_RETRIES_ISOLATE) != 0) { 15088 if ((pktp->pkt_flags & FLAG_ISOLATE) != 0) { 15089 goto fail_command; 15090 } 15091 } 15092 15093 15094 /* 15095 * If SD_RETRIES_FAILFAST is set, it indicates that either a 15096 * command timeout or a selection timeout has occurred. This means 15097 * that we were unable to establish an kind of communication with 15098 * the target, and subsequent retries and/or commands are likely 15099 * to encounter similar results and take a long time to complete. 15100 * 15101 * If this is a failfast error condition, we need to update the 15102 * failfast state, even if this bp does not have B_FAILFAST set. 15103 */ 15104 if (retry_check_flag & SD_RETRIES_FAILFAST) { 15105 if (un->un_failfast_state == SD_FAILFAST_ACTIVE) { 15106 ASSERT(un->un_failfast_bp == NULL); 15107 /* 15108 * If we are already in the active failfast state, and 15109 * another failfast error condition has been detected, 15110 * then fail this command if it has B_FAILFAST set. 15111 * If B_FAILFAST is clear, then maintain the legacy 15112 * behavior of retrying heroically, even tho this will 15113 * take a lot more time to fail the command. 15114 */ 15115 if (bp->b_flags & B_FAILFAST) { 15116 goto fail_command; 15117 } 15118 } else { 15119 /* 15120 * We're not in the active failfast state, but we 15121 * have a failfast error condition, so we must begin 15122 * transition to the next state. We do this regardless 15123 * of whether or not this bp has B_FAILFAST set. 15124 */ 15125 if (un->un_failfast_bp == NULL) { 15126 /* 15127 * This is the first bp to meet a failfast 15128 * condition so save it on un_failfast_bp & 15129 * do normal retry processing. Do not enter 15130 * active failfast state yet. This marks 15131 * entry into the "failfast pending" state. 15132 */ 15133 un->un_failfast_bp = bp; 15134 15135 } else if (un->un_failfast_bp == bp) { 15136 /* 15137 * This is the second time *this* bp has 15138 * encountered a failfast error condition, 15139 * so enter active failfast state & flush 15140 * queues as appropriate. 15141 */ 15142 un->un_failfast_state = SD_FAILFAST_ACTIVE; 15143 un->un_failfast_bp = NULL; 15144 sd_failfast_flushq(un); 15145 15146 /* 15147 * Fail this bp now if B_FAILFAST set; 15148 * otherwise continue with retries. (It would 15149 * be pretty ironic if this bp succeeded on a 15150 * subsequent retry after we just flushed all 15151 * the queues). 
15152 */ 15153 if (bp->b_flags & B_FAILFAST) { 15154 goto fail_command; 15155 } 15156 15157 #if !defined(lint) && !defined(__lint) 15158 } else { 15159 /* 15160 * If neither of the preceeding conditionals 15161 * was true, it means that there is some 15162 * *other* bp that has met an inital failfast 15163 * condition and is currently either being 15164 * retried or is waiting to be retried. In 15165 * that case we should perform normal retry 15166 * processing on *this* bp, since there is a 15167 * chance that the current failfast condition 15168 * is transient and recoverable. If that does 15169 * not turn out to be the case, then retries 15170 * will be cleared when the wait queue is 15171 * flushed anyway. 15172 */ 15173 #endif 15174 } 15175 } 15176 } else { 15177 /* 15178 * SD_RETRIES_FAILFAST is clear, which indicates that we 15179 * likely were able to at least establish some level of 15180 * communication with the target and subsequent commands 15181 * and/or retries are likely to get through to the target, 15182 * In this case we want to be aggressive about clearing 15183 * the failfast state. Note that this does not affect 15184 * the "failfast pending" condition. 15185 */ 15186 un->un_failfast_state = SD_FAILFAST_INACTIVE; 15187 } 15188 15189 15190 /* 15191 * Check the specified retry count to see if we can still do 15192 * any retries with this pkt before we should fail it. 15193 */ 15194 switch (retry_check_flag & SD_RETRIES_MASK) { 15195 case SD_RETRIES_VICTIM: 15196 /* 15197 * Check the victim retry count. If exhausted, then fall 15198 * thru & check against the standard retry count. 15199 */ 15200 if (xp->xb_victim_retry_count < un->un_victim_retry_count) { 15201 /* Increment count & proceed with the retry */ 15202 xp->xb_victim_retry_count++; 15203 break; 15204 } 15205 /* Victim retries exhausted, fall back to std. retries... */ 15206 /* FALLTHRU */ 15207 15208 case SD_RETRIES_STANDARD: 15209 if (xp->xb_retry_count >= un->un_retry_count) { 15210 /* Retries exhausted, fail the command */ 15211 SD_TRACE(SD_LOG_IO_CORE, un, 15212 "sd_retry_command: retries exhausted!\n"); 15213 /* 15214 * update b_resid for failed SCMD_READ & SCMD_WRITE 15215 * commands with nonzero pkt_resid. 15216 */ 15217 if ((pktp->pkt_reason == CMD_CMPLT) && 15218 (SD_GET_PKT_STATUS(pktp) == STATUS_GOOD) && 15219 (pktp->pkt_resid != 0)) { 15220 uchar_t op = SD_GET_PKT_OPCODE(pktp) & 0x1F; 15221 if ((op == SCMD_READ) || (op == SCMD_WRITE)) { 15222 SD_UPDATE_B_RESID(bp, pktp); 15223 } 15224 } 15225 goto fail_command; 15226 } 15227 xp->xb_retry_count++; 15228 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15229 "sd_retry_command: retry count:%d\n", xp->xb_retry_count); 15230 break; 15231 15232 case SD_RETRIES_UA: 15233 if (xp->xb_ua_retry_count >= sd_ua_retry_count) { 15234 /* Retries exhausted, fail the command */ 15235 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 15236 "Unit Attention retries exhausted. 
" 15237 "Check the target.\n"); 15238 goto fail_command; 15239 } 15240 xp->xb_ua_retry_count++; 15241 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15242 "sd_retry_command: retry count:%d\n", 15243 xp->xb_ua_retry_count); 15244 break; 15245 15246 case SD_RETRIES_BUSY: 15247 if (xp->xb_retry_count >= un->un_busy_retry_count) { 15248 /* Retries exhausted, fail the command */ 15249 SD_TRACE(SD_LOG_IO_CORE, un, 15250 "sd_retry_command: retries exhausted!\n"); 15251 goto fail_command; 15252 } 15253 xp->xb_retry_count++; 15254 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15255 "sd_retry_command: retry count:%d\n", xp->xb_retry_count); 15256 break; 15257 15258 case SD_RETRIES_NOCHECK: 15259 default: 15260 /* No retry count to check. Just proceed with the retry */ 15261 break; 15262 } 15263 15264 xp->xb_pktp->pkt_flags |= FLAG_HEAD; 15265 15266 /* 15267 * If this is a non-USCSI command being retried 15268 * during execution last time, we should post an ereport with 15269 * driver-assessment of the value "retry". 15270 * For partial DMA, request sense and STATUS_QFULL, there are no 15271 * hardware errors, we bypass ereport posting. 15272 */ 15273 if (failure_code != 0) { 15274 if (!(xp->xb_pkt_flags & SD_XB_USCSICMD)) { 15275 sd_ssc_extract_info(&sfip->fm_ssc, un, pktp, bp, xp); 15276 sd_ssc_post(&sfip->fm_ssc, SD_FM_DRV_RETRY); 15277 } 15278 } 15279 15280 /* 15281 * If we were given a zero timeout, we must attempt to retry the 15282 * command immediately (ie, without a delay). 15283 */ 15284 if (retry_delay == 0) { 15285 /* 15286 * Check some limiting conditions to see if we can actually 15287 * do the immediate retry. If we cannot, then we must 15288 * fall back to queueing up a delayed retry. 15289 */ 15290 if (un->un_ncmds_in_transport >= un->un_throttle) { 15291 /* 15292 * We are at the throttle limit for the target, 15293 * fall back to delayed retry. 15294 */ 15295 retry_delay = un->un_busy_timeout; 15296 statp = kstat_waitq_enter; 15297 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15298 "sd_retry_command: immed. retry hit " 15299 "throttle!\n"); 15300 } else { 15301 /* 15302 * We're clear to proceed with the immediate retry. 15303 * First call the user-provided function (if any) 15304 */ 15305 if (user_funcp != NULL) { 15306 (*user_funcp)(un, bp, user_arg, 15307 SD_IMMEDIATE_RETRY_ISSUED); 15308 #ifdef __lock_lint 15309 sd_print_incomplete_msg(un, bp, user_arg, 15310 SD_IMMEDIATE_RETRY_ISSUED); 15311 sd_print_cmd_incomplete_msg(un, bp, user_arg, 15312 SD_IMMEDIATE_RETRY_ISSUED); 15313 sd_print_sense_failed_msg(un, bp, user_arg, 15314 SD_IMMEDIATE_RETRY_ISSUED); 15315 #endif 15316 } 15317 15318 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15319 "sd_retry_command: issuing immediate retry\n"); 15320 15321 /* 15322 * Call sd_start_cmds() to transport the command to 15323 * the target. 15324 */ 15325 sd_start_cmds(un, bp); 15326 15327 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15328 "sd_retry_command exit\n"); 15329 return; 15330 } 15331 } 15332 15333 /* 15334 * Set up to retry the command after a delay. 
15335 * First call the user-provided function (if any) 15336 */ 15337 if (user_funcp != NULL) { 15338 (*user_funcp)(un, bp, user_arg, SD_DELAYED_RETRY_ISSUED); 15339 } 15340 15341 sd_set_retry_bp(un, bp, retry_delay, statp); 15342 15343 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sd_retry_command: exit\n"); 15344 return; 15345 15346 fail_command: 15347 15348 if (user_funcp != NULL) { 15349 (*user_funcp)(un, bp, user_arg, SD_NO_RETRY_ISSUED); 15350 } 15351 15352 fail_command_no_log: 15353 15354 SD_INFO(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15355 "sd_retry_command: returning failed command\n"); 15356 15357 sd_return_failed_command(un, bp, failure_code); 15358 15359 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sd_retry_command: exit\n"); 15360 } 15361 15362 15363 /* 15364 * Function: sd_set_retry_bp 15365 * 15366 * Description: Set up the given bp for retry. 15367 * 15368 * Arguments: un - ptr to associated softstate 15369 * bp - ptr to buf(9S) for the command 15370 * retry_delay - time interval before issuing retry (may be 0) 15371 * statp - optional pointer to kstat function 15372 * 15373 * Context: May be called under interrupt context 15374 */ 15375 15376 static void 15377 sd_set_retry_bp(struct sd_lun *un, struct buf *bp, clock_t retry_delay, 15378 void (*statp)(kstat_io_t *)) 15379 { 15380 ASSERT(un != NULL); 15381 ASSERT(mutex_owned(SD_MUTEX(un))); 15382 ASSERT(bp != NULL); 15383 15384 SD_TRACE(SD_LOG_IO | SD_LOG_ERROR, un, 15385 "sd_set_retry_bp: entry: un:0x%p bp:0x%p\n", un, bp); 15386 15387 /* 15388 * Indicate that the command is being retried. This will not allow any 15389 * other commands on the wait queue to be transported to the target 15390 * until this command has been completed (success or failure). The 15391 * "retry command" is not transported to the target until the given 15392 * time delay expires, unless the user specified a 0 retry_delay. 15393 * 15394 * Note: the timeout(9F) callback routine is what actually calls 15395 * sd_start_cmds() to transport the command, with the exception of a 15396 * zero retry_delay. The only current implementor of a zero retry delay 15397 * is the case where a START_STOP_UNIT is sent to spin-up a device. 15398 */ 15399 if (un->un_retry_bp == NULL) { 15400 ASSERT(un->un_retry_statp == NULL); 15401 un->un_retry_bp = bp; 15402 15403 /* 15404 * If the user has not specified a delay the command should 15405 * be queued and no timeout should be scheduled. 15406 */ 15407 if (retry_delay == 0) { 15408 /* 15409 * Save the kstat pointer that will be used in the 15410 * call to SD_UPDATE_KSTATS() below, so that 15411 * sd_start_cmds() can correctly decrement the waitq 15412 * count when it is time to transport this command. 15413 */ 15414 un->un_retry_statp = statp; 15415 goto done; 15416 } 15417 } 15418 15419 if (un->un_retry_bp == bp) { 15420 /* 15421 * Save the kstat pointer that will be used in the call to 15422 * SD_UPDATE_KSTATS() below, so that sd_start_cmds() can 15423 * correctly decrement the waitq count when it is time to 15424 * transport this command. 15425 */ 15426 un->un_retry_statp = statp; 15427 15428 /* 15429 * Schedule a timeout if: 15430 * 1) The user has specified a delay. 15431 * 2) There is not a START_STOP_UNIT callback pending. 15432 * 15433 * If no delay has been specified, then it is up to the caller 15434 * to ensure that IO processing continues without stalling. 15435 * Effectively, this means that the caller will issue the 15436 * required call to sd_start_cmds(). 
The START_STOP_UNIT 15437 * callback does this after the START STOP UNIT command has 15438 * completed. In either of these cases we should not schedule 15439 * a timeout callback here. Also don't schedule the timeout if 15440 * an SD_PATH_DIRECT_PRIORITY command is waiting to restart. 15441 */ 15442 if ((retry_delay != 0) && (un->un_startstop_timeid == NULL) && 15443 (un->un_direct_priority_timeid == NULL)) { 15444 un->un_retry_timeid = 15445 timeout(sd_start_retry_command, un, retry_delay); 15446 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15447 "sd_set_retry_bp: setting timeout: un: 0x%p" 15448 " bp:0x%p un_retry_timeid:0x%p\n", 15449 un, bp, un->un_retry_timeid); 15450 } 15451 } else { 15452 /* 15453 * We only get in here if there is already another command 15454 * waiting to be retried. In this case, we just put the 15455 * given command onto the wait queue, so it can be transported 15456 * after the current retry command has completed. 15457 * 15458 * Also we have to make sure that if the command at the head 15459 * of the wait queue is the un_failfast_bp, that we do not 15460 * put ahead of it any other commands that are to be retried. 15461 */ 15462 if ((un->un_failfast_bp != NULL) && 15463 (un->un_failfast_bp == un->un_waitq_headp)) { 15464 /* 15465 * Enqueue this command AFTER the first command on 15466 * the wait queue (which is also un_failfast_bp). 15467 */ 15468 bp->av_forw = un->un_waitq_headp->av_forw; 15469 un->un_waitq_headp->av_forw = bp; 15470 if (un->un_waitq_headp == un->un_waitq_tailp) { 15471 un->un_waitq_tailp = bp; 15472 } 15473 } else { 15474 /* Enqueue this command at the head of the waitq. */ 15475 bp->av_forw = un->un_waitq_headp; 15476 un->un_waitq_headp = bp; 15477 if (un->un_waitq_tailp == NULL) { 15478 un->un_waitq_tailp = bp; 15479 } 15480 } 15481 15482 if (statp == NULL) { 15483 statp = kstat_waitq_enter; 15484 } 15485 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15486 "sd_set_retry_bp: un:0x%p already delayed retry\n", un); 15487 } 15488 15489 done: 15490 if (statp != NULL) { 15491 SD_UPDATE_KSTATS(un, statp, bp); 15492 } 15493 15494 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15495 "sd_set_retry_bp: exit un:0x%p\n", un); 15496 } 15497 15498 15499 /* 15500 * Function: sd_start_retry_command 15501 * 15502 * Description: Start the command that has been waiting on the target's 15503 * retry queue. Called from timeout(9F) context after the 15504 * retry delay interval has expired. 15505 * 15506 * Arguments: arg - pointer to associated softstate for the device. 15507 * 15508 * Context: timeout(9F) thread context. May not sleep. 
15509 */ 15510 15511 static void 15512 sd_start_retry_command(void *arg) 15513 { 15514 struct sd_lun *un = arg; 15515 15516 ASSERT(un != NULL); 15517 ASSERT(!mutex_owned(SD_MUTEX(un))); 15518 15519 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15520 "sd_start_retry_command: entry\n"); 15521 15522 mutex_enter(SD_MUTEX(un)); 15523 15524 un->un_retry_timeid = NULL; 15525 15526 if (un->un_retry_bp != NULL) { 15527 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15528 "sd_start_retry_command: un:0x%p STARTING bp:0x%p\n", 15529 un, un->un_retry_bp); 15530 sd_start_cmds(un, un->un_retry_bp); 15531 } 15532 15533 mutex_exit(SD_MUTEX(un)); 15534 15535 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15536 "sd_start_retry_command: exit\n"); 15537 } 15538 15539 15540 /* 15541 * Function: sd_start_direct_priority_command 15542 * 15543 * Description: Used to re-start an SD_PATH_DIRECT_PRIORITY command that had 15544 * received TRAN_BUSY when we called scsi_transport() to send it 15545 * to the underlying HBA. This function is called from timeout(9F) 15546 * context after the delay interval has expired. 15547 * 15548 * Arguments: arg - pointer to associated buf(9S) to be restarted. 15549 * 15550 * Context: timeout(9F) thread context. May not sleep. 15551 */ 15552 15553 static void 15554 sd_start_direct_priority_command(void *arg) 15555 { 15556 struct buf *priority_bp = arg; 15557 struct sd_lun *un; 15558 15559 ASSERT(priority_bp != NULL); 15560 un = SD_GET_UN(priority_bp); 15561 ASSERT(un != NULL); 15562 ASSERT(!mutex_owned(SD_MUTEX(un))); 15563 15564 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15565 "sd_start_direct_priority_command: entry\n"); 15566 15567 mutex_enter(SD_MUTEX(un)); 15568 un->un_direct_priority_timeid = NULL; 15569 sd_start_cmds(un, priority_bp); 15570 mutex_exit(SD_MUTEX(un)); 15571 15572 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15573 "sd_start_direct_priority_command: exit\n"); 15574 } 15575 15576 15577 /* 15578 * Function: sd_send_request_sense_command 15579 * 15580 * Description: Sends a REQUEST SENSE command to the target 15581 * 15582 * Context: May be called from interrupt context. 15583 */ 15584 15585 static void 15586 sd_send_request_sense_command(struct sd_lun *un, struct buf *bp, 15587 struct scsi_pkt *pktp) 15588 { 15589 ASSERT(bp != NULL); 15590 ASSERT(un != NULL); 15591 ASSERT(mutex_owned(SD_MUTEX(un))); 15592 15593 SD_TRACE(SD_LOG_IO | SD_LOG_ERROR, un, "sd_send_request_sense_command: " 15594 "entry: buf:0x%p\n", bp); 15595 15596 /* 15597 * If we are syncing or dumping, then fail the command to avoid a 15598 * recursive callback into scsi_transport(). Also fail the command 15599 * if we are suspended (legacy behavior). 15600 */ 15601 if (ddi_in_panic() || (un->un_state == SD_STATE_SUSPENDED) || 15602 (un->un_state == SD_STATE_DUMPING)) { 15603 sd_return_failed_command(un, bp, EIO); 15604 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15605 "sd_send_request_sense_command: syncing/dumping, exit\n"); 15606 return; 15607 } 15608 15609 /* 15610 * Retry the failed command and don't issue the request sense if: 15611 * 1) the sense buf is busy 15612 * 2) we have 1 or more outstanding commands on the target 15613 * (the sense data will be cleared or invalidated any way) 15614 * 15615 * Note: There could be an issue with not checking a retry limit here, 15616 * the problem is determining which retry limit to check. 
15617 */ 15618 if ((un->un_sense_isbusy != 0) || (un->un_ncmds_in_transport > 0)) { 15619 /* Don't retry if the command is flagged as non-retryable */ 15620 if ((pktp->pkt_flags & FLAG_DIAGNOSE) == 0) { 15621 sd_retry_command(un, bp, SD_RETRIES_NOCHECK, 15622 NULL, NULL, 0, un->un_busy_timeout, 15623 kstat_waitq_enter); 15624 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15625 "sd_send_request_sense_command: " 15626 "at full throttle, retrying exit\n"); 15627 } else { 15628 sd_return_failed_command(un, bp, EIO); 15629 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15630 "sd_send_request_sense_command: " 15631 "at full throttle, non-retryable exit\n"); 15632 } 15633 return; 15634 } 15635 15636 sd_mark_rqs_busy(un, bp); 15637 sd_start_cmds(un, un->un_rqs_bp); 15638 15639 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15640 "sd_send_request_sense_command: exit\n"); 15641 } 15642 15643 15644 /* 15645 * Function: sd_mark_rqs_busy 15646 * 15647 * Description: Indicate that the request sense bp for this instance is 15648 * in use. 15649 * 15650 * Context: May be called under interrupt context 15651 */ 15652 15653 static void 15654 sd_mark_rqs_busy(struct sd_lun *un, struct buf *bp) 15655 { 15656 struct sd_xbuf *sense_xp; 15657 15658 ASSERT(un != NULL); 15659 ASSERT(bp != NULL); 15660 ASSERT(mutex_owned(SD_MUTEX(un))); 15661 ASSERT(un->un_sense_isbusy == 0); 15662 15663 SD_TRACE(SD_LOG_IO_CORE, un, "sd_mark_rqs_busy: entry: " 15664 "buf:0x%p xp:0x%p un:0x%p\n", bp, SD_GET_XBUF(bp), un); 15665 15666 sense_xp = SD_GET_XBUF(un->un_rqs_bp); 15667 ASSERT(sense_xp != NULL); 15668 15669 SD_INFO(SD_LOG_IO, un, 15670 "sd_mark_rqs_busy: entry: sense_xp:0x%p\n", sense_xp); 15671 15672 ASSERT(sense_xp->xb_pktp != NULL); 15673 ASSERT((sense_xp->xb_pktp->pkt_flags & (FLAG_SENSING | FLAG_HEAD)) 15674 == (FLAG_SENSING | FLAG_HEAD)); 15675 15676 un->un_sense_isbusy = 1; 15677 un->un_rqs_bp->b_resid = 0; 15678 sense_xp->xb_pktp->pkt_resid = 0; 15679 sense_xp->xb_pktp->pkt_reason = 0; 15680 15681 /* So we can get back the bp at interrupt time! */ 15682 sense_xp->xb_sense_bp = bp; 15683 15684 bzero(un->un_rqs_bp->b_un.b_addr, SENSE_LENGTH); 15685 15686 /* 15687 * Mark this buf as awaiting sense data. (This is already set in 15688 * the pkt_flags for the RQS packet.) 15689 */ 15690 ((SD_GET_XBUF(bp))->xb_pktp)->pkt_flags |= FLAG_SENSING; 15691 15692 /* Request sense down same path */ 15693 if (scsi_pkt_allocated_correctly((SD_GET_XBUF(bp))->xb_pktp) && 15694 ((SD_GET_XBUF(bp))->xb_pktp)->pkt_path_instance) 15695 sense_xp->xb_pktp->pkt_path_instance = 15696 ((SD_GET_XBUF(bp))->xb_pktp)->pkt_path_instance; 15697 15698 sense_xp->xb_retry_count = 0; 15699 sense_xp->xb_victim_retry_count = 0; 15700 sense_xp->xb_ua_retry_count = 0; 15701 sense_xp->xb_nr_retry_count = 0; 15702 sense_xp->xb_dma_resid = 0; 15703 15704 /* Clean up the fields for auto-request sense */ 15705 sense_xp->xb_sense_status = 0; 15706 sense_xp->xb_sense_state = 0; 15707 sense_xp->xb_sense_resid = 0; 15708 bzero(sense_xp->xb_sense_data, sizeof (sense_xp->xb_sense_data)); 15709 15710 SD_TRACE(SD_LOG_IO_CORE, un, "sd_mark_rqs_busy: exit\n"); 15711 } 15712 15713 15714 /* 15715 * Function: sd_mark_rqs_idle 15716 * 15717 * Description: SD_MUTEX must be held continuously through this routine 15718 * to prevent reuse of the rqs struct before the caller can 15719 * complete it's processing. 
15720 * 15721 * Return Code: Pointer to the RQS buf 15722 * 15723 * Context: May be called under interrupt context 15724 */ 15725 15726 static struct buf * 15727 sd_mark_rqs_idle(struct sd_lun *un, struct sd_xbuf *sense_xp) 15728 { 15729 struct buf *bp; 15730 ASSERT(un != NULL); 15731 ASSERT(sense_xp != NULL); 15732 ASSERT(mutex_owned(SD_MUTEX(un))); 15733 ASSERT(un->un_sense_isbusy != 0); 15734 15735 un->un_sense_isbusy = 0; 15736 bp = sense_xp->xb_sense_bp; 15737 sense_xp->xb_sense_bp = NULL; 15738 15739 /* This pkt is no longer interested in getting sense data */ 15740 ((SD_GET_XBUF(bp))->xb_pktp)->pkt_flags &= ~FLAG_SENSING; 15741 15742 return (bp); 15743 } 15744 15745 15746 15747 /* 15748 * Function: sd_alloc_rqs 15749 * 15750 * Description: Set up the unit to receive auto request sense data 15751 * 15752 * Return Code: DDI_SUCCESS or DDI_FAILURE 15753 * 15754 * Context: Called under attach(9E) context 15755 */ 15756 15757 static int 15758 sd_alloc_rqs(struct scsi_device *devp, struct sd_lun *un) 15759 { 15760 struct sd_xbuf *xp; 15761 15762 ASSERT(un != NULL); 15763 ASSERT(!mutex_owned(SD_MUTEX(un))); 15764 ASSERT(un->un_rqs_bp == NULL); 15765 ASSERT(un->un_rqs_pktp == NULL); 15766 15767 /* 15768 * First allocate the required buf and scsi_pkt structs, then set up 15769 * the CDB in the scsi_pkt for a REQUEST SENSE command. 15770 */ 15771 un->un_rqs_bp = scsi_alloc_consistent_buf(&devp->sd_address, NULL, 15772 MAX_SENSE_LENGTH, B_READ, SLEEP_FUNC, NULL); 15773 if (un->un_rqs_bp == NULL) { 15774 return (DDI_FAILURE); 15775 } 15776 15777 un->un_rqs_pktp = scsi_init_pkt(&devp->sd_address, NULL, un->un_rqs_bp, 15778 CDB_GROUP0, 1, 0, PKT_CONSISTENT, SLEEP_FUNC, NULL); 15779 15780 if (un->un_rqs_pktp == NULL) { 15781 sd_free_rqs(un); 15782 return (DDI_FAILURE); 15783 } 15784 15785 /* Set up the CDB in the scsi_pkt for a REQUEST SENSE command. */ 15786 (void) scsi_setup_cdb((union scsi_cdb *)un->un_rqs_pktp->pkt_cdbp, 15787 SCMD_REQUEST_SENSE, 0, MAX_SENSE_LENGTH, 0); 15788 15789 SD_FILL_SCSI1_LUN(un, un->un_rqs_pktp); 15790 15791 /* Set up the other needed members in the ARQ scsi_pkt. */ 15792 un->un_rqs_pktp->pkt_comp = sdintr; 15793 un->un_rqs_pktp->pkt_time = sd_io_time; 15794 un->un_rqs_pktp->pkt_flags |= 15795 (FLAG_SENSING | FLAG_HEAD); /* (1222170) */ 15796 15797 /* 15798 * Allocate & init the sd_xbuf struct for the RQS command. Do not 15799 * provide any intpkt, destroypkt routines as we take care of 15800 * scsi_pkt allocation/freeing here and in sd_free_rqs(). 15801 */ 15802 xp = kmem_alloc(sizeof (struct sd_xbuf), KM_SLEEP); 15803 sd_xbuf_init(un, un->un_rqs_bp, xp, SD_CHAIN_NULL, NULL); 15804 xp->xb_pktp = un->un_rqs_pktp; 15805 SD_INFO(SD_LOG_ATTACH_DETACH, un, 15806 "sd_alloc_rqs: un 0x%p, rqs xp 0x%p, pkt 0x%p, buf 0x%p\n", 15807 un, xp, un->un_rqs_pktp, un->un_rqs_bp); 15808 15809 /* 15810 * Save the pointer to the request sense private bp so it can 15811 * be retrieved in sdintr. 15812 */ 15813 un->un_rqs_pktp->pkt_private = un->un_rqs_bp; 15814 ASSERT(un->un_rqs_bp->b_private == xp); 15815 15816 /* 15817 * See if the HBA supports auto-request sense for the specified 15818 * target/lun. If it does, then try to enable it (if not already 15819 * enabled). 15820 * 15821 * Note: For some HBAs (ifp & sf), scsi_ifsetcap will always return 15822 * failure, while for other HBAs (pln) scsi_ifsetcap will always 15823 * return success. However, in both of these cases ARQ is always 15824 * enabled and scsi_ifgetcap will always return true. 
The best approach 15825 * is to issue the scsi_ifgetcap() first, then try the scsi_ifsetcap(). 15826 * 15827 * The 3rd case is the HBA (adp) always return enabled on 15828 * scsi_ifgetgetcap even when it's not enable, the best approach 15829 * is issue a scsi_ifsetcap then a scsi_ifgetcap 15830 * Note: this case is to circumvent the Adaptec bug. (x86 only) 15831 */ 15832 15833 if (un->un_f_is_fibre == TRUE) { 15834 un->un_f_arq_enabled = TRUE; 15835 } else { 15836 #if defined(__i386) || defined(__amd64) 15837 /* 15838 * Circumvent the Adaptec bug, remove this code when 15839 * the bug is fixed 15840 */ 15841 (void) scsi_ifsetcap(SD_ADDRESS(un), "auto-rqsense", 1, 1); 15842 #endif 15843 switch (scsi_ifgetcap(SD_ADDRESS(un), "auto-rqsense", 1)) { 15844 case 0: 15845 SD_INFO(SD_LOG_ATTACH_DETACH, un, 15846 "sd_alloc_rqs: HBA supports ARQ\n"); 15847 /* 15848 * ARQ is supported by this HBA but currently is not 15849 * enabled. Attempt to enable it and if successful then 15850 * mark this instance as ARQ enabled. 15851 */ 15852 if (scsi_ifsetcap(SD_ADDRESS(un), "auto-rqsense", 1, 1) 15853 == 1) { 15854 /* Successfully enabled ARQ in the HBA */ 15855 SD_INFO(SD_LOG_ATTACH_DETACH, un, 15856 "sd_alloc_rqs: ARQ enabled\n"); 15857 un->un_f_arq_enabled = TRUE; 15858 } else { 15859 /* Could not enable ARQ in the HBA */ 15860 SD_INFO(SD_LOG_ATTACH_DETACH, un, 15861 "sd_alloc_rqs: failed ARQ enable\n"); 15862 un->un_f_arq_enabled = FALSE; 15863 } 15864 break; 15865 case 1: 15866 /* 15867 * ARQ is supported by this HBA and is already enabled. 15868 * Just mark ARQ as enabled for this instance. 15869 */ 15870 SD_INFO(SD_LOG_ATTACH_DETACH, un, 15871 "sd_alloc_rqs: ARQ already enabled\n"); 15872 un->un_f_arq_enabled = TRUE; 15873 break; 15874 default: 15875 /* 15876 * ARQ is not supported by this HBA; disable it for this 15877 * instance. 15878 */ 15879 SD_INFO(SD_LOG_ATTACH_DETACH, un, 15880 "sd_alloc_rqs: HBA does not support ARQ\n"); 15881 un->un_f_arq_enabled = FALSE; 15882 break; 15883 } 15884 } 15885 15886 return (DDI_SUCCESS); 15887 } 15888 15889 15890 /* 15891 * Function: sd_free_rqs 15892 * 15893 * Description: Cleanup for the pre-instance RQS command. 15894 * 15895 * Context: Kernel thread context 15896 */ 15897 15898 static void 15899 sd_free_rqs(struct sd_lun *un) 15900 { 15901 ASSERT(un != NULL); 15902 15903 SD_TRACE(SD_LOG_IO_CORE, un, "sd_free_rqs: entry\n"); 15904 15905 /* 15906 * If consistent memory is bound to a scsi_pkt, the pkt 15907 * has to be destroyed *before* freeing the consistent memory. 15908 * Don't change the sequence of this operations. 15909 * scsi_destroy_pkt() might access memory, which isn't allowed, 15910 * after it was freed in scsi_free_consistent_buf(). 15911 */ 15912 if (un->un_rqs_pktp != NULL) { 15913 scsi_destroy_pkt(un->un_rqs_pktp); 15914 un->un_rqs_pktp = NULL; 15915 } 15916 15917 if (un->un_rqs_bp != NULL) { 15918 struct sd_xbuf *xp = SD_GET_XBUF(un->un_rqs_bp); 15919 if (xp != NULL) { 15920 kmem_free(xp, sizeof (struct sd_xbuf)); 15921 } 15922 scsi_free_consistent_buf(un->un_rqs_bp); 15923 un->un_rqs_bp = NULL; 15924 } 15925 SD_TRACE(SD_LOG_IO_CORE, un, "sd_free_rqs: exit\n"); 15926 } 15927 15928 15929 15930 /* 15931 * Function: sd_reduce_throttle 15932 * 15933 * Description: Reduces the maximum # of outstanding commands on a 15934 * target to the current number of outstanding commands. 15935 * Queues a tiemout(9F) callback to restore the limit 15936 * after a specified interval has elapsed. 
15937 * Typically used when we get a TRAN_BUSY return code 15938 * back from scsi_transport(). 15939 * 15940 * Arguments: un - ptr to the sd_lun softstate struct 15941 * throttle_type: SD_THROTTLE_TRAN_BUSY or SD_THROTTLE_QFULL 15942 * 15943 * Context: May be called from interrupt context 15944 */ 15945 15946 static void 15947 sd_reduce_throttle(struct sd_lun *un, int throttle_type) 15948 { 15949 ASSERT(un != NULL); 15950 ASSERT(mutex_owned(SD_MUTEX(un))); 15951 ASSERT(un->un_ncmds_in_transport >= 0); 15952 15953 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sd_reduce_throttle: " 15954 "entry: un:0x%p un_throttle:%d un_ncmds_in_transport:%d\n", 15955 un, un->un_throttle, un->un_ncmds_in_transport); 15956 15957 if (un->un_throttle > 1) { 15958 if (un->un_f_use_adaptive_throttle == TRUE) { 15959 switch (throttle_type) { 15960 case SD_THROTTLE_TRAN_BUSY: 15961 if (un->un_busy_throttle == 0) { 15962 un->un_busy_throttle = un->un_throttle; 15963 } 15964 break; 15965 case SD_THROTTLE_QFULL: 15966 un->un_busy_throttle = 0; 15967 break; 15968 default: 15969 ASSERT(FALSE); 15970 } 15971 15972 if (un->un_ncmds_in_transport > 0) { 15973 un->un_throttle = un->un_ncmds_in_transport; 15974 } 15975 15976 } else { 15977 if (un->un_ncmds_in_transport == 0) { 15978 un->un_throttle = 1; 15979 } else { 15980 un->un_throttle = un->un_ncmds_in_transport; 15981 } 15982 } 15983 } 15984 15985 /* Reschedule the timeout if none is currently active */ 15986 if (un->un_reset_throttle_timeid == NULL) { 15987 un->un_reset_throttle_timeid = timeout(sd_restore_throttle, 15988 un, SD_THROTTLE_RESET_INTERVAL); 15989 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15990 "sd_reduce_throttle: timeout scheduled!\n"); 15991 } 15992 15993 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sd_reduce_throttle: " 15994 "exit: un:0x%p un_throttle:%d\n", un, un->un_throttle); 15995 } 15996 15997 15998 15999 /* 16000 * Function: sd_restore_throttle 16001 * 16002 * Description: Callback function for timeout(9F). Resets the current 16003 * value of un->un_throttle to its default. 16004 * 16005 * Arguments: arg - pointer to associated softstate for the device. 16006 * 16007 * Context: May be called from interrupt context 16008 */ 16009 16010 static void 16011 sd_restore_throttle(void *arg) 16012 { 16013 struct sd_lun *un = arg; 16014 16015 ASSERT(un != NULL); 16016 ASSERT(!mutex_owned(SD_MUTEX(un))); 16017 16018 mutex_enter(SD_MUTEX(un)); 16019 16020 SD_TRACE(SD_LOG_IO | SD_LOG_ERROR, un, "sd_restore_throttle: " 16021 "entry: un:0x%p un_throttle:%d\n", un, un->un_throttle); 16022 16023 un->un_reset_throttle_timeid = NULL; 16024 16025 if (un->un_f_use_adaptive_throttle == TRUE) { 16026 /* 16027 * If un_busy_throttle is nonzero, then it contains the 16028 * value that un_throttle was when we got a TRAN_BUSY back 16029 * from scsi_transport(). We want to revert back to this 16030 * value. 16031 * 16032 * In the QFULL case, the throttle limit will incrementally 16033 * increase until it reaches max throttle. 16034 */ 16035 if (un->un_busy_throttle > 0) { 16036 un->un_throttle = un->un_busy_throttle; 16037 un->un_busy_throttle = 0; 16038 } else { 16039 /* 16040 * increase throttle by 10% open gate slowly, schedule 16041 * another restore if saved throttle has not been 16042 * reached 16043 */ 16044 short throttle; 16045 if (sd_qfull_throttle_enable) { 16046 throttle = un->un_throttle + 16047 max((un->un_throttle / 10), 1); 16048 un->un_throttle = 16049 (throttle < un->un_saved_throttle) ? 
16050 throttle : un->un_saved_throttle; 16051 if (un->un_throttle < un->un_saved_throttle) { 16052 un->un_reset_throttle_timeid = 16053 timeout(sd_restore_throttle, 16054 un, 16055 SD_QFULL_THROTTLE_RESET_INTERVAL); 16056 } 16057 } 16058 } 16059 16060 /* 16061 * If un_throttle has fallen below the low-water mark, we 16062 * restore the maximum value here (and allow it to ratchet 16063 * down again if necessary). 16064 */ 16065 if (un->un_throttle < un->un_min_throttle) { 16066 un->un_throttle = un->un_saved_throttle; 16067 } 16068 } else { 16069 SD_TRACE(SD_LOG_IO | SD_LOG_ERROR, un, "sd_restore_throttle: " 16070 "restoring limit from 0x%x to 0x%x\n", 16071 un->un_throttle, un->un_saved_throttle); 16072 un->un_throttle = un->un_saved_throttle; 16073 } 16074 16075 SD_TRACE(SD_LOG_IO | SD_LOG_ERROR, un, 16076 "sd_restore_throttle: calling sd_start_cmds!\n"); 16077 16078 sd_start_cmds(un, NULL); 16079 16080 SD_TRACE(SD_LOG_IO | SD_LOG_ERROR, un, 16081 "sd_restore_throttle: exit: un:0x%p un_throttle:%d\n", 16082 un, un->un_throttle); 16083 16084 mutex_exit(SD_MUTEX(un)); 16085 16086 SD_TRACE(SD_LOG_IO | SD_LOG_ERROR, un, "sd_restore_throttle: exit\n"); 16087 } 16088 16089 /* 16090 * Function: sdrunout 16091 * 16092 * Description: Callback routine for scsi_init_pkt when a resource allocation 16093 * fails. 16094 * 16095 * Arguments: arg - a pointer to the sd_lun unit struct for the particular 16096 * soft state instance. 16097 * 16098 * Return Code: The scsi_init_pkt routine allows for the callback function to 16099 * return a 0 indicating the callback should be rescheduled or a 1 16100 * indicating not to reschedule. This routine always returns 1 16101 * because the driver always provides a callback function to 16102 * scsi_init_pkt. This results in a callback always being scheduled 16103 * (via the scsi_init_pkt callback implementation) if a resource 16104 * failure occurs. 16105 * 16106 * Context: This callback function may not block or call routines that block 16107 * 16108 * Note: Using the scsi_init_pkt callback facility can result in an I/O 16109 * request persisting at the head of the list which cannot be 16110 * satisfied even after multiple retries. In the future the driver 16111 * may implement some time of maximum runout count before failing 16112 * an I/O. 16113 */ 16114 16115 static int 16116 sdrunout(caddr_t arg) 16117 { 16118 struct sd_lun *un = (struct sd_lun *)arg; 16119 16120 ASSERT(un != NULL); 16121 ASSERT(!mutex_owned(SD_MUTEX(un))); 16122 16123 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sdrunout: entry\n"); 16124 16125 mutex_enter(SD_MUTEX(un)); 16126 sd_start_cmds(un, NULL); 16127 mutex_exit(SD_MUTEX(un)); 16128 /* 16129 * This callback routine always returns 1 (i.e. do not reschedule) 16130 * because we always specify sdrunout as the callback handler for 16131 * scsi_init_pkt inside the call to sd_start_cmds. 16132 */ 16133 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sdrunout: exit\n"); 16134 return (1); 16135 } 16136 16137 16138 /* 16139 * Function: sdintr 16140 * 16141 * Description: Completion callback routine for scsi_pkt(9S) structs 16142 * sent to the HBA driver via scsi_transport(9F). 
16143 * 16144 * Context: Interrupt context 16145 */ 16146 16147 static void 16148 sdintr(struct scsi_pkt *pktp) 16149 { 16150 struct buf *bp; 16151 struct sd_xbuf *xp; 16152 struct sd_lun *un; 16153 size_t actual_len; 16154 sd_ssc_t *sscp; 16155 16156 ASSERT(pktp != NULL); 16157 bp = (struct buf *)pktp->pkt_private; 16158 ASSERT(bp != NULL); 16159 xp = SD_GET_XBUF(bp); 16160 ASSERT(xp != NULL); 16161 ASSERT(xp->xb_pktp != NULL); 16162 un = SD_GET_UN(bp); 16163 ASSERT(un != NULL); 16164 ASSERT(!mutex_owned(SD_MUTEX(un))); 16165 16166 #ifdef SD_FAULT_INJECTION 16167 16168 SD_INFO(SD_LOG_IOERR, un, "sdintr: sdintr calling Fault injection\n"); 16169 /* SD FaultInjection */ 16170 sd_faultinjection(pktp); 16171 16172 #endif /* SD_FAULT_INJECTION */ 16173 16174 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sdintr: entry: buf:0x%p," 16175 " xp:0x%p, un:0x%p\n", bp, xp, un); 16176 16177 mutex_enter(SD_MUTEX(un)); 16178 16179 ASSERT(un->un_fm_private != NULL); 16180 sscp = &((struct sd_fm_internal *)(un->un_fm_private))->fm_ssc; 16181 ASSERT(sscp != NULL); 16182 16183 /* Reduce the count of the #commands currently in transport */ 16184 un->un_ncmds_in_transport--; 16185 ASSERT(un->un_ncmds_in_transport >= 0); 16186 16187 /* Increment counter to indicate that the callback routine is active */ 16188 un->un_in_callback++; 16189 16190 SD_UPDATE_KSTATS(un, kstat_runq_exit, bp); 16191 16192 #ifdef SDDEBUG 16193 if (bp == un->un_retry_bp) { 16194 SD_TRACE(SD_LOG_IO | SD_LOG_ERROR, un, "sdintr: " 16195 "un:0x%p: GOT retry_bp:0x%p un_ncmds_in_transport:%d\n", 16196 un, un->un_retry_bp, un->un_ncmds_in_transport); 16197 } 16198 #endif 16199 16200 /* 16201 * If pkt_reason is CMD_DEV_GONE, fail the command, and update the media 16202 * state if needed. 16203 */ 16204 if (pktp->pkt_reason == CMD_DEV_GONE) { 16205 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 16206 "Command failed to complete...Device is gone\n"); 16207 if (un->un_mediastate != DKIO_DEV_GONE) { 16208 un->un_mediastate = DKIO_DEV_GONE; 16209 cv_broadcast(&un->un_state_cv); 16210 } 16211 sd_return_failed_command(un, bp, EIO); 16212 goto exit; 16213 } 16214 16215 if (pktp->pkt_state & STATE_XARQ_DONE) { 16216 SD_TRACE(SD_LOG_COMMON, un, 16217 "sdintr: extra sense data received. pkt=%p\n", pktp); 16218 } 16219 16220 /* 16221 * First see if the pkt has auto-request sense data with it.... 16222 * Look at the packet state first so we don't take a performance 16223 * hit looking at the arq enabled flag unless absolutely necessary. 16224 */ 16225 if ((pktp->pkt_state & STATE_ARQ_DONE) && 16226 (un->un_f_arq_enabled == TRUE)) { 16227 /* 16228 * The HBA did an auto request sense for this command so check 16229 * for FLAG_DIAGNOSE. If set this indicates a uscsi or internal 16230 * driver command that should not be retried. 16231 */ 16232 if ((pktp->pkt_flags & FLAG_DIAGNOSE) != 0) { 16233 /* 16234 * Save the relevant sense info into the xp for the 16235 * original cmd. 
16236 */ 16237 struct scsi_arq_status *asp; 16238 asp = (struct scsi_arq_status *)(pktp->pkt_scbp); 16239 xp->xb_sense_status = 16240 *((uchar_t *)(&(asp->sts_rqpkt_status))); 16241 xp->xb_sense_state = asp->sts_rqpkt_state; 16242 xp->xb_sense_resid = asp->sts_rqpkt_resid; 16243 if (pktp->pkt_state & STATE_XARQ_DONE) { 16244 actual_len = MAX_SENSE_LENGTH - 16245 xp->xb_sense_resid; 16246 bcopy(&asp->sts_sensedata, xp->xb_sense_data, 16247 MAX_SENSE_LENGTH); 16248 } else { 16249 if (xp->xb_sense_resid > SENSE_LENGTH) { 16250 actual_len = MAX_SENSE_LENGTH - 16251 xp->xb_sense_resid; 16252 } else { 16253 actual_len = SENSE_LENGTH - 16254 xp->xb_sense_resid; 16255 } 16256 if (xp->xb_pkt_flags & SD_XB_USCSICMD) { 16257 if ((((struct uscsi_cmd *) 16258 (xp->xb_pktinfo))->uscsi_rqlen) > 16259 actual_len) { 16260 xp->xb_sense_resid = 16261 (((struct uscsi_cmd *) 16262 (xp->xb_pktinfo))-> 16263 uscsi_rqlen) - actual_len; 16264 } else { 16265 xp->xb_sense_resid = 0; 16266 } 16267 } 16268 bcopy(&asp->sts_sensedata, xp->xb_sense_data, 16269 SENSE_LENGTH); 16270 } 16271 16272 /* fail the command */ 16273 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 16274 "sdintr: arq done and FLAG_DIAGNOSE set\n"); 16275 sd_return_failed_command(un, bp, EIO); 16276 goto exit; 16277 } 16278 16279 #if (defined(__i386) || defined(__amd64)) /* DMAFREE for x86 only */ 16280 /* 16281 * We want to either retry or fail this command, so free 16282 * the DMA resources here. If we retry the command then 16283 * the DMA resources will be reallocated in sd_start_cmds(). 16284 * Note that when PKT_DMA_PARTIAL is used, this reallocation 16285 * causes the *entire* transfer to start over again from the 16286 * beginning of the request, even for PARTIAL chunks that 16287 * have already transferred successfully. 16288 */ 16289 if ((un->un_f_is_fibre == TRUE) && 16290 ((xp->xb_pkt_flags & SD_XB_USCSICMD) == 0) && 16291 ((pktp->pkt_flags & FLAG_SENSING) == 0)) { 16292 scsi_dmafree(pktp); 16293 xp->xb_pkt_flags |= SD_XB_DMA_FREED; 16294 } 16295 #endif 16296 16297 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 16298 "sdintr: arq done, sd_handle_auto_request_sense\n"); 16299 16300 sd_handle_auto_request_sense(un, bp, xp, pktp); 16301 goto exit; 16302 } 16303 16304 /* Next see if this is the REQUEST SENSE pkt for the instance */ 16305 if (pktp->pkt_flags & FLAG_SENSING) { 16306 /* This pktp is from the unit's REQUEST_SENSE command */ 16307 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 16308 "sdintr: sd_handle_request_sense\n"); 16309 sd_handle_request_sense(un, bp, xp, pktp); 16310 goto exit; 16311 } 16312 16313 /* 16314 * Check to see if the command successfully completed as requested; 16315 * this is the most common case (and also the hot performance path). 16316 * 16317 * Requirements for successful completion are: 16318 * pkt_reason is CMD_CMPLT and packet status is status good. 16319 * In addition: 16320 * - A residual of zero indicates successful completion no matter what 16321 * the command is. 16322 * - If the residual is not zero and the command is not a read or 16323 * write, then it's still defined as successful completion. In other 16324 * words, if the command is a read or write the residual must be 16325 * zero for successful completion. 16326 * - If the residual is not zero and the command is a read or 16327 * write, and it's a USCSICMD, then it's still defined as 16328 * successful completion. 
16329 */ 16330 if ((pktp->pkt_reason == CMD_CMPLT) && 16331 (SD_GET_PKT_STATUS(pktp) == STATUS_GOOD)) { 16332 16333 /* 16334 * Since this command is returned with a good status, we 16335 * can reset the count for Sonoma failover. 16336 */ 16337 un->un_sonoma_failure_count = 0; 16338 16339 /* 16340 * Return all USCSI commands on good status 16341 */ 16342 if (pktp->pkt_resid == 0) { 16343 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 16344 "sdintr: returning command for resid == 0\n"); 16345 } else if (((SD_GET_PKT_OPCODE(pktp) & 0x1F) != SCMD_READ) && 16346 ((SD_GET_PKT_OPCODE(pktp) & 0x1F) != SCMD_WRITE)) { 16347 SD_UPDATE_B_RESID(bp, pktp); 16348 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 16349 "sdintr: returning command for resid != 0\n"); 16350 } else if (xp->xb_pkt_flags & SD_XB_USCSICMD) { 16351 SD_UPDATE_B_RESID(bp, pktp); 16352 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 16353 "sdintr: returning uscsi command\n"); 16354 } else { 16355 goto not_successful; 16356 } 16357 sd_return_command(un, bp); 16358 16359 /* 16360 * Decrement counter to indicate that the callback routine 16361 * is done. 16362 */ 16363 un->un_in_callback--; 16364 ASSERT(un->un_in_callback >= 0); 16365 mutex_exit(SD_MUTEX(un)); 16366 16367 return; 16368 } 16369 16370 not_successful: 16371 16372 #if (defined(__i386) || defined(__amd64)) /* DMAFREE for x86 only */ 16373 /* 16374 * The following is based upon knowledge of the underlying transport 16375 * and its use of DMA resources. This code should be removed when 16376 * PKT_DMA_PARTIAL support is taken out of the disk driver in favor 16377 * of the new PKT_CMD_BREAKUP protocol. See also sd_initpkt_for_buf() 16378 * and sd_start_cmds(). 16379 * 16380 * Free any DMA resources associated with this command if there 16381 * is a chance it could be retried or enqueued for later retry. 16382 * If we keep the DMA binding then mpxio cannot reissue the 16383 * command on another path whenever a path failure occurs. 16384 * 16385 * Note that when PKT_DMA_PARTIAL is used, free/reallocation 16386 * causes the *entire* transfer to start over again from the 16387 * beginning of the request, even for PARTIAL chunks that 16388 * have already transferred successfully. 16389 * 16390 * This is only done for non-uscsi commands (and also skipped for the 16391 * driver's internal RQS command). Also just do this for Fibre Channel 16392 * devices as these are the only ones that support mpxio. 16393 */ 16394 if ((un->un_f_is_fibre == TRUE) && 16395 ((xp->xb_pkt_flags & SD_XB_USCSICMD) == 0) && 16396 ((pktp->pkt_flags & FLAG_SENSING) == 0)) { 16397 scsi_dmafree(pktp); 16398 xp->xb_pkt_flags |= SD_XB_DMA_FREED; 16399 } 16400 #endif 16401 16402 /* 16403 * The command did not successfully complete as requested so check 16404 * for FLAG_DIAGNOSE. If set this indicates a uscsi or internal 16405 * driver command that should not be retried so just return. If 16406 * FLAG_DIAGNOSE is not set the error will be processed below. 16407 */ 16408 if ((pktp->pkt_flags & FLAG_DIAGNOSE) != 0) { 16409 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 16410 "sdintr: FLAG_DIAGNOSE: sd_return_failed_command\n"); 16411 /* 16412 * Issue a request sense if a check condition caused the error 16413 * (we handle the auto request sense case above), otherwise 16414 * just fail the command. 
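 *
 * A sketch of the decision that follows (same logic, condensed):
 *
 *	if (pkt_reason == CMD_CMPLT && status == STATUS_CHECK)
 *		sd_send_request_sense_command();	(decode later)
 *	else
 *		sd_return_failed_command(EIO);		(no retries)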
16415 */ 16416 if ((pktp->pkt_reason == CMD_CMPLT) && 16417 (SD_GET_PKT_STATUS(pktp) == STATUS_CHECK)) { 16418 sd_send_request_sense_command(un, bp, pktp); 16419 } else { 16420 sd_return_failed_command(un, bp, EIO); 16421 } 16422 goto exit; 16423 } 16424 16425 /* 16426 * The command did not successfully complete as requested so process 16427 * the error, retry, and/or attempt recovery. 16428 */ 16429 switch (pktp->pkt_reason) { 16430 case CMD_CMPLT: 16431 switch (SD_GET_PKT_STATUS(pktp)) { 16432 case STATUS_GOOD: 16433 /* 16434 * The command completed successfully with a non-zero 16435 * residual 16436 */ 16437 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 16438 "sdintr: STATUS_GOOD \n"); 16439 sd_pkt_status_good(un, bp, xp, pktp); 16440 break; 16441 16442 case STATUS_CHECK: 16443 case STATUS_TERMINATED: 16444 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 16445 "sdintr: STATUS_TERMINATED | STATUS_CHECK\n"); 16446 sd_pkt_status_check_condition(un, bp, xp, pktp); 16447 break; 16448 16449 case STATUS_BUSY: 16450 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 16451 "sdintr: STATUS_BUSY\n"); 16452 sd_pkt_status_busy(un, bp, xp, pktp); 16453 break; 16454 16455 case STATUS_RESERVATION_CONFLICT: 16456 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 16457 "sdintr: STATUS_RESERVATION_CONFLICT\n"); 16458 sd_pkt_status_reservation_conflict(un, bp, xp, pktp); 16459 break; 16460 16461 case STATUS_QFULL: 16462 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 16463 "sdintr: STATUS_QFULL\n"); 16464 sd_pkt_status_qfull(un, bp, xp, pktp); 16465 break; 16466 16467 case STATUS_MET: 16468 case STATUS_INTERMEDIATE: 16469 case STATUS_SCSI2: 16470 case STATUS_INTERMEDIATE_MET: 16471 case STATUS_ACA_ACTIVE: 16472 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 16473 "Unexpected SCSI status received: 0x%x\n", 16474 SD_GET_PKT_STATUS(pktp)); 16475 /* 16476 * Mark the ssc_flags when an invalid status code is 16477 * detected for a non-USCSI command.
16478 */ 16479 if (!(xp->xb_pkt_flags & SD_XB_USCSICMD)) { 16480 sd_ssc_set_info(sscp, SSC_FLAGS_INVALID_STATUS, 16481 0, "stat-code"); 16482 } 16483 sd_return_failed_command(un, bp, EIO); 16484 break; 16485 16486 default: 16487 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 16488 "Invalid SCSI status received: 0x%x\n", 16489 SD_GET_PKT_STATUS(pktp)); 16490 if (!(xp->xb_pkt_flags & SD_XB_USCSICMD)) { 16491 sd_ssc_set_info(sscp, SSC_FLAGS_INVALID_STATUS, 16492 0, "stat-code"); 16493 } 16494 sd_return_failed_command(un, bp, EIO); 16495 break; 16496 16497 } 16498 break; 16499 16500 case CMD_INCOMPLETE: 16501 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 16502 "sdintr: CMD_INCOMPLETE\n"); 16503 sd_pkt_reason_cmd_incomplete(un, bp, xp, pktp); 16504 break; 16505 case CMD_TRAN_ERR: 16506 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 16507 "sdintr: CMD_TRAN_ERR\n"); 16508 sd_pkt_reason_cmd_tran_err(un, bp, xp, pktp); 16509 break; 16510 case CMD_RESET: 16511 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 16512 "sdintr: CMD_RESET \n"); 16513 sd_pkt_reason_cmd_reset(un, bp, xp, pktp); 16514 break; 16515 case CMD_ABORTED: 16516 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 16517 "sdintr: CMD_ABORTED \n"); 16518 sd_pkt_reason_cmd_aborted(un, bp, xp, pktp); 16519 break; 16520 case CMD_TIMEOUT: 16521 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 16522 "sdintr: CMD_TIMEOUT\n"); 16523 sd_pkt_reason_cmd_timeout(un, bp, xp, pktp); 16524 break; 16525 case CMD_UNX_BUS_FREE: 16526 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 16527 "sdintr: CMD_UNX_BUS_FREE \n"); 16528 sd_pkt_reason_cmd_unx_bus_free(un, bp, xp, pktp); 16529 break; 16530 case CMD_TAG_REJECT: 16531 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 16532 "sdintr: CMD_TAG_REJECT\n"); 16533 sd_pkt_reason_cmd_tag_reject(un, bp, xp, pktp); 16534 break; 16535 default: 16536 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 16537 "sdintr: default\n"); 16538 /* 16539 * Mark the ssc_flags for detecting an invalid pkt_reason. 16540 */ 16541 if (!(xp->xb_pkt_flags & SD_XB_USCSICMD)) { 16542 sd_ssc_set_info(sscp, SSC_FLAGS_INVALID_PKT_REASON, 16543 0, "pkt-reason"); 16544 } 16545 sd_pkt_reason_default(un, bp, xp, pktp); 16546 break; 16547 } 16548 16549 exit: 16550 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sdintr: exit\n"); 16551 16552 /* Decrement counter to indicate that the callback routine is done. */ 16553 un->un_in_callback--; 16554 ASSERT(un->un_in_callback >= 0); 16555 16556 /* 16557 * At this point, the pkt has been dispatched, i.e., it is either 16558 * being re-tried or has been returned to its caller and should 16559 * not be referenced. 16560 */ 16561 16562 mutex_exit(SD_MUTEX(un)); 16563 } 16564 16565 16566 /* 16567 * Function: sd_print_incomplete_msg 16568 * 16569 * Description: Prints the error message for a CMD_INCOMPLETE error. 16570 * 16571 * Arguments: un - ptr to associated softstate for the device. 16572 * bp - ptr to the buf(9S) for the command. 16573 * arg - message string ptr 16574 * code - SD_DELAYED_RETRY_ISSUED, SD_IMMEDIATE_RETRY_ISSUED, 16575 * or SD_NO_RETRY_ISSUED.
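 *
 *		The resulting console message (when FLAG_SILENT is not
 *		set) has the form "incomplete <cmd>- retrying" or
 *		"incomplete <cmd>- giving up"; see the scsi_log() call
 *		in the body below.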
16576 * 16577 * Context: May be called under interrupt context 16578 */ 16579 16580 static void 16581 sd_print_incomplete_msg(struct sd_lun *un, struct buf *bp, void *arg, int code) 16582 { 16583 struct scsi_pkt *pktp; 16584 char *msgp; 16585 char *cmdp = arg; 16586 16587 ASSERT(un != NULL); 16588 ASSERT(mutex_owned(SD_MUTEX(un))); 16589 ASSERT(bp != NULL); 16590 ASSERT(arg != NULL); 16591 pktp = SD_GET_PKTP(bp); 16592 ASSERT(pktp != NULL); 16593 16594 switch (code) { 16595 case SD_DELAYED_RETRY_ISSUED: 16596 case SD_IMMEDIATE_RETRY_ISSUED: 16597 msgp = "retrying"; 16598 break; 16599 case SD_NO_RETRY_ISSUED: 16600 default: 16601 msgp = "giving up"; 16602 break; 16603 } 16604 16605 if ((pktp->pkt_flags & FLAG_SILENT) == 0) { 16606 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 16607 "incomplete %s- %s\n", cmdp, msgp); 16608 } 16609 } 16610 16611 16612 16613 /* 16614 * Function: sd_pkt_status_good 16615 * 16616 * Description: Processing for a STATUS_GOOD code in pkt_status. 16617 * 16618 * Context: May be called under interrupt context 16619 */ 16620 16621 static void 16622 sd_pkt_status_good(struct sd_lun *un, struct buf *bp, 16623 struct sd_xbuf *xp, struct scsi_pkt *pktp) 16624 { 16625 char *cmdp; 16626 16627 ASSERT(un != NULL); 16628 ASSERT(mutex_owned(SD_MUTEX(un))); 16629 ASSERT(bp != NULL); 16630 ASSERT(xp != NULL); 16631 ASSERT(pktp != NULL); 16632 ASSERT(pktp->pkt_reason == CMD_CMPLT); 16633 ASSERT(SD_GET_PKT_STATUS(pktp) == STATUS_GOOD); 16634 ASSERT(pktp->pkt_resid != 0); 16635 16636 SD_TRACE(SD_LOG_IO_CORE, un, "sd_pkt_status_good: entry\n"); 16637 16638 SD_UPDATE_ERRSTATS(un, sd_harderrs); 16639 switch (SD_GET_PKT_OPCODE(pktp) & 0x1F) { 16640 case SCMD_READ: 16641 cmdp = "read"; 16642 break; 16643 case SCMD_WRITE: 16644 cmdp = "write"; 16645 break; 16646 default: 16647 SD_UPDATE_B_RESID(bp, pktp); 16648 sd_return_command(un, bp); 16649 SD_TRACE(SD_LOG_IO_CORE, un, "sd_pkt_status_good: exit\n"); 16650 return; 16651 } 16652 16653 /* 16654 * See if we can retry the read/write, preferably immediately. 16655 * If retries are exhausted, then sd_retry_command() will update 16656 * the b_resid count. 16657 */ 16658 sd_retry_command(un, bp, SD_RETRIES_STANDARD, sd_print_incomplete_msg, 16659 cmdp, EIO, (clock_t)0, NULL); 16660 16661 SD_TRACE(SD_LOG_IO_CORE, un, "sd_pkt_status_good: exit\n"); 16662 } 16663 16664 16665 16666 16667 16668 /* 16669 * Function: sd_handle_request_sense 16670 * 16671 * Description: Processing for non-auto Request Sense command. 16672 * 16673 * Arguments: un - ptr to associated softstate 16674 * sense_bp - ptr to buf(9S) for the RQS command 16675 * sense_xp - ptr to the sd_xbuf for the RQS command 16676 * sense_pktp - ptr to the scsi_pkt(9S) for the RQS command 16677 * 16678 * Context: May be called under interrupt context 16679 */ 16680 16681 static void 16682 sd_handle_request_sense(struct sd_lun *un, struct buf *sense_bp, 16683 struct sd_xbuf *sense_xp, struct scsi_pkt *sense_pktp) 16684 { 16685 struct buf *cmd_bp; /* buf for the original command */ 16686 struct sd_xbuf *cmd_xp; /* sd_xbuf for the original command */ 16687 struct scsi_pkt *cmd_pktp; /* pkt for the original command */ 16688 size_t actual_len; /* actual sense data length */ 16689 16690 ASSERT(un != NULL); 16691 ASSERT(mutex_owned(SD_MUTEX(un))); 16692 ASSERT(sense_bp != NULL); 16693 ASSERT(sense_xp != NULL); 16694 ASSERT(sense_pktp != NULL); 16695 16696 /* 16697 * Note the sense_bp, sense_xp, and sense_pktp here are for the 16698 * RQS command and not the original command.
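 *
 * The linkage between the two, roughly (illustration only):
 *
 *	un->un_rqs_bp (== sense_bp) --SD_GET_XBUF()--> sense_xp
 *	sense_xp->xb_sense_bp -----------------------> cmd_bp (original)
 *	cmd_bp --SD_GET_XBUF()/SD_GET_PKTP()---------> cmd_xp, cmd_pktp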
16699 */ 16700 ASSERT(sense_pktp == un->un_rqs_pktp); 16701 ASSERT(sense_bp == un->un_rqs_bp); 16702 ASSERT((sense_pktp->pkt_flags & (FLAG_SENSING | FLAG_HEAD)) == 16703 (FLAG_SENSING | FLAG_HEAD)); 16704 ASSERT((((SD_GET_XBUF(sense_xp->xb_sense_bp))->xb_pktp->pkt_flags) & 16705 FLAG_SENSING) == FLAG_SENSING); 16706 16707 /* These are the bp, xp, and pktp for the original command */ 16708 cmd_bp = sense_xp->xb_sense_bp; 16709 cmd_xp = SD_GET_XBUF(cmd_bp); 16710 cmd_pktp = SD_GET_PKTP(cmd_bp); 16711 16712 if (sense_pktp->pkt_reason != CMD_CMPLT) { 16713 /* 16714 * The REQUEST SENSE command failed. Release the REQUEST 16715 * SENSE command for re-use, get back the bp for the original 16716 * command, and attempt to re-try the original command if 16717 * FLAG_DIAGNOSE is not set in the original packet. 16718 */ 16719 SD_UPDATE_ERRSTATS(un, sd_harderrs); 16720 if ((cmd_pktp->pkt_flags & FLAG_DIAGNOSE) == 0) { 16721 cmd_bp = sd_mark_rqs_idle(un, sense_xp); 16722 sd_retry_command(un, cmd_bp, SD_RETRIES_STANDARD, 16723 NULL, NULL, EIO, (clock_t)0, NULL); 16724 return; 16725 } 16726 } 16727 16728 /* 16729 * Save the relevant sense info into the xp for the original cmd. 16730 * 16731 * Note: if the request sense failed the state info will be zero 16732 * as set in sd_mark_rqs_busy() 16733 */ 16734 cmd_xp->xb_sense_status = *(sense_pktp->pkt_scbp); 16735 cmd_xp->xb_sense_state = sense_pktp->pkt_state; 16736 actual_len = MAX_SENSE_LENGTH - sense_pktp->pkt_resid; 16737 if ((cmd_xp->xb_pkt_flags & SD_XB_USCSICMD) && 16738 (((struct uscsi_cmd *)cmd_xp->xb_pktinfo)->uscsi_rqlen > 16739 SENSE_LENGTH)) { 16740 bcopy(sense_bp->b_un.b_addr, cmd_xp->xb_sense_data, 16741 MAX_SENSE_LENGTH); 16742 cmd_xp->xb_sense_resid = sense_pktp->pkt_resid; 16743 } else { 16744 bcopy(sense_bp->b_un.b_addr, cmd_xp->xb_sense_data, 16745 SENSE_LENGTH); 16746 if (actual_len < SENSE_LENGTH) { 16747 cmd_xp->xb_sense_resid = SENSE_LENGTH - actual_len; 16748 } else { 16749 cmd_xp->xb_sense_resid = 0; 16750 } 16751 } 16752 16753 /* 16754 * Free up the RQS command.... 16755 * NOTE: 16756 * Must do this BEFORE calling sd_validate_sense_data! 16757 * sd_validate_sense_data may return the original command in 16758 * which case the pkt will be freed and the flags can no 16759 * longer be touched. 16760 * SD_MUTEX is held through this process until the command 16761 * is dispatched based upon the sense data, so there are 16762 * no race conditions. 16763 */ 16764 (void) sd_mark_rqs_idle(un, sense_xp); 16765 16766 /* 16767 * For a retryable command see if we have valid sense data, if so then 16768 * turn it over to sd_decode_sense() to figure out the right course of 16769 * action. Just fail a non-retryable command. 16770 */ 16771 if ((cmd_pktp->pkt_flags & FLAG_DIAGNOSE) == 0) { 16772 if (sd_validate_sense_data(un, cmd_bp, cmd_xp, actual_len) == 16773 SD_SENSE_DATA_IS_VALID) { 16774 sd_decode_sense(un, cmd_bp, cmd_xp, cmd_pktp); 16775 } 16776 } else { 16777 SD_DUMP_MEMORY(un, SD_LOG_IO_CORE, "Failed CDB", 16778 (uchar_t *)cmd_pktp->pkt_cdbp, CDB_SIZE, SD_LOG_HEX); 16779 SD_DUMP_MEMORY(un, SD_LOG_IO_CORE, "Sense Data", 16780 (uchar_t *)cmd_xp->xb_sense_data, SENSE_LENGTH, SD_LOG_HEX); 16781 sd_return_failed_command(un, cmd_bp, EIO); 16782 } 16783 } 16784 16785 16786 16787 16788 /* 16789 * Function: sd_handle_auto_request_sense 16790 * 16791 * Description: Processing for auto-request sense information. 
16792 * 16793 * Arguments: un - ptr to associated softstate 16794 * bp - ptr to buf(9S) for the command 16795 * xp - ptr to the sd_xbuf for the command 16796 * pktp - ptr to the scsi_pkt(9S) for the command 16797 * 16798 * Context: May be called under interrupt context 16799 */ 16800 16801 static void 16802 sd_handle_auto_request_sense(struct sd_lun *un, struct buf *bp, 16803 struct sd_xbuf *xp, struct scsi_pkt *pktp) 16804 { 16805 struct scsi_arq_status *asp; 16806 size_t actual_len; 16807 16808 ASSERT(un != NULL); 16809 ASSERT(mutex_owned(SD_MUTEX(un))); 16810 ASSERT(bp != NULL); 16811 ASSERT(xp != NULL); 16812 ASSERT(pktp != NULL); 16813 ASSERT(pktp != un->un_rqs_pktp); 16814 ASSERT(bp != un->un_rqs_bp); 16815 16816 /* 16817 * For auto-request sense, we get a scsi_arq_status back from 16818 * the HBA, with the sense data in the sts_sensedata member. 16819 * The pkt_scbp of the packet points to this scsi_arq_status. 16820 */ 16821 asp = (struct scsi_arq_status *)(pktp->pkt_scbp); 16822 16823 if (asp->sts_rqpkt_reason != CMD_CMPLT) { 16824 /* 16825 * The auto REQUEST SENSE failed; see if we can re-try 16826 * the original command. 16827 */ 16828 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 16829 "auto request sense failed (reason=%s)\n", 16830 scsi_rname(asp->sts_rqpkt_reason)); 16831 16832 sd_reset_target(un, pktp); 16833 16834 sd_retry_command(un, bp, SD_RETRIES_STANDARD, 16835 NULL, NULL, EIO, (clock_t)0, NULL); 16836 return; 16837 } 16838 16839 /* Save the relevant sense info into the xp for the original cmd. */ 16840 xp->xb_sense_status = *((uchar_t *)(&(asp->sts_rqpkt_status))); 16841 xp->xb_sense_state = asp->sts_rqpkt_state; 16842 xp->xb_sense_resid = asp->sts_rqpkt_resid; 16843 if (xp->xb_sense_state & STATE_XARQ_DONE) { 16844 actual_len = MAX_SENSE_LENGTH - xp->xb_sense_resid; 16845 bcopy(&asp->sts_sensedata, xp->xb_sense_data, 16846 MAX_SENSE_LENGTH); 16847 } else { 16848 if (xp->xb_sense_resid > SENSE_LENGTH) { 16849 actual_len = MAX_SENSE_LENGTH - xp->xb_sense_resid; 16850 } else { 16851 actual_len = SENSE_LENGTH - xp->xb_sense_resid; 16852 } 16853 if (xp->xb_pkt_flags & SD_XB_USCSICMD) { 16854 if ((((struct uscsi_cmd *) 16855 (xp->xb_pktinfo))->uscsi_rqlen) > actual_len) { 16856 xp->xb_sense_resid = (((struct uscsi_cmd *) 16857 (xp->xb_pktinfo))->uscsi_rqlen) - 16858 actual_len; 16859 } else { 16860 xp->xb_sense_resid = 0; 16861 } 16862 } 16863 bcopy(&asp->sts_sensedata, xp->xb_sense_data, SENSE_LENGTH); 16864 } 16865 16866 /* 16867 * See if we have valid sense data, if so then turn it over to 16868 * sd_decode_sense() to figure out the right course of action. 16869 */ 16870 if (sd_validate_sense_data(un, bp, xp, actual_len) == 16871 SD_SENSE_DATA_IS_VALID) { 16872 sd_decode_sense(un, bp, xp, pktp); 16873 } 16874 } 16875 16876 16877 /* 16878 * Function: sd_print_sense_failed_msg 16879 * 16880 * Description: Print log message when RQS has failed. 
16881 * 16882 * Arguments: un - ptr to associated softstate 16883 * bp - ptr to buf(9S) for the command 16884 * arg - generic message string ptr 16885 * code - SD_IMMEDIATE_RETRY_ISSUED, SD_DELAYED_RETRY_ISSUED, 16886 * or SD_NO_RETRY_ISSUED 16887 * 16888 * Context: May be called from interrupt context 16889 */ 16890 16891 static void 16892 sd_print_sense_failed_msg(struct sd_lun *un, struct buf *bp, void *arg, 16893 int code) 16894 { 16895 char *msgp = arg; 16896 16897 ASSERT(un != NULL); 16898 ASSERT(mutex_owned(SD_MUTEX(un))); 16899 ASSERT(bp != NULL); 16900 16901 if ((code == SD_NO_RETRY_ISSUED) && (msgp != NULL)) { 16902 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, msgp); 16903 } 16904 } 16905 16906 16907 /* 16908 * Function: sd_validate_sense_data 16909 * 16910 * Description: Check the given sense data for validity. 16911 * If the sense data is not valid, the command will 16912 * be either failed or retried! 16913 * 16914 * Return Code: SD_SENSE_DATA_IS_INVALID 16915 * SD_SENSE_DATA_IS_VALID 16916 * 16917 * Context: May be called from interrupt context 16918 */ 16919 16920 static int 16921 sd_validate_sense_data(struct sd_lun *un, struct buf *bp, struct sd_xbuf *xp, 16922 size_t actual_len) 16923 { 16924 struct scsi_extended_sense *esp; 16925 struct scsi_pkt *pktp; 16926 char *msgp = NULL; 16927 sd_ssc_t *sscp; 16928 16929 ASSERT(un != NULL); 16930 ASSERT(mutex_owned(SD_MUTEX(un))); 16931 ASSERT(bp != NULL); 16932 ASSERT(bp != un->un_rqs_bp); 16933 ASSERT(xp != NULL); 16934 ASSERT(un->un_fm_private != NULL); 16935 16936 pktp = SD_GET_PKTP(bp); 16937 ASSERT(pktp != NULL); 16938 16939 sscp = &((struct sd_fm_internal *)(un->un_fm_private))->fm_ssc; 16940 ASSERT(sscp != NULL); 16941 16942 /* 16943 * Check the status of the RQS command (auto or manual). 16944 */ 16945 switch (xp->xb_sense_status & STATUS_MASK) { 16946 case STATUS_GOOD: 16947 break; 16948 16949 case STATUS_RESERVATION_CONFLICT: 16950 sd_pkt_status_reservation_conflict(un, bp, xp, pktp); 16951 return (SD_SENSE_DATA_IS_INVALID); 16952 16953 case STATUS_BUSY: 16954 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 16955 "Busy Status on REQUEST SENSE\n"); 16956 sd_retry_command(un, bp, SD_RETRIES_BUSY, NULL, 16957 NULL, EIO, un->un_busy_timeout / 500, kstat_waitq_enter); 16958 return (SD_SENSE_DATA_IS_INVALID); 16959 16960 case STATUS_QFULL: 16961 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 16962 "QFULL Status on REQUEST SENSE\n"); 16963 sd_retry_command(un, bp, SD_RETRIES_STANDARD, NULL, 16964 NULL, EIO, un->un_busy_timeout / 500, kstat_waitq_enter); 16965 return (SD_SENSE_DATA_IS_INVALID); 16966 16967 case STATUS_CHECK: 16968 case STATUS_TERMINATED: 16969 msgp = "Check Condition on REQUEST SENSE\n"; 16970 goto sense_failed; 16971 16972 default: 16973 msgp = "Not STATUS_GOOD on REQUEST_SENSE\n"; 16974 goto sense_failed; 16975 } 16976 16977 /* 16978 * See if we got the minimum required amount of sense data. 16979 * Note: We are assuming the returned sense data is SENSE_LENGTH bytes 16980 * or less. 
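 *
 * In outline, the checks below are (a sketch of this function only):
 *
 *	1. STATE_XFERRED_DATA must be set and actual_len != 0
 *	2. actual_len >= SUN_MIN_SENSE_LENGTH
 *	3. es_class == CLASS_EXTENDED_SENSE
 *	4. es_code must be a known fixed/descriptor/vendor format
 *
 * Failing check 1, 2 or 4 retries the original command via the
 * sense_failed path; failing check 3 fails the command outright
 * (legacy behavior).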
16981 */ 16982 if (((xp->xb_sense_state & STATE_XFERRED_DATA) == 0) || 16983 (actual_len == 0)) { 16984 msgp = "Request Sense couldn't get sense data\n"; 16985 goto sense_failed; 16986 } 16987 16988 if (actual_len < SUN_MIN_SENSE_LENGTH) { 16989 msgp = "Not enough sense information\n"; 16990 /* Mark the ssc_flags for detecting invalid sense data */ 16991 if (!(xp->xb_pkt_flags & SD_XB_USCSICMD)) { 16992 sd_ssc_set_info(sscp, SSC_FLAGS_INVALID_SENSE, 0, 16993 "sense-data"); 16994 } 16995 goto sense_failed; 16996 } 16997 16998 /* 16999 * We require the extended sense data 17000 */ 17001 esp = (struct scsi_extended_sense *)xp->xb_sense_data; 17002 if (esp->es_class != CLASS_EXTENDED_SENSE) { 17003 if ((pktp->pkt_flags & FLAG_SILENT) == 0) { 17004 static char tmp[8]; 17005 static char buf[148]; 17006 char *p = (char *)(xp->xb_sense_data); 17007 int i; 17008 17009 mutex_enter(&sd_sense_mutex); 17010 (void) strcpy(buf, "undecodable sense information:"); 17011 for (i = 0; i < actual_len; i++) { 17012 (void) sprintf(tmp, " 0x%x", *(p++)&0xff); 17013 (void) strcpy(&buf[strlen(buf)], tmp); 17014 } 17015 i = strlen(buf); 17016 (void) strcpy(&buf[i], "-(assumed fatal)\n"); 17017 17018 if (SD_FM_LOG(un) == SD_FM_LOG_NSUP) { 17019 scsi_log(SD_DEVINFO(un), sd_label, 17020 CE_WARN, buf); 17021 } 17022 mutex_exit(&sd_sense_mutex); 17023 } 17024 17025 /* Mark the ssc_flags for detecting invalid sense data */ 17026 if (!(xp->xb_pkt_flags & SD_XB_USCSICMD)) { 17027 sd_ssc_set_info(sscp, SSC_FLAGS_INVALID_SENSE, 0, 17028 "sense-data"); 17029 } 17030 17031 /* Note: Legacy behavior, fail the command with no retry */ 17032 sd_return_failed_command(un, bp, EIO); 17033 return (SD_SENSE_DATA_IS_INVALID); 17034 } 17035 17036 /* 17037 * Check that es_code is valid (es_class concatenated with es_code 17038 * makes up the "response code" field). es_class will always be 7, so 17039 * make sure es_code is 0, 1, 2, 3 or 0xf. es_code will indicate the 17040 * format. 17041 */ 17042 if ((esp->es_code != CODE_FMT_FIXED_CURRENT) && 17043 (esp->es_code != CODE_FMT_FIXED_DEFERRED) && 17044 (esp->es_code != CODE_FMT_DESCR_CURRENT) && 17045 (esp->es_code != CODE_FMT_DESCR_DEFERRED) && 17046 (esp->es_code != CODE_FMT_VENDOR_SPECIFIC)) { 17047 /* Mark the ssc_flags for detecting invalid sense data */ 17048 if (!(xp->xb_pkt_flags & SD_XB_USCSICMD)) { 17049 sd_ssc_set_info(sscp, SSC_FLAGS_INVALID_SENSE, 0, 17050 "sense-data"); 17051 } 17052 goto sense_failed; 17053 } 17054 17055 return (SD_SENSE_DATA_IS_VALID); 17056 17057 sense_failed: 17058 /* 17059 * If the request sense failed (for whatever reason), attempt 17060 * to retry the original command. 17061 */ 17062 #if defined(__i386) || defined(__amd64) 17063 /* 17064 * SD_RETRY_DELAY is conditionally compiled (#if fibre) in 17065 * sddef.h for the SPARC platform, while x86 uses one binary 17066 * for both SCSI and FC. 17067 * The SD_RETRY_DELAY value needs to be adjusted here 17068 * whenever SD_RETRY_DELAY changes in sddef.h. 17069 */ 17070 sd_retry_command(un, bp, SD_RETRIES_STANDARD, 17071 sd_print_sense_failed_msg, msgp, EIO, 17072 un->un_f_is_fibre?drv_usectohz(100000):(clock_t)0, NULL); 17073 #else 17074 sd_retry_command(un, bp, SD_RETRIES_STANDARD, 17075 sd_print_sense_failed_msg, msgp, EIO, SD_RETRY_DELAY, NULL); 17076 #endif 17077 17078 return (SD_SENSE_DATA_IS_INVALID); 17079 } 17080 17081 /* 17082 * Function: sd_decode_sense 17083 * 17084 * Description: Take recovery action(s) when SCSI Sense Data is received. 17085 * 17086 * Context: Interrupt context.
17087 */ 17088 17089 static void 17090 sd_decode_sense(struct sd_lun *un, struct buf *bp, struct sd_xbuf *xp, 17091 struct scsi_pkt *pktp) 17092 { 17093 uint8_t sense_key; 17094 17095 ASSERT(un != NULL); 17096 ASSERT(mutex_owned(SD_MUTEX(un))); 17097 ASSERT(bp != NULL); 17098 ASSERT(bp != un->un_rqs_bp); 17099 ASSERT(xp != NULL); 17100 ASSERT(pktp != NULL); 17101 17102 sense_key = scsi_sense_key(xp->xb_sense_data); 17103 17104 switch (sense_key) { 17105 case KEY_NO_SENSE: 17106 sd_sense_key_no_sense(un, bp, xp, pktp); 17107 break; 17108 case KEY_RECOVERABLE_ERROR: 17109 sd_sense_key_recoverable_error(un, xp->xb_sense_data, 17110 bp, xp, pktp); 17111 break; 17112 case KEY_NOT_READY: 17113 sd_sense_key_not_ready(un, xp->xb_sense_data, 17114 bp, xp, pktp); 17115 break; 17116 case KEY_MEDIUM_ERROR: 17117 case KEY_HARDWARE_ERROR: 17118 sd_sense_key_medium_or_hardware_error(un, 17119 xp->xb_sense_data, bp, xp, pktp); 17120 break; 17121 case KEY_ILLEGAL_REQUEST: 17122 sd_sense_key_illegal_request(un, bp, xp, pktp); 17123 break; 17124 case KEY_UNIT_ATTENTION: 17125 sd_sense_key_unit_attention(un, xp->xb_sense_data, 17126 bp, xp, pktp); 17127 break; 17128 case KEY_WRITE_PROTECT: 17129 case KEY_VOLUME_OVERFLOW: 17130 case KEY_MISCOMPARE: 17131 sd_sense_key_fail_command(un, bp, xp, pktp); 17132 break; 17133 case KEY_BLANK_CHECK: 17134 sd_sense_key_blank_check(un, bp, xp, pktp); 17135 break; 17136 case KEY_ABORTED_COMMAND: 17137 sd_sense_key_aborted_command(un, bp, xp, pktp); 17138 break; 17139 case KEY_VENDOR_UNIQUE: 17140 case KEY_COPY_ABORTED: 17141 case KEY_EQUAL: 17142 case KEY_RESERVED: 17143 default: 17144 sd_sense_key_default(un, xp->xb_sense_data, 17145 bp, xp, pktp); 17146 break; 17147 } 17148 } 17149 17150 17151 /* 17152 * Function: sd_dump_memory 17153 * 17154 * Description: Debug logging routine to print the contents of a user provided 17155 * buffer. The output of the buffer is broken up into 256 byte 17156 * segments due to a size constraint of the scsi_log 17157 * implementation. 17158 * 17159 * Arguments: un - ptr to softstate 17160 * comp - component mask 17161 * title - "title" string to precede data when printed 17162 * data - ptr to data block to be printed 17163 * len - size of data block to be printed 17164 * fmt - SD_LOG_HEX (use 0x%02x format) or SD_LOG_CHAR (use %c) 17165 * 17166 * Context: May be called from interrupt context 17167 */ 17168 17169 #define SD_DUMP_MEMORY_BUF_SIZE 256 17170 17171 static char *sd_dump_format_string[] = { 17172 " 0x%02x", 17173 " %c" 17174 }; 17175 17176 static void 17177 sd_dump_memory(struct sd_lun *un, uint_t comp, char *title, uchar_t *data, 17178 int len, int fmt) 17179 { 17180 int i, j; 17181 int avail_count; 17182 int start_offset; 17183 int end_offset; 17184 size_t entry_len; 17185 char *bufp; 17186 char *local_buf; 17187 char *format_string; 17188 17189 ASSERT((fmt == SD_LOG_HEX) || (fmt == SD_LOG_CHAR)); 17190 17191 /* 17192 * In the debug version of the driver, this function is called from a 17193 * number of places which are NOPs in the release driver. 17194 * The debug driver therefore has additional methods of filtering 17195 * debug output. 17196 */ 17197 #ifdef SDDEBUG 17198 /* 17199 * In the debug version of the driver we can reduce the number of debug 17200 * messages by setting sd_error_level to something other than 17201 * SCSI_ERR_ALL and clearing bits in sd_level_mask and 17202 * sd_component_mask.
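 *
 * For illustration only (a sketch, not a committed interface): in a
 * debug build this output is typically enabled by setting
 * sd_error_level to SCSI_ERR_ALL, setting SD_LOGMASK_DUMP_MEM (or
 * SD_LOGMASK_DIAG) in sd_level_mask, and enabling the relevant
 * component bit in sd_component_mask, e.g. via /etc/system or a
 * kernel debugger before the I/O of interest is issued.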
17203 */ 17204 if (((sd_level_mask & (SD_LOGMASK_DUMP_MEM | SD_LOGMASK_DIAG)) == 0) || 17205 (sd_error_level != SCSI_ERR_ALL)) { 17206 return; 17207 } 17208 if (((sd_component_mask & comp) == 0) || 17209 (sd_error_level != SCSI_ERR_ALL)) { 17210 return; 17211 } 17212 #else 17213 if (sd_error_level != SCSI_ERR_ALL) { 17214 return; 17215 } 17216 #endif 17217 17218 local_buf = kmem_zalloc(SD_DUMP_MEMORY_BUF_SIZE, KM_SLEEP); 17219 bufp = local_buf; 17220 /* 17221 * Available length is the length of local_buf[], minus the 17222 * length of the title string, minus one for the ":", minus 17223 * one for the newline, minus one for the NULL terminator. 17224 * This gives the #bytes available for holding the printed 17225 * values from the given data buffer. 17226 */ 17227 if (fmt == SD_LOG_HEX) { 17228 format_string = sd_dump_format_string[0]; 17229 } else /* SD_LOG_CHAR */ { 17230 format_string = sd_dump_format_string[1]; 17231 } 17232 /* 17233 * Available count is the number of elements from the given 17234 * data buffer that we can fit into the available length. 17235 * This is based upon the size of the format string used. 17236 * Make one entry and find its size. 17237 */ 17238 (void) sprintf(bufp, format_string, data[0]); 17239 entry_len = strlen(bufp); 17240 avail_count = (SD_DUMP_MEMORY_BUF_SIZE - strlen(title) - 3) / entry_len; 17241 17242 j = 0; 17243 while (j < len) { 17244 bufp = local_buf; 17245 bzero(bufp, SD_DUMP_MEMORY_BUF_SIZE); 17246 start_offset = j; 17247 17248 end_offset = start_offset + avail_count; 17249 17250 (void) sprintf(bufp, "%s:", title); 17251 bufp += strlen(bufp); 17252 for (i = start_offset; ((i < end_offset) && (j < len)); 17253 i++, j++) { 17254 (void) sprintf(bufp, format_string, data[i]); 17255 bufp += entry_len; 17256 } 17257 (void) sprintf(bufp, "\n"); 17258 17259 scsi_log(SD_DEVINFO(un), sd_label, CE_NOTE, "%s", local_buf); 17260 } 17261 kmem_free(local_buf, SD_DUMP_MEMORY_BUF_SIZE); 17262 } 17263 17264 /* 17265 * Function: sd_print_sense_msg 17266 * 17267 * Description: Log a message based upon the given sense data.
17268 * 17269 * Arguments: un - ptr to associated softstate 17270 * bp - ptr to buf(9S) for the command 17271 * arg - ptr to associated sd_sense_info struct 17272 * code - SD_IMMEDIATE_RETRY_ISSUED, SD_DELAYED_RETRY_ISSUED, 17273 * or SD_NO_RETRY_ISSUED 17274 * 17275 * Context: May be called from interrupt context 17276 */ 17277 17278 static void 17279 sd_print_sense_msg(struct sd_lun *un, struct buf *bp, void *arg, int code) 17280 { 17281 struct sd_xbuf *xp; 17282 struct scsi_pkt *pktp; 17283 uint8_t *sensep; 17284 daddr_t request_blkno; 17285 diskaddr_t err_blkno; 17286 int severity; 17287 int pfa_flag; 17288 extern struct scsi_key_strings scsi_cmds[]; 17289 17290 ASSERT(un != NULL); 17291 ASSERT(mutex_owned(SD_MUTEX(un))); 17292 ASSERT(bp != NULL); 17293 xp = SD_GET_XBUF(bp); 17294 ASSERT(xp != NULL); 17295 pktp = SD_GET_PKTP(bp); 17296 ASSERT(pktp != NULL); 17297 ASSERT(arg != NULL); 17298 17299 severity = ((struct sd_sense_info *)(arg))->ssi_severity; 17300 pfa_flag = ((struct sd_sense_info *)(arg))->ssi_pfa_flag; 17301 17302 if ((code == SD_DELAYED_RETRY_ISSUED) || 17303 (code == SD_IMMEDIATE_RETRY_ISSUED)) { 17304 severity = SCSI_ERR_RETRYABLE; 17305 } 17306 17307 /* Use absolute block number for the request block number */ 17308 request_blkno = xp->xb_blkno; 17309 17310 /* 17311 * Now try to get the error block number from the sense data 17312 */ 17313 sensep = xp->xb_sense_data; 17314 17315 if (scsi_sense_info_uint64(sensep, SENSE_LENGTH, 17316 (uint64_t *)&err_blkno)) { 17317 /* 17318 * We retrieved the error block number from the information 17319 * portion of the sense data. 17320 * 17321 * For USCSI commands we are better off using the error 17322 * block no. as the requested block no. (This is the best 17323 * we can estimate.) 17324 */ 17325 if ((SD_IS_BUFIO(xp) == FALSE) && 17326 ((pktp->pkt_flags & FLAG_SILENT) == 0)) { 17327 request_blkno = err_blkno; 17328 } 17329 } else { 17330 /* 17331 * Without the es_valid bit set (for fixed format) or an 17332 * information descriptor (for descriptor format) we cannot 17333 * be certain of the error blkno, so just use the 17334 * request_blkno. 17335 */ 17336 err_blkno = (diskaddr_t)request_blkno; 17337 } 17338 17339 /* 17340 * The following will log the buffer contents for the release driver 17341 * if the SD_LOGMASK_DIAG bit of sd_level_mask is set, or the error 17342 * level is set to verbose.
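 *
 * Sample output shape (byte values below are made up for
 * illustration; the real bytes come from the failed command):
 *
 *	Failed CDB: 0x28 0x00 0x00 0x00 0x10 0x00 0x00 0x00 0x08 0x00
 *	Sense Data: 0x70 0x00 0x03 0x00 0x00 0x00 0x00 0x0a ...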
17343 */ 17344 sd_dump_memory(un, SD_LOG_IO, "Failed CDB", 17345 (uchar_t *)pktp->pkt_cdbp, CDB_SIZE, SD_LOG_HEX); 17346 sd_dump_memory(un, SD_LOG_IO, "Sense Data", 17347 (uchar_t *)sensep, SENSE_LENGTH, SD_LOG_HEX); 17348 17349 if (pfa_flag == FALSE) { 17350 /* This is normally only set for USCSI */ 17351 if ((pktp->pkt_flags & FLAG_SILENT) != 0) { 17352 return; 17353 } 17354 17355 if ((SD_IS_BUFIO(xp) == TRUE) && 17356 (((sd_level_mask & SD_LOGMASK_DIAG) == 0) && 17357 (severity < sd_error_level))) { 17358 return; 17359 } 17360 } 17361 /* 17362 * Check for Sonoma Failover and keep a count of how many failed I/O's 17363 */ 17364 if ((SD_IS_LSI(un)) && 17365 (scsi_sense_key(sensep) == KEY_ILLEGAL_REQUEST) && 17366 (scsi_sense_asc(sensep) == 0x94) && 17367 (scsi_sense_ascq(sensep) == 0x01)) { 17368 un->un_sonoma_failure_count++; 17369 if (un->un_sonoma_failure_count > 1) { 17370 return; 17371 } 17372 } 17373 17374 if (SD_FM_LOG(un) == SD_FM_LOG_NSUP || 17375 ((scsi_sense_key(sensep) == KEY_RECOVERABLE_ERROR) && 17376 (pktp->pkt_resid == 0))) { 17377 scsi_vu_errmsg(SD_SCSI_DEVP(un), pktp, sd_label, severity, 17378 request_blkno, err_blkno, scsi_cmds, 17379 (struct scsi_extended_sense *)sensep, 17380 un->un_additional_codes, NULL); 17381 } 17382 } 17383 17384 /* 17385 * Function: sd_sense_key_no_sense 17386 * 17387 * Description: Recovery action when sense data was not received. 17388 * 17389 * Context: May be called from interrupt context 17390 */ 17391 17392 static void 17393 sd_sense_key_no_sense(struct sd_lun *un, struct buf *bp, 17394 struct sd_xbuf *xp, struct scsi_pkt *pktp) 17395 { 17396 struct sd_sense_info si; 17397 17398 ASSERT(un != NULL); 17399 ASSERT(mutex_owned(SD_MUTEX(un))); 17400 ASSERT(bp != NULL); 17401 ASSERT(xp != NULL); 17402 ASSERT(pktp != NULL); 17403 17404 si.ssi_severity = SCSI_ERR_FATAL; 17405 si.ssi_pfa_flag = FALSE; 17406 17407 SD_UPDATE_ERRSTATS(un, sd_softerrs); 17408 17409 sd_retry_command(un, bp, SD_RETRIES_STANDARD, sd_print_sense_msg, 17410 &si, EIO, (clock_t)0, NULL); 17411 } 17412 17413 17414 /* 17415 * Function: sd_sense_key_recoverable_error 17416 * 17417 * Description: Recovery actions for a SCSI "Recovered Error" sense key. 17418 * 17419 * Context: May be called from interrupt context 17420 */ 17421 17422 static void 17423 sd_sense_key_recoverable_error(struct sd_lun *un, 17424 uint8_t *sense_datap, 17425 struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp) 17426 { 17427 struct sd_sense_info si; 17428 uint8_t asc = scsi_sense_asc(sense_datap); 17429 17430 ASSERT(un != NULL); 17431 ASSERT(mutex_owned(SD_MUTEX(un))); 17432 ASSERT(bp != NULL); 17433 ASSERT(xp != NULL); 17434 ASSERT(pktp != NULL); 17435 17436 /* 17437 * 0x5D: FAILURE PREDICTION THRESHOLD EXCEEDED 17438 */ 17439 if ((asc == 0x5D) && (sd_report_pfa != 0)) { 17440 SD_UPDATE_ERRSTATS(un, sd_rq_pfa_err); 17441 si.ssi_severity = SCSI_ERR_INFO; 17442 si.ssi_pfa_flag = TRUE; 17443 } else { 17444 SD_UPDATE_ERRSTATS(un, sd_softerrs); 17445 SD_UPDATE_ERRSTATS(un, sd_rq_recov_err); 17446 si.ssi_severity = SCSI_ERR_RECOVERED; 17447 si.ssi_pfa_flag = FALSE; 17448 } 17449 17450 if (pktp->pkt_resid == 0) { 17451 sd_print_sense_msg(un, bp, &si, SD_NO_RETRY_ISSUED); 17452 sd_return_command(un, bp); 17453 return; 17454 } 17455 17456 sd_retry_command(un, bp, SD_RETRIES_STANDARD, sd_print_sense_msg, 17457 &si, EIO, (clock_t)0, NULL); 17458 } 17459 17460 17461 17462 17463 /* 17464 * Function: sd_sense_key_not_ready 17465 * 17466 * Description: Recovery actions for a SCSI "Not Ready" sense key. 
17467 * 17468 * Context: May be called from interrupt context 17469 */ 17470 17471 static void 17472 sd_sense_key_not_ready(struct sd_lun *un, 17473 uint8_t *sense_datap, 17474 struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp) 17475 { 17476 struct sd_sense_info si; 17477 uint8_t asc = scsi_sense_asc(sense_datap); 17478 uint8_t ascq = scsi_sense_ascq(sense_datap); 17479 17480 ASSERT(un != NULL); 17481 ASSERT(mutex_owned(SD_MUTEX(un))); 17482 ASSERT(bp != NULL); 17483 ASSERT(xp != NULL); 17484 ASSERT(pktp != NULL); 17485 17486 si.ssi_severity = SCSI_ERR_FATAL; 17487 si.ssi_pfa_flag = FALSE; 17488 17489 /* 17490 * Update error stats after first NOT READY error. Disks may have 17491 * been powered down and may need to be restarted. For CDROMs, 17492 * report NOT READY errors only if media is present. 17493 */ 17494 if ((ISCD(un) && (asc == 0x3A)) || 17495 (xp->xb_nr_retry_count > 0)) { 17496 SD_UPDATE_ERRSTATS(un, sd_harderrs); 17497 SD_UPDATE_ERRSTATS(un, sd_rq_ntrdy_err); 17498 } 17499 17500 /* 17501 * Just fail if the "not ready" retry limit has been reached. 17502 */ 17503 if (xp->xb_nr_retry_count >= un->un_notready_retry_count) { 17504 /* Special check for error message printing for removables. */ 17505 if (un->un_f_has_removable_media && (asc == 0x04) && 17506 (ascq >= 0x04)) { 17507 si.ssi_severity = SCSI_ERR_ALL; 17508 } 17509 goto fail_command; 17510 } 17511 17512 /* 17513 * Check the ASC and ASCQ in the sense data as needed, to determine 17514 * what to do. 17515 */ 17516 switch (asc) { 17517 case 0x04: /* LOGICAL UNIT NOT READY */ 17518 /* 17519 * Disk drives that don't spin up result in a very long delay 17520 * in format without warning messages. We will log a message 17521 * if the error level is set to verbose. 17522 */ 17523 if (sd_error_level < SCSI_ERR_RETRYABLE) { 17524 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 17525 "logical unit not ready, resetting disk\n"); 17526 } 17527 17528 /* 17529 * There are different requirements for CDROMs and disks for 17530 * the number of retries. If a CD-ROM is giving this, it is 17531 * probably reading TOC and is in the process of getting 17532 * ready, so we should keep on trying for a long time to make 17533 * sure that all types of media are taken into account (for 17534 * some media the drive takes a long time to read TOC). For 17535 * disks we do not want to retry this too many times as this 17536 * can cause a long hang in format when the drive refuses to 17537 * spin up (a very common failure). 17538 */ 17539 switch (ascq) { 17540 case 0x00: /* LUN NOT READY, CAUSE NOT REPORTABLE */ 17541 /* 17542 * Disk drives frequently refuse to spin up which 17543 * results in a very long hang in format without 17544 * warning messages. 17545 * 17546 * Note: This code preserves the legacy behavior of 17547 * comparing xb_nr_retry_count against zero for fibre 17548 * channel targets instead of comparing against the 17549 * un_reset_retry_count value. The reason for this 17550 * discrepancy has been so utterly lost beneath the 17551 * Sands of Time that even Indiana Jones could not 17552 * find it.
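 *
 * Net effect of the code below (sketch): the target is reset when
 *
 *	((sd_level_mask & SD_LOGMASK_DIAG) ||
 *	    (xp->xb_nr_retry_count > threshold)) &&
 *	    (un->un_startstop_timeid == NULL)
 *
 * where threshold is 0 for fibre channel and un_reset_retry_count
 * for parallel SCSI.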
17553 */ 17554 if (un->un_f_is_fibre == TRUE) { 17555 if (((sd_level_mask & SD_LOGMASK_DIAG) || 17556 (xp->xb_nr_retry_count > 0)) && 17557 (un->un_startstop_timeid == NULL)) { 17558 scsi_log(SD_DEVINFO(un), sd_label, 17559 CE_WARN, "logical unit not ready, " 17560 "resetting disk\n"); 17561 sd_reset_target(un, pktp); 17562 } 17563 } else { 17564 if (((sd_level_mask & SD_LOGMASK_DIAG) || 17565 (xp->xb_nr_retry_count > 17566 un->un_reset_retry_count)) && 17567 (un->un_startstop_timeid == NULL)) { 17568 scsi_log(SD_DEVINFO(un), sd_label, 17569 CE_WARN, "logical unit not ready, " 17570 "resetting disk\n"); 17571 sd_reset_target(un, pktp); 17572 } 17573 } 17574 break; 17575 17576 case 0x01: /* LUN IS IN PROCESS OF BECOMING READY */ 17577 /* 17578 * If the target is in the process of becoming 17579 * ready, just proceed with the retry. This can 17580 * happen with CD-ROMs that take a long time to 17581 * read TOC after a power cycle or reset. 17582 */ 17583 goto do_retry; 17584 17585 case 0x02: /* LUN NOT READY, INITIALIZING CMD REQUIRED */ 17586 break; 17587 17588 case 0x03: /* LUN NOT READY, MANUAL INTERVENTION REQUIRED */ 17589 /* 17590 * Retries cannot help here so just fail right away. 17591 */ 17592 goto fail_command; 17593 17594 case 0x88: 17595 /* 17596 * Vendor-unique code for T3/T4: it indicates a 17597 * path problem in a multipathed config, but as far as 17598 * the target driver is concerned it equates to a fatal 17599 * error, so we should just fail the command right away 17600 * (without printing anything to the console). If this 17601 * is not a T3/T4, fall thru to the default recovery 17602 * action. 17603 * T3/T4 is FC only, don't need to check is_fibre 17604 */ 17605 if (SD_IS_T3(un) || SD_IS_T4(un)) { 17606 sd_return_failed_command(un, bp, EIO); 17607 return; 17608 } 17609 /* FALLTHRU */ 17610 17611 case 0x04: /* LUN NOT READY, FORMAT IN PROGRESS */ 17612 case 0x05: /* LUN NOT READY, REBUILD IN PROGRESS */ 17613 case 0x06: /* LUN NOT READY, RECALCULATION IN PROGRESS */ 17614 case 0x07: /* LUN NOT READY, OPERATION IN PROGRESS */ 17615 case 0x08: /* LUN NOT READY, LONG WRITE IN PROGRESS */ 17616 default: /* Possible future codes in SCSI spec? */ 17617 /* 17618 * For removable-media devices, do not retry if 17619 * ASCQ > 2 as these result mostly from USCSI commands 17620 * on MMC devices issued to check status of an 17621 * operation initiated in immediate mode. Also for 17622 * ASCQ >= 4 do not print console messages as these 17623 * mainly represent a user-initiated operation 17624 * instead of a system failure. 17625 */ 17626 if (un->un_f_has_removable_media) { 17627 si.ssi_severity = SCSI_ERR_ALL; 17628 goto fail_command; 17629 } 17630 break; 17631 } 17632 17633 /* 17634 * As part of our recovery attempt for the NOT READY 17635 * condition, we issue a START STOP UNIT command. However 17636 * we want to wait for a short delay before attempting this 17637 * as there may still be more commands coming back from the 17638 * target with the check condition. To do this we use 17639 * timeout(9F) to call sd_start_stop_unit_callback() after 17640 * the delay interval expires. (sd_start_stop_unit_callback() 17641 * dispatches sd_start_stop_unit_task(), which will issue 17642 * the actual START STOP UNIT command.) The delay interval 17643 * is one-half of the delay that we will use to retry the 17644 * command that generated the NOT READY condition.
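 *
 * Timeline sketch (illustrative), with D == un_busy_timeout:
 *
 *	t = 0	NOT READY decoded; timeout(9F) armed for D/2
 *	t = D/2	sd_start_stop_unit_callback() fires and dispatches
 *		sd_start_stop_unit_task() to issue START STOP UNIT
 *	later	START STOP UNIT completes; the retry of the original
 *		command is then started (serialized, see below)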
17645 * 17646 * Note that we could just dispatch sd_start_stop_unit_task() 17647 * from here and allow it to sleep for the delay interval, 17648 * but then we would be tying up the taskq thread 17649 * unnecessarily for the duration of the delay. 17650 * 17651 * Do not issue the START STOP UNIT if the current command 17652 * is already a START STOP UNIT. 17653 */ 17654 if (pktp->pkt_cdbp[0] == SCMD_START_STOP) { 17655 break; 17656 } 17657 17658 /* 17659 * Do not schedule the timeout if one is already pending. 17660 */ 17661 if (un->un_startstop_timeid != NULL) { 17662 SD_INFO(SD_LOG_ERROR, un, 17663 "sd_sense_key_not_ready: restart already issued to" 17664 " %s%d\n", ddi_driver_name(SD_DEVINFO(un)), 17665 ddi_get_instance(SD_DEVINFO(un))); 17666 break; 17667 } 17668 17669 /* 17670 * Schedule the START STOP UNIT command, then queue the command 17671 * for a retry. 17672 * 17673 * Note: A timeout is not scheduled for this retry because we 17674 * want the retry to be serial with the START_STOP_UNIT. The 17675 * retry will be started when the START_STOP_UNIT is completed 17676 * in sd_start_stop_unit_task. 17677 */ 17678 un->un_startstop_timeid = timeout(sd_start_stop_unit_callback, 17679 un, un->un_busy_timeout / 2); 17680 xp->xb_nr_retry_count++; 17681 sd_set_retry_bp(un, bp, 0, kstat_waitq_enter); 17682 return; 17683 17684 case 0x05: /* LOGICAL UNIT DOES NOT RESPOND TO SELECTION */ 17685 if (sd_error_level < SCSI_ERR_RETRYABLE) { 17686 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 17687 "unit does not respond to selection\n"); 17688 } 17689 break; 17690 17691 case 0x3A: /* MEDIUM NOT PRESENT */ 17692 if (sd_error_level >= SCSI_ERR_FATAL) { 17693 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 17694 "Caddy not inserted in drive\n"); 17695 } 17696 17697 sr_ejected(un); 17698 un->un_mediastate = DKIO_EJECTED; 17699 /* The state has changed, inform the media watch routines */ 17700 cv_broadcast(&un->un_state_cv); 17701 /* Just fail if no media is present in the drive. */ 17702 goto fail_command; 17703 17704 default: 17705 if (sd_error_level < SCSI_ERR_RETRYABLE) { 17706 scsi_log(SD_DEVINFO(un), sd_label, CE_NOTE, 17707 "Unit not Ready. Additional sense code 0x%x\n", 17708 asc); 17709 } 17710 break; 17711 } 17712 17713 do_retry: 17714 17715 /* 17716 * Retry the command, as some targets may report NOT READY for 17717 * several seconds after being reset. 17718 */ 17719 xp->xb_nr_retry_count++; 17720 si.ssi_severity = SCSI_ERR_RETRYABLE; 17721 sd_retry_command(un, bp, SD_RETRIES_NOCHECK, sd_print_sense_msg, 17722 &si, EIO, un->un_busy_timeout, NULL); 17723 17724 return; 17725 17726 fail_command: 17727 sd_print_sense_msg(un, bp, &si, SD_NO_RETRY_ISSUED); 17728 sd_return_failed_command(un, bp, EIO); 17729 } 17730 17731 17732 17733 /* 17734 * Function: sd_sense_key_medium_or_hardware_error 17735 * 17736 * Description: Recovery actions for a SCSI "Medium Error" or "Hardware Error" 17737 * sense key.
17738 * 17739 * Context: May be called from interrupt context 17740 */ 17741 17742 static void 17743 sd_sense_key_medium_or_hardware_error(struct sd_lun *un, 17744 uint8_t *sense_datap, 17745 struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp) 17746 { 17747 struct sd_sense_info si; 17748 uint8_t sense_key = scsi_sense_key(sense_datap); 17749 uint8_t asc = scsi_sense_asc(sense_datap); 17750 17751 ASSERT(un != NULL); 17752 ASSERT(mutex_owned(SD_MUTEX(un))); 17753 ASSERT(bp != NULL); 17754 ASSERT(xp != NULL); 17755 ASSERT(pktp != NULL); 17756 17757 si.ssi_severity = SCSI_ERR_FATAL; 17758 si.ssi_pfa_flag = FALSE; 17759 17760 if (sense_key == KEY_MEDIUM_ERROR) { 17761 SD_UPDATE_ERRSTATS(un, sd_rq_media_err); 17762 } 17763 17764 SD_UPDATE_ERRSTATS(un, sd_harderrs); 17765 17766 if ((un->un_reset_retry_count != 0) && 17767 (xp->xb_retry_count == un->un_reset_retry_count)) { 17768 mutex_exit(SD_MUTEX(un)); 17769 /* Do NOT do a RESET_ALL here: too intrusive. (4112858) */ 17770 if (un->un_f_allow_bus_device_reset == TRUE) { 17771 17772 boolean_t try_resetting_target = B_TRUE; 17773 17774 /* 17775 * We need to be able to handle specific ASC when we are 17776 * handling a KEY_HARDWARE_ERROR. In particular 17777 * taking the default action of resetting the target may 17778 * not be the appropriate way to attempt recovery. 17779 * Resetting a target because of a single LUN failure 17780 * victimizes all LUNs on that target. 17781 * 17782 * This is true for the LSI arrays, if an LSI 17783 * array controller returns an ASC of 0x84 (LUN Dead) we 17784 * should trust it. 17785 */ 17786 17787 if (sense_key == KEY_HARDWARE_ERROR) { 17788 switch (asc) { 17789 case 0x84: 17790 if (SD_IS_LSI(un)) { 17791 try_resetting_target = B_FALSE; 17792 } 17793 break; 17794 default: 17795 break; 17796 } 17797 } 17798 17799 if (try_resetting_target == B_TRUE) { 17800 int reset_retval = 0; 17801 if (un->un_f_lun_reset_enabled == TRUE) { 17802 SD_TRACE(SD_LOG_IO_CORE, un, 17803 "sd_sense_key_medium_or_hardware_" 17804 "error: issuing RESET_LUN\n"); 17805 reset_retval = 17806 scsi_reset(SD_ADDRESS(un), 17807 RESET_LUN); 17808 } 17809 if (reset_retval == 0) { 17810 SD_TRACE(SD_LOG_IO_CORE, un, 17811 "sd_sense_key_medium_or_hardware_" 17812 "error: issuing RESET_TARGET\n"); 17813 (void) scsi_reset(SD_ADDRESS(un), 17814 RESET_TARGET); 17815 } 17816 } 17817 } 17818 mutex_enter(SD_MUTEX(un)); 17819 } 17820 17821 /* 17822 * This really ought to be a fatal error, but we will retry anyway 17823 * as some drives report this as a spurious error. 17824 */ 17825 sd_retry_command(un, bp, SD_RETRIES_STANDARD, sd_print_sense_msg, 17826 &si, EIO, (clock_t)0, NULL); 17827 } 17828 17829 17830 17831 /* 17832 * Function: sd_sense_key_illegal_request 17833 * 17834 * Description: Recovery actions for a SCSI "Illegal Request" sense key. 
17835 * 17836 * Context: May be called from interrupt context 17837 */ 17838 17839 static void 17840 sd_sense_key_illegal_request(struct sd_lun *un, struct buf *bp, 17841 struct sd_xbuf *xp, struct scsi_pkt *pktp) 17842 { 17843 struct sd_sense_info si; 17844 17845 ASSERT(un != NULL); 17846 ASSERT(mutex_owned(SD_MUTEX(un))); 17847 ASSERT(bp != NULL); 17848 ASSERT(xp != NULL); 17849 ASSERT(pktp != NULL); 17850 17851 SD_UPDATE_ERRSTATS(un, sd_rq_illrq_err); 17852 17853 si.ssi_severity = SCSI_ERR_INFO; 17854 si.ssi_pfa_flag = FALSE; 17855 17856 /* Pointless to retry if the target thinks it's an illegal request */ 17857 sd_print_sense_msg(un, bp, &si, SD_NO_RETRY_ISSUED); 17858 sd_return_failed_command(un, bp, EIO); 17859 } 17860 17861 17862 17863 17864 /* 17865 * Function: sd_sense_key_unit_attention 17866 * 17867 * Description: Recovery actions for a SCSI "Unit Attention" sense key. 17868 * 17869 * Context: May be called from interrupt context 17870 */ 17871 17872 static void 17873 sd_sense_key_unit_attention(struct sd_lun *un, 17874 uint8_t *sense_datap, 17875 struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp) 17876 { 17877 /* 17878 * For UNIT ATTENTION we allow retries for one minute. Devices 17879 * like Sonoma can return UNIT ATTENTION close to a minute 17880 * under certain conditions. 17881 */ 17882 int retry_check_flag = SD_RETRIES_UA; 17883 boolean_t kstat_updated = B_FALSE; 17884 struct sd_sense_info si; 17885 uint8_t asc = scsi_sense_asc(sense_datap); 17886 uint8_t ascq = scsi_sense_ascq(sense_datap); 17887 17888 ASSERT(un != NULL); 17889 ASSERT(mutex_owned(SD_MUTEX(un))); 17890 ASSERT(bp != NULL); 17891 ASSERT(xp != NULL); 17892 ASSERT(pktp != NULL); 17893 17894 si.ssi_severity = SCSI_ERR_INFO; 17895 si.ssi_pfa_flag = FALSE; 17896 17897 17898 switch (asc) { 17899 case 0x5D: /* FAILURE PREDICTION THRESHOLD EXCEEDED */ 17900 if (sd_report_pfa != 0) { 17901 SD_UPDATE_ERRSTATS(un, sd_rq_pfa_err); 17902 si.ssi_pfa_flag = TRUE; 17903 retry_check_flag = SD_RETRIES_STANDARD; 17904 goto do_retry; 17905 } 17906 17907 break; 17908 17909 case 0x29: /* POWER ON, RESET, OR BUS DEVICE RESET OCCURRED */ 17910 if ((un->un_resvd_status & SD_RESERVE) == SD_RESERVE) { 17911 un->un_resvd_status |= 17912 (SD_LOST_RESERVE | SD_WANT_RESERVE); 17913 } 17914 #ifdef _LP64 17915 if (un->un_blockcount + 1 > SD_GROUP1_MAX_ADDRESS) { 17916 if (taskq_dispatch(sd_tq, sd_reenable_dsense_task, 17917 un, KM_NOSLEEP) == 0) { 17918 /* 17919 * If we can't dispatch the task we'll just 17920 * live without descriptor sense. We can 17921 * try again on the next "unit attention" 17922 */ 17923 SD_ERROR(SD_LOG_ERROR, un, 17924 "sd_sense_key_unit_attention: " 17925 "Could not dispatch " 17926 "sd_reenable_dsense_task\n"); 17927 } 17928 } 17929 #endif /* _LP64 */ 17930 /* FALLTHRU */ 17931 17932 case 0x28: /* NOT READY TO READY CHANGE, MEDIUM MAY HAVE CHANGED */ 17933 if (!un->un_f_has_removable_media) { 17934 break; 17935 } 17936 17937 /* 17938 * When we get a unit attention from a removable-media device, 17939 * it may be in a state that will take a long time to recover 17940 * (e.g., from a reset). Since we are executing in interrupt 17941 * context here, we cannot wait around for the device to come 17942 * back. So hand this command off to sd_media_change_task() 17943 * for deferred processing under taskq thread context. (Note 17944 * that the command still may be failed if a problem is 17945 * encountered at a later time.) 
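 *
 * The dispatch idiom used below, in sketch form:
 *
 *	if (taskq_dispatch(sd_tq, sd_media_change_task, pktp,
 *	    KM_NOSLEEP) == 0)
 *		fail the command with EIO;
 *
 * KM_NOSLEEP is required because this runs in interrupt context
 * and must not block waiting for taskq resources.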
17946 */ 17947 if (taskq_dispatch(sd_tq, sd_media_change_task, pktp, 17948 KM_NOSLEEP) == 0) { 17949 /* 17950 * Cannot dispatch the request so fail the command. 17951 */ 17952 SD_UPDATE_ERRSTATS(un, sd_harderrs); 17953 SD_UPDATE_ERRSTATS(un, sd_rq_nodev_err); 17954 si.ssi_severity = SCSI_ERR_FATAL; 17955 sd_print_sense_msg(un, bp, &si, SD_NO_RETRY_ISSUED); 17956 sd_return_failed_command(un, bp, EIO); 17957 } 17958 17959 /* 17960 * If we failed to dispatch sd_media_change_task(), the kstats 17961 * were already updated above. If the dispatch succeeded, the 17962 * kstats will be updated later by the task if it encounters an 17963 * error. Either way, set the kstat_updated flag here. 17964 */ 17965 kstat_updated = B_TRUE; 17966 17967 /* 17968 * Either the command has been successfully dispatched to a 17969 * task Q for retrying, or the dispatch failed. In either case 17970 * do NOT retry again by calling sd_retry_command. This sets up 17971 * two retries of the same command and when one completes and 17972 * frees the resources the other will access freed memory, 17973 * a bad thing. 17974 */ 17975 return; 17976 17977 default: 17978 break; 17979 } 17980 17981 /* 17982 * ASC ASCQ 17983 * 2A 09 Capacity data has changed 17984 * 2A 01 Mode parameters changed 17985 * 3F 0E Reported luns data has changed 17986 * Arrays that support logical unit expansion should report 17987 * capacity changes (2Ah/09); mode parameters changed (2Ah/01) and 17988 * reported LUNs data has changed (3Fh/0Eh) are approximations. 17989 */ 17990 if (((asc == 0x2a) && (ascq == 0x09)) || 17991 ((asc == 0x2a) && (ascq == 0x01)) || 17992 ((asc == 0x3f) && (ascq == 0x0e))) { 17993 if (taskq_dispatch(sd_tq, sd_target_change_task, un, 17994 KM_NOSLEEP) == 0) { 17995 SD_ERROR(SD_LOG_ERROR, un, 17996 "sd_sense_key_unit_attention: " 17997 "Could not dispatch sd_target_change_task\n"); 17998 } 17999 } 18000 18001 /* 18002 * Update kstat if we haven't done that. 18003 */ 18004 if (!kstat_updated) { 18005 SD_UPDATE_ERRSTATS(un, sd_harderrs); 18006 SD_UPDATE_ERRSTATS(un, sd_rq_nodev_err); 18007 } 18008 18009 do_retry: 18010 sd_retry_command(un, bp, retry_check_flag, sd_print_sense_msg, &si, 18011 EIO, SD_UA_RETRY_DELAY, NULL); 18012 } 18013 18014 18015 18016 /* 18017 * Function: sd_sense_key_fail_command 18018 * 18019 * Description: Used to fail a command when we don't like the sense key that 18020 * was returned. 18021 * 18022 * Context: May be called from interrupt context 18023 */ 18024 18025 static void 18026 sd_sense_key_fail_command(struct sd_lun *un, struct buf *bp, 18027 struct sd_xbuf *xp, struct scsi_pkt *pktp) 18028 { 18029 struct sd_sense_info si; 18030 18031 ASSERT(un != NULL); 18032 ASSERT(mutex_owned(SD_MUTEX(un))); 18033 ASSERT(bp != NULL); 18034 ASSERT(xp != NULL); 18035 ASSERT(pktp != NULL); 18036 18037 si.ssi_severity = SCSI_ERR_FATAL; 18038 si.ssi_pfa_flag = FALSE; 18039 18040 sd_print_sense_msg(un, bp, &si, SD_NO_RETRY_ISSUED); 18041 sd_return_failed_command(un, bp, EIO); 18042 } 18043 18044 18045 18046 /* 18047 * Function: sd_sense_key_blank_check 18048 * 18049 * Description: Recovery actions for a SCSI "Blank Check" sense key. 18050 * Has no monetary connotation.
18051 * 18052 * Context: May be called from interrupt context 18053 */ 18054 18055 static void 18056 sd_sense_key_blank_check(struct sd_lun *un, struct buf *bp, 18057 struct sd_xbuf *xp, struct scsi_pkt *pktp) 18058 { 18059 struct sd_sense_info si; 18060 18061 ASSERT(un != NULL); 18062 ASSERT(mutex_owned(SD_MUTEX(un))); 18063 ASSERT(bp != NULL); 18064 ASSERT(xp != NULL); 18065 ASSERT(pktp != NULL); 18066 18067 /* 18068 * Blank check is not fatal for removable devices, therefore 18069 * it does not require a console message. 18070 */ 18071 si.ssi_severity = (un->un_f_has_removable_media) ? SCSI_ERR_ALL : 18072 SCSI_ERR_FATAL; 18073 si.ssi_pfa_flag = FALSE; 18074 18075 sd_print_sense_msg(un, bp, &si, SD_NO_RETRY_ISSUED); 18076 sd_return_failed_command(un, bp, EIO); 18077 } 18078 18079 18080 18081 18082 /* 18083 * Function: sd_sense_key_aborted_command 18084 * 18085 * Description: Recovery actions for a SCSI "Aborted Command" sense key. 18086 * 18087 * Context: May be called from interrupt context 18088 */ 18089 18090 static void 18091 sd_sense_key_aborted_command(struct sd_lun *un, struct buf *bp, 18092 struct sd_xbuf *xp, struct scsi_pkt *pktp) 18093 { 18094 struct sd_sense_info si; 18095 18096 ASSERT(un != NULL); 18097 ASSERT(mutex_owned(SD_MUTEX(un))); 18098 ASSERT(bp != NULL); 18099 ASSERT(xp != NULL); 18100 ASSERT(pktp != NULL); 18101 18102 si.ssi_severity = SCSI_ERR_FATAL; 18103 si.ssi_pfa_flag = FALSE; 18104 18105 SD_UPDATE_ERRSTATS(un, sd_harderrs); 18106 18107 /* 18108 * This really ought to be a fatal error, but we will retry anyway 18109 * as some drives report this as a spurious error. 18110 */ 18111 sd_retry_command(un, bp, SD_RETRIES_STANDARD, sd_print_sense_msg, 18112 &si, EIO, drv_usectohz(100000), NULL); 18113 } 18114 18115 18116 18117 /* 18118 * Function: sd_sense_key_default 18119 * 18120 * Description: Default recovery action for several SCSI sense keys (basically 18121 * attempts a retry). 18122 * 18123 * Context: May be called from interrupt context 18124 */ 18125 18126 static void 18127 sd_sense_key_default(struct sd_lun *un, 18128 uint8_t *sense_datap, 18129 struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp) 18130 { 18131 struct sd_sense_info si; 18132 uint8_t sense_key = scsi_sense_key(sense_datap); 18133 18134 ASSERT(un != NULL); 18135 ASSERT(mutex_owned(SD_MUTEX(un))); 18136 ASSERT(bp != NULL); 18137 ASSERT(xp != NULL); 18138 ASSERT(pktp != NULL); 18139 18140 SD_UPDATE_ERRSTATS(un, sd_harderrs); 18141 18142 /* 18143 * Undecoded sense key. Attempt retries and hope that will fix 18144 * the problem. Otherwise, we're dead. 18145 */ 18146 if ((pktp->pkt_flags & FLAG_SILENT) == 0) { 18147 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 18148 "Unhandled Sense Key '%s'\n", sense_keys[sense_key]); 18149 } 18150 18151 si.ssi_severity = SCSI_ERR_FATAL; 18152 si.ssi_pfa_flag = FALSE; 18153 18154 sd_retry_command(un, bp, SD_RETRIES_STANDARD, sd_print_sense_msg, 18155 &si, EIO, (clock_t)0, NULL); 18156 } 18157 18158 18159 18160 /* 18161 * Function: sd_print_retry_msg 18162 * 18163 * Description: Print a message indicating the retry action being taken. 18164 * 18165 * Arguments: un - ptr to associated softstate 18166 * bp - ptr to buf(9S) for the command 18167 * arg - not used. 
18168 * flag - SD_IMMEDIATE_RETRY_ISSUED, SD_DELAYED_RETRY_ISSUED, 18169 * or SD_NO_RETRY_ISSUED 18170 * 18171 * Context: May be called from interrupt context 18172 */ 18173 /* ARGSUSED */ 18174 static void 18175 sd_print_retry_msg(struct sd_lun *un, struct buf *bp, void *arg, int flag) 18176 { 18177 struct sd_xbuf *xp; 18178 struct scsi_pkt *pktp; 18179 char *reasonp; 18180 char *msgp; 18181 18182 ASSERT(un != NULL); 18183 ASSERT(mutex_owned(SD_MUTEX(un))); 18184 ASSERT(bp != NULL); 18185 pktp = SD_GET_PKTP(bp); 18186 ASSERT(pktp != NULL); 18187 xp = SD_GET_XBUF(bp); 18188 ASSERT(xp != NULL); 18189 18190 ASSERT(!mutex_owned(&un->un_pm_mutex)); 18191 mutex_enter(&un->un_pm_mutex); 18192 if ((un->un_state == SD_STATE_SUSPENDED) || 18193 (SD_DEVICE_IS_IN_LOW_POWER(un)) || 18194 (pktp->pkt_flags & FLAG_SILENT)) { 18195 mutex_exit(&un->un_pm_mutex); 18196 goto update_pkt_reason; 18197 } 18198 mutex_exit(&un->un_pm_mutex); 18199 18200 /* 18201 * Suppress messages if they are all the same pkt_reason; with 18202 * TQ, many (up to 256) are returned with the same pkt_reason. 18203 * If we are in panic, then suppress the retry messages. 18204 */ 18205 switch (flag) { 18206 case SD_NO_RETRY_ISSUED: 18207 msgp = "giving up"; 18208 break; 18209 case SD_IMMEDIATE_RETRY_ISSUED: 18210 case SD_DELAYED_RETRY_ISSUED: 18211 if (ddi_in_panic() || (un->un_state == SD_STATE_OFFLINE) || 18212 ((pktp->pkt_reason == un->un_last_pkt_reason) && 18213 (sd_error_level != SCSI_ERR_ALL))) { 18214 return; 18215 } 18216 msgp = "retrying command"; 18217 break; 18218 default: 18219 goto update_pkt_reason; 18220 } 18221 18222 reasonp = (((pktp->pkt_statistics & STAT_PERR) != 0) ? "parity error" : 18223 scsi_rname(pktp->pkt_reason)); 18224 18225 if (SD_FM_LOG(un) == SD_FM_LOG_NSUP) { 18226 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 18227 "SCSI transport failed: reason '%s': %s\n", reasonp, msgp); 18228 } 18229 18230 update_pkt_reason: 18231 /* 18232 * Update un->un_last_pkt_reason with the value in pktp->pkt_reason. 18233 * This is to prevent multiple console messages for the same failure 18234 * condition. Note that un->un_last_pkt_reason is NOT restored if & 18235 * when the command is retried successfully because there still may be 18236 * more commands coming back with the same value of pktp->pkt_reason. 18237 */ 18238 if ((pktp->pkt_reason != CMD_CMPLT) || (xp->xb_retry_count == 0)) { 18239 un->un_last_pkt_reason = pktp->pkt_reason; 18240 } 18241 } 18242 18243 18244 /* 18245 * Function: sd_print_cmd_incomplete_msg 18246 * 18247 * Description: Message logging fn. for a SCSA "CMD_INCOMPLETE" pkt_reason. 18248 * 18249 * Arguments: un - ptr to associated softstate 18250 * bp - ptr to buf(9S) for the command 18251 * arg - passed to sd_print_retry_msg() 18252 * code - SD_IMMEDIATE_RETRY_ISSUED, SD_DELAYED_RETRY_ISSUED, 18253 * or SD_NO_RETRY_ISSUED 18254 * 18255 * Context: May be called from interrupt context 18256 */ 18257 18258 static void 18259 sd_print_cmd_incomplete_msg(struct sd_lun *un, struct buf *bp, void *arg, 18260 int code) 18261 { 18262 dev_info_t *dip; 18263 18264 ASSERT(un != NULL); 18265 ASSERT(mutex_owned(SD_MUTEX(un))); 18266 ASSERT(bp != NULL); 18267 18268 switch (code) { 18269 case SD_NO_RETRY_ISSUED: 18270 /* Command was failed. Someone turned off this target? 
*/ 18271 if (un->un_state != SD_STATE_OFFLINE) { 18272 /* 18273 * Suppress message if we are detaching and 18274 * device has been disconnected 18275 * Note that DEVI_IS_DEVICE_REMOVED is a consolidation 18276 * private interface and not part of the DDI 18277 */ 18278 dip = un->un_sd->sd_dev; 18279 if (!(DEVI_IS_DETACHING(dip) && 18280 DEVI_IS_DEVICE_REMOVED(dip))) { 18281 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 18282 "disk not responding to selection\n"); 18283 } 18284 New_state(un, SD_STATE_OFFLINE); 18285 } 18286 break; 18287 18288 case SD_DELAYED_RETRY_ISSUED: 18289 case SD_IMMEDIATE_RETRY_ISSUED: 18290 default: 18291 /* Command was successfully queued for retry */ 18292 sd_print_retry_msg(un, bp, arg, code); 18293 break; 18294 } 18295 } 18296 18297 18298 /* 18299 * Function: sd_pkt_reason_cmd_incomplete 18300 * 18301 * Description: Recovery actions for a SCSA "CMD_INCOMPLETE" pkt_reason. 18302 * 18303 * Context: May be called from interrupt context 18304 */ 18305 18306 static void 18307 sd_pkt_reason_cmd_incomplete(struct sd_lun *un, struct buf *bp, 18308 struct sd_xbuf *xp, struct scsi_pkt *pktp) 18309 { 18310 int flag = SD_RETRIES_STANDARD | SD_RETRIES_ISOLATE; 18311 18312 ASSERT(un != NULL); 18313 ASSERT(mutex_owned(SD_MUTEX(un))); 18314 ASSERT(bp != NULL); 18315 ASSERT(xp != NULL); 18316 ASSERT(pktp != NULL); 18317 18318 /* Do not do a reset if selection did not complete */ 18319 /* Note: Should this not just check the bit? */ 18320 if (pktp->pkt_state != STATE_GOT_BUS) { 18321 SD_UPDATE_ERRSTATS(un, sd_transerrs); 18322 sd_reset_target(un, pktp); 18323 } 18324 18325 /* 18326 * If the target was not successfully selected, then set 18327 * SD_RETRIES_FAILFAST to indicate that we lost communication 18328 * with the target, and further retries and/or commands are 18329 * likely to take a long time. 18330 */ 18331 if ((pktp->pkt_state & STATE_GOT_TARGET) == 0) { 18332 flag |= SD_RETRIES_FAILFAST; 18333 } 18334 18335 SD_UPDATE_RESERVATION_STATUS(un, pktp); 18336 18337 sd_retry_command(un, bp, flag, 18338 sd_print_cmd_incomplete_msg, NULL, EIO, SD_RESTART_TIMEOUT, NULL); 18339 } 18340 18341 18342 18343 /* 18344 * Function: sd_pkt_reason_cmd_tran_err 18345 * 18346 * Description: Recovery actions for a SCSA "CMD_TRAN_ERR" pkt_reason. 18347 * 18348 * Context: May be called from interrupt context 18349 */ 18350 18351 static void 18352 sd_pkt_reason_cmd_tran_err(struct sd_lun *un, struct buf *bp, 18353 struct sd_xbuf *xp, struct scsi_pkt *pktp) 18354 { 18355 ASSERT(un != NULL); 18356 ASSERT(mutex_owned(SD_MUTEX(un))); 18357 ASSERT(bp != NULL); 18358 ASSERT(xp != NULL); 18359 ASSERT(pktp != NULL); 18360 18361 /* 18362 * Do not reset if we got a parity error, or if 18363 * selection did not complete. 18364 */ 18365 SD_UPDATE_ERRSTATS(un, sd_harderrs); 18366 /* Note: Should this not just check the bit for pkt_state? */ 18367 if (((pktp->pkt_statistics & STAT_PERR) == 0) && 18368 (pktp->pkt_state != STATE_GOT_BUS)) { 18369 SD_UPDATE_ERRSTATS(un, sd_transerrs); 18370 sd_reset_target(un, pktp); 18371 } 18372 18373 SD_UPDATE_RESERVATION_STATUS(un, pktp); 18374 18375 sd_retry_command(un, bp, (SD_RETRIES_STANDARD | SD_RETRIES_ISOLATE), 18376 sd_print_retry_msg, NULL, EIO, SD_RESTART_TIMEOUT, NULL); 18377 } 18378 18379 18380 18381 /* 18382 * Function: sd_pkt_reason_cmd_reset 18383 * 18384 * Description: Recovery actions for a SCSA "CMD_RESET" pkt_reason. 
18385 * 18386 * Context: May be called from interrupt context 18387 */ 18388 18389 static void 18390 sd_pkt_reason_cmd_reset(struct sd_lun *un, struct buf *bp, 18391 struct sd_xbuf *xp, struct scsi_pkt *pktp) 18392 { 18393 ASSERT(un != NULL); 18394 ASSERT(mutex_owned(SD_MUTEX(un))); 18395 ASSERT(bp != NULL); 18396 ASSERT(xp != NULL); 18397 ASSERT(pktp != NULL); 18398 18399 /* The target may still be running the command, so try to reset. */ 18400 SD_UPDATE_ERRSTATS(un, sd_transerrs); 18401 sd_reset_target(un, pktp); 18402 18403 SD_UPDATE_RESERVATION_STATUS(un, pktp); 18404 18405 /* 18406 * If pkt_reason is CMD_RESET chances are that this pkt got 18407 * reset because another target on this bus caused it. The target 18408 * that caused it should get CMD_TIMEOUT with pkt_statistics 18409 * of STAT_TIMEOUT/STAT_DEV_RESET. 18410 */ 18411 18412 sd_retry_command(un, bp, (SD_RETRIES_VICTIM | SD_RETRIES_ISOLATE), 18413 sd_print_retry_msg, NULL, EIO, SD_RESTART_TIMEOUT, NULL); 18414 } 18415 18416 18417 18418 18419 /* 18420 * Function: sd_pkt_reason_cmd_aborted 18421 * 18422 * Description: Recovery actions for a SCSA "CMD_ABORTED" pkt_reason. 18423 * 18424 * Context: May be called from interrupt context 18425 */ 18426 18427 static void 18428 sd_pkt_reason_cmd_aborted(struct sd_lun *un, struct buf *bp, 18429 struct sd_xbuf *xp, struct scsi_pkt *pktp) 18430 { 18431 ASSERT(un != NULL); 18432 ASSERT(mutex_owned(SD_MUTEX(un))); 18433 ASSERT(bp != NULL); 18434 ASSERT(xp != NULL); 18435 ASSERT(pktp != NULL); 18436 18437 /* The target may still be running the command, so try to reset. */ 18438 SD_UPDATE_ERRSTATS(un, sd_transerrs); 18439 sd_reset_target(un, pktp); 18440 18441 SD_UPDATE_RESERVATION_STATUS(un, pktp); 18442 18443 /* 18444 * If pkt_reason is CMD_ABORTED chances are that this pkt got 18445 * aborted because another target on this bus caused it. The target 18446 * that caused it should get CMD_TIMEOUT with pkt_statistics 18447 * of STAT_TIMEOUT/STAT_DEV_RESET. 18448 */ 18449 18450 sd_retry_command(un, bp, (SD_RETRIES_VICTIM | SD_RETRIES_ISOLATE), 18451 sd_print_retry_msg, NULL, EIO, SD_RESTART_TIMEOUT, NULL); 18452 } 18453 18454 18455 18456 /* 18457 * Function: sd_pkt_reason_cmd_timeout 18458 * 18459 * Description: Recovery actions for a SCSA "CMD_TIMEOUT" pkt_reason. 18460 * 18461 * Context: May be called from interrupt context 18462 */ 18463 18464 static void 18465 sd_pkt_reason_cmd_timeout(struct sd_lun *un, struct buf *bp, 18466 struct sd_xbuf *xp, struct scsi_pkt *pktp) 18467 { 18468 ASSERT(un != NULL); 18469 ASSERT(mutex_owned(SD_MUTEX(un))); 18470 ASSERT(bp != NULL); 18471 ASSERT(xp != NULL); 18472 ASSERT(pktp != NULL); 18473 18474 18475 SD_UPDATE_ERRSTATS(un, sd_transerrs); 18476 sd_reset_target(un, pktp); 18477 18478 SD_UPDATE_RESERVATION_STATUS(un, pktp); 18479 18480 /* 18481 * A command timeout indicates that we could not establish 18482 * communication with the target, so set SD_RETRIES_FAILFAST 18483 * as further retries/commands are likely to take a long time. 18484 */ 18485 sd_retry_command(un, bp, 18486 (SD_RETRIES_STANDARD | SD_RETRIES_ISOLATE | SD_RETRIES_FAILFAST), 18487 sd_print_retry_msg, NULL, EIO, SD_RESTART_TIMEOUT, NULL); 18488 } 18489 18490 18491 18492 /* 18493 * Function: sd_pkt_reason_cmd_unx_bus_free 18494 * 18495 * Description: Recovery actions for a SCSA "CMD_UNX_BUS_FREE" pkt_reason. 
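 *		Note: when the transport recorded a parity error
 *		(STAT_PERR in pkt_statistics) the retry below is issued
 *		without a message callback, so it proceeds silently;
 *		otherwise sd_print_retry_msg() logs the retry.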
18496 * 18497 * Context: May be called from interrupt context 18498 */ 18499 18500 static void 18501 sd_pkt_reason_cmd_unx_bus_free(struct sd_lun *un, struct buf *bp, 18502 struct sd_xbuf *xp, struct scsi_pkt *pktp) 18503 { 18504 void (*funcp)(struct sd_lun *un, struct buf *bp, void *arg, int code); 18505 18506 ASSERT(un != NULL); 18507 ASSERT(mutex_owned(SD_MUTEX(un))); 18508 ASSERT(bp != NULL); 18509 ASSERT(xp != NULL); 18510 ASSERT(pktp != NULL); 18511 18512 SD_UPDATE_ERRSTATS(un, sd_harderrs); 18513 SD_UPDATE_RESERVATION_STATUS(un, pktp); 18514 18515 funcp = ((pktp->pkt_statistics & STAT_PERR) == 0) ? 18516 sd_print_retry_msg : NULL; 18517 18518 sd_retry_command(un, bp, (SD_RETRIES_STANDARD | SD_RETRIES_ISOLATE), 18519 funcp, NULL, EIO, SD_RESTART_TIMEOUT, NULL); 18520 } 18521 18522 18523 /* 18524 * Function: sd_pkt_reason_cmd_tag_reject 18525 * 18526 * Description: Recovery actions for a SCSA "CMD_TAG_REJECT" pkt_reason. 18527 * 18528 * Context: May be called from interrupt context 18529 */ 18530 18531 static void 18532 sd_pkt_reason_cmd_tag_reject(struct sd_lun *un, struct buf *bp, 18533 struct sd_xbuf *xp, struct scsi_pkt *pktp) 18534 { 18535 ASSERT(un != NULL); 18536 ASSERT(mutex_owned(SD_MUTEX(un))); 18537 ASSERT(bp != NULL); 18538 ASSERT(xp != NULL); 18539 ASSERT(pktp != NULL); 18540 18541 SD_UPDATE_ERRSTATS(un, sd_harderrs); 18542 pktp->pkt_flags = 0; 18543 un->un_tagflags = 0; 18544 if (un->un_f_opt_queueing == TRUE) { 18545 un->un_throttle = min(un->un_throttle, 3); 18546 } else { 18547 un->un_throttle = 1; 18548 } 18549 mutex_exit(SD_MUTEX(un)); 18550 (void) scsi_ifsetcap(SD_ADDRESS(un), "tagged-qing", 0, 1); 18551 mutex_enter(SD_MUTEX(un)); 18552 18553 SD_UPDATE_RESERVATION_STATUS(un, pktp); 18554 18555 /* Legacy behavior not to check retry counts here. */ 18556 sd_retry_command(un, bp, (SD_RETRIES_NOCHECK | SD_RETRIES_ISOLATE), 18557 sd_print_retry_msg, NULL, EIO, SD_RESTART_TIMEOUT, NULL); 18558 } 18559 18560 18561 /* 18562 * Function: sd_pkt_reason_default 18563 * 18564 * Description: Default recovery actions for SCSA pkt_reason values that 18565 * do not have more explicit recovery actions. 18566 * 18567 * Context: May be called from interrupt context 18568 */ 18569 18570 static void 18571 sd_pkt_reason_default(struct sd_lun *un, struct buf *bp, 18572 struct sd_xbuf *xp, struct scsi_pkt *pktp) 18573 { 18574 ASSERT(un != NULL); 18575 ASSERT(mutex_owned(SD_MUTEX(un))); 18576 ASSERT(bp != NULL); 18577 ASSERT(xp != NULL); 18578 ASSERT(pktp != NULL); 18579 18580 SD_UPDATE_ERRSTATS(un, sd_transerrs); 18581 sd_reset_target(un, pktp); 18582 18583 SD_UPDATE_RESERVATION_STATUS(un, pktp); 18584 18585 sd_retry_command(un, bp, (SD_RETRIES_STANDARD | SD_RETRIES_ISOLATE), 18586 sd_print_retry_msg, NULL, EIO, SD_RESTART_TIMEOUT, NULL); 18587 } 18588 18589 18590 18591 /* 18592 * Function: sd_pkt_status_check_condition 18593 * 18594 * Description: Recovery actions for a "STATUS_CHECK" SCSI command status. 
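 *		A check condition means the target has sense data
 *		available for the failed command. With auto request
 *		sense (ARQ) the HBA has already fetched that data, so
 *		the command can simply be retried; without ARQ a
 *		REQUEST SENSE must first be issued to retrieve it (see
 *		sd_send_request_sense_command()).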
 *
 * Context: May be called from interrupt context
 */

static void
sd_pkt_status_check_condition(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp)
{
	ASSERT(un != NULL);
	ASSERT(mutex_owned(SD_MUTEX(un)));
	ASSERT(bp != NULL);
	ASSERT(xp != NULL);
	ASSERT(pktp != NULL);

	SD_TRACE(SD_LOG_IO, un, "sd_pkt_status_check_condition: "
	    "entry: buf:0x%p xp:0x%p\n", bp, xp);

	/*
	 * If ARQ is NOT enabled, then issue a REQUEST SENSE command (the
	 * command will be retried after the request sense). Otherwise, retry
	 * the command. Note: we are issuing the request sense even though the
	 * retry limit may have been reached for the failed command.
	 */
	if (un->un_f_arq_enabled == FALSE) {
		SD_INFO(SD_LOG_IO_CORE, un, "sd_pkt_status_check_condition: "
		    "no ARQ, sending request sense command\n");
		sd_send_request_sense_command(un, bp, pktp);
	} else {
		SD_INFO(SD_LOG_IO_CORE, un, "sd_pkt_status_check_condition: "
		    "ARQ, retrying request sense command\n");
#if defined(__i386) || defined(__amd64)
		/*
		 * The delay value below needs to be adjusted whenever
		 * SD_RETRY_DELAY changes in sddef.h.
		 */
		sd_retry_command(un, bp, SD_RETRIES_STANDARD, NULL, NULL, EIO,
		    un->un_f_is_fibre ? drv_usectohz(100000) : (clock_t)0,
		    NULL);
#else
		sd_retry_command(un, bp, SD_RETRIES_STANDARD, NULL, NULL,
		    EIO, SD_RETRY_DELAY, NULL);
#endif
	}

	SD_TRACE(SD_LOG_IO_CORE, un, "sd_pkt_status_check_condition: exit\n");
}


/*
 * Function: sd_pkt_status_busy
 *
 * Description: Recovery actions for a "STATUS_BUSY" SCSI command status.
 *
 * Context: May be called from interrupt context
 */

static void
sd_pkt_status_busy(struct sd_lun *un, struct buf *bp, struct sd_xbuf *xp,
    struct scsi_pkt *pktp)
{
	ASSERT(un != NULL);
	ASSERT(mutex_owned(SD_MUTEX(un)));
	ASSERT(bp != NULL);
	ASSERT(xp != NULL);
	ASSERT(pktp != NULL);

	SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
	    "sd_pkt_status_busy: entry\n");

	/* If retries are exhausted, just fail the command. */
	if (xp->xb_retry_count >= un->un_busy_retry_count) {
		scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
		    "device busy too long\n");
		sd_return_failed_command(un, bp, EIO);
		SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
		    "sd_pkt_status_busy: exit\n");
		return;
	}
	xp->xb_retry_count++;

	/*
	 * Try to reset the target. However, we do not want to perform
	 * more than one reset if the device continues to fail. The reset
	 * will be performed when the retry count reaches the reset
	 * threshold. This threshold should be set such that at least
	 * one retry is issued before the reset is performed.
	 */
	if (xp->xb_retry_count ==
	    ((un->un_reset_retry_count < 2) ? 2 : un->un_reset_retry_count)) {
		int rval = 0;
		mutex_exit(SD_MUTEX(un));
		if (un->un_f_allow_bus_device_reset == TRUE) {
			/*
			 * First try to reset the LUN; if we cannot then
			 * try to reset the target.
18690 */ 18691 if (un->un_f_lun_reset_enabled == TRUE) { 18692 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 18693 "sd_pkt_status_busy: RESET_LUN\n"); 18694 rval = scsi_reset(SD_ADDRESS(un), RESET_LUN); 18695 } 18696 if (rval == 0) { 18697 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 18698 "sd_pkt_status_busy: RESET_TARGET\n"); 18699 rval = scsi_reset(SD_ADDRESS(un), RESET_TARGET); 18700 } 18701 } 18702 if (rval == 0) { 18703 /* 18704 * If the RESET_LUN and/or RESET_TARGET failed, 18705 * try RESET_ALL 18706 */ 18707 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 18708 "sd_pkt_status_busy: RESET_ALL\n"); 18709 rval = scsi_reset(SD_ADDRESS(un), RESET_ALL); 18710 } 18711 mutex_enter(SD_MUTEX(un)); 18712 if (rval == 0) { 18713 /* 18714 * The RESET_LUN, RESET_TARGET, and/or RESET_ALL failed. 18715 * At this point we give up & fail the command. 18716 */ 18717 sd_return_failed_command(un, bp, EIO); 18718 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 18719 "sd_pkt_status_busy: exit (failed cmd)\n"); 18720 return; 18721 } 18722 } 18723 18724 /* 18725 * Retry the command. Be sure to specify SD_RETRIES_NOCHECK as 18726 * we have already checked the retry counts above. 18727 */ 18728 sd_retry_command(un, bp, SD_RETRIES_NOCHECK, NULL, NULL, 18729 EIO, un->un_busy_timeout, NULL); 18730 18731 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 18732 "sd_pkt_status_busy: exit\n"); 18733 } 18734 18735 18736 /* 18737 * Function: sd_pkt_status_reservation_conflict 18738 * 18739 * Description: Recovery actions for a "STATUS_RESERVATION_CONFLICT" SCSI 18740 * command status. 18741 * 18742 * Context: May be called from interrupt context 18743 */ 18744 18745 static void 18746 sd_pkt_status_reservation_conflict(struct sd_lun *un, struct buf *bp, 18747 struct sd_xbuf *xp, struct scsi_pkt *pktp) 18748 { 18749 ASSERT(un != NULL); 18750 ASSERT(mutex_owned(SD_MUTEX(un))); 18751 ASSERT(bp != NULL); 18752 ASSERT(xp != NULL); 18753 ASSERT(pktp != NULL); 18754 18755 /* 18756 * If the command was PERSISTENT_RESERVATION_[IN|OUT] then reservation 18757 * conflict could be due to various reasons like incorrect keys, not 18758 * registered or not reserved etc. So, we return EACCES to the caller. 18759 */ 18760 if (un->un_reservation_type == SD_SCSI3_RESERVATION) { 18761 int cmd = SD_GET_PKT_OPCODE(pktp); 18762 if ((cmd == SCMD_PERSISTENT_RESERVE_IN) || 18763 (cmd == SCMD_PERSISTENT_RESERVE_OUT)) { 18764 sd_return_failed_command(un, bp, EACCES); 18765 return; 18766 } 18767 } 18768 18769 un->un_resvd_status |= SD_RESERVATION_CONFLICT; 18770 18771 if ((un->un_resvd_status & SD_FAILFAST) != 0) { 18772 if (sd_failfast_enable != 0) { 18773 /* By definition, we must panic here.... */ 18774 sd_panic_for_res_conflict(un); 18775 /*NOTREACHED*/ 18776 } 18777 SD_ERROR(SD_LOG_IO, un, 18778 "sd_handle_resv_conflict: Disk Reserved\n"); 18779 sd_return_failed_command(un, bp, EACCES); 18780 return; 18781 } 18782 18783 /* 18784 * 1147670: retry only if sd_retry_on_reservation_conflict 18785 * property is set (default is 1). Retries will not succeed 18786 * on a disk reserved by another initiator. HA systems 18787 * may reset this via sd.conf to avoid these retries. 18788 * 18789 * Note: The legacy return code for this failure is EIO, however EACCES 18790 * seems more appropriate for a reservation conflict. 
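	 *
	 * For example (illustrative only; see sd(7D)/driver.conf(4) for
	 * the authoritative syntax), an HA configuration could disable
	 * these retries with a line such as
	 *
	 *	sd_retry_on_reservation_conflict=0;
	 *
	 * in /kernel/drv/sd.conf.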
18791 */ 18792 if (sd_retry_on_reservation_conflict == 0) { 18793 SD_ERROR(SD_LOG_IO, un, 18794 "sd_handle_resv_conflict: Device Reserved\n"); 18795 sd_return_failed_command(un, bp, EIO); 18796 return; 18797 } 18798 18799 /* 18800 * Retry the command if we can. 18801 * 18802 * Note: The legacy return code for this failure is EIO, however EACCES 18803 * seems more appropriate for a reservation conflict. 18804 */ 18805 sd_retry_command(un, bp, SD_RETRIES_STANDARD, NULL, NULL, EIO, 18806 (clock_t)2, NULL); 18807 } 18808 18809 18810 18811 /* 18812 * Function: sd_pkt_status_qfull 18813 * 18814 * Description: Handle a QUEUE FULL condition from the target. This can 18815 * occur if the HBA does not handle the queue full condition. 18816 * (Basically this means third-party HBAs as Sun HBAs will 18817 * handle the queue full condition.) Note that if there are 18818 * some commands already in the transport, then the queue full 18819 * has occurred because the queue for this nexus is actually 18820 * full. If there are no commands in the transport, then the 18821 * queue full is resulting from some other initiator or lun 18822 * consuming all the resources at the target. 18823 * 18824 * Context: May be called from interrupt context 18825 */ 18826 18827 static void 18828 sd_pkt_status_qfull(struct sd_lun *un, struct buf *bp, 18829 struct sd_xbuf *xp, struct scsi_pkt *pktp) 18830 { 18831 ASSERT(un != NULL); 18832 ASSERT(mutex_owned(SD_MUTEX(un))); 18833 ASSERT(bp != NULL); 18834 ASSERT(xp != NULL); 18835 ASSERT(pktp != NULL); 18836 18837 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 18838 "sd_pkt_status_qfull: entry\n"); 18839 18840 /* 18841 * Just lower the QFULL throttle and retry the command. Note that 18842 * we do not limit the number of retries here. 18843 */ 18844 sd_reduce_throttle(un, SD_THROTTLE_QFULL); 18845 sd_retry_command(un, bp, SD_RETRIES_NOCHECK, NULL, NULL, 0, 18846 SD_RESTART_TIMEOUT, NULL); 18847 18848 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 18849 "sd_pkt_status_qfull: exit\n"); 18850 } 18851 18852 18853 /* 18854 * Function: sd_reset_target 18855 * 18856 * Description: Issue a scsi_reset(9F), with either RESET_LUN, 18857 * RESET_TARGET, or RESET_ALL. 18858 * 18859 * Context: May be called under interrupt context. 18860 */ 18861 18862 static void 18863 sd_reset_target(struct sd_lun *un, struct scsi_pkt *pktp) 18864 { 18865 int rval = 0; 18866 18867 ASSERT(un != NULL); 18868 ASSERT(mutex_owned(SD_MUTEX(un))); 18869 ASSERT(pktp != NULL); 18870 18871 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sd_reset_target: entry\n"); 18872 18873 /* 18874 * No need to reset if the transport layer has already done so. 
18875 */ 18876 if ((pktp->pkt_statistics & 18877 (STAT_BUS_RESET | STAT_DEV_RESET | STAT_ABORTED)) != 0) { 18878 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 18879 "sd_reset_target: no reset\n"); 18880 return; 18881 } 18882 18883 mutex_exit(SD_MUTEX(un)); 18884 18885 if (un->un_f_allow_bus_device_reset == TRUE) { 18886 if (un->un_f_lun_reset_enabled == TRUE) { 18887 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 18888 "sd_reset_target: RESET_LUN\n"); 18889 rval = scsi_reset(SD_ADDRESS(un), RESET_LUN); 18890 } 18891 if (rval == 0) { 18892 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 18893 "sd_reset_target: RESET_TARGET\n"); 18894 rval = scsi_reset(SD_ADDRESS(un), RESET_TARGET); 18895 } 18896 } 18897 18898 if (rval == 0) { 18899 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 18900 "sd_reset_target: RESET_ALL\n"); 18901 (void) scsi_reset(SD_ADDRESS(un), RESET_ALL); 18902 } 18903 18904 mutex_enter(SD_MUTEX(un)); 18905 18906 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sd_reset_target: exit\n"); 18907 } 18908 18909 /* 18910 * Function: sd_target_change_task 18911 * 18912 * Description: Handle dynamic target change 18913 * 18914 * Context: Executes in a taskq() thread context 18915 */ 18916 static void 18917 sd_target_change_task(void *arg) 18918 { 18919 struct sd_lun *un = arg; 18920 uint64_t capacity; 18921 diskaddr_t label_cap; 18922 uint_t lbasize; 18923 sd_ssc_t *ssc; 18924 18925 ASSERT(un != NULL); 18926 ASSERT(!mutex_owned(SD_MUTEX(un))); 18927 18928 if ((un->un_f_blockcount_is_valid == FALSE) || 18929 (un->un_f_tgt_blocksize_is_valid == FALSE)) { 18930 return; 18931 } 18932 18933 ssc = sd_ssc_init(un); 18934 18935 if (sd_send_scsi_READ_CAPACITY(ssc, &capacity, 18936 &lbasize, SD_PATH_DIRECT) != 0) { 18937 SD_ERROR(SD_LOG_ERROR, un, 18938 "sd_target_change_task: fail to read capacity\n"); 18939 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 18940 goto task_exit; 18941 } 18942 18943 mutex_enter(SD_MUTEX(un)); 18944 if (capacity <= un->un_blockcount) { 18945 mutex_exit(SD_MUTEX(un)); 18946 goto task_exit; 18947 } 18948 18949 sd_update_block_info(un, lbasize, capacity); 18950 mutex_exit(SD_MUTEX(un)); 18951 18952 /* 18953 * If lun is EFI labeled and lun capacity is greater than the 18954 * capacity contained in the label, log a sys event. 
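	 * The sysevent (class EC_DEV_STATUS, subclass ESC_DEV_DLE; see
	 * sd_log_lun_expansion_event() below) gives userland consumers
	 * a hook for noticing that the lun has grown past its label.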
18955 */ 18956 if (cmlb_efi_label_capacity(un->un_cmlbhandle, &label_cap, 18957 (void*)SD_PATH_DIRECT) == 0) { 18958 mutex_enter(SD_MUTEX(un)); 18959 if (un->un_f_blockcount_is_valid && 18960 un->un_blockcount > label_cap) { 18961 mutex_exit(SD_MUTEX(un)); 18962 sd_log_lun_expansion_event(un, KM_SLEEP); 18963 } else { 18964 mutex_exit(SD_MUTEX(un)); 18965 } 18966 } 18967 18968 task_exit: 18969 sd_ssc_fini(ssc); 18970 } 18971 18972 /* 18973 * Function: sd_log_lun_expansion_event 18974 * 18975 * Description: Log lun expansion sys event 18976 * 18977 * Context: Never called from interrupt context 18978 */ 18979 static void 18980 sd_log_lun_expansion_event(struct sd_lun *un, int km_flag) 18981 { 18982 int err; 18983 char *path; 18984 nvlist_t *dle_attr_list; 18985 18986 /* Allocate and build sysevent attribute list */ 18987 err = nvlist_alloc(&dle_attr_list, NV_UNIQUE_NAME_TYPE, km_flag); 18988 if (err != 0) { 18989 SD_ERROR(SD_LOG_ERROR, un, 18990 "sd_log_lun_expansion_event: fail to allocate space\n"); 18991 return; 18992 } 18993 18994 path = kmem_alloc(MAXPATHLEN, km_flag); 18995 if (path == NULL) { 18996 nvlist_free(dle_attr_list); 18997 SD_ERROR(SD_LOG_ERROR, un, 18998 "sd_log_lun_expansion_event: fail to allocate space\n"); 18999 return; 19000 } 19001 /* 19002 * Add path attribute to identify the lun. 19003 * We are using minor node 'a' as the sysevent attribute. 19004 */ 19005 (void) snprintf(path, MAXPATHLEN, "/devices"); 19006 (void) ddi_pathname(SD_DEVINFO(un), path + strlen(path)); 19007 (void) snprintf(path + strlen(path), MAXPATHLEN - strlen(path), 19008 ":a"); 19009 19010 err = nvlist_add_string(dle_attr_list, DEV_PHYS_PATH, path); 19011 if (err != 0) { 19012 nvlist_free(dle_attr_list); 19013 kmem_free(path, MAXPATHLEN); 19014 SD_ERROR(SD_LOG_ERROR, un, 19015 "sd_log_lun_expansion_event: fail to add attribute\n"); 19016 return; 19017 } 19018 19019 /* Log dynamic lun expansion sysevent */ 19020 err = ddi_log_sysevent(SD_DEVINFO(un), SUNW_VENDOR, EC_DEV_STATUS, 19021 ESC_DEV_DLE, dle_attr_list, NULL, km_flag); 19022 if (err != DDI_SUCCESS) { 19023 SD_ERROR(SD_LOG_ERROR, un, 19024 "sd_log_lun_expansion_event: fail to log sysevent\n"); 19025 } 19026 19027 nvlist_free(dle_attr_list); 19028 kmem_free(path, MAXPATHLEN); 19029 } 19030 19031 /* 19032 * Function: sd_media_change_task 19033 * 19034 * Description: Recovery action for CDROM to become available. 19035 * 19036 * Context: Executes in a taskq() thread context 19037 */ 19038 19039 static void 19040 sd_media_change_task(void *arg) 19041 { 19042 struct scsi_pkt *pktp = arg; 19043 struct sd_lun *un; 19044 struct buf *bp; 19045 struct sd_xbuf *xp; 19046 int err = 0; 19047 int retry_count = 0; 19048 int retry_limit = SD_UNIT_ATTENTION_RETRY/10; 19049 struct sd_sense_info si; 19050 19051 ASSERT(pktp != NULL); 19052 bp = (struct buf *)pktp->pkt_private; 19053 ASSERT(bp != NULL); 19054 xp = SD_GET_XBUF(bp); 19055 ASSERT(xp != NULL); 19056 un = SD_GET_UN(bp); 19057 ASSERT(un != NULL); 19058 ASSERT(!mutex_owned(SD_MUTEX(un))); 19059 ASSERT(un->un_f_monitor_media_state); 19060 19061 si.ssi_severity = SCSI_ERR_INFO; 19062 si.ssi_pfa_flag = FALSE; 19063 19064 /* 19065 * When a reset is issued on a CDROM, it takes a long time to 19066 * recover. First few attempts to read capacity and other things 19067 * related to handling unit attention fail (with a ASC 0x4 and 19068 * ASCQ 0x1). In that case we want to do enough retries and we want 19069 * to limit the retries in other cases of genuine failures like 19070 * no media in drive. 
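	 * As a worked example (illustrative numbers; see sddef.h for the
	 * real SD_UNIT_ATTENTION_RETRY value): were that constant 40, a
	 * genuine failure would get retry_limit = 40/10 = 4 attempts,
	 * about 2 seconds at one attempt per 0.5 sec, while an EAGAIN
	 * from sd_handle_mchange() raises the budget to the full 40
	 * attempts, about 20 seconds, to ride out the slow post-reset
	 * recovery.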
19071 */ 19072 while (retry_count++ < retry_limit) { 19073 if ((err = sd_handle_mchange(un)) == 0) { 19074 break; 19075 } 19076 if (err == EAGAIN) { 19077 retry_limit = SD_UNIT_ATTENTION_RETRY; 19078 } 19079 /* Sleep for 0.5 sec. & try again */ 19080 delay(drv_usectohz(500000)); 19081 } 19082 19083 /* 19084 * Dispatch (retry or fail) the original command here, 19085 * along with appropriate console messages.... 19086 * 19087 * Must grab the mutex before calling sd_retry_command, 19088 * sd_print_sense_msg and sd_return_failed_command. 19089 */ 19090 mutex_enter(SD_MUTEX(un)); 19091 if (err != SD_CMD_SUCCESS) { 19092 SD_UPDATE_ERRSTATS(un, sd_harderrs); 19093 SD_UPDATE_ERRSTATS(un, sd_rq_nodev_err); 19094 si.ssi_severity = SCSI_ERR_FATAL; 19095 sd_print_sense_msg(un, bp, &si, SD_NO_RETRY_ISSUED); 19096 sd_return_failed_command(un, bp, EIO); 19097 } else { 19098 sd_retry_command(un, bp, SD_RETRIES_NOCHECK, sd_print_sense_msg, 19099 &si, EIO, (clock_t)0, NULL); 19100 } 19101 mutex_exit(SD_MUTEX(un)); 19102 } 19103 19104 19105 19106 /* 19107 * Function: sd_handle_mchange 19108 * 19109 * Description: Perform geometry validation & other recovery when CDROM 19110 * has been removed from drive. 19111 * 19112 * Return Code: 0 for success 19113 * errno-type return code of either sd_send_scsi_DOORLOCK() or 19114 * sd_send_scsi_READ_CAPACITY() 19115 * 19116 * Context: Executes in a taskq() thread context 19117 */ 19118 19119 static int 19120 sd_handle_mchange(struct sd_lun *un) 19121 { 19122 uint64_t capacity; 19123 uint32_t lbasize; 19124 int rval; 19125 sd_ssc_t *ssc; 19126 19127 ASSERT(!mutex_owned(SD_MUTEX(un))); 19128 ASSERT(un->un_f_monitor_media_state); 19129 19130 ssc = sd_ssc_init(un); 19131 rval = sd_send_scsi_READ_CAPACITY(ssc, &capacity, &lbasize, 19132 SD_PATH_DIRECT_PRIORITY); 19133 19134 if (rval != 0) 19135 goto failed; 19136 19137 mutex_enter(SD_MUTEX(un)); 19138 sd_update_block_info(un, lbasize, capacity); 19139 19140 if (un->un_errstats != NULL) { 19141 struct sd_errstats *stp = 19142 (struct sd_errstats *)un->un_errstats->ks_data; 19143 stp->sd_capacity.value.ui64 = (uint64_t) 19144 ((uint64_t)un->un_blockcount * 19145 (uint64_t)un->un_tgt_blocksize); 19146 } 19147 19148 /* 19149 * Check if the media in the device is writable or not 19150 */ 19151 if (ISCD(un)) { 19152 sd_check_for_writable_cd(ssc, SD_PATH_DIRECT_PRIORITY); 19153 } 19154 19155 /* 19156 * Note: Maybe let the strategy/partitioning chain worry about getting 19157 * valid geometry. 19158 */ 19159 mutex_exit(SD_MUTEX(un)); 19160 cmlb_invalidate(un->un_cmlbhandle, (void *)SD_PATH_DIRECT_PRIORITY); 19161 19162 19163 if (cmlb_validate(un->un_cmlbhandle, 0, 19164 (void *)SD_PATH_DIRECT_PRIORITY) != 0) { 19165 sd_ssc_fini(ssc); 19166 return (EIO); 19167 } else { 19168 if (un->un_f_pkstats_enabled) { 19169 sd_set_pstats(un); 19170 SD_TRACE(SD_LOG_IO_PARTITION, un, 19171 "sd_handle_mchange: un:0x%p pstats created and " 19172 "set\n", un); 19173 } 19174 } 19175 19176 /* 19177 * Try to lock the door 19178 */ 19179 rval = sd_send_scsi_DOORLOCK(ssc, SD_REMOVAL_PREVENT, 19180 SD_PATH_DIRECT_PRIORITY); 19181 failed: 19182 if (rval != 0) 19183 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 19184 sd_ssc_fini(ssc); 19185 return (rval); 19186 } 19187 19188 19189 /* 19190 * Function: sd_send_scsi_DOORLOCK 19191 * 19192 * Description: Issue the scsi DOOR LOCK command 19193 * 19194 * Arguments: ssc - ssc contains pointer to driver soft state (unit) 19195 * structure for this target. 
19196 * flag - SD_REMOVAL_ALLOW 19197 * SD_REMOVAL_PREVENT 19198 * path_flag - SD_PATH_DIRECT to use the USCSI "direct" chain and 19199 * the normal command waitq, or SD_PATH_DIRECT_PRIORITY 19200 * to use the USCSI "direct" chain and bypass the normal 19201 * command waitq. SD_PATH_DIRECT_PRIORITY is used when this 19202 * command is issued as part of an error recovery action. 19203 * 19204 * Return Code: 0 - Success 19205 * errno return code from sd_ssc_send() 19206 * 19207 * Context: Can sleep. 19208 */ 19209 19210 static int 19211 sd_send_scsi_DOORLOCK(sd_ssc_t *ssc, int flag, int path_flag) 19212 { 19213 struct scsi_extended_sense sense_buf; 19214 union scsi_cdb cdb; 19215 struct uscsi_cmd ucmd_buf; 19216 int status; 19217 struct sd_lun *un; 19218 19219 ASSERT(ssc != NULL); 19220 un = ssc->ssc_un; 19221 ASSERT(un != NULL); 19222 ASSERT(!mutex_owned(SD_MUTEX(un))); 19223 19224 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_DOORLOCK: entry: un:0x%p\n", un); 19225 19226 /* already determined doorlock is not supported, fake success */ 19227 if (un->un_f_doorlock_supported == FALSE) { 19228 return (0); 19229 } 19230 19231 /* 19232 * If we are ejecting and see an SD_REMOVAL_PREVENT 19233 * ignore the command so we can complete the eject 19234 * operation. 19235 */ 19236 if (flag == SD_REMOVAL_PREVENT) { 19237 mutex_enter(SD_MUTEX(un)); 19238 if (un->un_f_ejecting == TRUE) { 19239 mutex_exit(SD_MUTEX(un)); 19240 return (EAGAIN); 19241 } 19242 mutex_exit(SD_MUTEX(un)); 19243 } 19244 19245 bzero(&cdb, sizeof (cdb)); 19246 bzero(&ucmd_buf, sizeof (ucmd_buf)); 19247 19248 cdb.scc_cmd = SCMD_DOORLOCK; 19249 cdb.cdb_opaque[4] = (uchar_t)flag; 19250 19251 ucmd_buf.uscsi_cdb = (char *)&cdb; 19252 ucmd_buf.uscsi_cdblen = CDB_GROUP0; 19253 ucmd_buf.uscsi_bufaddr = NULL; 19254 ucmd_buf.uscsi_buflen = 0; 19255 ucmd_buf.uscsi_rqbuf = (caddr_t)&sense_buf; 19256 ucmd_buf.uscsi_rqlen = sizeof (sense_buf); 19257 ucmd_buf.uscsi_flags = USCSI_RQENABLE | USCSI_SILENT; 19258 ucmd_buf.uscsi_timeout = 15; 19259 19260 SD_TRACE(SD_LOG_IO, un, 19261 "sd_send_scsi_DOORLOCK: returning sd_ssc_send\n"); 19262 19263 status = sd_ssc_send(ssc, &ucmd_buf, FKIOCTL, 19264 UIO_SYSSPACE, path_flag); 19265 19266 if (status == 0) 19267 sd_ssc_assessment(ssc, SD_FMT_STANDARD); 19268 19269 if ((status == EIO) && (ucmd_buf.uscsi_status == STATUS_CHECK) && 19270 (ucmd_buf.uscsi_rqstatus == STATUS_GOOD) && 19271 (scsi_sense_key((uint8_t *)&sense_buf) == KEY_ILLEGAL_REQUEST)) { 19272 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 19273 19274 /* fake success and skip subsequent doorlock commands */ 19275 un->un_f_doorlock_supported = FALSE; 19276 return (0); 19277 } 19278 19279 return (status); 19280 } 19281 19282 /* 19283 * Function: sd_send_scsi_READ_CAPACITY 19284 * 19285 * Description: This routine uses the scsi READ CAPACITY command to determine 19286 * the device capacity in number of blocks and the device native 19287 * block size. If this function returns a failure, then the 19288 * values in *capp and *lbap are undefined. If the capacity 19289 * returned is 0xffffffff then the lun is too large for a 19290 * normal READ CAPACITY command and the results of a 19291 * READ CAPACITY 16 will be used instead. 19292 * 19293 * Arguments: ssc - ssc contains ptr to soft state struct for the target 19294 * capp - ptr to unsigned 64-bit variable to receive the 19295 * capacity value from the command. 
 *		lbap - ptr to unsigned 32-bit variable to receive the
 *			block size value from the command
 *		path_flag - SD_PATH_DIRECT to use the USCSI "direct" chain and
 *			the normal command waitq, or SD_PATH_DIRECT_PRIORITY
 *			to use the USCSI "direct" chain and bypass the normal
 *			command waitq. SD_PATH_DIRECT_PRIORITY is used when this
 *			command is issued as part of an error recovery action.
 *
 * Return Code: 0   - Success
 *		EIO - IO error
 *		EACCES - Reservation conflict detected
 *		EAGAIN - Device is becoming ready
 *		errno return code from sd_ssc_send()
 *
 * Context: Can sleep.  Blocks until command completes.
 */

#define	SD_CAPACITY_SIZE	sizeof (struct scsi_capacity)

static int
sd_send_scsi_READ_CAPACITY(sd_ssc_t *ssc, uint64_t *capp, uint32_t *lbap,
    int path_flag)
{
	struct scsi_extended_sense	sense_buf;
	struct uscsi_cmd	ucmd_buf;
	union scsi_cdb		cdb;
	uint32_t		*capacity_buf;
	uint64_t		capacity;
	uint32_t		lbasize;
	int			status;
	struct sd_lun		*un;

	ASSERT(ssc != NULL);

	un = ssc->ssc_un;
	ASSERT(un != NULL);
	ASSERT(!mutex_owned(SD_MUTEX(un)));
	ASSERT(capp != NULL);
	ASSERT(lbap != NULL);

	SD_TRACE(SD_LOG_IO, un,
	    "sd_send_scsi_READ_CAPACITY: entry: un:0x%p\n", un);

	/*
	 * First send a READ_CAPACITY command to the target.
	 * (This command is mandatory under SCSI-2.)
	 *
	 * Set up the CDB for the READ_CAPACITY command.  The Partial
	 * Medium Indicator bit is cleared.  The address field must be
	 * zero if the PMI bit is zero.
	 */
	bzero(&cdb, sizeof (cdb));
	bzero(&ucmd_buf, sizeof (ucmd_buf));

	capacity_buf = kmem_zalloc(SD_CAPACITY_SIZE, KM_SLEEP);

	cdb.scc_cmd = SCMD_READ_CAPACITY;

	ucmd_buf.uscsi_cdb	= (char *)&cdb;
	ucmd_buf.uscsi_cdblen	= CDB_GROUP1;
	ucmd_buf.uscsi_bufaddr	= (caddr_t)capacity_buf;
	ucmd_buf.uscsi_buflen	= SD_CAPACITY_SIZE;
	ucmd_buf.uscsi_rqbuf	= (caddr_t)&sense_buf;
	ucmd_buf.uscsi_rqlen	= sizeof (sense_buf);
	ucmd_buf.uscsi_flags	= USCSI_RQENABLE | USCSI_READ | USCSI_SILENT;
	ucmd_buf.uscsi_timeout	= 60;

	status = sd_ssc_send(ssc, &ucmd_buf, FKIOCTL,
	    UIO_SYSSPACE, path_flag);

	switch (status) {
	case 0:
		/* Return failure if we did not get valid capacity data. */
		if (ucmd_buf.uscsi_resid != 0) {
			sd_ssc_set_info(ssc, SSC_FLAGS_INVALID_DATA, -1,
			    "sd_send_scsi_READ_CAPACITY received invalid "
			    "capacity data");
			kmem_free(capacity_buf, SD_CAPACITY_SIZE);
			return (EIO);
		}
		/*
		 * Read capacity and block size from the READ CAPACITY 10 data.
		 * This data may be adjusted later due to device specific
		 * issues.
		 *
		 * According to the SCSI spec, the READ CAPACITY 10
		 * command returns the following:
		 *
		 *  bytes 0-3: Maximum logical block address available.
		 *		(MSB in byte:0 & LSB in byte:3)
		 *
		 *  bytes 4-7: Block length in bytes
		 *		(MSB in byte:4 & LSB in byte:7)
		 */
		capacity = BE_32(capacity_buf[0]);
		lbasize = BE_32(capacity_buf[1]);

		/*
		 * Done with capacity_buf
		 */
		kmem_free(capacity_buf, SD_CAPACITY_SIZE);

		/*
		 * if the reported capacity is set to all 0xf's, then
		 * this disk is too large and requires SBC-2 commands.
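		 * (With 512-byte blocks that limit works out to exactly
		 * 2 TiB: the largest READ CAPACITY 10 response encodes a
		 * last LBA of 2^32 - 1, i.e. 2^32 blocks of 512 bytes.)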
		 * Reissue the request using READ CAPACITY 16.
		 */
		if (capacity == 0xffffffff) {
			sd_ssc_assessment(ssc, SD_FMT_IGNORE);
			status = sd_send_scsi_READ_CAPACITY_16(ssc, &capacity,
			    &lbasize, path_flag);
			if (status != 0) {
				return (status);
			}
		}
		break;	/* Success! */
	case EIO:
		switch (ucmd_buf.uscsi_status) {
		case STATUS_RESERVATION_CONFLICT:
			status = EACCES;
			break;
		case STATUS_CHECK:
			/*
			 * Check condition; look for ASC/ASCQ of 0x04/0x01
			 * (LOGICAL UNIT IS IN PROCESS OF BECOMING READY)
			 */
			if ((ucmd_buf.uscsi_rqstatus == STATUS_GOOD) &&
			    (scsi_sense_asc((uint8_t *)&sense_buf) == 0x04) &&
			    (scsi_sense_ascq((uint8_t *)&sense_buf) == 0x01)) {
				kmem_free(capacity_buf, SD_CAPACITY_SIZE);
				return (EAGAIN);
			}
			break;
		default:
			break;
		}
		/* FALLTHRU */
	default:
		kmem_free(capacity_buf, SD_CAPACITY_SIZE);
		return (status);
	}

	/*
	 * Some ATAPI CD-ROM drives report inaccurate LBA size values
	 * (2352 and 0 are common) so for these devices always force the value
	 * to 2048 as required by the ATAPI specs.
	 */
	if ((un->un_f_cfg_is_atapi == TRUE) && (ISCD(un))) {
		lbasize = 2048;
	}

	/*
	 * Get the maximum LBA value from the READ CAPACITY data.
	 * Here we assume that the Partial Medium Indicator (PMI) bit
	 * was cleared when issuing the command. This means that the LBA
	 * returned from the device is the LBA of the last logical block
	 * on the logical unit.  The actual logical block count will be
	 * this value plus one.
	 *
	 * Currently the capacity is saved in terms of un->un_sys_blocksize,
	 * so scale the capacity value to reflect this.
	 */
	capacity = (capacity + 1) * (lbasize / un->un_sys_blocksize);

	/*
	 * Copy the values from the READ CAPACITY command into the space
	 * provided by the caller.
	 */
	*capp = capacity;
	*lbap = lbasize;

	SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_READ_CAPACITY: "
	    "capacity:0x%llx lbasize:0x%x\n", capacity, lbasize);

	/*
	 * Both the lbasize and capacity from the device must be nonzero,
	 * otherwise we assume that the values are not valid and return
	 * failure to the caller. (4203735)
	 */
	if ((capacity == 0) || (lbasize == 0)) {
		sd_ssc_set_info(ssc, SSC_FLAGS_INVALID_DATA, -1,
		    "sd_send_scsi_READ_CAPACITY received invalid value "
		    "capacity %llu lbasize %d", capacity, lbasize);
		return (EIO);
	}
	sd_ssc_assessment(ssc, SD_FMT_STANDARD);
	return (0);
}

/*
 * Function: sd_send_scsi_READ_CAPACITY_16
 *
 * Description: This routine uses the scsi READ CAPACITY 16 command to
 *		determine the device capacity in number of blocks and the
 *		device native block size.  If this function returns a failure,
 *		then the values in *capp and *lbap are undefined.
 *		This routine should always be called by
 *		sd_send_scsi_READ_CAPACITY which will apply any device
 *		specific adjustments to capacity and lbasize.
 *
 * Arguments: ssc - ssc contains ptr to soft state struct for the target
 *		capp - ptr to unsigned 64-bit variable to receive the
 *			capacity value from the command.
 *		lbap - ptr to unsigned 32-bit variable to receive the
 *			block size value from the command
 *		path_flag - SD_PATH_DIRECT to use the USCSI "direct" chain and
 *			the normal command waitq, or SD_PATH_DIRECT_PRIORITY
 *			to use the USCSI "direct" chain and bypass the normal
 *			command waitq. SD_PATH_DIRECT_PRIORITY is used when
 *			this command is issued as part of an error recovery
 *			action.
 *
 * Return Code: 0   - Success
 *		EIO - IO error
 *		EACCES - Reservation conflict detected
 *		EAGAIN - Device is becoming ready
 *		errno return code from sd_ssc_send()
 *
 * Context: Can sleep.  Blocks until command completes.
 */

#define	SD_CAPACITY_16_SIZE	sizeof (struct scsi_capacity_16)

static int
sd_send_scsi_READ_CAPACITY_16(sd_ssc_t *ssc, uint64_t *capp,
    uint32_t *lbap, int path_flag)
{
	struct scsi_extended_sense	sense_buf;
	struct uscsi_cmd	ucmd_buf;
	union scsi_cdb		cdb;
	uint64_t		*capacity16_buf;
	uint64_t		capacity;
	uint32_t		lbasize;
	int			status;
	struct sd_lun		*un;

	ASSERT(ssc != NULL);

	un = ssc->ssc_un;
	ASSERT(un != NULL);
	ASSERT(!mutex_owned(SD_MUTEX(un)));
	ASSERT(capp != NULL);
	ASSERT(lbap != NULL);

	SD_TRACE(SD_LOG_IO, un,
	    "sd_send_scsi_READ_CAPACITY_16: entry: un:0x%p\n", un);

	/*
	 * First send a READ_CAPACITY_16 command to the target.
	 *
	 * Set up the CDB for the READ_CAPACITY_16 command.  The Partial
	 * Medium Indicator bit is cleared.  The address field must be
	 * zero if the PMI bit is zero.
	 */
	bzero(&cdb, sizeof (cdb));
	bzero(&ucmd_buf, sizeof (ucmd_buf));

	capacity16_buf = kmem_zalloc(SD_CAPACITY_16_SIZE, KM_SLEEP);

	ucmd_buf.uscsi_cdb	= (char *)&cdb;
	ucmd_buf.uscsi_cdblen	= CDB_GROUP4;
	ucmd_buf.uscsi_bufaddr	= (caddr_t)capacity16_buf;
	ucmd_buf.uscsi_buflen	= SD_CAPACITY_16_SIZE;
	ucmd_buf.uscsi_rqbuf	= (caddr_t)&sense_buf;
	ucmd_buf.uscsi_rqlen	= sizeof (sense_buf);
	ucmd_buf.uscsi_flags	= USCSI_RQENABLE | USCSI_READ | USCSI_SILENT;
	ucmd_buf.uscsi_timeout	= 60;

	/*
	 * Read Capacity (16) is a Service Action In command.  One
	 * command byte (0x9E) is overloaded for multiple operations,
	 * with the second CDB byte specifying the desired operation
	 */
	cdb.scc_cmd = SCMD_SVC_ACTION_IN_G4;
	cdb.cdb_opaque[1] = SSVC_ACTION_READ_CAPACITY_G4;

	/*
	 * Fill in allocation length field
	 */
	FORMG4COUNT(&cdb, ucmd_buf.uscsi_buflen);

	status = sd_ssc_send(ssc, &ucmd_buf, FKIOCTL,
	    UIO_SYSSPACE, path_flag);

	switch (status) {
	case 0:
		/* Return failure if we did not get valid capacity data. */
		if (ucmd_buf.uscsi_resid > 20) {
			sd_ssc_set_info(ssc, SSC_FLAGS_INVALID_DATA, -1,
			    "sd_send_scsi_READ_CAPACITY_16 received invalid "
			    "capacity data");
			kmem_free(capacity16_buf, SD_CAPACITY_16_SIZE);
			return (EIO);
		}

		/*
		 * Read capacity and block size from the READ CAPACITY 16 data.
		 * This data may be adjusted later due to device specific
		 * issues.
		 *
		 * According to the SCSI spec, the READ CAPACITY 16
		 * command returns the following:
		 *
		 *  bytes 0-7: Maximum logical block address available.
		 *		(MSB in byte:0 & LSB in byte:7)
		 *
		 *  bytes 8-11: Block length in bytes
		 *		(MSB in byte:8 & LSB in byte:11)
		 */
		capacity = BE_64(capacity16_buf[0]);
		lbasize = BE_32(*(uint32_t *)&capacity16_buf[1]);

		/*
		 * Done with capacity16_buf
		 */
		kmem_free(capacity16_buf, SD_CAPACITY_16_SIZE);

		/*
		 * if the reported capacity is set to all 0xf's, then
		 * this disk is too large.  This could only happen with
		 * a device that supports LBAs larger than 64 bits which
		 * are not defined by any current T10 standards.
		 */
		if (capacity == 0xffffffffffffffff) {
			sd_ssc_set_info(ssc, SSC_FLAGS_INVALID_DATA, -1,
			    "disk is too large");
			return (EIO);
		}
		break;	/* Success! */
	case EIO:
		switch (ucmd_buf.uscsi_status) {
		case STATUS_RESERVATION_CONFLICT:
			status = EACCES;
			break;
		case STATUS_CHECK:
			/*
			 * Check condition; look for ASC/ASCQ of 0x04/0x01
			 * (LOGICAL UNIT IS IN PROCESS OF BECOMING READY)
			 */
			if ((ucmd_buf.uscsi_rqstatus == STATUS_GOOD) &&
			    (scsi_sense_asc((uint8_t *)&sense_buf) == 0x04) &&
			    (scsi_sense_ascq((uint8_t *)&sense_buf) == 0x01)) {
				kmem_free(capacity16_buf, SD_CAPACITY_16_SIZE);
				return (EAGAIN);
			}
			break;
		default:
			break;
		}
		/* FALLTHRU */
	default:
		kmem_free(capacity16_buf, SD_CAPACITY_16_SIZE);
		return (status);
	}

	*capp = capacity;
	*lbap = lbasize;

	SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_READ_CAPACITY_16: "
	    "capacity:0x%llx lbasize:0x%x\n", capacity, lbasize);

	return (0);
}


/*
 * Function: sd_send_scsi_START_STOP_UNIT
 *
 * Description: Issue a scsi START STOP UNIT command to the target.
 *
 * Arguments: ssc - ssc contains pointer to driver soft state (unit)
 *		structure for this target.
 *		flag - SD_TARGET_START
 *			SD_TARGET_STOP
 *			SD_TARGET_EJECT
 *		path_flag - SD_PATH_DIRECT to use the USCSI "direct" chain and
 *			the normal command waitq, or SD_PATH_DIRECT_PRIORITY
 *			to use the USCSI "direct" chain and bypass the normal
 *			command waitq. SD_PATH_DIRECT_PRIORITY is used when this
 *			command is issued as part of an error recovery action.
 *
 * Return Code: 0   - Success
 *		EIO - IO error
 *		EACCES - Reservation conflict detected
 *		ENXIO - Not Ready, medium not present
 *		errno return code from sd_ssc_send()
 *
 * Context: Can sleep.
 */

static int
sd_send_scsi_START_STOP_UNIT(sd_ssc_t *ssc, int flag, int path_flag)
{
	struct scsi_extended_sense	sense_buf;
	union scsi_cdb		cdb;
	struct uscsi_cmd	ucmd_buf;
	int			status;
	struct sd_lun		*un;

	ASSERT(ssc != NULL);
	un = ssc->ssc_un;
	ASSERT(un != NULL);
	ASSERT(!mutex_owned(SD_MUTEX(un)));

	SD_TRACE(SD_LOG_IO, un,
	    "sd_send_scsi_START_STOP_UNIT: entry: un:0x%p\n", un);

	if (un->un_f_check_start_stop &&
	    ((flag == SD_TARGET_START) || (flag == SD_TARGET_STOP)) &&
	    (un->un_f_start_stop_supported != TRUE)) {
		return (0);
	}

	/*
	 * If we are performing an eject operation and
	 * we receive any command other than SD_TARGET_EJECT
	 * we should immediately return.
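	 *
	 * Note: the flag value is copied directly into CDB byte 4 below.
	 * Per the SBC spec, bit 0 of that byte is START and bit 1 is
	 * LOEJ, so the SD_TARGET_* values are expected (assuming the
	 * usual sddef.h definitions) to encode STOP as 0x00, START as
	 * 0x01 (START=1) and EJECT as 0x02 (LOEJ=1, START=0).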
19715 */ 19716 if (flag != SD_TARGET_EJECT) { 19717 mutex_enter(SD_MUTEX(un)); 19718 if (un->un_f_ejecting == TRUE) { 19719 mutex_exit(SD_MUTEX(un)); 19720 return (EAGAIN); 19721 } 19722 mutex_exit(SD_MUTEX(un)); 19723 } 19724 19725 bzero(&cdb, sizeof (cdb)); 19726 bzero(&ucmd_buf, sizeof (ucmd_buf)); 19727 bzero(&sense_buf, sizeof (struct scsi_extended_sense)); 19728 19729 cdb.scc_cmd = SCMD_START_STOP; 19730 cdb.cdb_opaque[4] = (uchar_t)flag; 19731 19732 ucmd_buf.uscsi_cdb = (char *)&cdb; 19733 ucmd_buf.uscsi_cdblen = CDB_GROUP0; 19734 ucmd_buf.uscsi_bufaddr = NULL; 19735 ucmd_buf.uscsi_buflen = 0; 19736 ucmd_buf.uscsi_rqbuf = (caddr_t)&sense_buf; 19737 ucmd_buf.uscsi_rqlen = sizeof (struct scsi_extended_sense); 19738 ucmd_buf.uscsi_flags = USCSI_RQENABLE | USCSI_SILENT; 19739 ucmd_buf.uscsi_timeout = 200; 19740 19741 status = sd_ssc_send(ssc, &ucmd_buf, FKIOCTL, 19742 UIO_SYSSPACE, path_flag); 19743 19744 switch (status) { 19745 case 0: 19746 sd_ssc_assessment(ssc, SD_FMT_STANDARD); 19747 break; /* Success! */ 19748 case EIO: 19749 switch (ucmd_buf.uscsi_status) { 19750 case STATUS_RESERVATION_CONFLICT: 19751 status = EACCES; 19752 break; 19753 case STATUS_CHECK: 19754 if (ucmd_buf.uscsi_rqstatus == STATUS_GOOD) { 19755 switch (scsi_sense_key( 19756 (uint8_t *)&sense_buf)) { 19757 case KEY_ILLEGAL_REQUEST: 19758 status = ENOTSUP; 19759 break; 19760 case KEY_NOT_READY: 19761 if (scsi_sense_asc( 19762 (uint8_t *)&sense_buf) 19763 == 0x3A) { 19764 status = ENXIO; 19765 } 19766 break; 19767 default: 19768 break; 19769 } 19770 } 19771 break; 19772 default: 19773 break; 19774 } 19775 break; 19776 default: 19777 break; 19778 } 19779 19780 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_START_STOP_UNIT: exit\n"); 19781 19782 return (status); 19783 } 19784 19785 19786 /* 19787 * Function: sd_start_stop_unit_callback 19788 * 19789 * Description: timeout(9F) callback to begin recovery process for a 19790 * device that has spun down. 19791 * 19792 * Arguments: arg - pointer to associated softstate struct. 19793 * 19794 * Context: Executes in a timeout(9F) thread context 19795 */ 19796 19797 static void 19798 sd_start_stop_unit_callback(void *arg) 19799 { 19800 struct sd_lun *un = arg; 19801 ASSERT(un != NULL); 19802 ASSERT(!mutex_owned(SD_MUTEX(un))); 19803 19804 SD_TRACE(SD_LOG_IO, un, "sd_start_stop_unit_callback: entry\n"); 19805 19806 (void) taskq_dispatch(sd_tq, sd_start_stop_unit_task, un, KM_NOSLEEP); 19807 } 19808 19809 19810 /* 19811 * Function: sd_start_stop_unit_task 19812 * 19813 * Description: Recovery procedure when a drive is spun down. 19814 * 19815 * Arguments: arg - pointer to associated softstate struct. 19816 * 19817 * Context: Executes in a taskq() thread context 19818 */ 19819 19820 static void 19821 sd_start_stop_unit_task(void *arg) 19822 { 19823 struct sd_lun *un = arg; 19824 sd_ssc_t *ssc; 19825 int rval; 19826 19827 ASSERT(un != NULL); 19828 ASSERT(!mutex_owned(SD_MUTEX(un))); 19829 19830 SD_TRACE(SD_LOG_IO, un, "sd_start_stop_unit_task: entry\n"); 19831 19832 /* 19833 * Some unformatted drives report not ready error, no need to 19834 * restart if format has been initiated. 19835 */ 19836 mutex_enter(SD_MUTEX(un)); 19837 if (un->un_f_format_in_progress == TRUE) { 19838 mutex_exit(SD_MUTEX(un)); 19839 return; 19840 } 19841 mutex_exit(SD_MUTEX(un)); 19842 19843 /* 19844 * When a START STOP command is issued from here, it is part of a 19845 * failure recovery operation and must be issued before any other 19846 * commands, including any pending retries. 
Thus it must be sent 19847 * using SD_PATH_DIRECT_PRIORITY. It doesn't matter if the spin up 19848 * succeeds or not, we will start I/O after the attempt. 19849 */ 19850 ssc = sd_ssc_init(un); 19851 rval = sd_send_scsi_START_STOP_UNIT(ssc, SD_TARGET_START, 19852 SD_PATH_DIRECT_PRIORITY); 19853 if (rval != 0) 19854 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 19855 sd_ssc_fini(ssc); 19856 /* 19857 * The above call blocks until the START_STOP_UNIT command completes. 19858 * Now that it has completed, we must re-try the original IO that 19859 * received the NOT READY condition in the first place. There are 19860 * three possible conditions here: 19861 * 19862 * (1) The original IO is on un_retry_bp. 19863 * (2) The original IO is on the regular wait queue, and un_retry_bp 19864 * is NULL. 19865 * (3) The original IO is on the regular wait queue, and un_retry_bp 19866 * points to some other, unrelated bp. 19867 * 19868 * For each case, we must call sd_start_cmds() with un_retry_bp 19869 * as the argument. If un_retry_bp is NULL, this will initiate 19870 * processing of the regular wait queue. If un_retry_bp is not NULL, 19871 * then this will process the bp on un_retry_bp. That may or may not 19872 * be the original IO, but that does not matter: the important thing 19873 * is to keep the IO processing going at this point. 19874 * 19875 * Note: This is a very specific error recovery sequence associated 19876 * with a drive that is not spun up. We attempt a START_STOP_UNIT and 19877 * serialize the I/O with completion of the spin-up. 19878 */ 19879 mutex_enter(SD_MUTEX(un)); 19880 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 19881 "sd_start_stop_unit_task: un:0x%p starting bp:0x%p\n", 19882 un, un->un_retry_bp); 19883 un->un_startstop_timeid = NULL; /* Timeout is no longer pending */ 19884 sd_start_cmds(un, un->un_retry_bp); 19885 mutex_exit(SD_MUTEX(un)); 19886 19887 SD_TRACE(SD_LOG_IO, un, "sd_start_stop_unit_task: exit\n"); 19888 } 19889 19890 19891 /* 19892 * Function: sd_send_scsi_INQUIRY 19893 * 19894 * Description: Issue the scsi INQUIRY command. 19895 * 19896 * Arguments: ssc - ssc contains pointer to driver soft state (unit) 19897 * structure for this target. 19898 * bufaddr 19899 * buflen 19900 * evpd 19901 * page_code 19902 * page_length 19903 * 19904 * Return Code: 0 - Success 19905 * errno return code from sd_ssc_send() 19906 * 19907 * Context: Can sleep. Does not return until command is completed. 
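 *
 * Illustrative usage (hypothetical, for exposition only): to fetch
 * the unit serial number VPD page (page code 0x80, EVPD=1), a caller
 * holding an initialized ssc could do
 *
 *	uchar_t	inqbuf[0xff];
 *	size_t	resid;
 *	int	rval;
 *
 *	rval = sd_send_scsi_INQUIRY(ssc, inqbuf, sizeof (inqbuf),
 *	    0x01, 0x80, &resid);
 *
 * With evpd == 0 and page_code == 0 the standard INQUIRY data is
 * returned instead.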
 */

static int
sd_send_scsi_INQUIRY(sd_ssc_t *ssc, uchar_t *bufaddr, size_t buflen,
    uchar_t evpd, uchar_t page_code, size_t *residp)
{
	union scsi_cdb		cdb;
	struct uscsi_cmd	ucmd_buf;
	int			status;
	struct sd_lun		*un;

	ASSERT(ssc != NULL);
	un = ssc->ssc_un;
	ASSERT(un != NULL);
	ASSERT(!mutex_owned(SD_MUTEX(un)));
	ASSERT(bufaddr != NULL);

	SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_INQUIRY: entry: un:0x%p\n", un);

	bzero(&cdb, sizeof (cdb));
	bzero(&ucmd_buf, sizeof (ucmd_buf));
	bzero(bufaddr, buflen);

	cdb.scc_cmd = SCMD_INQUIRY;
	cdb.cdb_opaque[1] = evpd;
	cdb.cdb_opaque[2] = page_code;
	FORMG0COUNT(&cdb, buflen);

	ucmd_buf.uscsi_cdb	= (char *)&cdb;
	ucmd_buf.uscsi_cdblen	= CDB_GROUP0;
	ucmd_buf.uscsi_bufaddr	= (caddr_t)bufaddr;
	ucmd_buf.uscsi_buflen	= buflen;
	ucmd_buf.uscsi_rqbuf	= NULL;
	ucmd_buf.uscsi_rqlen	= 0;
	ucmd_buf.uscsi_flags	= USCSI_READ | USCSI_SILENT;
	ucmd_buf.uscsi_timeout	= 200;	/* Excessive legacy value */

	status = sd_ssc_send(ssc, &ucmd_buf, FKIOCTL,
	    UIO_SYSSPACE, SD_PATH_DIRECT);

	/*
	 * Only handle status == 0 here; for other outcomes the
	 * upper-level caller will apply an assessment appropriate
	 * to its own context.
	 */
	if (status == 0)
		sd_ssc_assessment(ssc, SD_FMT_STANDARD);

	if ((status == 0) && (residp != NULL)) {
		*residp = ucmd_buf.uscsi_resid;
	}

	SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_INQUIRY: exit\n");

	return (status);
}


/*
 * Function: sd_send_scsi_TEST_UNIT_READY
 *
 * Description: Issue the scsi TEST UNIT READY command.
 *		This routine can be told to set the flag USCSI_DIAGNOSE to
 *		prevent retrying failed commands.  Use this when the intent
 *		is either to check for device readiness, to clear a Unit
 *		Attention, or to clear any outstanding sense data.
 *		However under specific conditions the expected behavior
 *		is for retries to bring a device ready, so use the flag
 *		with caution.
 *
 * Arguments: ssc - ssc contains pointer to driver soft state (unit)
 *		structure for this target.
 *		flag:	SD_CHECK_FOR_MEDIA: return ENXIO if no media present
 *			SD_DONT_RETRY_TUR: include uscsi flag USCSI_DIAGNOSE.
 *			0: don't check for media present, do retries on cmd.
 *
 * Return Code: 0   - Success
 *		EIO - IO error
 *		EACCES - Reservation conflict detected
 *		ENXIO - Not Ready, medium not present
 *		errno return code from sd_ssc_send()
 *
 * Context: Can sleep.  Does not return until command is completed.
 */

static int
sd_send_scsi_TEST_UNIT_READY(sd_ssc_t *ssc, int flag)
{
	struct scsi_extended_sense	sense_buf;
	union scsi_cdb		cdb;
	struct uscsi_cmd	ucmd_buf;
	int			status;
	struct sd_lun		*un;

	ASSERT(ssc != NULL);
	un = ssc->ssc_un;
	ASSERT(un != NULL);
	ASSERT(!mutex_owned(SD_MUTEX(un)));

	SD_TRACE(SD_LOG_IO, un,
	    "sd_send_scsi_TEST_UNIT_READY: entry: un:0x%p\n", un);

	/*
	 * Some Seagate elite1 TQ devices get hung with disconnect/reconnect
	 * timeouts when they receive a TUR and the queue is not empty.
Check
20012	 * the configuration flag set during attach (indicating the drive has
20013	 * this firmware bug) and un_ncmds_in_transport before issuing the
20014	 * TUR. If there are
20015	 * pending commands, return success; this is somewhat arbitrary, but it
20016	 * is acceptable for non-removables (i.e. the EliteI disks) and
20017	 * non-clustering configurations.
20018	 */
20019	if (un->un_f_cfg_tur_check == TRUE) {
20020		mutex_enter(SD_MUTEX(un));
20021		if (un->un_ncmds_in_transport != 0) {
20022			mutex_exit(SD_MUTEX(un));
20023			return (0);
20024		}
20025		mutex_exit(SD_MUTEX(un));
20026	}
20027
20028	bzero(&cdb, sizeof (cdb));
20029	bzero(&ucmd_buf, sizeof (ucmd_buf));
20030	bzero(&sense_buf, sizeof (struct scsi_extended_sense));
20031
20032	cdb.scc_cmd = SCMD_TEST_UNIT_READY;
20033
20034	ucmd_buf.uscsi_cdb = (char *)&cdb;
20035	ucmd_buf.uscsi_cdblen = CDB_GROUP0;
20036	ucmd_buf.uscsi_bufaddr = NULL;
20037	ucmd_buf.uscsi_buflen = 0;
20038	ucmd_buf.uscsi_rqbuf = (caddr_t)&sense_buf;
20039	ucmd_buf.uscsi_rqlen = sizeof (struct scsi_extended_sense);
20040	ucmd_buf.uscsi_flags = USCSI_RQENABLE | USCSI_SILENT;
20041
20042	/* Use flag USCSI_DIAGNOSE to prevent retries if it fails. */
20043	if ((flag & SD_DONT_RETRY_TUR) != 0) {
20044		ucmd_buf.uscsi_flags |= USCSI_DIAGNOSE;
20045	}
20046	ucmd_buf.uscsi_timeout = 60;
20047
20048	status = sd_ssc_send(ssc, &ucmd_buf, FKIOCTL,
20049	    UIO_SYSSPACE, ((flag & SD_BYPASS_PM) ? SD_PATH_DIRECT :
20050	    SD_PATH_STANDARD));
20051
20052	switch (status) {
20053	case 0:
20054		sd_ssc_assessment(ssc, SD_FMT_STANDARD);
20055		break;	/* Success! */
20056	case EIO:
20057		switch (ucmd_buf.uscsi_status) {
20058		case STATUS_RESERVATION_CONFLICT:
20059			status = EACCES;
20060			break;
20061		case STATUS_CHECK:
20062			if ((flag & SD_CHECK_FOR_MEDIA) == 0) {
20063				break;
20064			}
20065			if ((ucmd_buf.uscsi_rqstatus == STATUS_GOOD) &&
20066			    (scsi_sense_key((uint8_t *)&sense_buf) ==
20067			    KEY_NOT_READY) &&
20068			    (scsi_sense_asc((uint8_t *)&sense_buf) == 0x3A)) {
20069				status = ENXIO;
20070			}
20071			break;
20072		default:
20073			break;
20074		}
20075		break;
20076	default:
20077		break;
20078	}
20079
20080	SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_TEST_UNIT_READY: exit\n");
20081
20082	return (status);
20083 }
20084
20085 /*
20086  * Function: sd_send_scsi_PERSISTENT_RESERVE_IN
20087  *
20088  * Description: Issue the scsi PERSISTENT RESERVE IN command.
20089  *
20090  * Arguments: ssc - ssc contains pointer to driver soft state (unit)
20091  *		structure for this target.
 *		usr_cmd - SD_READ_KEYS or SD_READ_RESV (service action).
 *		data_len - length of the data_bufp buffer.
 *		data_bufp - buffer for the returned parameter data; if NULL
 *		    (with data_len == 0), a MHIOC_RESV_KEY_SIZE scratch
 *		    buffer is allocated internally.
20092  *
20093  * Return Code: 0 - Success
20094  *		EACCES
20095  *		ENOTSUP
20096  *		errno return code from sd_ssc_send()
20097  *
20098  * Context: Can sleep. Does not return until command is completed.
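 *
 * Usage sketch (illustrative; real callers such as the MHIOCGRP_INKEYS
 * path first issue a small read and then re-size the buffer from the
 * length field in the returned header):
 *
 *	uchar_t	keybuf[MHIOC_RESV_KEY_SIZE];
 *
 *	rval = sd_send_scsi_PERSISTENT_RESERVE_IN(ssc, SD_READ_KEYS,
 *	    sizeof (keybuf), keybuf);
 *
 * Passing data_bufp == NULL with data_len == 0 is also legal; the
 * routine then allocates and frees a scratch buffer itself.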
20099 */
20100
20101 static int
20102 sd_send_scsi_PERSISTENT_RESERVE_IN(sd_ssc_t *ssc, uchar_t usr_cmd,
20103     uint16_t data_len, uchar_t *data_bufp)
20104 {
20105	struct scsi_extended_sense sense_buf;
20106	union scsi_cdb cdb;
20107	struct uscsi_cmd ucmd_buf;
20108	int status;
20109	int no_caller_buf = FALSE;
20110	struct sd_lun *un;
20111
20112	ASSERT(ssc != NULL);
20113	un = ssc->ssc_un;
20114	ASSERT(un != NULL);
20115	ASSERT(!mutex_owned(SD_MUTEX(un)));
20116	ASSERT((usr_cmd == SD_READ_KEYS) || (usr_cmd == SD_READ_RESV));
20117
20118	SD_TRACE(SD_LOG_IO, un,
20119	    "sd_send_scsi_PERSISTENT_RESERVE_IN: entry: un:0x%p\n", un);
20120
20121	bzero(&cdb, sizeof (cdb));
20122	bzero(&ucmd_buf, sizeof (ucmd_buf));
20123	bzero(&sense_buf, sizeof (struct scsi_extended_sense));
20124	if (data_bufp == NULL) {
20125		/* Allocate a default buf if the caller did not give one */
20126		ASSERT(data_len == 0);
20127		data_len = MHIOC_RESV_KEY_SIZE;
20128		data_bufp = kmem_zalloc(MHIOC_RESV_KEY_SIZE, KM_SLEEP);
20129		no_caller_buf = TRUE;
20130	}
20131
20132	cdb.scc_cmd = SCMD_PERSISTENT_RESERVE_IN;
20133	cdb.cdb_opaque[1] = usr_cmd;
20134	FORMG1COUNT(&cdb, data_len);
20135
20136	ucmd_buf.uscsi_cdb = (char *)&cdb;
20137	ucmd_buf.uscsi_cdblen = CDB_GROUP1;
20138	ucmd_buf.uscsi_bufaddr = (caddr_t)data_bufp;
20139	ucmd_buf.uscsi_buflen = data_len;
20140	ucmd_buf.uscsi_rqbuf = (caddr_t)&sense_buf;
20141	ucmd_buf.uscsi_rqlen = sizeof (struct scsi_extended_sense);
20142	ucmd_buf.uscsi_flags = USCSI_RQENABLE | USCSI_READ | USCSI_SILENT;
20143	ucmd_buf.uscsi_timeout = 60;
20144
20145	status = sd_ssc_send(ssc, &ucmd_buf, FKIOCTL,
20146	    UIO_SYSSPACE, SD_PATH_STANDARD);
20147
20148	switch (status) {
20149	case 0:
20150		sd_ssc_assessment(ssc, SD_FMT_STANDARD);
20151
20152		break;	/* Success! */
20153	case EIO:
20154		switch (ucmd_buf.uscsi_status) {
20155		case STATUS_RESERVATION_CONFLICT:
20156			status = EACCES;
20157			break;
20158		case STATUS_CHECK:
20159			if ((ucmd_buf.uscsi_rqstatus == STATUS_GOOD) &&
20160			    (scsi_sense_key((uint8_t *)&sense_buf) ==
20161			    KEY_ILLEGAL_REQUEST)) {
20162				status = ENOTSUP;
20163			}
20164			break;
20165		default:
20166			break;
20167		}
20168		break;
20169	default:
20170		break;
20171	}
20172
20173	SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_PERSISTENT_RESERVE_IN: exit\n");
20174
20175	if (no_caller_buf == TRUE) {
20176		kmem_free(data_bufp, data_len);
20177	}
20178
20179	return (status);
20180 }
20181
20182
20183 /*
20184  * Function: sd_send_scsi_PERSISTENT_RESERVE_OUT
20185  *
20186  * Description: This routine is the driver entry point for handling
20187  *		multi-host persistent reservation requests (MHIOCGRP_REGISTER,
20188  *		MHIOCGRP_RESERVE, MHIOCGRP_PREEMPTANDABORT, and
20189  *		MHIOCGRP_REGISTERANDIGNOREKEY) by sending the SCSI-3 PROUT
 *		commands to the device.
20190  *
20191  * Arguments: ssc - ssc contains un, a pointer to the soft state struct
20192  *		for the target.
20193  *		usr_cmd - SCSI-3 reservation facility command (one of
20194  *		SD_SCSI3_REGISTER, SD_SCSI3_RESERVE, SD_SCSI3_RELEASE,
20195  *		SD_SCSI3_PREEMPTANDABORT, SD_SCSI3_REGISTERANDIGNOREKEY)
20196  *		usr_bufp - user-provided pointer to a register, reserve
20197  *		descriptor, or preempt and abort structure (mhioc_register_t,
20198  *		mhioc_resv_desc_t, mhioc_preemptandabort_t, or
 *		mhioc_registerandignorekey_t)
20199  *
20200  * Return Code: 0 - Success
20201  *		EACCES
20202  *		ENOTSUP
20203  *		errno return code from sd_ssc_send()
20204  *
20205  * Context: Can sleep. Does not return until command is completed.
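 *
 * The routine marshals the user structure into the 24-byte PROUT
 * parameter list (sd_prout_t) that travels with the command. A rough
 * sketch of that layout, assuming sd_prout_t mirrors the SCSI-3
 * parameter list (offsets per the spec, not restated from sddef.h):
 *
 *	bytes  0-7	res_key (reservation key)
 *	bytes  8-15	service_key (service action reservation key)
 *	bytes 16-19	scope_address (scope-specific address, stored
 *			big-endian via BE_32)
 *	byte  20	aptpl (activate persist through power loss)
 *	bytes 21-23	reserved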
20206 */ 20207 20208 static int 20209 sd_send_scsi_PERSISTENT_RESERVE_OUT(sd_ssc_t *ssc, uchar_t usr_cmd, 20210 uchar_t *usr_bufp) 20211 { 20212 struct scsi_extended_sense sense_buf; 20213 union scsi_cdb cdb; 20214 struct uscsi_cmd ucmd_buf; 20215 int status; 20216 uchar_t data_len = sizeof (sd_prout_t); 20217 sd_prout_t *prp; 20218 struct sd_lun *un; 20219 20220 ASSERT(ssc != NULL); 20221 un = ssc->ssc_un; 20222 ASSERT(un != NULL); 20223 ASSERT(!mutex_owned(SD_MUTEX(un))); 20224 ASSERT(data_len == 24); /* required by scsi spec */ 20225 20226 SD_TRACE(SD_LOG_IO, un, 20227 "sd_send_scsi_PERSISTENT_RESERVE_OUT: entry: un:0x%p\n", un); 20228 20229 if (usr_bufp == NULL) { 20230 return (EINVAL); 20231 } 20232 20233 bzero(&cdb, sizeof (cdb)); 20234 bzero(&ucmd_buf, sizeof (ucmd_buf)); 20235 bzero(&sense_buf, sizeof (struct scsi_extended_sense)); 20236 prp = kmem_zalloc(data_len, KM_SLEEP); 20237 20238 cdb.scc_cmd = SCMD_PERSISTENT_RESERVE_OUT; 20239 cdb.cdb_opaque[1] = usr_cmd; 20240 FORMG1COUNT(&cdb, data_len); 20241 20242 ucmd_buf.uscsi_cdb = (char *)&cdb; 20243 ucmd_buf.uscsi_cdblen = CDB_GROUP1; 20244 ucmd_buf.uscsi_bufaddr = (caddr_t)prp; 20245 ucmd_buf.uscsi_buflen = data_len; 20246 ucmd_buf.uscsi_rqbuf = (caddr_t)&sense_buf; 20247 ucmd_buf.uscsi_rqlen = sizeof (struct scsi_extended_sense); 20248 ucmd_buf.uscsi_flags = USCSI_RQENABLE | USCSI_WRITE | USCSI_SILENT; 20249 ucmd_buf.uscsi_timeout = 60; 20250 20251 switch (usr_cmd) { 20252 case SD_SCSI3_REGISTER: { 20253 mhioc_register_t *ptr = (mhioc_register_t *)usr_bufp; 20254 20255 bcopy(ptr->oldkey.key, prp->res_key, MHIOC_RESV_KEY_SIZE); 20256 bcopy(ptr->newkey.key, prp->service_key, 20257 MHIOC_RESV_KEY_SIZE); 20258 prp->aptpl = ptr->aptpl; 20259 break; 20260 } 20261 case SD_SCSI3_RESERVE: 20262 case SD_SCSI3_RELEASE: { 20263 mhioc_resv_desc_t *ptr = (mhioc_resv_desc_t *)usr_bufp; 20264 20265 bcopy(ptr->key.key, prp->res_key, MHIOC_RESV_KEY_SIZE); 20266 prp->scope_address = BE_32(ptr->scope_specific_addr); 20267 cdb.cdb_opaque[2] = ptr->type; 20268 break; 20269 } 20270 case SD_SCSI3_PREEMPTANDABORT: { 20271 mhioc_preemptandabort_t *ptr = 20272 (mhioc_preemptandabort_t *)usr_bufp; 20273 20274 bcopy(ptr->resvdesc.key.key, prp->res_key, MHIOC_RESV_KEY_SIZE); 20275 bcopy(ptr->victim_key.key, prp->service_key, 20276 MHIOC_RESV_KEY_SIZE); 20277 prp->scope_address = BE_32(ptr->resvdesc.scope_specific_addr); 20278 cdb.cdb_opaque[2] = ptr->resvdesc.type; 20279 ucmd_buf.uscsi_flags |= USCSI_HEAD; 20280 break; 20281 } 20282 case SD_SCSI3_REGISTERANDIGNOREKEY: 20283 { 20284 mhioc_registerandignorekey_t *ptr; 20285 ptr = (mhioc_registerandignorekey_t *)usr_bufp; 20286 bcopy(ptr->newkey.key, 20287 prp->service_key, MHIOC_RESV_KEY_SIZE); 20288 prp->aptpl = ptr->aptpl; 20289 break; 20290 } 20291 default: 20292 ASSERT(FALSE); 20293 break; 20294 } 20295 20296 status = sd_ssc_send(ssc, &ucmd_buf, FKIOCTL, 20297 UIO_SYSSPACE, SD_PATH_STANDARD); 20298 20299 switch (status) { 20300 case 0: 20301 sd_ssc_assessment(ssc, SD_FMT_STANDARD); 20302 break; /* Success! 
*/ 20303 case EIO: 20304 switch (ucmd_buf.uscsi_status) { 20305 case STATUS_RESERVATION_CONFLICT: 20306 status = EACCES; 20307 break; 20308 case STATUS_CHECK: 20309 if ((ucmd_buf.uscsi_rqstatus == STATUS_GOOD) && 20310 (scsi_sense_key((uint8_t *)&sense_buf) == 20311 KEY_ILLEGAL_REQUEST)) { 20312 status = ENOTSUP; 20313 } 20314 break; 20315 default: 20316 break; 20317 } 20318 break; 20319 default: 20320 break; 20321 } 20322 20323 kmem_free(prp, data_len); 20324 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_PERSISTENT_RESERVE_OUT: exit\n"); 20325 return (status); 20326 } 20327 20328 20329 /* 20330 * Function: sd_send_scsi_SYNCHRONIZE_CACHE 20331 * 20332 * Description: Issues a scsi SYNCHRONIZE CACHE command to the target 20333 * 20334 * Arguments: un - pointer to the target's soft state struct 20335 * dkc - pointer to the callback structure 20336 * 20337 * Return Code: 0 - success 20338 * errno-type error code 20339 * 20340 * Context: kernel thread context only. 20341 * 20342 * _______________________________________________________________ 20343 * | dkc_flag & | dkc_callback | DKIOCFLUSHWRITECACHE | 20344 * |FLUSH_VOLATILE| | operation | 20345 * |______________|______________|_________________________________| 20346 * | 0 | NULL | Synchronous flush on both | 20347 * | | | volatile and non-volatile cache | 20348 * |______________|______________|_________________________________| 20349 * | 1 | NULL | Synchronous flush on volatile | 20350 * | | | cache; disk drivers may suppress| 20351 * | | | flush if disk table indicates | 20352 * | | | non-volatile cache | 20353 * |______________|______________|_________________________________| 20354 * | 0 | !NULL | Asynchronous flush on both | 20355 * | | | volatile and non-volatile cache;| 20356 * |______________|______________|_________________________________| 20357 * | 1 | !NULL | Asynchronous flush on volatile | 20358 * | | | cache; disk drivers may suppress| 20359 * | | | flush if disk table indicates | 20360 * | | | non-volatile cache | 20361 * |______________|______________|_________________________________| 20362 * 20363 */ 20364 20365 static int 20366 sd_send_scsi_SYNCHRONIZE_CACHE(struct sd_lun *un, struct dk_callback *dkc) 20367 { 20368 struct sd_uscsi_info *uip; 20369 struct uscsi_cmd *uscmd; 20370 union scsi_cdb *cdb; 20371 struct buf *bp; 20372 int rval = 0; 20373 int is_async; 20374 20375 SD_TRACE(SD_LOG_IO, un, 20376 "sd_send_scsi_SYNCHRONIZE_CACHE: entry: un:0x%p\n", un); 20377 20378 ASSERT(un != NULL); 20379 ASSERT(!mutex_owned(SD_MUTEX(un))); 20380 20381 if (dkc == NULL || dkc->dkc_callback == NULL) { 20382 is_async = FALSE; 20383 } else { 20384 is_async = TRUE; 20385 } 20386 20387 mutex_enter(SD_MUTEX(un)); 20388 /* check whether cache flush should be suppressed */ 20389 if (un->un_f_suppress_cache_flush == TRUE) { 20390 mutex_exit(SD_MUTEX(un)); 20391 /* 20392 * suppress the cache flush if the device is told to do 20393 * so by sd.conf or disk table 20394 */ 20395 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_SYNCHRONIZE_CACHE: \ 20396 skip the cache flush since suppress_cache_flush is %d!\n", 20397 un->un_f_suppress_cache_flush); 20398 20399 if (is_async == TRUE) { 20400 /* invoke callback for asynchronous flush */ 20401 (*dkc->dkc_callback)(dkc->dkc_cookie, 0); 20402 } 20403 return (rval); 20404 } 20405 mutex_exit(SD_MUTEX(un)); 20406 20407 /* 20408 * check dkc_flag & FLUSH_VOLATILE so SYNC_NV bit can be 20409 * set properly 20410 */ 20411 cdb = kmem_zalloc(CDB_GROUP1, KM_SLEEP); 20412 cdb->scc_cmd = SCMD_SYNCHRONIZE_CACHE; 20413 20414 
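	/*
	 * Sketch of the CDB built here (a summary of this code, not new
	 * logic): byte 0 is SCMD_SYNCHRONIZE_CACHE (0x35); the SYNC_NV
	 * bit lives in CDB byte 1 and is set below only for a
	 * volatile-cache-only flush on a device known to support it; the
	 * LBA and block count fields stay zero, which per SBC means
	 * flush the entire medium.
	 */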
mutex_enter(SD_MUTEX(un)); 20415 if (dkc != NULL && un->un_f_sync_nv_supported && 20416 (dkc->dkc_flag & FLUSH_VOLATILE)) { 20417 /* 20418 * if the device supports SYNC_NV bit, turn on 20419 * the SYNC_NV bit to only flush volatile cache 20420 */ 20421 cdb->cdb_un.tag |= SD_SYNC_NV_BIT; 20422 } 20423 mutex_exit(SD_MUTEX(un)); 20424 20425 /* 20426 * First get some memory for the uscsi_cmd struct and cdb 20427 * and initialize for SYNCHRONIZE_CACHE cmd. 20428 */ 20429 uscmd = kmem_zalloc(sizeof (struct uscsi_cmd), KM_SLEEP); 20430 uscmd->uscsi_cdblen = CDB_GROUP1; 20431 uscmd->uscsi_cdb = (caddr_t)cdb; 20432 uscmd->uscsi_bufaddr = NULL; 20433 uscmd->uscsi_buflen = 0; 20434 uscmd->uscsi_rqbuf = kmem_zalloc(SENSE_LENGTH, KM_SLEEP); 20435 uscmd->uscsi_rqlen = SENSE_LENGTH; 20436 uscmd->uscsi_rqresid = SENSE_LENGTH; 20437 uscmd->uscsi_flags = USCSI_RQENABLE | USCSI_SILENT; 20438 uscmd->uscsi_timeout = sd_io_time; 20439 20440 /* 20441 * Allocate an sd_uscsi_info struct and fill it with the info 20442 * needed by sd_initpkt_for_uscsi(). Then put the pointer into 20443 * b_private in the buf for sd_initpkt_for_uscsi(). Note that 20444 * since we allocate the buf here in this function, we do not 20445 * need to preserve the prior contents of b_private. 20446 * The sd_uscsi_info struct is also used by sd_uscsi_strategy() 20447 */ 20448 uip = kmem_zalloc(sizeof (struct sd_uscsi_info), KM_SLEEP); 20449 uip->ui_flags = SD_PATH_DIRECT; 20450 uip->ui_cmdp = uscmd; 20451 20452 bp = getrbuf(KM_SLEEP); 20453 bp->b_private = uip; 20454 20455 /* 20456 * Setup buffer to carry uscsi request. 20457 */ 20458 bp->b_flags = B_BUSY; 20459 bp->b_bcount = 0; 20460 bp->b_blkno = 0; 20461 20462 if (is_async == TRUE) { 20463 bp->b_iodone = sd_send_scsi_SYNCHRONIZE_CACHE_biodone; 20464 uip->ui_dkc = *dkc; 20465 } 20466 20467 bp->b_edev = SD_GET_DEV(un); 20468 bp->b_dev = cmpdev(bp->b_edev); /* maybe unnecessary? */ 20469 20470 /* 20471 * Unset un_f_sync_cache_required flag 20472 */ 20473 mutex_enter(SD_MUTEX(un)); 20474 un->un_f_sync_cache_required = FALSE; 20475 mutex_exit(SD_MUTEX(un)); 20476 20477 (void) sd_uscsi_strategy(bp); 20478 20479 /* 20480 * If synchronous request, wait for completion 20481 * If async just return and let b_iodone callback 20482 * cleanup. 20483 * NOTE: On return, u_ncmds_in_driver will be decremented, 20484 * but it was also incremented in sd_uscsi_strategy(), so 20485 * we should be ok. 20486 */ 20487 if (is_async == FALSE) { 20488 (void) biowait(bp); 20489 rval = sd_send_scsi_SYNCHRONIZE_CACHE_biodone(bp); 20490 } 20491 20492 return (rval); 20493 } 20494 20495 20496 static int 20497 sd_send_scsi_SYNCHRONIZE_CACHE_biodone(struct buf *bp) 20498 { 20499 struct sd_uscsi_info *uip; 20500 struct uscsi_cmd *uscmd; 20501 uint8_t *sense_buf; 20502 struct sd_lun *un; 20503 int status; 20504 union scsi_cdb *cdb; 20505 20506 uip = (struct sd_uscsi_info *)(bp->b_private); 20507 ASSERT(uip != NULL); 20508 20509 uscmd = uip->ui_cmdp; 20510 ASSERT(uscmd != NULL); 20511 20512 sense_buf = (uint8_t *)uscmd->uscsi_rqbuf; 20513 ASSERT(sense_buf != NULL); 20514 20515 un = ddi_get_soft_state(sd_state, SD_GET_INSTANCE_FROM_BUF(bp)); 20516 ASSERT(un != NULL); 20517 20518 cdb = (union scsi_cdb *)uscmd->uscsi_cdb; 20519 20520 status = geterror(bp); 20521 switch (status) { 20522 case 0: 20523 break; /* Success! 
*/ 20524 case EIO: 20525 switch (uscmd->uscsi_status) { 20526 case STATUS_RESERVATION_CONFLICT: 20527 /* Ignore reservation conflict */ 20528 status = 0; 20529 goto done; 20530 20531 case STATUS_CHECK: 20532 if ((uscmd->uscsi_rqstatus == STATUS_GOOD) && 20533 (scsi_sense_key(sense_buf) == 20534 KEY_ILLEGAL_REQUEST)) { 20535 /* Ignore Illegal Request error */ 20536 if (cdb->cdb_un.tag&SD_SYNC_NV_BIT) { 20537 mutex_enter(SD_MUTEX(un)); 20538 un->un_f_sync_nv_supported = FALSE; 20539 mutex_exit(SD_MUTEX(un)); 20540 status = 0; 20541 SD_TRACE(SD_LOG_IO, un, 20542 "un_f_sync_nv_supported \ 20543 is set to false.\n"); 20544 goto done; 20545 } 20546 20547 mutex_enter(SD_MUTEX(un)); 20548 un->un_f_sync_cache_supported = FALSE; 20549 mutex_exit(SD_MUTEX(un)); 20550 SD_TRACE(SD_LOG_IO, un, 20551 "sd_send_scsi_SYNCHRONIZE_CACHE_biodone: \ 20552 un_f_sync_cache_supported set to false \ 20553 with asc = %x, ascq = %x\n", 20554 scsi_sense_asc(sense_buf), 20555 scsi_sense_ascq(sense_buf)); 20556 status = ENOTSUP; 20557 goto done; 20558 } 20559 break; 20560 default: 20561 break; 20562 } 20563 /* FALLTHRU */ 20564 default: 20565 /* 20566 * Turn on the un_f_sync_cache_required flag 20567 * since the SYNC CACHE command failed 20568 */ 20569 mutex_enter(SD_MUTEX(un)); 20570 un->un_f_sync_cache_required = TRUE; 20571 mutex_exit(SD_MUTEX(un)); 20572 20573 /* 20574 * Don't log an error message if this device 20575 * has removable media. 20576 */ 20577 if (!un->un_f_has_removable_media) { 20578 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 20579 "SYNCHRONIZE CACHE command failed (%d)\n", status); 20580 } 20581 break; 20582 } 20583 20584 done: 20585 if (uip->ui_dkc.dkc_callback != NULL) { 20586 (*uip->ui_dkc.dkc_callback)(uip->ui_dkc.dkc_cookie, status); 20587 } 20588 20589 ASSERT((bp->b_flags & B_REMAPPED) == 0); 20590 freerbuf(bp); 20591 kmem_free(uip, sizeof (struct sd_uscsi_info)); 20592 kmem_free(uscmd->uscsi_rqbuf, SENSE_LENGTH); 20593 kmem_free(uscmd->uscsi_cdb, (size_t)uscmd->uscsi_cdblen); 20594 kmem_free(uscmd, sizeof (struct uscsi_cmd)); 20595 20596 return (status); 20597 } 20598 20599 20600 /* 20601 * Function: sd_send_scsi_GET_CONFIGURATION 20602 * 20603 * Description: Issues the get configuration command to the device. 20604 * Called from sd_check_for_writable_cd & sd_get_media_info 20605 * caller needs to ensure that buflen = SD_PROFILE_HEADER_LEN 20606 * Arguments: ssc 20607 * ucmdbuf 20608 * rqbuf 20609 * rqbuflen 20610 * bufaddr 20611 * buflen 20612 * path_flag 20613 * 20614 * Return Code: 0 - Success 20615 * errno return code from sd_ssc_send() 20616 * 20617 * Context: Can sleep. Does not return until command is completed. 20618 * 20619 */ 20620 20621 static int 20622 sd_send_scsi_GET_CONFIGURATION(sd_ssc_t *ssc, struct uscsi_cmd *ucmdbuf, 20623 uchar_t *rqbuf, uint_t rqbuflen, uchar_t *bufaddr, uint_t buflen, 20624 int path_flag) 20625 { 20626 char cdb[CDB_GROUP1]; 20627 int status; 20628 struct sd_lun *un; 20629 20630 ASSERT(ssc != NULL); 20631 un = ssc->ssc_un; 20632 ASSERT(un != NULL); 20633 ASSERT(!mutex_owned(SD_MUTEX(un))); 20634 ASSERT(bufaddr != NULL); 20635 ASSERT(ucmdbuf != NULL); 20636 ASSERT(rqbuf != NULL); 20637 20638 SD_TRACE(SD_LOG_IO, un, 20639 "sd_send_scsi_GET_CONFIGURATION: entry: un:0x%p\n", un); 20640 20641 bzero(cdb, sizeof (cdb)); 20642 bzero(ucmdbuf, sizeof (struct uscsi_cmd)); 20643 bzero(rqbuf, rqbuflen); 20644 bzero(bufaddr, buflen); 20645 20646 /* 20647 * Set up cdb field for the get configuration command. 
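	 * A sketch of the resulting CDB (per MMC; bytes not set below
	 * are zero):
	 *	byte 0:    0x46 (SCMD_GET_CONFIGURATION)
	 *	byte 1:    0x02 (RT = 10b: return the feature header plus
	 *		   the one descriptor named by the starting
	 *		   feature number)
	 *	bytes 2-3: starting feature number (zero here)
	 *	byte 8:    allocation length, capped at
	 *		   SD_PROFILE_HEADER_LEN so only the header is
	 *		   transferred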
20648 */ 20649 cdb[0] = SCMD_GET_CONFIGURATION; 20650 cdb[1] = 0x02; /* Requested Type */ 20651 cdb[8] = SD_PROFILE_HEADER_LEN; 20652 ucmdbuf->uscsi_cdb = cdb; 20653 ucmdbuf->uscsi_cdblen = CDB_GROUP1; 20654 ucmdbuf->uscsi_bufaddr = (caddr_t)bufaddr; 20655 ucmdbuf->uscsi_buflen = buflen; 20656 ucmdbuf->uscsi_timeout = sd_io_time; 20657 ucmdbuf->uscsi_rqbuf = (caddr_t)rqbuf; 20658 ucmdbuf->uscsi_rqlen = rqbuflen; 20659 ucmdbuf->uscsi_flags = USCSI_RQENABLE|USCSI_SILENT|USCSI_READ; 20660 20661 status = sd_ssc_send(ssc, ucmdbuf, FKIOCTL, 20662 UIO_SYSSPACE, path_flag); 20663 20664 switch (status) { 20665 case 0: 20666 sd_ssc_assessment(ssc, SD_FMT_STANDARD); 20667 break; /* Success! */ 20668 case EIO: 20669 switch (ucmdbuf->uscsi_status) { 20670 case STATUS_RESERVATION_CONFLICT: 20671 status = EACCES; 20672 break; 20673 default: 20674 break; 20675 } 20676 break; 20677 default: 20678 break; 20679 } 20680 20681 if (status == 0) { 20682 SD_DUMP_MEMORY(un, SD_LOG_IO, 20683 "sd_send_scsi_GET_CONFIGURATION: data", 20684 (uchar_t *)bufaddr, SD_PROFILE_HEADER_LEN, SD_LOG_HEX); 20685 } 20686 20687 SD_TRACE(SD_LOG_IO, un, 20688 "sd_send_scsi_GET_CONFIGURATION: exit\n"); 20689 20690 return (status); 20691 } 20692 20693 /* 20694 * Function: sd_send_scsi_feature_GET_CONFIGURATION 20695 * 20696 * Description: Issues the get configuration command to the device to 20697 * retrieve a specific feature. Called from 20698 * sd_check_for_writable_cd & sd_set_mmc_caps. 20699 * Arguments: ssc 20700 * ucmdbuf 20701 * rqbuf 20702 * rqbuflen 20703 * bufaddr 20704 * buflen 20705 * feature 20706 * 20707 * Return Code: 0 - Success 20708 * errno return code from sd_ssc_send() 20709 * 20710 * Context: Can sleep. Does not return until command is completed. 20711 * 20712 */ 20713 static int 20714 sd_send_scsi_feature_GET_CONFIGURATION(sd_ssc_t *ssc, 20715 struct uscsi_cmd *ucmdbuf, uchar_t *rqbuf, uint_t rqbuflen, 20716 uchar_t *bufaddr, uint_t buflen, char feature, int path_flag) 20717 { 20718 char cdb[CDB_GROUP1]; 20719 int status; 20720 struct sd_lun *un; 20721 20722 ASSERT(ssc != NULL); 20723 un = ssc->ssc_un; 20724 ASSERT(un != NULL); 20725 ASSERT(!mutex_owned(SD_MUTEX(un))); 20726 ASSERT(bufaddr != NULL); 20727 ASSERT(ucmdbuf != NULL); 20728 ASSERT(rqbuf != NULL); 20729 20730 SD_TRACE(SD_LOG_IO, un, 20731 "sd_send_scsi_feature_GET_CONFIGURATION: entry: un:0x%p\n", un); 20732 20733 bzero(cdb, sizeof (cdb)); 20734 bzero(ucmdbuf, sizeof (struct uscsi_cmd)); 20735 bzero(rqbuf, rqbuflen); 20736 bzero(bufaddr, buflen); 20737 20738 /* 20739 * Set up cdb field for the get configuration command. 20740 */ 20741 cdb[0] = SCMD_GET_CONFIGURATION; 20742 cdb[1] = 0x02; /* Requested Type */ 20743 cdb[3] = feature; 20744 cdb[8] = buflen; 20745 ucmdbuf->uscsi_cdb = cdb; 20746 ucmdbuf->uscsi_cdblen = CDB_GROUP1; 20747 ucmdbuf->uscsi_bufaddr = (caddr_t)bufaddr; 20748 ucmdbuf->uscsi_buflen = buflen; 20749 ucmdbuf->uscsi_timeout = sd_io_time; 20750 ucmdbuf->uscsi_rqbuf = (caddr_t)rqbuf; 20751 ucmdbuf->uscsi_rqlen = rqbuflen; 20752 ucmdbuf->uscsi_flags = USCSI_RQENABLE|USCSI_SILENT|USCSI_READ; 20753 20754 status = sd_ssc_send(ssc, ucmdbuf, FKIOCTL, 20755 UIO_SYSSPACE, path_flag); 20756 20757 switch (status) { 20758 case 0: 20759 20760 break; /* Success! 
*/ 20761 case EIO: 20762 switch (ucmdbuf->uscsi_status) { 20763 case STATUS_RESERVATION_CONFLICT: 20764 status = EACCES; 20765 break; 20766 default: 20767 break; 20768 } 20769 break; 20770 default: 20771 break; 20772 } 20773 20774 if (status == 0) { 20775 SD_DUMP_MEMORY(un, SD_LOG_IO, 20776 "sd_send_scsi_feature_GET_CONFIGURATION: data", 20777 (uchar_t *)bufaddr, SD_PROFILE_HEADER_LEN, SD_LOG_HEX); 20778 } 20779 20780 SD_TRACE(SD_LOG_IO, un, 20781 "sd_send_scsi_feature_GET_CONFIGURATION: exit\n"); 20782 20783 return (status); 20784 } 20785 20786 20787 /* 20788 * Function: sd_send_scsi_MODE_SENSE 20789 * 20790 * Description: Utility function for issuing a scsi MODE SENSE command. 20791 * Note: This routine uses a consistent implementation for Group0, 20792 * Group1, and Group2 commands across all platforms. ATAPI devices 20793 * use Group 1 Read/Write commands and Group 2 Mode Sense/Select 20794 * 20795 * Arguments: ssc - ssc contains pointer to driver soft state (unit) 20796 * structure for this target. 20797 * cdbsize - size CDB to be used (CDB_GROUP0 (6 byte), or 20798 * CDB_GROUP[1|2] (10 byte). 20799 * bufaddr - buffer for page data retrieved from the target. 20800 * buflen - size of page to be retrieved. 20801 * page_code - page code of data to be retrieved from the target. 20802 * path_flag - SD_PATH_DIRECT to use the USCSI "direct" chain and 20803 * the normal command waitq, or SD_PATH_DIRECT_PRIORITY 20804 * to use the USCSI "direct" chain and bypass the normal 20805 * command waitq. 20806 * 20807 * Return Code: 0 - Success 20808 * errno return code from sd_ssc_send() 20809 * 20810 * Context: Can sleep. Does not return until command is completed. 20811 */ 20812 20813 static int 20814 sd_send_scsi_MODE_SENSE(sd_ssc_t *ssc, int cdbsize, uchar_t *bufaddr, 20815 size_t buflen, uchar_t page_code, int path_flag) 20816 { 20817 struct scsi_extended_sense sense_buf; 20818 union scsi_cdb cdb; 20819 struct uscsi_cmd ucmd_buf; 20820 int status; 20821 int headlen; 20822 struct sd_lun *un; 20823 20824 ASSERT(ssc != NULL); 20825 un = ssc->ssc_un; 20826 ASSERT(un != NULL); 20827 ASSERT(!mutex_owned(SD_MUTEX(un))); 20828 ASSERT(bufaddr != NULL); 20829 ASSERT((cdbsize == CDB_GROUP0) || (cdbsize == CDB_GROUP1) || 20830 (cdbsize == CDB_GROUP2)); 20831 20832 SD_TRACE(SD_LOG_IO, un, 20833 "sd_send_scsi_MODE_SENSE: entry: un:0x%p\n", un); 20834 20835 bzero(&cdb, sizeof (cdb)); 20836 bzero(&ucmd_buf, sizeof (ucmd_buf)); 20837 bzero(&sense_buf, sizeof (struct scsi_extended_sense)); 20838 bzero(bufaddr, buflen); 20839 20840 if (cdbsize == CDB_GROUP0) { 20841 cdb.scc_cmd = SCMD_MODE_SENSE; 20842 cdb.cdb_opaque[2] = page_code; 20843 FORMG0COUNT(&cdb, buflen); 20844 headlen = MODE_HEADER_LENGTH; 20845 } else { 20846 cdb.scc_cmd = SCMD_MODE_SENSE_G1; 20847 cdb.cdb_opaque[2] = page_code; 20848 FORMG1COUNT(&cdb, buflen); 20849 headlen = MODE_HEADER_LENGTH_GRP2; 20850 } 20851 20852 ASSERT(headlen <= buflen); 20853 SD_FILL_SCSI1_LUN_CDB(un, &cdb); 20854 20855 ucmd_buf.uscsi_cdb = (char *)&cdb; 20856 ucmd_buf.uscsi_cdblen = (uchar_t)cdbsize; 20857 ucmd_buf.uscsi_bufaddr = (caddr_t)bufaddr; 20858 ucmd_buf.uscsi_buflen = buflen; 20859 ucmd_buf.uscsi_rqbuf = (caddr_t)&sense_buf; 20860 ucmd_buf.uscsi_rqlen = sizeof (struct scsi_extended_sense); 20861 ucmd_buf.uscsi_flags = USCSI_RQENABLE | USCSI_READ | USCSI_SILENT; 20862 ucmd_buf.uscsi_timeout = 60; 20863 20864 status = sd_ssc_send(ssc, &ucmd_buf, FKIOCTL, 20865 UIO_SYSSPACE, path_flag); 20866 20867 switch (status) { 20868 case 0: 20869 /* 20870 * sr_check_wp() uses 
the 0x3f page code and checks the header of
20871		 * the mode page to determine whether the target device is
20872		 * write-protected. But some USB devices return 0 bytes for the
20873		 * 0x3f page code. For this case, make sure that at least the
20874		 * mode page header is returned.
20875		 */
20876		if (buflen - ucmd_buf.uscsi_resid < headlen) {
20877			status = EIO;
20878			sd_ssc_set_info(ssc, SSC_FLAGS_INVALID_DATA, -1,
20879			    "mode page header is not returned");
20880		}
20881		break;	/* Success! */
20882	case EIO:
20883		switch (ucmd_buf.uscsi_status) {
20884		case STATUS_RESERVATION_CONFLICT:
20885			status = EACCES;
20886			break;
20887		default:
20888			break;
20889		}
20890		break;
20891	default:
20892		break;
20893	}
20894
20895	if (status == 0) {
20896		SD_DUMP_MEMORY(un, SD_LOG_IO, "sd_send_scsi_MODE_SENSE: data",
20897		    (uchar_t *)bufaddr, buflen, SD_LOG_HEX);
20898	}
20899	SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_MODE_SENSE: exit\n");
20900
20901	return (status);
20902 }
20903
20904
20905 /*
20906  * Function: sd_send_scsi_MODE_SELECT
20907  *
20908  * Description: Utility function for issuing a scsi MODE SELECT command.
20909  *		Note: This routine uses a consistent implementation for Group0,
20910  *		Group1, and Group2 commands across all platforms. ATAPI devices
20911  *		use Group 1 Read/Write commands and Group 2 Mode Sense/Select.
20912  *
20913  * Arguments: ssc - ssc contains pointer to driver soft state (unit)
20914  *		structure for this target.
20915  *		cdbsize - size CDB to be used (CDB_GROUP0 (6 byte) or
20916  *		CDB_GROUP[1|2] (10 byte)).
20917  *		bufaddr - buffer containing the page data to send to the target.
20918  *		buflen - size of the page data to be transferred.
20919  *		save_page - boolean to determine whether the SP bit should be set.
20920  *		path_flag - SD_PATH_DIRECT to use the USCSI "direct" chain and
20921  *		the normal command waitq, or SD_PATH_DIRECT_PRIORITY
20922  *		to use the USCSI "direct" chain and bypass the normal
20923  *		command waitq.
20924  *
20925  * Return Code: 0 - Success
20926  *		errno return code from sd_ssc_send()
20927  *
20928  * Context: Can sleep. Does not return until command is completed.
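 *
 * Usage sketch (illustrative; the caching page and SD_PATH_DIRECT here
 * are assumptions, though the driver's cache-control code follows this
 * shape): MODE SENSE the page into a buffer, patch the fields of
 * interest, then write the page back:
 *
 *	(void) sd_send_scsi_MODE_SENSE(ssc, CDB_GROUP1, buf, buflen,
 *	    MODEPAGE_CACHING, SD_PATH_DIRECT);
 *	... modify the page bytes in buf ...
 *	(void) sd_send_scsi_MODE_SELECT(ssc, CDB_GROUP1, buf, buflen,
 *	    SD_SAVE_PAGE, SD_PATH_DIRECT);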
20929 */
20930
20931 static int
20932 sd_send_scsi_MODE_SELECT(sd_ssc_t *ssc, int cdbsize, uchar_t *bufaddr,
20933     size_t buflen, uchar_t save_page, int path_flag)
20934 {
20935	struct scsi_extended_sense sense_buf;
20936	union scsi_cdb cdb;
20937	struct uscsi_cmd ucmd_buf;
20938	int status;
20939	struct sd_lun *un;
20940
20941	ASSERT(ssc != NULL);
20942	un = ssc->ssc_un;
20943	ASSERT(un != NULL);
20944	ASSERT(!mutex_owned(SD_MUTEX(un)));
20945	ASSERT(bufaddr != NULL);
20946	ASSERT((cdbsize == CDB_GROUP0) || (cdbsize == CDB_GROUP1) ||
20947	    (cdbsize == CDB_GROUP2));
20948
20949	SD_TRACE(SD_LOG_IO, un,
20950	    "sd_send_scsi_MODE_SELECT: entry: un:0x%p\n", un);
20951
20952	bzero(&cdb, sizeof (cdb));
20953	bzero(&ucmd_buf, sizeof (ucmd_buf));
20954	bzero(&sense_buf, sizeof (struct scsi_extended_sense));
20955
20956	/* Set the PF bit for many third party drives */
20957	cdb.cdb_opaque[1] = 0x10;
20958
20959	/* Set the save page (SP) bit if requested */
20960	if (save_page == SD_SAVE_PAGE) {
20961		cdb.cdb_opaque[1] |= 0x01;
20962	}
20963
20964	if (cdbsize == CDB_GROUP0) {
20965		cdb.scc_cmd = SCMD_MODE_SELECT;
20966		FORMG0COUNT(&cdb, buflen);
20967	} else {
20968		cdb.scc_cmd = SCMD_MODE_SELECT_G1;
20969		FORMG1COUNT(&cdb, buflen);
20970	}
20971
20972	SD_FILL_SCSI1_LUN_CDB(un, &cdb);
20973
20974	ucmd_buf.uscsi_cdb = (char *)&cdb;
20975	ucmd_buf.uscsi_cdblen = (uchar_t)cdbsize;
20976	ucmd_buf.uscsi_bufaddr = (caddr_t)bufaddr;
20977	ucmd_buf.uscsi_buflen = buflen;
20978	ucmd_buf.uscsi_rqbuf = (caddr_t)&sense_buf;
20979	ucmd_buf.uscsi_rqlen = sizeof (struct scsi_extended_sense);
20980	ucmd_buf.uscsi_flags = USCSI_RQENABLE | USCSI_WRITE | USCSI_SILENT;
20981	ucmd_buf.uscsi_timeout = 60;
20982
20983	status = sd_ssc_send(ssc, &ucmd_buf, FKIOCTL,
20984	    UIO_SYSSPACE, path_flag);
20985
20986	switch (status) {
20987	case 0:
20988		sd_ssc_assessment(ssc, SD_FMT_STANDARD);
20989		break;	/* Success! */
20990	case EIO:
20991		switch (ucmd_buf.uscsi_status) {
20992		case STATUS_RESERVATION_CONFLICT:
20993			status = EACCES;
20994			break;
20995		default:
20996			break;
20997		}
20998		break;
20999	default:
21000		break;
21001	}
21002
21003	if (status == 0) {
21004		SD_DUMP_MEMORY(un, SD_LOG_IO, "sd_send_scsi_MODE_SELECT: data",
21005		    (uchar_t *)bufaddr, buflen, SD_LOG_HEX);
21006	}
21007	SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_MODE_SELECT: exit\n");
21008
21009	return (status);
21010 }
21011
21012
21013 /*
21014  * Function: sd_send_scsi_RDWR
21015  *
21016  * Description: Issue a scsi READ or WRITE command with the given parameters.
21017  *
21018  * Arguments: ssc - ssc contains pointer to driver soft state (unit)
21019  *		structure for this target.
21020  *		cmd: SCMD_READ or SCMD_WRITE
21021  *		bufaddr: Address of caller's buffer for the RDWR data
21022  *		    (source for a WRITE, destination for a READ).
21023  *		buflen: Length of caller's buffer for the RDWR data.
21024  *		start_block: Block number for the start of the RDWR operation.
 *		    (Assumes target-native block size.)
21027  *		path_flag - SD_PATH_DIRECT to use the USCSI "direct" chain and
21028  *		the normal command waitq, or SD_PATH_DIRECT_PRIORITY
21029  *		to use the USCSI "direct" chain and bypass the normal
21030  *		command waitq.
21031  *
21032  * Return Code: 0 - Success
21033  *		errno return code from sd_ssc_send()
21034  *
21035  * Context: Can sleep. Does not return until command is completed.
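 *
 * Note: the CDB group is chosen from start_block, in brief: a block
 * number above 0xFFFFFFFF needs the 16-byte Group 4 CDB; a block number
 * that does not fit in 21 bits (mask 0xFFE00000), or any ATAPI device,
 * gets the 10-byte Group 1 CDB; anything else uses the 6-byte Group 0
 * CDB.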
21036 */ 21037 21038 static int 21039 sd_send_scsi_RDWR(sd_ssc_t *ssc, uchar_t cmd, void *bufaddr, 21040 size_t buflen, daddr_t start_block, int path_flag) 21041 { 21042 struct scsi_extended_sense sense_buf; 21043 union scsi_cdb cdb; 21044 struct uscsi_cmd ucmd_buf; 21045 uint32_t block_count; 21046 int status; 21047 int cdbsize; 21048 uchar_t flag; 21049 struct sd_lun *un; 21050 21051 ASSERT(ssc != NULL); 21052 un = ssc->ssc_un; 21053 ASSERT(un != NULL); 21054 ASSERT(!mutex_owned(SD_MUTEX(un))); 21055 ASSERT(bufaddr != NULL); 21056 ASSERT((cmd == SCMD_READ) || (cmd == SCMD_WRITE)); 21057 21058 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_RDWR: entry: un:0x%p\n", un); 21059 21060 if (un->un_f_tgt_blocksize_is_valid != TRUE) { 21061 return (EINVAL); 21062 } 21063 21064 mutex_enter(SD_MUTEX(un)); 21065 block_count = SD_BYTES2TGTBLOCKS(un, buflen); 21066 mutex_exit(SD_MUTEX(un)); 21067 21068 flag = (cmd == SCMD_READ) ? USCSI_READ : USCSI_WRITE; 21069 21070 SD_INFO(SD_LOG_IO, un, "sd_send_scsi_RDWR: " 21071 "bufaddr:0x%p buflen:0x%x start_block:0x%p block_count:0x%x\n", 21072 bufaddr, buflen, start_block, block_count); 21073 21074 bzero(&cdb, sizeof (cdb)); 21075 bzero(&ucmd_buf, sizeof (ucmd_buf)); 21076 bzero(&sense_buf, sizeof (struct scsi_extended_sense)); 21077 21078 /* Compute CDB size to use */ 21079 if (start_block > 0xffffffff) 21080 cdbsize = CDB_GROUP4; 21081 else if ((start_block & 0xFFE00000) || 21082 (un->un_f_cfg_is_atapi == TRUE)) 21083 cdbsize = CDB_GROUP1; 21084 else 21085 cdbsize = CDB_GROUP0; 21086 21087 switch (cdbsize) { 21088 case CDB_GROUP0: /* 6-byte CDBs */ 21089 cdb.scc_cmd = cmd; 21090 FORMG0ADDR(&cdb, start_block); 21091 FORMG0COUNT(&cdb, block_count); 21092 break; 21093 case CDB_GROUP1: /* 10-byte CDBs */ 21094 cdb.scc_cmd = cmd | SCMD_GROUP1; 21095 FORMG1ADDR(&cdb, start_block); 21096 FORMG1COUNT(&cdb, block_count); 21097 break; 21098 case CDB_GROUP4: /* 16-byte CDBs */ 21099 cdb.scc_cmd = cmd | SCMD_GROUP4; 21100 FORMG4LONGADDR(&cdb, (uint64_t)start_block); 21101 FORMG4COUNT(&cdb, block_count); 21102 break; 21103 case CDB_GROUP5: /* 12-byte CDBs (currently unsupported) */ 21104 default: 21105 /* All others reserved */ 21106 return (EINVAL); 21107 } 21108 21109 /* Set LUN bit(s) in CDB if this is a SCSI-1 device */ 21110 SD_FILL_SCSI1_LUN_CDB(un, &cdb); 21111 21112 ucmd_buf.uscsi_cdb = (char *)&cdb; 21113 ucmd_buf.uscsi_cdblen = (uchar_t)cdbsize; 21114 ucmd_buf.uscsi_bufaddr = bufaddr; 21115 ucmd_buf.uscsi_buflen = buflen; 21116 ucmd_buf.uscsi_rqbuf = (caddr_t)&sense_buf; 21117 ucmd_buf.uscsi_rqlen = sizeof (struct scsi_extended_sense); 21118 ucmd_buf.uscsi_flags = flag | USCSI_RQENABLE | USCSI_SILENT; 21119 ucmd_buf.uscsi_timeout = 60; 21120 status = sd_ssc_send(ssc, &ucmd_buf, FKIOCTL, 21121 UIO_SYSSPACE, path_flag); 21122 21123 switch (status) { 21124 case 0: 21125 sd_ssc_assessment(ssc, SD_FMT_STANDARD); 21126 break; /* Success! */ 21127 case EIO: 21128 switch (ucmd_buf.uscsi_status) { 21129 case STATUS_RESERVATION_CONFLICT: 21130 status = EACCES; 21131 break; 21132 default: 21133 break; 21134 } 21135 break; 21136 default: 21137 break; 21138 } 21139 21140 if (status == 0) { 21141 SD_DUMP_MEMORY(un, SD_LOG_IO, "sd_send_scsi_RDWR: data", 21142 (uchar_t *)bufaddr, buflen, SD_LOG_HEX); 21143 } 21144 21145 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_RDWR: exit\n"); 21146 21147 return (status); 21148 } 21149 21150 21151 /* 21152 * Function: sd_send_scsi_LOG_SENSE 21153 * 21154 * Description: Issue a scsi LOG_SENSE command with the given parameters. 
21155 * 21156 * Arguments: ssc - ssc contains pointer to driver soft state (unit) 21157 * structure for this target. 21158 * 21159 * Return Code: 0 - Success 21160 * errno return code from sd_ssc_send() 21161 * 21162 * Context: Can sleep. Does not return until command is completed. 21163 */ 21164 21165 static int 21166 sd_send_scsi_LOG_SENSE(sd_ssc_t *ssc, uchar_t *bufaddr, uint16_t buflen, 21167 uchar_t page_code, uchar_t page_control, uint16_t param_ptr, 21168 int path_flag) 21169 21170 { 21171 struct scsi_extended_sense sense_buf; 21172 union scsi_cdb cdb; 21173 struct uscsi_cmd ucmd_buf; 21174 int status; 21175 struct sd_lun *un; 21176 21177 ASSERT(ssc != NULL); 21178 un = ssc->ssc_un; 21179 ASSERT(un != NULL); 21180 ASSERT(!mutex_owned(SD_MUTEX(un))); 21181 21182 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_LOG_SENSE: entry: un:0x%p\n", un); 21183 21184 bzero(&cdb, sizeof (cdb)); 21185 bzero(&ucmd_buf, sizeof (ucmd_buf)); 21186 bzero(&sense_buf, sizeof (struct scsi_extended_sense)); 21187 21188 cdb.scc_cmd = SCMD_LOG_SENSE_G1; 21189 cdb.cdb_opaque[2] = (page_control << 6) | page_code; 21190 cdb.cdb_opaque[5] = (uchar_t)((param_ptr & 0xFF00) >> 8); 21191 cdb.cdb_opaque[6] = (uchar_t)(param_ptr & 0x00FF); 21192 FORMG1COUNT(&cdb, buflen); 21193 21194 ucmd_buf.uscsi_cdb = (char *)&cdb; 21195 ucmd_buf.uscsi_cdblen = CDB_GROUP1; 21196 ucmd_buf.uscsi_bufaddr = (caddr_t)bufaddr; 21197 ucmd_buf.uscsi_buflen = buflen; 21198 ucmd_buf.uscsi_rqbuf = (caddr_t)&sense_buf; 21199 ucmd_buf.uscsi_rqlen = sizeof (struct scsi_extended_sense); 21200 ucmd_buf.uscsi_flags = USCSI_RQENABLE | USCSI_READ | USCSI_SILENT; 21201 ucmd_buf.uscsi_timeout = 60; 21202 21203 status = sd_ssc_send(ssc, &ucmd_buf, FKIOCTL, 21204 UIO_SYSSPACE, path_flag); 21205 21206 switch (status) { 21207 case 0: 21208 break; 21209 case EIO: 21210 switch (ucmd_buf.uscsi_status) { 21211 case STATUS_RESERVATION_CONFLICT: 21212 status = EACCES; 21213 break; 21214 case STATUS_CHECK: 21215 if ((ucmd_buf.uscsi_rqstatus == STATUS_GOOD) && 21216 (scsi_sense_key((uint8_t *)&sense_buf) == 21217 KEY_ILLEGAL_REQUEST) && 21218 (scsi_sense_asc((uint8_t *)&sense_buf) == 0x24)) { 21219 /* 21220 * ASC 0x24: INVALID FIELD IN CDB 21221 */ 21222 switch (page_code) { 21223 case START_STOP_CYCLE_PAGE: 21224 /* 21225 * The start stop cycle counter is 21226 * implemented as page 0x31 in earlier 21227 * generation disks. In new generation 21228 * disks the start stop cycle counter is 21229 * implemented as page 0xE. To properly 21230 * handle this case if an attempt for 21231 * log page 0xE is made and fails we 21232 * will try again using page 0x31. 21233 * 21234 * Network storage BU committed to 21235 * maintain the page 0x31 for this 21236 * purpose and will not have any other 21237 * page implemented with page code 0x31 21238 * until all disks transition to the 21239 * standard page. 
21240 */ 21241 mutex_enter(SD_MUTEX(un)); 21242 un->un_start_stop_cycle_page = 21243 START_STOP_CYCLE_VU_PAGE; 21244 cdb.cdb_opaque[2] = 21245 (char)(page_control << 6) | 21246 un->un_start_stop_cycle_page; 21247 mutex_exit(SD_MUTEX(un)); 21248 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 21249 status = sd_ssc_send( 21250 ssc, &ucmd_buf, FKIOCTL, 21251 UIO_SYSSPACE, path_flag); 21252 21253 break; 21254 case TEMPERATURE_PAGE: 21255 status = ENOTTY; 21256 break; 21257 default: 21258 break; 21259 } 21260 } 21261 break; 21262 default: 21263 break; 21264 } 21265 break; 21266 default: 21267 break; 21268 } 21269 21270 if (status == 0) { 21271 sd_ssc_assessment(ssc, SD_FMT_STANDARD); 21272 SD_DUMP_MEMORY(un, SD_LOG_IO, "sd_send_scsi_LOG_SENSE: data", 21273 (uchar_t *)bufaddr, buflen, SD_LOG_HEX); 21274 } 21275 21276 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_LOG_SENSE: exit\n"); 21277 21278 return (status); 21279 } 21280 21281 21282 /* 21283 * Function: sdioctl 21284 * 21285 * Description: Driver's ioctl(9e) entry point function. 21286 * 21287 * Arguments: dev - device number 21288 * cmd - ioctl operation to be performed 21289 * arg - user argument, contains data to be set or reference 21290 * parameter for get 21291 * flag - bit flag, indicating open settings, 32/64 bit type 21292 * cred_p - user credential pointer 21293 * rval_p - calling process return value (OPT) 21294 * 21295 * Return Code: EINVAL 21296 * ENOTTY 21297 * ENXIO 21298 * EIO 21299 * EFAULT 21300 * ENOTSUP 21301 * EPERM 21302 * 21303 * Context: Called from the device switch at normal priority. 21304 */ 21305 21306 static int 21307 sdioctl(dev_t dev, int cmd, intptr_t arg, int flag, cred_t *cred_p, int *rval_p) 21308 { 21309 struct sd_lun *un = NULL; 21310 int err = 0; 21311 int i = 0; 21312 cred_t *cr; 21313 int tmprval = EINVAL; 21314 boolean_t is_valid; 21315 sd_ssc_t *ssc; 21316 21317 /* 21318 * All device accesses go thru sdstrategy where we check on suspend 21319 * status 21320 */ 21321 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 21322 return (ENXIO); 21323 } 21324 21325 ASSERT(!mutex_owned(SD_MUTEX(un))); 21326 21327 /* Initialize sd_ssc_t for internal uscsi commands */ 21328 ssc = sd_ssc_init(un); 21329 21330 is_valid = SD_IS_VALID_LABEL(un); 21331 21332 /* 21333 * Moved this wait from sd_uscsi_strategy to here for 21334 * reasons of deadlock prevention. Internal driver commands, 21335 * specifically those to change a devices power level, result 21336 * in a call to sd_uscsi_strategy. 21337 */ 21338 mutex_enter(SD_MUTEX(un)); 21339 while ((un->un_state == SD_STATE_SUSPENDED) || 21340 (un->un_state == SD_STATE_PM_CHANGING)) { 21341 cv_wait(&un->un_suspend_cv, SD_MUTEX(un)); 21342 } 21343 /* 21344 * Twiddling the counter here protects commands from now 21345 * through to the top of sd_uscsi_strategy. Without the 21346 * counter inc. a power down, for example, could get in 21347 * after the above check for state is made and before 21348 * execution gets to the top of sd_uscsi_strategy. 21349 * That would cause problems. 
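	 * A concrete interleaving of that race (illustrative): this
	 * thread passes the state check above and later drops SD_MUTEX;
	 * before it reaches sd_uscsi_strategy (which also takes the
	 * count), a power-management thread moves the device to
	 * SD_STATE_PM_CHANGING and begins a power-down; this thread then
	 * issues a command to a device that is no longer powered up.
	 * Taking the count below, while still holding the mutex, closes
	 * that window.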
21350 */ 21351 un->un_ncmds_in_driver++; 21352 21353 if (!is_valid && 21354 (flag & (FNDELAY | FNONBLOCK))) { 21355 switch (cmd) { 21356 case DKIOCGGEOM: /* SD_PATH_DIRECT */ 21357 case DKIOCGVTOC: 21358 case DKIOCGEXTVTOC: 21359 case DKIOCGAPART: 21360 case DKIOCPARTINFO: 21361 case DKIOCEXTPARTINFO: 21362 case DKIOCSGEOM: 21363 case DKIOCSAPART: 21364 case DKIOCGETEFI: 21365 case DKIOCPARTITION: 21366 case DKIOCSVTOC: 21367 case DKIOCSEXTVTOC: 21368 case DKIOCSETEFI: 21369 case DKIOCGMBOOT: 21370 case DKIOCSMBOOT: 21371 case DKIOCG_PHYGEOM: 21372 case DKIOCG_VIRTGEOM: 21373 /* let cmlb handle it */ 21374 goto skip_ready_valid; 21375 21376 case CDROMPAUSE: 21377 case CDROMRESUME: 21378 case CDROMPLAYMSF: 21379 case CDROMPLAYTRKIND: 21380 case CDROMREADTOCHDR: 21381 case CDROMREADTOCENTRY: 21382 case CDROMSTOP: 21383 case CDROMSTART: 21384 case CDROMVOLCTRL: 21385 case CDROMSUBCHNL: 21386 case CDROMREADMODE2: 21387 case CDROMREADMODE1: 21388 case CDROMREADOFFSET: 21389 case CDROMSBLKMODE: 21390 case CDROMGBLKMODE: 21391 case CDROMGDRVSPEED: 21392 case CDROMSDRVSPEED: 21393 case CDROMCDDA: 21394 case CDROMCDXA: 21395 case CDROMSUBCODE: 21396 if (!ISCD(un)) { 21397 un->un_ncmds_in_driver--; 21398 ASSERT(un->un_ncmds_in_driver >= 0); 21399 mutex_exit(SD_MUTEX(un)); 21400 err = ENOTTY; 21401 goto done_without_assess; 21402 } 21403 break; 21404 case FDEJECT: 21405 case DKIOCEJECT: 21406 case CDROMEJECT: 21407 if (!un->un_f_eject_media_supported) { 21408 un->un_ncmds_in_driver--; 21409 ASSERT(un->un_ncmds_in_driver >= 0); 21410 mutex_exit(SD_MUTEX(un)); 21411 err = ENOTTY; 21412 goto done_without_assess; 21413 } 21414 break; 21415 case DKIOCFLUSHWRITECACHE: 21416 mutex_exit(SD_MUTEX(un)); 21417 err = sd_send_scsi_TEST_UNIT_READY(ssc, 0); 21418 if (err != 0) { 21419 mutex_enter(SD_MUTEX(un)); 21420 un->un_ncmds_in_driver--; 21421 ASSERT(un->un_ncmds_in_driver >= 0); 21422 mutex_exit(SD_MUTEX(un)); 21423 err = EIO; 21424 goto done_quick_assess; 21425 } 21426 mutex_enter(SD_MUTEX(un)); 21427 /* FALLTHROUGH */ 21428 case DKIOCREMOVABLE: 21429 case DKIOCHOTPLUGGABLE: 21430 case DKIOCINFO: 21431 case DKIOCGMEDIAINFO: 21432 case MHIOCENFAILFAST: 21433 case MHIOCSTATUS: 21434 case MHIOCTKOWN: 21435 case MHIOCRELEASE: 21436 case MHIOCGRP_INKEYS: 21437 case MHIOCGRP_INRESV: 21438 case MHIOCGRP_REGISTER: 21439 case MHIOCGRP_RESERVE: 21440 case MHIOCGRP_PREEMPTANDABORT: 21441 case MHIOCGRP_REGISTERANDIGNOREKEY: 21442 case CDROMCLOSETRAY: 21443 case USCSICMD: 21444 goto skip_ready_valid; 21445 default: 21446 break; 21447 } 21448 21449 mutex_exit(SD_MUTEX(un)); 21450 err = sd_ready_and_valid(ssc, SDPART(dev)); 21451 mutex_enter(SD_MUTEX(un)); 21452 21453 if (err != SD_READY_VALID) { 21454 switch (cmd) { 21455 case DKIOCSTATE: 21456 case CDROMGDRVSPEED: 21457 case CDROMSDRVSPEED: 21458 case FDEJECT: /* for eject command */ 21459 case DKIOCEJECT: 21460 case CDROMEJECT: 21461 case DKIOCREMOVABLE: 21462 case DKIOCHOTPLUGGABLE: 21463 break; 21464 default: 21465 if (un->un_f_has_removable_media) { 21466 err = ENXIO; 21467 } else { 21468 /* Do not map SD_RESERVED_BY_OTHERS to EIO */ 21469 if (err == SD_RESERVED_BY_OTHERS) { 21470 err = EACCES; 21471 } else { 21472 err = EIO; 21473 } 21474 } 21475 un->un_ncmds_in_driver--; 21476 ASSERT(un->un_ncmds_in_driver >= 0); 21477 mutex_exit(SD_MUTEX(un)); 21478 21479 goto done_without_assess; 21480 } 21481 } 21482 } 21483 21484 skip_ready_valid: 21485 mutex_exit(SD_MUTEX(un)); 21486 21487 switch (cmd) { 21488 case DKIOCINFO: 21489 SD_TRACE(SD_LOG_IOCTL, un, "DKIOCINFO\n"); 
21490 err = sd_dkio_ctrl_info(dev, (caddr_t)arg, flag); 21491 break; 21492 21493 case DKIOCGMEDIAINFO: 21494 SD_TRACE(SD_LOG_IOCTL, un, "DKIOCGMEDIAINFO\n"); 21495 err = sd_get_media_info(dev, (caddr_t)arg, flag); 21496 break; 21497 21498 case DKIOCGGEOM: 21499 case DKIOCGVTOC: 21500 case DKIOCGEXTVTOC: 21501 case DKIOCGAPART: 21502 case DKIOCPARTINFO: 21503 case DKIOCEXTPARTINFO: 21504 case DKIOCSGEOM: 21505 case DKIOCSAPART: 21506 case DKIOCGETEFI: 21507 case DKIOCPARTITION: 21508 case DKIOCSVTOC: 21509 case DKIOCSEXTVTOC: 21510 case DKIOCSETEFI: 21511 case DKIOCGMBOOT: 21512 case DKIOCSMBOOT: 21513 case DKIOCG_PHYGEOM: 21514 case DKIOCG_VIRTGEOM: 21515 SD_TRACE(SD_LOG_IOCTL, un, "DKIOC %d\n", cmd); 21516 21517 /* TUR should spin up */ 21518 21519 if (un->un_f_has_removable_media) 21520 err = sd_send_scsi_TEST_UNIT_READY(ssc, 21521 SD_CHECK_FOR_MEDIA); 21522 21523 else 21524 err = sd_send_scsi_TEST_UNIT_READY(ssc, 0); 21525 21526 if (err != 0) 21527 goto done_with_assess; 21528 21529 err = cmlb_ioctl(un->un_cmlbhandle, dev, 21530 cmd, arg, flag, cred_p, rval_p, (void *)SD_PATH_DIRECT); 21531 21532 if ((err == 0) && 21533 ((cmd == DKIOCSETEFI) || 21534 (un->un_f_pkstats_enabled) && 21535 (cmd == DKIOCSAPART || cmd == DKIOCSVTOC || 21536 cmd == DKIOCSEXTVTOC))) { 21537 21538 tmprval = cmlb_validate(un->un_cmlbhandle, CMLB_SILENT, 21539 (void *)SD_PATH_DIRECT); 21540 if ((tmprval == 0) && un->un_f_pkstats_enabled) { 21541 sd_set_pstats(un); 21542 SD_TRACE(SD_LOG_IO_PARTITION, un, 21543 "sd_ioctl: un:0x%p pstats created and " 21544 "set\n", un); 21545 } 21546 } 21547 21548 if ((cmd == DKIOCSVTOC || cmd == DKIOCSEXTVTOC) || 21549 ((cmd == DKIOCSETEFI) && (tmprval == 0))) { 21550 21551 mutex_enter(SD_MUTEX(un)); 21552 if (un->un_f_devid_supported && 21553 (un->un_f_opt_fab_devid == TRUE)) { 21554 if (un->un_devid == NULL) { 21555 sd_register_devid(ssc, SD_DEVINFO(un), 21556 SD_TARGET_IS_UNRESERVED); 21557 } else { 21558 /* 21559 * The device id for this disk 21560 * has been fabricated. The 21561 * device id must be preserved 21562 * by writing it back out to 21563 * disk. 21564 */ 21565 if (sd_write_deviceid(ssc) != 0) { 21566 ddi_devid_free(un->un_devid); 21567 un->un_devid = NULL; 21568 } 21569 } 21570 } 21571 mutex_exit(SD_MUTEX(un)); 21572 } 21573 21574 break; 21575 21576 case DKIOCLOCK: 21577 SD_TRACE(SD_LOG_IOCTL, un, "DKIOCLOCK\n"); 21578 err = sd_send_scsi_DOORLOCK(ssc, SD_REMOVAL_PREVENT, 21579 SD_PATH_STANDARD); 21580 goto done_with_assess; 21581 21582 case DKIOCUNLOCK: 21583 SD_TRACE(SD_LOG_IOCTL, un, "DKIOCUNLOCK\n"); 21584 err = sd_send_scsi_DOORLOCK(ssc, SD_REMOVAL_ALLOW, 21585 SD_PATH_STANDARD); 21586 goto done_with_assess; 21587 21588 case DKIOCSTATE: { 21589 enum dkio_state state; 21590 SD_TRACE(SD_LOG_IOCTL, un, "DKIOCSTATE\n"); 21591 21592 if (ddi_copyin((void *)arg, &state, sizeof (int), flag) != 0) { 21593 err = EFAULT; 21594 } else { 21595 err = sd_check_media(dev, state); 21596 if (err == 0) { 21597 if (ddi_copyout(&un->un_mediastate, (void *)arg, 21598 sizeof (int), flag) != 0) 21599 err = EFAULT; 21600 } 21601 } 21602 break; 21603 } 21604 21605 case DKIOCREMOVABLE: 21606 SD_TRACE(SD_LOG_IOCTL, un, "DKIOCREMOVABLE\n"); 21607 i = un->un_f_has_removable_media ? 1 : 0; 21608 if (ddi_copyout(&i, (void *)arg, sizeof (int), flag) != 0) { 21609 err = EFAULT; 21610 } else { 21611 err = 0; 21612 } 21613 break; 21614 21615 case DKIOCHOTPLUGGABLE: 21616 SD_TRACE(SD_LOG_IOCTL, un, "DKIOCHOTPLUGGABLE\n"); 21617 i = un->un_f_is_hotpluggable ? 
1 : 0; 21618 if (ddi_copyout(&i, (void *)arg, sizeof (int), flag) != 0) { 21619 err = EFAULT; 21620 } else { 21621 err = 0; 21622 } 21623 break; 21624 21625 case DKIOCGTEMPERATURE: 21626 SD_TRACE(SD_LOG_IOCTL, un, "DKIOCGTEMPERATURE\n"); 21627 err = sd_dkio_get_temp(dev, (caddr_t)arg, flag); 21628 break; 21629 21630 case MHIOCENFAILFAST: 21631 SD_TRACE(SD_LOG_IOCTL, un, "MHIOCENFAILFAST\n"); 21632 if ((err = drv_priv(cred_p)) == 0) { 21633 err = sd_mhdioc_failfast(dev, (caddr_t)arg, flag); 21634 } 21635 break; 21636 21637 case MHIOCTKOWN: 21638 SD_TRACE(SD_LOG_IOCTL, un, "MHIOCTKOWN\n"); 21639 if ((err = drv_priv(cred_p)) == 0) { 21640 err = sd_mhdioc_takeown(dev, (caddr_t)arg, flag); 21641 } 21642 break; 21643 21644 case MHIOCRELEASE: 21645 SD_TRACE(SD_LOG_IOCTL, un, "MHIOCRELEASE\n"); 21646 if ((err = drv_priv(cred_p)) == 0) { 21647 err = sd_mhdioc_release(dev); 21648 } 21649 break; 21650 21651 case MHIOCSTATUS: 21652 SD_TRACE(SD_LOG_IOCTL, un, "MHIOCSTATUS\n"); 21653 if ((err = drv_priv(cred_p)) == 0) { 21654 switch (sd_send_scsi_TEST_UNIT_READY(ssc, 0)) { 21655 case 0: 21656 err = 0; 21657 break; 21658 case EACCES: 21659 *rval_p = 1; 21660 err = 0; 21661 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 21662 break; 21663 default: 21664 err = EIO; 21665 goto done_with_assess; 21666 } 21667 } 21668 break; 21669 21670 case MHIOCQRESERVE: 21671 SD_TRACE(SD_LOG_IOCTL, un, "MHIOCQRESERVE\n"); 21672 if ((err = drv_priv(cred_p)) == 0) { 21673 err = sd_reserve_release(dev, SD_RESERVE); 21674 } 21675 break; 21676 21677 case MHIOCREREGISTERDEVID: 21678 SD_TRACE(SD_LOG_IOCTL, un, "MHIOCREREGISTERDEVID\n"); 21679 if (drv_priv(cred_p) == EPERM) { 21680 err = EPERM; 21681 } else if (!un->un_f_devid_supported) { 21682 err = ENOTTY; 21683 } else { 21684 err = sd_mhdioc_register_devid(dev); 21685 } 21686 break; 21687 21688 case MHIOCGRP_INKEYS: 21689 SD_TRACE(SD_LOG_IOCTL, un, "MHIOCGRP_INKEYS\n"); 21690 if (((err = drv_priv(cred_p)) != EPERM) && arg != NULL) { 21691 if (un->un_reservation_type == SD_SCSI2_RESERVATION) { 21692 err = ENOTSUP; 21693 } else { 21694 err = sd_mhdioc_inkeys(dev, (caddr_t)arg, 21695 flag); 21696 } 21697 } 21698 break; 21699 21700 case MHIOCGRP_INRESV: 21701 SD_TRACE(SD_LOG_IOCTL, un, "MHIOCGRP_INRESV\n"); 21702 if (((err = drv_priv(cred_p)) != EPERM) && arg != NULL) { 21703 if (un->un_reservation_type == SD_SCSI2_RESERVATION) { 21704 err = ENOTSUP; 21705 } else { 21706 err = sd_mhdioc_inresv(dev, (caddr_t)arg, flag); 21707 } 21708 } 21709 break; 21710 21711 case MHIOCGRP_REGISTER: 21712 SD_TRACE(SD_LOG_IOCTL, un, "MHIOCGRP_REGISTER\n"); 21713 if ((err = drv_priv(cred_p)) != EPERM) { 21714 if (un->un_reservation_type == SD_SCSI2_RESERVATION) { 21715 err = ENOTSUP; 21716 } else if (arg != NULL) { 21717 mhioc_register_t reg; 21718 if (ddi_copyin((void *)arg, ®, 21719 sizeof (mhioc_register_t), flag) != 0) { 21720 err = EFAULT; 21721 } else { 21722 err = 21723 sd_send_scsi_PERSISTENT_RESERVE_OUT( 21724 ssc, SD_SCSI3_REGISTER, 21725 (uchar_t *)®); 21726 if (err != 0) 21727 goto done_with_assess; 21728 } 21729 } 21730 } 21731 break; 21732 21733 case MHIOCGRP_RESERVE: 21734 SD_TRACE(SD_LOG_IOCTL, un, "MHIOCGRP_RESERVE\n"); 21735 if ((err = drv_priv(cred_p)) != EPERM) { 21736 if (un->un_reservation_type == SD_SCSI2_RESERVATION) { 21737 err = ENOTSUP; 21738 } else if (arg != NULL) { 21739 mhioc_resv_desc_t resv_desc; 21740 if (ddi_copyin((void *)arg, &resv_desc, 21741 sizeof (mhioc_resv_desc_t), flag) != 0) { 21742 err = EFAULT; 21743 } else { 21744 err = 21745 
sd_send_scsi_PERSISTENT_RESERVE_OUT( 21746 ssc, SD_SCSI3_RESERVE, 21747 (uchar_t *)&resv_desc); 21748 if (err != 0) 21749 goto done_with_assess; 21750 } 21751 } 21752 } 21753 break; 21754 21755 case MHIOCGRP_PREEMPTANDABORT: 21756 SD_TRACE(SD_LOG_IOCTL, un, "MHIOCGRP_PREEMPTANDABORT\n"); 21757 if ((err = drv_priv(cred_p)) != EPERM) { 21758 if (un->un_reservation_type == SD_SCSI2_RESERVATION) { 21759 err = ENOTSUP; 21760 } else if (arg != NULL) { 21761 mhioc_preemptandabort_t preempt_abort; 21762 if (ddi_copyin((void *)arg, &preempt_abort, 21763 sizeof (mhioc_preemptandabort_t), 21764 flag) != 0) { 21765 err = EFAULT; 21766 } else { 21767 err = 21768 sd_send_scsi_PERSISTENT_RESERVE_OUT( 21769 ssc, SD_SCSI3_PREEMPTANDABORT, 21770 (uchar_t *)&preempt_abort); 21771 if (err != 0) 21772 goto done_with_assess; 21773 } 21774 } 21775 } 21776 break; 21777 21778 case MHIOCGRP_REGISTERANDIGNOREKEY: 21779 SD_TRACE(SD_LOG_IOCTL, un, "MHIOCGRP_REGISTERANDIGNOREKEY\n"); 21780 if ((err = drv_priv(cred_p)) != EPERM) { 21781 if (un->un_reservation_type == SD_SCSI2_RESERVATION) { 21782 err = ENOTSUP; 21783 } else if (arg != NULL) { 21784 mhioc_registerandignorekey_t r_and_i; 21785 if (ddi_copyin((void *)arg, (void *)&r_and_i, 21786 sizeof (mhioc_registerandignorekey_t), 21787 flag) != 0) { 21788 err = EFAULT; 21789 } else { 21790 err = 21791 sd_send_scsi_PERSISTENT_RESERVE_OUT( 21792 ssc, SD_SCSI3_REGISTERANDIGNOREKEY, 21793 (uchar_t *)&r_and_i); 21794 if (err != 0) 21795 goto done_with_assess; 21796 } 21797 } 21798 } 21799 break; 21800 21801 case USCSICMD: 21802 SD_TRACE(SD_LOG_IOCTL, un, "USCSICMD\n"); 21803 cr = ddi_get_cred(); 21804 if ((drv_priv(cred_p) != 0) && (drv_priv(cr) != 0)) { 21805 err = EPERM; 21806 } else { 21807 enum uio_seg uioseg; 21808 21809 uioseg = (flag & FKIOCTL) ? 
UIO_SYSSPACE : 21810 UIO_USERSPACE; 21811 if (un->un_f_format_in_progress == TRUE) { 21812 err = EAGAIN; 21813 break; 21814 } 21815 21816 err = sd_ssc_send(ssc, 21817 (struct uscsi_cmd *)arg, 21818 flag, uioseg, SD_PATH_STANDARD); 21819 if (err != 0) 21820 goto done_with_assess; 21821 else 21822 sd_ssc_assessment(ssc, SD_FMT_STANDARD); 21823 } 21824 break; 21825 21826 case CDROMPAUSE: 21827 case CDROMRESUME: 21828 SD_TRACE(SD_LOG_IOCTL, un, "PAUSE-RESUME\n"); 21829 if (!ISCD(un)) { 21830 err = ENOTTY; 21831 } else { 21832 err = sr_pause_resume(dev, cmd); 21833 } 21834 break; 21835 21836 case CDROMPLAYMSF: 21837 SD_TRACE(SD_LOG_IOCTL, un, "CDROMPLAYMSF\n"); 21838 if (!ISCD(un)) { 21839 err = ENOTTY; 21840 } else { 21841 err = sr_play_msf(dev, (caddr_t)arg, flag); 21842 } 21843 break; 21844 21845 case CDROMPLAYTRKIND: 21846 SD_TRACE(SD_LOG_IOCTL, un, "CDROMPLAYTRKIND\n"); 21847 #if defined(__i386) || defined(__amd64) 21848 /* 21849 * not supported on ATAPI CD drives, use CDROMPLAYMSF instead 21850 */ 21851 if (!ISCD(un) || (un->un_f_cfg_is_atapi == TRUE)) { 21852 #else 21853 if (!ISCD(un)) { 21854 #endif 21855 err = ENOTTY; 21856 } else { 21857 err = sr_play_trkind(dev, (caddr_t)arg, flag); 21858 } 21859 break; 21860 21861 case CDROMREADTOCHDR: 21862 SD_TRACE(SD_LOG_IOCTL, un, "CDROMREADTOCHDR\n"); 21863 if (!ISCD(un)) { 21864 err = ENOTTY; 21865 } else { 21866 err = sr_read_tochdr(dev, (caddr_t)arg, flag); 21867 } 21868 break; 21869 21870 case CDROMREADTOCENTRY: 21871 SD_TRACE(SD_LOG_IOCTL, un, "CDROMREADTOCENTRY\n"); 21872 if (!ISCD(un)) { 21873 err = ENOTTY; 21874 } else { 21875 err = sr_read_tocentry(dev, (caddr_t)arg, flag); 21876 } 21877 break; 21878 21879 case CDROMSTOP: 21880 SD_TRACE(SD_LOG_IOCTL, un, "CDROMSTOP\n"); 21881 if (!ISCD(un)) { 21882 err = ENOTTY; 21883 } else { 21884 err = sd_send_scsi_START_STOP_UNIT(ssc, SD_TARGET_STOP, 21885 SD_PATH_STANDARD); 21886 goto done_with_assess; 21887 } 21888 break; 21889 21890 case CDROMSTART: 21891 SD_TRACE(SD_LOG_IOCTL, un, "CDROMSTART\n"); 21892 if (!ISCD(un)) { 21893 err = ENOTTY; 21894 } else { 21895 err = sd_send_scsi_START_STOP_UNIT(ssc, SD_TARGET_START, 21896 SD_PATH_STANDARD); 21897 goto done_with_assess; 21898 } 21899 break; 21900 21901 case CDROMCLOSETRAY: 21902 SD_TRACE(SD_LOG_IOCTL, un, "CDROMCLOSETRAY\n"); 21903 if (!ISCD(un)) { 21904 err = ENOTTY; 21905 } else { 21906 err = sd_send_scsi_START_STOP_UNIT(ssc, SD_TARGET_CLOSE, 21907 SD_PATH_STANDARD); 21908 goto done_with_assess; 21909 } 21910 break; 21911 21912 case FDEJECT: /* for eject command */ 21913 case DKIOCEJECT: 21914 case CDROMEJECT: 21915 SD_TRACE(SD_LOG_IOCTL, un, "EJECT\n"); 21916 if (!un->un_f_eject_media_supported) { 21917 err = ENOTTY; 21918 } else { 21919 err = sr_eject(dev); 21920 } 21921 break; 21922 21923 case CDROMVOLCTRL: 21924 SD_TRACE(SD_LOG_IOCTL, un, "CDROMVOLCTRL\n"); 21925 if (!ISCD(un)) { 21926 err = ENOTTY; 21927 } else { 21928 err = sr_volume_ctrl(dev, (caddr_t)arg, flag); 21929 } 21930 break; 21931 21932 case CDROMSUBCHNL: 21933 SD_TRACE(SD_LOG_IOCTL, un, "CDROMSUBCHNL\n"); 21934 if (!ISCD(un)) { 21935 err = ENOTTY; 21936 } else { 21937 err = sr_read_subchannel(dev, (caddr_t)arg, flag); 21938 } 21939 break; 21940 21941 case CDROMREADMODE2: 21942 SD_TRACE(SD_LOG_IOCTL, un, "CDROMREADMODE2\n"); 21943 if (!ISCD(un)) { 21944 err = ENOTTY; 21945 } else if (un->un_f_cfg_is_atapi == TRUE) { 21946 /* 21947 * If the drive supports READ CD, use that instead of 21948 * switching the LBA size via a MODE SELECT 21949 * Block Descriptor 21950 */ 21951 err = 
sr_read_cd_mode2(dev, (caddr_t)arg, flag); 21952 } else { 21953 err = sr_read_mode2(dev, (caddr_t)arg, flag); 21954 } 21955 break; 21956 21957 case CDROMREADMODE1: 21958 SD_TRACE(SD_LOG_IOCTL, un, "CDROMREADMODE1\n"); 21959 if (!ISCD(un)) { 21960 err = ENOTTY; 21961 } else { 21962 err = sr_read_mode1(dev, (caddr_t)arg, flag); 21963 } 21964 break; 21965 21966 case CDROMREADOFFSET: 21967 SD_TRACE(SD_LOG_IOCTL, un, "CDROMREADOFFSET\n"); 21968 if (!ISCD(un)) { 21969 err = ENOTTY; 21970 } else { 21971 err = sr_read_sony_session_offset(dev, (caddr_t)arg, 21972 flag); 21973 } 21974 break; 21975 21976 case CDROMSBLKMODE: 21977 SD_TRACE(SD_LOG_IOCTL, un, "CDROMSBLKMODE\n"); 21978 /* 21979 * There is no means of changing the block size for atapi 21980 * drives, thus return ENOTTY if the drive type is atapi 21981 */ 21982 if (!ISCD(un) || (un->un_f_cfg_is_atapi == TRUE)) { 21983 err = ENOTTY; 21984 } else if (un->un_f_mmc_cap == TRUE) { 21985 21986 /* 21987 * MMC Devices do not support changing the 21988 * logical block size. 21989 * 21990 * Note: EINVAL is being returned instead of ENOTTY to 21991 * maintain consistency with the original mmc 21992 * driver update. 21993 */ 21994 err = EINVAL; 21995 } else { 21996 mutex_enter(SD_MUTEX(un)); 21997 if ((!(un->un_exclopen & (1<<SDPART(dev)))) || 21998 (un->un_ncmds_in_transport > 0)) { 21999 mutex_exit(SD_MUTEX(un)); 22000 err = EINVAL; 22001 } else { 22002 mutex_exit(SD_MUTEX(un)); 22003 err = sr_change_blkmode(dev, cmd, arg, flag); 22004 } 22005 } 22006 break; 22007 22008 case CDROMGBLKMODE: 22009 SD_TRACE(SD_LOG_IOCTL, un, "CDROMGBLKMODE\n"); 22010 if (!ISCD(un)) { 22011 err = ENOTTY; 22012 } else if ((un->un_f_cfg_is_atapi != FALSE) && 22013 (un->un_f_blockcount_is_valid != FALSE)) { 22014 /* 22015 * Drive is an ATAPI drive so return the target block 22016 * size for ATAPI drives, since we cannot change the 22017 * blocksize on ATAPI drives. Used primarily to detect 22018 * if an ATAPI cdrom is present. 22019 */ 22020 if (ddi_copyout(&un->un_tgt_blocksize, (void *)arg, 22021 sizeof (int), flag) != 0) { 22022 err = EFAULT; 22023 } else { 22024 err = 0; 22025 } 22026 22027 } else { 22028 /* 22029 * Drive supports changing block sizes via a Mode 22030 * Select. 22031 */ 22032 err = sr_change_blkmode(dev, cmd, arg, flag); 22033 } 22034 break; 22035 22036 case CDROMGDRVSPEED: 22037 case CDROMSDRVSPEED: 22038 SD_TRACE(SD_LOG_IOCTL, un, "CDROMXDRVSPEED\n"); 22039 if (!ISCD(un)) { 22040 err = ENOTTY; 22041 } else if (un->un_f_mmc_cap == TRUE) { 22042 /* 22043 * Note: In the future the driver implementation 22044 * for getting and 22045 * setting cd speed should entail: 22046 * 1) If non-mmc try the Toshiba mode page 22047 * (sr_change_speed) 22048 * 2) If mmc but no support for Real Time Streaming try 22049 * the SET CD SPEED (0xBB) command 22050 * (sr_atapi_change_speed) 22051 * 3) If mmc and support for Real Time Streaming 22052 * try the GET PERFORMANCE and SET STREAMING 22053 * commands (not yet implemented, 4380808) 22054 */ 22055 /* 22056 * As per recent MMC spec, CD-ROM speed is variable 22057 * and changes with LBA. Since there is no such 22058 * thing as drive speed now, fail this ioctl. 22059 * 22060 * Note: EINVAL is returned for consistency with the 22061 * original implementation, which included support for 22062 * getting the drive speed of mmc devices but not 22063 * setting the drive speed. Thus EINVAL would be 22064 * returned if a set request was made for an mmc device.
22065 * We no longer support get or set speed for 22066 * mmc but need to remain consistent with regard 22067 * to the error code returned. 22068 */ 22069 err = EINVAL; 22070 } else if (un->un_f_cfg_is_atapi == TRUE) { 22071 err = sr_atapi_change_speed(dev, cmd, arg, flag); 22072 } else { 22073 err = sr_change_speed(dev, cmd, arg, flag); 22074 } 22075 break; 22076 22077 case CDROMCDDA: 22078 SD_TRACE(SD_LOG_IOCTL, un, "CDROMCDDA\n"); 22079 if (!ISCD(un)) { 22080 err = ENOTTY; 22081 } else { 22082 err = sr_read_cdda(dev, (void *)arg, flag); 22083 } 22084 break; 22085 22086 case CDROMCDXA: 22087 SD_TRACE(SD_LOG_IOCTL, un, "CDROMCDXA\n"); 22088 if (!ISCD(un)) { 22089 err = ENOTTY; 22090 } else { 22091 err = sr_read_cdxa(dev, (caddr_t)arg, flag); 22092 } 22093 break; 22094 22095 case CDROMSUBCODE: 22096 SD_TRACE(SD_LOG_IOCTL, un, "CDROMSUBCODE\n"); 22097 if (!ISCD(un)) { 22098 err = ENOTTY; 22099 } else { 22100 err = sr_read_all_subcodes(dev, (caddr_t)arg, flag); 22101 } 22102 break; 22103 22104 22105 #ifdef SDDEBUG 22106 /* RESET/ABORTS testing ioctls */ 22107 case DKIOCRESET: { 22108 int reset_level; 22109 22110 if (ddi_copyin((void *)arg, &reset_level, sizeof (int), flag)) { 22111 err = EFAULT; 22112 } else { 22113 SD_INFO(SD_LOG_IOCTL, un, "sdioctl: DKIOCRESET: " 22114 "reset_level = 0x%lx\n", reset_level); 22115 if (scsi_reset(SD_ADDRESS(un), reset_level)) { 22116 err = 0; 22117 } else { 22118 err = EIO; 22119 } 22120 } 22121 break; 22122 } 22123 22124 case DKIOCABORT: 22125 SD_INFO(SD_LOG_IOCTL, un, "sdioctl: DKIOCABORT:\n"); 22126 if (scsi_abort(SD_ADDRESS(un), NULL)) { 22127 err = 0; 22128 } else { 22129 err = EIO; 22130 } 22131 break; 22132 #endif 22133 22134 #ifdef SD_FAULT_INJECTION 22135 /* SDIOC FaultInjection testing ioctls */ 22136 case SDIOCSTART: 22137 case SDIOCSTOP: 22138 case SDIOCINSERTPKT: 22139 case SDIOCINSERTXB: 22140 case SDIOCINSERTUN: 22141 case SDIOCINSERTARQ: 22142 case SDIOCPUSH: 22143 case SDIOCRETRIEVE: 22144 case SDIOCRUN: 22145 SD_INFO(SD_LOG_SDTEST, un, "sdioctl:" 22146 "SDIOC detected cmd:0x%X:\n", cmd); 22147 /* call error generator */ 22148 sd_faultinjection_ioctl(cmd, arg, un); 22149 err = 0; 22150 break; 22151 22152 #endif /* SD_FAULT_INJECTION */ 22153 22154 case DKIOCFLUSHWRITECACHE: 22155 { 22156 struct dk_callback *dkc = (struct dk_callback *)arg; 22157 22158 mutex_enter(SD_MUTEX(un)); 22159 if (!un->un_f_sync_cache_supported || 22160 !un->un_f_write_cache_enabled) { 22161 err = un->un_f_sync_cache_supported ? 22162 0 : ENOTSUP; 22163 mutex_exit(SD_MUTEX(un)); 22164 if ((flag & FKIOCTL) && dkc != NULL && 22165 dkc->dkc_callback != NULL) { 22166 (*dkc->dkc_callback)(dkc->dkc_cookie, 22167 err); 22168 /* 22169 * Did callback and reported error. 22170 * Since we did a callback, ioctl 22171 * should return 0. 
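				 *
				 * This is the contract asynchronous in-kernel
				 * callers rely on; such a caller might look
				 * roughly like the following sketch (the
				 * layered-driver handle "lh" and the
				 * my_done/my_cookie names are hypothetical):
				 *
				 *	struct dk_callback dkc;
				 *	int rv;
				 *	dkc.dkc_callback = my_done;
				 *	dkc.dkc_cookie = my_cookie;
				 *	(void) ldi_ioctl(lh,
				 *	    DKIOCFLUSHWRITECACHE,
				 *	    (intptr_t)&dkc, FKIOCTL,
				 *	    kcred, &rv);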
22172 */ 22173 err = 0; 22174 } 22175 break; 22176 } 22177 mutex_exit(SD_MUTEX(un)); 22178 22179 if ((flag & FKIOCTL) && dkc != NULL && 22180 dkc->dkc_callback != NULL) { 22181 /* async SYNC CACHE request */ 22182 err = sd_send_scsi_SYNCHRONIZE_CACHE(un, dkc); 22183 } else { 22184 /* synchronous SYNC CACHE request */ 22185 err = sd_send_scsi_SYNCHRONIZE_CACHE(un, NULL); 22186 } 22187 } 22188 break; 22189 22190 case DKIOCGETWCE: { 22191 22192 int wce; 22193 22194 if ((err = sd_get_write_cache_enabled(ssc, &wce)) != 0) { 22195 break; 22196 } 22197 22198 if (ddi_copyout(&wce, (void *)arg, sizeof (wce), flag)) { 22199 err = EFAULT; 22200 } 22201 break; 22202 } 22203 22204 case DKIOCSETWCE: { 22205 22206 int wce, sync_supported; 22207 22208 if (ddi_copyin((void *)arg, &wce, sizeof (wce), flag)) { 22209 err = EFAULT; 22210 break; 22211 } 22212 22213 /* 22214 * Synchronize multiple threads trying to enable 22215 * or disable the cache via the un_f_wcc_cv 22216 * condition variable. 22217 */ 22218 mutex_enter(SD_MUTEX(un)); 22219 22220 /* 22221 * Don't allow the cache to be enabled if the 22222 * config file has it disabled. 22223 */ 22224 if (un->un_f_opt_disable_cache && wce) { 22225 mutex_exit(SD_MUTEX(un)); 22226 err = EINVAL; 22227 break; 22228 } 22229 22230 /* 22231 * Wait for write cache change in progress 22232 * bit to be clear before proceeding. 22233 */ 22234 while (un->un_f_wcc_inprog) 22235 cv_wait(&un->un_wcc_cv, SD_MUTEX(un)); 22236 22237 un->un_f_wcc_inprog = 1; 22238 22239 if (un->un_f_write_cache_enabled && wce == 0) { 22240 /* 22241 * Disable the write cache. Don't clear 22242 * un_f_write_cache_enabled until after 22243 * the mode select and flush are complete. 22244 */ 22245 sync_supported = un->un_f_sync_cache_supported; 22246 22247 /* 22248 * If cache flush is suppressed, we assume that the 22249 * controller firmware will take care of managing the 22250 * write cache for us: no need to explicitly 22251 * disable it. 22252 */ 22253 if (!un->un_f_suppress_cache_flush) { 22254 mutex_exit(SD_MUTEX(un)); 22255 if ((err = sd_cache_control(ssc, 22256 SD_CACHE_NOCHANGE, 22257 SD_CACHE_DISABLE)) == 0 && 22258 sync_supported) { 22259 err = sd_send_scsi_SYNCHRONIZE_CACHE(un, 22260 NULL); 22261 } 22262 } else { 22263 mutex_exit(SD_MUTEX(un)); 22264 } 22265 22266 mutex_enter(SD_MUTEX(un)); 22267 if (err == 0) { 22268 un->un_f_write_cache_enabled = 0; 22269 } 22270 22271 } else if (!un->un_f_write_cache_enabled && wce != 0) { 22272 /* 22273 * Set un_f_write_cache_enabled first, so there is 22274 * no window where the cache is enabled, but the 22275 * bit says it isn't. 22276 */ 22277 un->un_f_write_cache_enabled = 1; 22278 22279 /* 22280 * If cache flush is suppressed, we assume that the 22281 * controller firmware will take care of managing the 22282 * write cache for us: no need to explicitly 22283 * enable it. 
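		 *
		 * At the ioctl boundary the whole DKIOCSETWCE path is
		 * driven by a plain int from the caller; a minimal
		 * user-space sketch (the device path is hypothetical):
		 *
		 *	int wce = 1;	-- nonzero enables the cache
		 *	if (ioctl(fd, DKIOCSETWCE, &wce) != 0)
		 *		-- expect EINVAL if the config file
		 *		-- disables the cache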
22284 */ 22285 if (!un->un_f_suppress_cache_flush) { 22286 mutex_exit(SD_MUTEX(un)); 22287 err = sd_cache_control(ssc, SD_CACHE_NOCHANGE, 22288 SD_CACHE_ENABLE); 22289 } else { 22290 mutex_exit(SD_MUTEX(un)); 22291 } 22292 22293 mutex_enter(SD_MUTEX(un)); 22294 22295 if (err) { 22296 un->un_f_write_cache_enabled = 0; 22297 } 22298 } 22299 22300 un->un_f_wcc_inprog = 0; 22301 cv_broadcast(&un->un_wcc_cv); 22302 mutex_exit(SD_MUTEX(un)); 22303 break; 22304 } 22305 22306 default: 22307 err = ENOTTY; 22308 break; 22309 } 22310 mutex_enter(SD_MUTEX(un)); 22311 un->un_ncmds_in_driver--; 22312 ASSERT(un->un_ncmds_in_driver >= 0); 22313 mutex_exit(SD_MUTEX(un)); 22314 22315 22316 done_without_assess: 22317 sd_ssc_fini(ssc); 22318 22319 SD_TRACE(SD_LOG_IOCTL, un, "sdioctl: exit: %d\n", err); 22320 return (err); 22321 22322 done_with_assess: 22323 mutex_enter(SD_MUTEX(un)); 22324 un->un_ncmds_in_driver--; 22325 ASSERT(un->un_ncmds_in_driver >= 0); 22326 mutex_exit(SD_MUTEX(un)); 22327 22328 done_quick_assess: 22329 if (err != 0) 22330 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 22331 /* Uninitialize sd_ssc_t pointer */ 22332 sd_ssc_fini(ssc); 22333 22334 SD_TRACE(SD_LOG_IOCTL, un, "sdioctl: exit: %d\n", err); 22335 return (err); 22336 } 22337 22338 22339 /* 22340 * Function: sd_dkio_ctrl_info 22341 * 22342 * Description: This routine is the driver entry point for handling controller 22343 * information ioctl requests (DKIOCINFO). 22344 * 22345 * Arguments: dev - the device number 22346 * arg - pointer to user provided dk_cinfo structure 22347 * specifying the controller type and attributes. 22348 * flag - this argument is a pass through to ddi_copyxxx() 22349 * directly from the mode argument of ioctl(). 22350 * 22351 * Return Code: 0 22352 * EFAULT 22353 * ENXIO 22354 */ 22355 22356 static int 22357 sd_dkio_ctrl_info(dev_t dev, caddr_t arg, int flag) 22358 { 22359 struct sd_lun *un = NULL; 22360 struct dk_cinfo *info; 22361 dev_info_t *pdip; 22362 int lun, tgt; 22363 22364 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 22365 return (ENXIO); 22366 } 22367 22368 info = (struct dk_cinfo *) 22369 kmem_zalloc(sizeof (struct dk_cinfo), KM_SLEEP); 22370 22371 switch (un->un_ctype) { 22372 case CTYPE_CDROM: 22373 info->dki_ctype = DKC_CDROM; 22374 break; 22375 default: 22376 info->dki_ctype = DKC_SCSI_CCS; 22377 break; 22378 } 22379 pdip = ddi_get_parent(SD_DEVINFO(un)); 22380 info->dki_cnum = ddi_get_instance(pdip); 22381 if (strlen(ddi_get_name(pdip)) < DK_DEVLEN) { 22382 (void) strcpy(info->dki_cname, ddi_get_name(pdip)); 22383 } else { 22384 (void) strncpy(info->dki_cname, ddi_node_name(pdip), 22385 DK_DEVLEN - 1); 22386 } 22387 22388 lun = ddi_prop_get_int(DDI_DEV_T_ANY, SD_DEVINFO(un), 22389 DDI_PROP_DONTPASS, SCSI_ADDR_PROP_LUN, 0); 22390 tgt = ddi_prop_get_int(DDI_DEV_T_ANY, SD_DEVINFO(un), 22391 DDI_PROP_DONTPASS, SCSI_ADDR_PROP_TARGET, 0); 22392 22393 /* Unit Information */ 22394 info->dki_unit = ddi_get_instance(SD_DEVINFO(un)); 22395 info->dki_slave = ((tgt << 3) | lun); 22396 (void) strncpy(info->dki_dname, ddi_driver_name(SD_DEVINFO(un)), 22397 DK_DEVLEN - 1); 22398 info->dki_flags = DKI_FMTVOL; 22399 info->dki_partition = SDPART(dev); 22400 22401 /* Max Transfer size of this device in blocks */ 22402 info->dki_maxtransfer = un->un_max_xfer_size / un->un_sys_blocksize; 22403 info->dki_addr = 0; 22404 info->dki_space = 0; 22405 info->dki_prio = 0; 22406 info->dki_vec = 0; 22407 22408 if (ddi_copyout(info, arg, sizeof (struct dk_cinfo), flag) != 0) { 22409 kmem_free(info, sizeof 
(struct dk_cinfo)); 22410 return (EFAULT); 22411 } else { 22412 kmem_free(info, sizeof (struct dk_cinfo)); 22413 return (0); 22414 } 22415 } 22416 22417 22418 /* 22419 * Function: sd_get_media_info 22420 * 22421 * Description: This routine is the driver entry point for handling ioctl 22422 * requests for the media type or command set profile used by the 22423 * drive to operate on the media (DKIOCGMEDIAINFO). 22424 * 22425 * Arguments: dev - the device number 22426 * arg - pointer to user provided dk_minfo structure 22427 * specifying the media type, logical block size and 22428 * drive capacity. 22429 * flag - this argument is a pass through to ddi_copyxxx() 22430 * directly from the mode argument of ioctl(). 22431 * 22432 * Return Code: 0 22433 * EACCES 22434 * EFAULT 22435 * ENXIO 22436 * EIO 22437 */ 22438 22439 static int 22440 sd_get_media_info(dev_t dev, caddr_t arg, int flag) 22441 { 22442 struct sd_lun *un = NULL; 22443 struct uscsi_cmd com; 22444 struct scsi_inquiry *sinq; 22445 struct dk_minfo media_info; 22446 u_longlong_t media_capacity; 22447 uint64_t capacity; 22448 uint_t lbasize; 22449 uchar_t *out_data; 22450 uchar_t *rqbuf; 22451 int rval = 0; 22452 int rtn; 22453 sd_ssc_t *ssc; 22454 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL || 22455 (un->un_state == SD_STATE_OFFLINE)) { 22456 return (ENXIO); 22457 } 22458 22459 SD_TRACE(SD_LOG_IOCTL_DKIO, un, "sd_get_media_info: entry\n"); 22460 22461 out_data = kmem_zalloc(SD_PROFILE_HEADER_LEN, KM_SLEEP); 22462 rqbuf = kmem_zalloc(SENSE_LENGTH, KM_SLEEP); 22463 22464 /* Issue a TUR to determine if the drive is ready with media present */ 22465 ssc = sd_ssc_init(un); 22466 rval = sd_send_scsi_TEST_UNIT_READY(ssc, SD_CHECK_FOR_MEDIA); 22467 if (rval == ENXIO) { 22468 goto done; 22469 } else if (rval != 0) { 22470 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 22471 } 22472 22473 /* Now get configuration data */ 22474 if (ISCD(un)) { 22475 media_info.dki_media_type = DK_CDROM; 22476 22477 /* Allow SCMD_GET_CONFIGURATION to MMC devices only */ 22478 if (un->un_f_mmc_cap == TRUE) { 22479 rtn = sd_send_scsi_GET_CONFIGURATION(ssc, &com, rqbuf, 22480 SENSE_LENGTH, out_data, SD_PROFILE_HEADER_LEN, 22481 SD_PATH_STANDARD); 22482 22483 if (rtn) { 22484 /* 22485 * We ignore all failures for CD and need to 22486 * put the assessment before the processing code 22487 * to avoid missing the assessment for FMA.
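			 * (On success, the else-branch below builds
			 * dki_media_type from bytes 6-7 of the GET
			 * CONFIGURATION feature header, which carry the
			 * device's current profile.)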
22488 */ 22489 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 22490 /* 22491 * The command failed for something other than an 22492 * illegal request or an unsupported command 22493 */ 22494 if ((com.uscsi_status == STATUS_CHECK) && 22495 (com.uscsi_rqstatus == STATUS_GOOD)) { 22496 if ((rqbuf[2] != KEY_ILLEGAL_REQUEST) || 22497 (rqbuf[12] != 0x20)) { 22498 rval = EIO; 22499 goto no_assessment; 22500 } 22501 } 22502 } else { 22503 /* 22504 * The GET CONFIGURATION command succeeded, 22505 * so set the media type according to the 22506 * returned data 22507 */ 22508 media_info.dki_media_type = out_data[6]; 22509 media_info.dki_media_type <<= 8; 22510 media_info.dki_media_type |= out_data[7]; 22511 } 22512 } 22513 } else { 22514 /* 22515 * The profile list is not available, so we attempt to identify 22516 * the media type based on the inquiry data 22517 */ 22518 sinq = un->un_sd->sd_inq; 22519 if ((sinq->inq_dtype == DTYPE_DIRECT) || 22520 (sinq->inq_dtype == DTYPE_OPTICAL)) { 22521 /* This is a direct access device or optical disk */ 22522 media_info.dki_media_type = DK_FIXED_DISK; 22523 22524 if ((bcmp(sinq->inq_vid, "IOMEGA", 6) == 0) || 22525 (bcmp(sinq->inq_vid, "iomega", 6) == 0)) { 22526 if ((bcmp(sinq->inq_pid, "ZIP", 3) == 0)) { 22527 media_info.dki_media_type = DK_ZIP; 22528 } else if ( 22529 (bcmp(sinq->inq_pid, "jaz", 3) == 0)) { 22530 media_info.dki_media_type = DK_JAZ; 22531 } 22532 } 22533 } else { 22534 /* 22535 * Not a CD, direct access or optical disk, so return 22536 * unknown media 22537 */ 22538 media_info.dki_media_type = DK_UNKNOWN; 22539 } 22540 } 22541 22542 /* Now read the capacity so we can provide the lbasize and capacity */ 22543 rval = sd_send_scsi_READ_CAPACITY(ssc, &capacity, &lbasize, 22544 SD_PATH_DIRECT); 22545 switch (rval) { 22546 case 0: 22547 break; 22548 case EACCES: 22549 rval = EACCES; 22550 goto done; 22551 default: 22552 rval = EIO; 22553 goto done; 22554 } 22555 22556 /* 22557 * If the lun is expanded dynamically, update the un structure. 22558 */ 22559 mutex_enter(SD_MUTEX(un)); 22560 if ((un->un_f_blockcount_is_valid == TRUE) && 22561 (un->un_f_tgt_blocksize_is_valid == TRUE) && 22562 (capacity > un->un_blockcount)) { 22563 sd_update_block_info(un, lbasize, capacity); 22564 } 22565 mutex_exit(SD_MUTEX(un)); 22566 22567 media_info.dki_lbsize = lbasize; 22568 media_capacity = capacity; 22569 22570 /* 22571 * sd_send_scsi_READ_CAPACITY() reports capacity in 22572 * un->un_sys_blocksize chunks. So we need to convert it into 22573 * cap.lbasize chunks. 22574 */ 22575 media_capacity *= un->un_sys_blocksize; 22576 media_capacity /= lbasize; 22577 media_info.dki_capacity = media_capacity; 22578 22579 if (ddi_copyout(&media_info, arg, sizeof (struct dk_minfo), flag)) { 22580 rval = EFAULT; 22581 /* Use a goto in case anybody adds code below in the future */ 22582 goto no_assessment; 22583 } 22584 done: 22585 if (rval != 0) { 22586 if (rval == EIO) 22587 sd_ssc_assessment(ssc, SD_FMT_STATUS_CHECK); 22588 else 22589 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 22590 } 22591 no_assessment: 22592 sd_ssc_fini(ssc); 22593 kmem_free(out_data, SD_PROFILE_HEADER_LEN); 22594 kmem_free(rqbuf, SENSE_LENGTH); 22595 return (rval); 22596 } 22597 22598 22599 /* 22600 * Function: sd_check_media 22601 * 22602 * Description: This utility routine implements the functionality for the 22603 * DKIOCSTATE ioctl. This ioctl blocks the user thread until the 22604 * driver state changes from that specified by the user 22605 * (inserted or ejected).
For example, if the user specifies 22606 * DKIO_EJECTED and the current media state is inserted this 22607 * routine will immediately return DKIO_INSERTED. However, if the 22608 * current media state is not inserted the user thread will be 22609 * blocked until the drive state changes. If DKIO_NONE is specified 22610 * the user thread will block until a drive state change occurs. 22611 * 22612 * Arguments: dev - the device number 22613 * state - user pointer to a dkio_state, updated with the current 22614 * drive state at return. 22615 * 22616 * Return Code: ENXIO 22617 * EIO 22618 * EAGAIN 22619 * EINTR 22620 */ 22621 22622 static int 22623 sd_check_media(dev_t dev, enum dkio_state state) 22624 { 22625 struct sd_lun *un = NULL; 22626 enum dkio_state prev_state; 22627 opaque_t token = NULL; 22628 int rval = 0; 22629 sd_ssc_t *ssc; 22630 dev_t sub_dev; 22631 22632 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 22633 return (ENXIO); 22634 } 22635 22636 /* 22637 * sub_dev is used when submitting request to scsi watch. 22638 * All submissions are unified to use same device number. 22639 */ 22640 sub_dev = sd_make_device(SD_DEVINFO(un)); 22641 22642 SD_TRACE(SD_LOG_COMMON, un, "sd_check_media: entry\n"); 22643 22644 ssc = sd_ssc_init(un); 22645 22646 mutex_enter(SD_MUTEX(un)); 22647 22648 SD_TRACE(SD_LOG_COMMON, un, "sd_check_media: " 22649 "state=%x, mediastate=%x\n", state, un->un_mediastate); 22650 22651 prev_state = un->un_mediastate; 22652 22653 /* is there anything to do? */ 22654 if (state == un->un_mediastate || un->un_mediastate == DKIO_NONE) { 22655 /* 22656 * submit the request to the scsi_watch service; 22657 * scsi_media_watch_cb() does the real work 22658 */ 22659 mutex_exit(SD_MUTEX(un)); 22660 22661 /* 22662 * This change handles the case where a scsi watch request is 22663 * added to a device that is powered down. To accomplish this 22664 * we power up the device before adding the scsi watch request, 22665 * since the scsi watch sends a TUR directly to the device 22666 * which the device cannot handle if it is powered down. 22667 */ 22668 if (sd_pm_entry(un) != DDI_SUCCESS) { 22669 mutex_enter(SD_MUTEX(un)); 22670 goto done; 22671 } 22672 22673 token = scsi_watch_request_submit(SD_SCSI_DEVP(un), 22674 sd_check_media_time, SENSE_LENGTH, sd_media_watch_cb, 22675 (caddr_t)sub_dev); 22676 22677 sd_pm_exit(un); 22678 22679 mutex_enter(SD_MUTEX(un)); 22680 if (token == NULL) { 22681 rval = EAGAIN; 22682 goto done; 22683 } 22684 22685 /* 22686 * This is a special case IOCTL that doesn't return 22687 * until the media state changes. Routine sdpower 22688 * knows about and handles this so don't count it 22689 * as an active cmd in the driver, which would 22690 * keep the device busy to the pm framework. 22691 * If the count isn't decremented the device can't 22692 * be powered down. 22693 */ 22694 un->un_ncmds_in_driver--; 22695 ASSERT(un->un_ncmds_in_driver >= 0); 22696 22697 /* 22698 * if a prior request had been made, this will be the same 22699 * token, as scsi_watch was designed that way. 
22700 */ 22701 un->un_swr_token = token; 22702 un->un_specified_mediastate = state; 22703 22704 /* 22705 * now wait for media change 22706 * we will not be signalled unless mediastate == state but it is 22707 * still better to test for this condition, since there is a 22708 * 2 sec cv_broadcast delay when mediastate == DKIO_INSERTED 22709 */ 22710 SD_TRACE(SD_LOG_COMMON, un, 22711 "sd_check_media: waiting for media state change\n"); 22712 while (un->un_mediastate == state) { 22713 if (cv_wait_sig(&un->un_state_cv, SD_MUTEX(un)) == 0) { 22714 SD_TRACE(SD_LOG_COMMON, un, 22715 "sd_check_media: waiting for media state " 22716 "was interrupted\n"); 22717 un->un_ncmds_in_driver++; 22718 rval = EINTR; 22719 goto done; 22720 } 22721 SD_TRACE(SD_LOG_COMMON, un, 22722 "sd_check_media: received signal, state=%x\n", 22723 un->un_mediastate); 22724 } 22725 /* 22726 * Inc the counter to indicate the device once again 22727 * has an active outstanding cmd. 22728 */ 22729 un->un_ncmds_in_driver++; 22730 } 22731 22732 /* invalidate geometry */ 22733 if (prev_state == DKIO_INSERTED && un->un_mediastate == DKIO_EJECTED) { 22734 sr_ejected(un); 22735 } 22736 22737 if (un->un_mediastate == DKIO_INSERTED && prev_state != DKIO_INSERTED) { 22738 uint64_t capacity; 22739 uint_t lbasize; 22740 22741 SD_TRACE(SD_LOG_COMMON, un, "sd_check_media: media inserted\n"); 22742 mutex_exit(SD_MUTEX(un)); 22743 /* 22744 * Since the following routines use SD_PATH_DIRECT, we must 22745 * call PM directly before the upcoming disk accesses. This 22746 * may cause the disk to be power/spin up. 22747 */ 22748 22749 if (sd_pm_entry(un) == DDI_SUCCESS) { 22750 rval = sd_send_scsi_READ_CAPACITY(ssc, 22751 &capacity, &lbasize, SD_PATH_DIRECT); 22752 if (rval != 0) { 22753 sd_pm_exit(un); 22754 if (rval == EIO) 22755 sd_ssc_assessment(ssc, 22756 SD_FMT_STATUS_CHECK); 22757 else 22758 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 22759 mutex_enter(SD_MUTEX(un)); 22760 goto done; 22761 } 22762 } else { 22763 rval = EIO; 22764 mutex_enter(SD_MUTEX(un)); 22765 goto done; 22766 } 22767 mutex_enter(SD_MUTEX(un)); 22768 22769 sd_update_block_info(un, lbasize, capacity); 22770 22771 /* 22772 * Check if the media in the device is writable or not 22773 */ 22774 if (ISCD(un)) { 22775 sd_check_for_writable_cd(ssc, SD_PATH_DIRECT); 22776 } 22777 22778 mutex_exit(SD_MUTEX(un)); 22779 cmlb_invalidate(un->un_cmlbhandle, (void *)SD_PATH_DIRECT); 22780 if ((cmlb_validate(un->un_cmlbhandle, 0, 22781 (void *)SD_PATH_DIRECT) == 0) && un->un_f_pkstats_enabled) { 22782 sd_set_pstats(un); 22783 SD_TRACE(SD_LOG_IO_PARTITION, un, 22784 "sd_check_media: un:0x%p pstats created and " 22785 "set\n", un); 22786 } 22787 22788 rval = sd_send_scsi_DOORLOCK(ssc, SD_REMOVAL_PREVENT, 22789 SD_PATH_DIRECT); 22790 22791 sd_pm_exit(un); 22792 22793 if (rval != 0) { 22794 if (rval == EIO) 22795 sd_ssc_assessment(ssc, SD_FMT_STATUS_CHECK); 22796 else 22797 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 22798 } 22799 22800 mutex_enter(SD_MUTEX(un)); 22801 } 22802 done: 22803 sd_ssc_fini(ssc); 22804 un->un_f_watcht_stopped = FALSE; 22805 if (token != NULL && un->un_swr_token != NULL) { 22806 /* 22807 * Use of this local token and the mutex ensures that we avoid 22808 * some race conditions associated with terminating the 22809 * scsi watch. 
22810 */ 22811 token = un->un_swr_token; 22812 mutex_exit(SD_MUTEX(un)); 22813 (void) scsi_watch_request_terminate(token, 22814 SCSI_WATCH_TERMINATE_WAIT); 22815 if (scsi_watch_get_ref_count(token) == 0) { 22816 mutex_enter(SD_MUTEX(un)); 22817 un->un_swr_token = (opaque_t)NULL; 22818 } else { 22819 mutex_enter(SD_MUTEX(un)); 22820 } 22821 } 22822 22823 /* 22824 * Update the capacity kstat value, if no media previously 22825 * (capacity kstat is 0) and a media has been inserted 22826 * (un_f_blockcount_is_valid == TRUE) 22827 */ 22828 if (un->un_errstats) { 22829 struct sd_errstats *stp = NULL; 22830 22831 stp = (struct sd_errstats *)un->un_errstats->ks_data; 22832 if ((stp->sd_capacity.value.ui64 == 0) && 22833 (un->un_f_blockcount_is_valid == TRUE)) { 22834 stp->sd_capacity.value.ui64 = 22835 (uint64_t)((uint64_t)un->un_blockcount * 22836 un->un_sys_blocksize); 22837 } 22838 } 22839 mutex_exit(SD_MUTEX(un)); 22840 SD_TRACE(SD_LOG_COMMON, un, "sd_check_media: done\n"); 22841 return (rval); 22842 } 22843 22844 22845 /* 22846 * Function: sd_delayed_cv_broadcast 22847 * 22848 * Description: Delayed cv_broadcast to allow for target to recover from media 22849 * insertion. 22850 * 22851 * Arguments: arg - driver soft state (unit) structure 22852 */ 22853 22854 static void 22855 sd_delayed_cv_broadcast(void *arg) 22856 { 22857 struct sd_lun *un = arg; 22858 22859 SD_TRACE(SD_LOG_COMMON, un, "sd_delayed_cv_broadcast\n"); 22860 22861 mutex_enter(SD_MUTEX(un)); 22862 un->un_dcvb_timeid = NULL; 22863 cv_broadcast(&un->un_state_cv); 22864 mutex_exit(SD_MUTEX(un)); 22865 } 22866 22867 22868 /* 22869 * Function: sd_media_watch_cb 22870 * 22871 * Description: Callback routine used for support of the DKIOCSTATE ioctl. This 22872 * routine processes the TUR sense data and updates the driver 22873 * state if a transition has occurred. The user thread 22874 * (sd_check_media) is then signalled. 
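 *		A rough summary of the transitions computed below from
 *		the TUR status and sense data (standard SPC key/ASC
 *		values):
 *
 *		    GOOD status, command complete	-> DKIO_INSERTED
 *		    UNIT ATTENTION, ASC 0x28		-> DKIO_INSERTED
 *		    NOT READY, ASC/ASCQ 06/00		-> DKIO_INSERTED
 *		    NOT READY, ASC 0x04 (busy)		-> DKIO_INSERTED
 *		    NOT READY, ASC 0x3a			-> DKIO_EJECTED
 *		    pkt_reason CMD_DEV_GONE		-> DKIO_DEV_GONE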
22875 * 22876 * Arguments: arg - the device 'dev_t' is used for context to discriminate 22877 * among multiple watches that share this callback function 22878 * resultp - scsi watch facility result packet containing scsi 22879 * packet, status byte and sense data 22880 * 22881 * Return Code: 0 for success, -1 for failure 22882 */ 22883 22884 static int 22885 sd_media_watch_cb(caddr_t arg, struct scsi_watch_result *resultp) 22886 { 22887 struct sd_lun *un; 22888 struct scsi_status *statusp = resultp->statusp; 22889 uint8_t *sensep = (uint8_t *)resultp->sensep; 22890 enum dkio_state state = DKIO_NONE; 22891 dev_t dev = (dev_t)arg; 22892 uchar_t actual_sense_length; 22893 uint8_t skey, asc, ascq; 22894 22895 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 22896 return (-1); 22897 } 22898 actual_sense_length = resultp->actual_sense_length; 22899 22900 mutex_enter(SD_MUTEX(un)); 22901 SD_TRACE(SD_LOG_COMMON, un, 22902 "sd_media_watch_cb: status=%x, sensep=%p, len=%x\n", 22903 *((char *)statusp), (void *)sensep, actual_sense_length); 22904 22905 if (resultp->pkt->pkt_reason == CMD_DEV_GONE) { 22906 un->un_mediastate = DKIO_DEV_GONE; 22907 cv_broadcast(&un->un_state_cv); 22908 mutex_exit(SD_MUTEX(un)); 22909 22910 return (0); 22911 } 22912 22913 /* 22914 * If there was a check condition, then sensep points to valid sense 22915 * data. If the status was not a check condition but a reservation or 22916 * busy status, then the new state is DKIO_NONE. 22917 */ 22918 if (sensep != NULL) { 22919 skey = scsi_sense_key(sensep); 22920 asc = scsi_sense_asc(sensep); 22921 ascq = scsi_sense_ascq(sensep); 22922 22923 SD_INFO(SD_LOG_COMMON, un, 22924 "sd_media_watch_cb: sense KEY=%x, ASC=%x, ASCQ=%x\n", 22925 skey, asc, ascq); 22926 /* This routine only uses up to 13 bytes of sense data. */ 22927 if (actual_sense_length >= 13) { 22928 if (skey == KEY_UNIT_ATTENTION) { 22929 if (asc == 0x28) { 22930 state = DKIO_INSERTED; 22931 } 22932 } else if (skey == KEY_NOT_READY) { 22933 /* 22934 * Sense data of 02/06/00 means that the 22935 * drive could not read the media (No 22936 * reference position found). In this case, 22937 * to prevent a hang on the DKIOCSTATE IOCTL, 22938 * we set the media state to DKIO_INSERTED. 22939 */ 22940 if (asc == 0x06 && ascq == 0x00) 22941 state = DKIO_INSERTED; 22942 22943 /* 22944 * Sense data of 02/04/02 means that the host 22945 * should send a start command. Explicitly 22946 * leave the media state as is 22947 * (inserted), since the media is present 22948 * and the host has stopped the device for 22949 * PM reasons. The next true read/write 22950 * to this media will bring the 22951 * device to the right state for 22952 * media access. 22953 */ 22954 if (asc == 0x3a) { 22955 state = DKIO_EJECTED; 22956 } else { 22957 /* 22958 * If the drive is busy with an 22959 * operation or long write, keep the 22960 * media in an inserted state. 22961 */ 22962 22963 if ((asc == 0x04) && 22964 ((ascq == 0x02) || 22965 (ascq == 0x07) || 22966 (ascq == 0x08))) { 22967 state = DKIO_INSERTED; 22968 } 22969 } 22970 } else if (skey == KEY_NO_SENSE) { 22971 if ((asc == 0x00) && (ascq == 0x00)) { 22972 /* 22973 * Sense Data 00/00/00 does not provide 22974 * any information about the state of 22975 * the media. Ignore it.
22976 */ 22977 mutex_exit(SD_MUTEX(un)); 22978 return (0); 22979 } 22980 } 22981 } 22982 } else if ((*((char *)statusp) == STATUS_GOOD) && 22983 (resultp->pkt->pkt_reason == CMD_CMPLT)) { 22984 state = DKIO_INSERTED; 22985 } 22986 22987 SD_TRACE(SD_LOG_COMMON, un, 22988 "sd_media_watch_cb: state=%x, specified=%x\n", 22989 state, un->un_specified_mediastate); 22990 22991 /* 22992 * now signal the waiting thread if this is *not* the specified state; 22993 * delay the signal if the state is DKIO_INSERTED to allow the target 22994 * to recover 22995 */ 22996 if (state != un->un_specified_mediastate) { 22997 un->un_mediastate = state; 22998 if (state == DKIO_INSERTED) { 22999 /* 23000 * delay the signal to give the drive a chance 23001 * to do what it apparently needs to do 23002 */ 23003 SD_TRACE(SD_LOG_COMMON, un, 23004 "sd_media_watch_cb: delayed cv_broadcast\n"); 23005 if (un->un_dcvb_timeid == NULL) { 23006 un->un_dcvb_timeid = 23007 timeout(sd_delayed_cv_broadcast, un, 23008 drv_usectohz((clock_t)MEDIA_ACCESS_DELAY)); 23009 } 23010 } else { 23011 SD_TRACE(SD_LOG_COMMON, un, 23012 "sd_media_watch_cb: immediate cv_broadcast\n"); 23013 cv_broadcast(&un->un_state_cv); 23014 } 23015 } 23016 mutex_exit(SD_MUTEX(un)); 23017 return (0); 23018 } 23019 23020 23021 /* 23022 * Function: sd_dkio_get_temp 23023 * 23024 * Description: This routine is the driver entry point for handling ioctl 23025 * requests to get the disk temperature. 23026 * 23027 * Arguments: dev - the device number 23028 * arg - pointer to user provided dk_temperature structure. 23029 * flag - this argument is a pass through to ddi_copyxxx() 23030 * directly from the mode argument of ioctl(). 23031 * 23032 * Return Code: 0 23033 * EFAULT 23034 * ENXIO 23035 * EAGAIN 23036 */ 23037 23038 static int 23039 sd_dkio_get_temp(dev_t dev, caddr_t arg, int flag) 23040 { 23041 struct sd_lun *un = NULL; 23042 struct dk_temperature *dktemp = NULL; 23043 uchar_t *temperature_page; 23044 int rval = 0; 23045 int path_flag = SD_PATH_STANDARD; 23046 sd_ssc_t *ssc; 23047 23048 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 23049 return (ENXIO); 23050 } 23051 23052 ssc = sd_ssc_init(un); 23053 dktemp = kmem_zalloc(sizeof (struct dk_temperature), KM_SLEEP); 23054 23055 /* copyin the disk temp argument to get the user flags */ 23056 if (ddi_copyin((void *)arg, dktemp, 23057 sizeof (struct dk_temperature), flag) != 0) { 23058 rval = EFAULT; 23059 goto done; 23060 } 23061 23062 /* Initialize the temperature to invalid. */ 23063 dktemp->dkt_cur_temp = (short)DKT_INVALID_TEMP; 23064 dktemp->dkt_ref_temp = (short)DKT_INVALID_TEMP; 23065 23066 /* 23067 * Note: Investigate removing the "bypass pm" semantic. 23068 * Can we just bypass PM always? 23069 */ 23070 if (dktemp->dkt_flags & DKT_BYPASS_PM) { 23071 path_flag = SD_PATH_DIRECT; 23072 ASSERT(!mutex_owned(&un->un_pm_mutex)); 23073 mutex_enter(&un->un_pm_mutex); 23074 if (SD_DEVICE_IS_IN_LOW_POWER(un)) { 23075 /* 23076 * If DKT_BYPASS_PM is set, and the drive happens to be 23077 * in low power mode, we cannot wake it up; we need to 23078 * return EAGAIN. 23079 */ 23080 mutex_exit(&un->un_pm_mutex); 23081 rval = EAGAIN; 23082 goto done; 23083 } else { 23084 /* 23085 * Indicate to PM the device is busy. This is required 23086 * to avoid a race - i.e. the ioctl is issuing a 23087 * command and the pm framework brings down the device 23088 * to low power mode (possible power cut-off on some 23089 * platforms).
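			 *
			 * The caller-side contract is therefore: set
			 * DKT_BYPASS_PM to avoid spinning up a powered-down
			 * drive, and be prepared for EAGAIN. A minimal
			 * user-space sketch (the device path is
			 * hypothetical):
			 *
			 *	struct dk_temperature dkt;
			 *	bzero(&dkt, sizeof (dkt));
			 *	dkt.dkt_flags = DKT_BYPASS_PM;
			 *	if (ioctl(fd, DKIOCGTEMPERATURE, &dkt) == 0 &&
			 *	    dkt.dkt_cur_temp != DKT_INVALID_TEMP)
			 *		-- dkt.dkt_cur_temp holds the current
			 *		-- temperature reported by the drive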
23090 */ 23091 mutex_exit(&un->un_pm_mutex); 23092 if (sd_pm_entry(un) != DDI_SUCCESS) { 23093 rval = EAGAIN; 23094 goto done; 23095 } 23096 } 23097 } 23098 23099 temperature_page = kmem_zalloc(TEMPERATURE_PAGE_SIZE, KM_SLEEP); 23100 23101 rval = sd_send_scsi_LOG_SENSE(ssc, temperature_page, 23102 TEMPERATURE_PAGE_SIZE, TEMPERATURE_PAGE, 1, 0, path_flag); 23103 if (rval != 0) 23104 goto done2; 23105 23106 /* 23107 * For the current temperature verify that the parameter length is 0x02 23108 * and the parameter code is 0x00 23109 */ 23110 if ((temperature_page[7] == 0x02) && (temperature_page[4] == 0x00) && 23111 (temperature_page[5] == 0x00)) { 23112 if (temperature_page[9] == 0xFF) { 23113 dktemp->dkt_cur_temp = (short)DKT_INVALID_TEMP; 23114 } else { 23115 dktemp->dkt_cur_temp = (short)(temperature_page[9]); 23116 } 23117 } 23118 23119 /* 23120 * For the reference temperature verify that the parameter 23121 * length is 0x02 and the parameter code is 0x01 23122 */ 23123 if ((temperature_page[13] == 0x02) && (temperature_page[10] == 0x00) && 23124 (temperature_page[11] == 0x01)) { 23125 if (temperature_page[15] == 0xFF) { 23126 dktemp->dkt_ref_temp = (short)DKT_INVALID_TEMP; 23127 } else { 23128 dktemp->dkt_ref_temp = (short)(temperature_page[15]); 23129 } 23130 } 23131 23132 /* Do the copyout regardless of the temperature command's status. */ 23133 if (ddi_copyout(dktemp, (void *)arg, sizeof (struct dk_temperature), 23134 flag) != 0) { 23135 rval = EFAULT; 23136 goto done1; 23137 } 23138 23139 done2: 23140 if (rval != 0) { 23141 if (rval == EIO) 23142 sd_ssc_assessment(ssc, SD_FMT_STATUS_CHECK); 23143 else 23144 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 23145 } 23146 done1: 23147 if (path_flag == SD_PATH_DIRECT) { 23148 sd_pm_exit(un); 23149 } 23150 23151 kmem_free(temperature_page, TEMPERATURE_PAGE_SIZE); 23152 done: 23153 sd_ssc_fini(ssc); 23154 if (dktemp != NULL) { 23155 kmem_free(dktemp, sizeof (struct dk_temperature)); 23156 } 23157 23158 return (rval); 23159 } 23160 23161 23162 /* 23163 * Function: sd_log_page_supported 23164 * 23165 * Description: This routine uses sd_send_scsi_LOG_SENSE to find the list of 23166 * supported log pages. 23167 * 23168 * Arguments: ssc - ssc contains pointer to driver soft state (unit) 23169 * structure for this target. 23170 * log_page - the page code of the log page to look for. 23171 * 23172 * Return Code: -1 - on error (log sense is optional and may not be supported). 23173 * 0 - log page not found. 23174 * 1 - log page found. 23175 */ 23176 23177 static int 23178 sd_log_page_supported(sd_ssc_t *ssc, int log_page) 23179 { 23180 uchar_t *log_page_data; 23181 int i; 23182 int match = 0; 23183 int log_size; 23184 int status = 0; 23185 struct sd_lun *un; 23186 23187 ASSERT(ssc != NULL); 23188 un = ssc->ssc_un; 23189 ASSERT(un != NULL); 23190 23191 log_page_data = kmem_zalloc(0xFF, KM_SLEEP); 23192 23193 status = sd_send_scsi_LOG_SENSE(ssc, log_page_data, 0xFF, 0, 0x01, 0, 23194 SD_PATH_DIRECT); 23195 23196 if (status != 0) { 23197 if (status == EIO) { 23198 /* 23199 * Some disks do not support log sense, so we 23200 * should ignore this kind of error (sense key is 23201 * 0x5 - illegal request).
23202 */ 23203 uint8_t *sensep; 23204 int senlen; 23205 23206 sensep = (uint8_t *)ssc->ssc_uscsi_cmd->uscsi_rqbuf; 23207 senlen = (int)(ssc->ssc_uscsi_cmd->uscsi_rqlen - 23208 ssc->ssc_uscsi_cmd->uscsi_rqresid); 23209 23210 if (senlen > 0 && 23211 scsi_sense_key(sensep) == KEY_ILLEGAL_REQUEST) { 23212 sd_ssc_assessment(ssc, 23213 SD_FMT_IGNORE_COMPROMISE); 23214 } else { 23215 sd_ssc_assessment(ssc, SD_FMT_STATUS_CHECK); 23216 } 23217 } else { 23218 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 23219 } 23220 23221 SD_ERROR(SD_LOG_COMMON, un, 23222 "sd_log_page_supported: failed log page retrieval\n"); 23223 kmem_free(log_page_data, 0xFF); 23224 return (-1); 23225 } 23226 23227 log_size = log_page_data[3]; 23228 23229 /* 23230 * The list of supported log pages starts at the fourth byte. Check 23231 * until we run out of log pages or a match is found. 23232 */ 23233 for (i = 4; (i < (log_size + 4)) && !match; i++) { 23234 if (log_page_data[i] == log_page) { 23235 match++; 23236 } 23237 } 23238 kmem_free(log_page_data, 0xFF); 23239 return (match); 23240 } 23241 23242 23243 /* 23244 * Function: sd_mhdioc_failfast 23245 * 23246 * Description: This routine is the driver entry point for handling ioctl 23247 * requests to enable/disable the multihost failfast option. 23248 * (MHIOCENFAILFAST) 23249 * 23250 * Arguments: dev - the device number 23251 * arg - user specified probing interval. 23252 * flag - this argument is a pass through to ddi_copyxxx() 23253 * directly from the mode argument of ioctl(). 23254 * 23255 * Return Code: 0 23256 * EFAULT 23257 * ENXIO 23258 */ 23259 23260 static int 23261 sd_mhdioc_failfast(dev_t dev, caddr_t arg, int flag) 23262 { 23263 struct sd_lun *un = NULL; 23264 int mh_time; 23265 int rval = 0; 23266 23267 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 23268 return (ENXIO); 23269 } 23270 23271 if (ddi_copyin((void *)arg, &mh_time, sizeof (int), flag)) 23272 return (EFAULT); 23273 23274 if (mh_time) { 23275 mutex_enter(SD_MUTEX(un)); 23276 un->un_resvd_status |= SD_FAILFAST; 23277 mutex_exit(SD_MUTEX(un)); 23278 /* 23279 * If mh_time is INT_MAX, then this ioctl is being used for 23280 * SCSI-3 PGR purposes, and we don't need to spawn a watch thread. 23281 */ 23282 if (mh_time != INT_MAX) { 23283 rval = sd_check_mhd(dev, mh_time); 23284 } 23285 } else { 23286 (void) sd_check_mhd(dev, 0); 23287 mutex_enter(SD_MUTEX(un)); 23288 un->un_resvd_status &= ~SD_FAILFAST; 23289 mutex_exit(SD_MUTEX(un)); 23290 } 23291 return (rval); 23292 } 23293 23294 23295 /* 23296 * Function: sd_mhdioc_takeown 23297 * 23298 * Description: This routine is the driver entry point for handling ioctl 23299 * requests to forcefully acquire exclusive access rights to the 23300 * multihost disk (MHIOCTKOWN). 23301 * 23302 * Arguments: dev - the device number 23303 * arg - user provided structure specifying the delay 23304 * parameters in milliseconds 23305 * flag - this argument is a pass through to ddi_copyxxx() 23306 * directly from the mode argument of ioctl().
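 *		A minimal caller sketch (struct mhioctkown per sys/mhd.h;
 *		the delay value is illustrative):
 *
 *			struct mhioctkown t;
 *			bzero(&t, sizeof (t));
 *			t.reinstate_resv_delay = 1000;	-- milliseconds
 *			if (ioctl(fd, MHIOCTKOWN, &t) != 0)
 *				-- ownership could not be taken
 *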
23307 * 23308 * Return Code: 0 23309 * EFAULT 23310 * ENXIO 23311 */ 23312 23313 static int 23314 sd_mhdioc_takeown(dev_t dev, caddr_t arg, int flag) 23315 { 23316 struct sd_lun *un = NULL; 23317 struct mhioctkown *tkown = NULL; 23318 int rval = 0; 23319 23320 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 23321 return (ENXIO); 23322 } 23323 23324 if (arg != NULL) { 23325 tkown = (struct mhioctkown *) 23326 kmem_zalloc(sizeof (struct mhioctkown), KM_SLEEP); 23327 rval = ddi_copyin(arg, tkown, sizeof (struct mhioctkown), flag); 23328 if (rval != 0) { 23329 rval = EFAULT; 23330 goto error; 23331 } 23332 } 23333 23334 rval = sd_take_ownership(dev, tkown); 23335 mutex_enter(SD_MUTEX(un)); 23336 if (rval == 0) { 23337 un->un_resvd_status |= SD_RESERVE; 23338 if (tkown != NULL && tkown->reinstate_resv_delay != 0) { 23339 sd_reinstate_resv_delay = 23340 tkown->reinstate_resv_delay * 1000; 23341 } else { 23342 sd_reinstate_resv_delay = SD_REINSTATE_RESV_DELAY; 23343 } 23344 /* 23345 * Give the scsi_watch routine interval set by 23346 * the MHIOCENFAILFAST ioctl precedence here. 23347 */ 23348 if ((un->un_resvd_status & SD_FAILFAST) == 0) { 23349 mutex_exit(SD_MUTEX(un)); 23350 (void) sd_check_mhd(dev, sd_reinstate_resv_delay/1000); 23351 SD_TRACE(SD_LOG_IOCTL_MHD, un, 23352 "sd_mhdioc_takeown : %d\n", 23353 sd_reinstate_resv_delay); 23354 } else { 23355 mutex_exit(SD_MUTEX(un)); 23356 } 23357 (void) scsi_reset_notify(SD_ADDRESS(un), SCSI_RESET_NOTIFY, 23358 sd_mhd_reset_notify_cb, (caddr_t)un); 23359 } else { 23360 un->un_resvd_status &= ~SD_RESERVE; 23361 mutex_exit(SD_MUTEX(un)); 23362 } 23363 23364 error: 23365 if (tkown != NULL) { 23366 kmem_free(tkown, sizeof (struct mhioctkown)); 23367 } 23368 return (rval); 23369 } 23370 23371 23372 /* 23373 * Function: sd_mhdioc_release 23374 * 23375 * Description: This routine is the driver entry point for handling ioctl 23376 * requests to release exclusive access rights to the multihost 23377 * disk (MHIOCRELEASE). 23378 * 23379 * Arguments: dev - the device number 23380 * 23381 * Return Code: 0 23382 * ENXIO 23383 */ 23384 23385 static int 23386 sd_mhdioc_release(dev_t dev) 23387 { 23388 struct sd_lun *un = NULL; 23389 timeout_id_t resvd_timeid_save; 23390 int resvd_status_save; 23391 int rval = 0; 23392 23393 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 23394 return (ENXIO); 23395 } 23396 23397 mutex_enter(SD_MUTEX(un)); 23398 resvd_status_save = un->un_resvd_status; 23399 un->un_resvd_status &= 23400 ~(SD_RESERVE | SD_LOST_RESERVE | SD_WANT_RESERVE); 23401 if (un->un_resvd_timeid) { 23402 resvd_timeid_save = un->un_resvd_timeid; 23403 un->un_resvd_timeid = NULL; 23404 mutex_exit(SD_MUTEX(un)); 23405 (void) untimeout(resvd_timeid_save); 23406 } else { 23407 mutex_exit(SD_MUTEX(un)); 23408 } 23409 23410 /* 23411 * destroy any pending timeout thread that may be attempting to 23412 * reinstate reservation on this device. 
23413 */ 23414 sd_rmv_resv_reclaim_req(dev); 23415 23416 if ((rval = sd_reserve_release(dev, SD_RELEASE)) == 0) { 23417 mutex_enter(SD_MUTEX(un)); 23418 if ((un->un_mhd_token) && 23419 ((un->un_resvd_status & SD_FAILFAST) == 0)) { 23420 mutex_exit(SD_MUTEX(un)); 23421 (void) sd_check_mhd(dev, 0); 23422 } else { 23423 mutex_exit(SD_MUTEX(un)); 23424 } 23425 (void) scsi_reset_notify(SD_ADDRESS(un), SCSI_RESET_CANCEL, 23426 sd_mhd_reset_notify_cb, (caddr_t)un); 23427 } else { 23428 /* 23429 * sd_mhd_watch_cb will restart the resvd recover timeout thread 23430 */ 23431 mutex_enter(SD_MUTEX(un)); 23432 un->un_resvd_status = resvd_status_save; 23433 mutex_exit(SD_MUTEX(un)); 23434 } 23435 return (rval); 23436 } 23437 23438 23439 /* 23440 * Function: sd_mhdioc_register_devid 23441 * 23442 * Description: This routine is the driver entry point for handling ioctl 23443 * requests to register the device id (MHIOCREREGISTERDEVID). 23444 * 23445 * Note: The implementation for this ioctl has been updated to 23446 * be consistent with the original PSARC case (1999/357) 23447 * (4375899, 4241671, 4220005) 23448 * 23449 * Arguments: dev - the device number 23450 * 23451 * Return Code: 0 23452 * ENXIO 23453 */ 23454 23455 static int 23456 sd_mhdioc_register_devid(dev_t dev) 23457 { 23458 struct sd_lun *un = NULL; 23459 int rval = 0; 23460 sd_ssc_t *ssc; 23461 23462 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 23463 return (ENXIO); 23464 } 23465 23466 ASSERT(!mutex_owned(SD_MUTEX(un))); 23467 23468 mutex_enter(SD_MUTEX(un)); 23469 23470 /* If a devid already exists, de-register it */ 23471 if (un->un_devid != NULL) { 23472 ddi_devid_unregister(SD_DEVINFO(un)); 23473 /* 23474 * After unregistering the devid, we need to free the devid memory 23475 */ 23476 ddi_devid_free(un->un_devid); 23477 un->un_devid = NULL; 23478 } 23479 23480 /* Check for reservation conflict */ 23481 mutex_exit(SD_MUTEX(un)); 23482 ssc = sd_ssc_init(un); 23483 rval = sd_send_scsi_TEST_UNIT_READY(ssc, 0); 23484 mutex_enter(SD_MUTEX(un)); 23485 23486 switch (rval) { 23487 case 0: 23488 sd_register_devid(ssc, SD_DEVINFO(un), SD_TARGET_IS_UNRESERVED); 23489 break; 23490 case EACCES: 23491 break; 23492 default: 23493 rval = EIO; 23494 } 23495 23496 mutex_exit(SD_MUTEX(un)); 23497 if (rval != 0) { 23498 if (rval == EIO) 23499 sd_ssc_assessment(ssc, SD_FMT_STATUS_CHECK); 23500 else 23501 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 23502 } 23503 sd_ssc_fini(ssc); 23504 return (rval); 23505 } 23506 23507 23508 /* 23509 * Function: sd_mhdioc_inkeys 23510 * 23511 * Description: This routine is the driver entry point for handling ioctl 23512 * requests to issue the SCSI-3 Persistent In Read Keys command 23513 * to the device (MHIOCGRP_INKEYS). 23514 * 23515 * Arguments: dev - the device number 23516 * arg - user provided in_keys structure 23517 * flag - this argument is a pass through to ddi_copyxxx() 23518 * directly from the mode argument of ioctl().
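 *		A caller sketch (field names assumed per sys/mhd.h; the
 *		list size is illustrative):
 *
 *			mhioc_inkeys_t k;
 *			mhioc_key_list_t kl;
 *			mhioc_resv_key_t keys[4];
 *			bzero(&kl, sizeof (kl));
 *			kl.listsize = 4;
 *			kl.list = keys;
 *			k.li = &kl;
 *			if (ioctl(fd, MHIOCGRP_INKEYS, &k) == 0)
 *				-- kl.listlen is the number of registered
 *				-- keys; k.generation is the PGR generation
 *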
23519 * 23520 * Return Code: code returned by sd_persistent_reservation_in_read_keys() 23521 * ENXIO 23522 * EFAULT 23523 */ 23524 23525 static int 23526 sd_mhdioc_inkeys(dev_t dev, caddr_t arg, int flag) 23527 { 23528 struct sd_lun *un; 23529 mhioc_inkeys_t inkeys; 23530 int rval = 0; 23531 23532 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 23533 return (ENXIO); 23534 } 23535 23536 #ifdef _MULTI_DATAMODEL 23537 switch (ddi_model_convert_from(flag & FMODELS)) { 23538 case DDI_MODEL_ILP32: { 23539 struct mhioc_inkeys32 inkeys32; 23540 23541 if (ddi_copyin(arg, &inkeys32, 23542 sizeof (struct mhioc_inkeys32), flag) != 0) { 23543 return (EFAULT); 23544 } 23545 inkeys.li = (mhioc_key_list_t *)(uintptr_t)inkeys32.li; 23546 if ((rval = sd_persistent_reservation_in_read_keys(un, 23547 &inkeys, flag)) != 0) { 23548 return (rval); 23549 } 23550 inkeys32.generation = inkeys.generation; 23551 if (ddi_copyout(&inkeys32, arg, sizeof (struct mhioc_inkeys32), 23552 flag) != 0) { 23553 return (EFAULT); 23554 } 23555 break; 23556 } 23557 case DDI_MODEL_NONE: 23558 if (ddi_copyin(arg, &inkeys, sizeof (mhioc_inkeys_t), 23559 flag) != 0) { 23560 return (EFAULT); 23561 } 23562 if ((rval = sd_persistent_reservation_in_read_keys(un, 23563 &inkeys, flag)) != 0) { 23564 return (rval); 23565 } 23566 if (ddi_copyout(&inkeys, arg, sizeof (mhioc_inkeys_t), 23567 flag) != 0) { 23568 return (EFAULT); 23569 } 23570 break; 23571 } 23572 23573 #else /* ! _MULTI_DATAMODEL */ 23574 23575 if (ddi_copyin(arg, &inkeys, sizeof (mhioc_inkeys_t), flag) != 0) { 23576 return (EFAULT); 23577 } 23578 rval = sd_persistent_reservation_in_read_keys(un, &inkeys, flag); 23579 if (rval != 0) { 23580 return (rval); 23581 } 23582 if (ddi_copyout(&inkeys, arg, sizeof (mhioc_inkeys_t), flag) != 0) { 23583 return (EFAULT); 23584 } 23585 23586 #endif /* _MULTI_DATAMODEL */ 23587 23588 return (rval); 23589 } 23590 23591 23592 /* 23593 * Function: sd_mhdioc_inresv 23594 * 23595 * Description: This routine is the driver entry point for handling ioctl 23596 * requests to issue the SCSI-3 Persistent In Read Reservations 23597 * command to the device (MHIOCGRP_INRESV). 23598 * 23599 * Arguments: dev - the device number 23600 * arg - user provided in_resv structure 23601 * flag - this argument is a pass through to ddi_copyxxx() 23602 * directly from the mode argument of ioctl().
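 *		Usage mirrors MHIOCGRP_INKEYS above, with mhioc_inresvs_t
 *		and an mhioc_resv_desc_list_t in place of the key list.
 *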
23603 * 23604 * Return Code: code returned by sd_persistent_reservation_in_read_resv() 23605 * ENXIO 23606 * EFAULT 23607 */ 23608 23609 static int 23610 sd_mhdioc_inresv(dev_t dev, caddr_t arg, int flag) 23611 { 23612 struct sd_lun *un; 23613 mhioc_inresvs_t inresvs; 23614 int rval = 0; 23615 23616 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 23617 return (ENXIO); 23618 } 23619 23620 #ifdef _MULTI_DATAMODEL 23621 23622 switch (ddi_model_convert_from(flag & FMODELS)) { 23623 case DDI_MODEL_ILP32: { 23624 struct mhioc_inresvs32 inresvs32; 23625 23626 if (ddi_copyin(arg, &inresvs32, 23627 sizeof (struct mhioc_inresvs32), flag) != 0) { 23628 return (EFAULT); 23629 } 23630 inresvs.li = (mhioc_resv_desc_list_t *)(uintptr_t)inresvs32.li; 23631 if ((rval = sd_persistent_reservation_in_read_resv(un, 23632 &inresvs, flag)) != 0) { 23633 return (rval); 23634 } 23635 inresvs32.generation = inresvs.generation; 23636 if (ddi_copyout(&inresvs32, arg, 23637 sizeof (struct mhioc_inresvs32), flag) != 0) { 23638 return (EFAULT); 23639 } 23640 break; 23641 } 23642 case DDI_MODEL_NONE: 23643 if (ddi_copyin(arg, &inresvs, 23644 sizeof (mhioc_inresvs_t), flag) != 0) { 23645 return (EFAULT); 23646 } 23647 if ((rval = sd_persistent_reservation_in_read_resv(un, 23648 &inresvs, flag)) != 0) { 23649 return (rval); 23650 } 23651 if (ddi_copyout(&inresvs, arg, 23652 sizeof (mhioc_inresvs_t), flag) != 0) { 23653 return (EFAULT); 23654 } 23655 break; 23656 } 23657 23658 #else /* ! _MULTI_DATAMODEL */ 23659 23660 if (ddi_copyin(arg, &inresvs, sizeof (mhioc_inresvs_t), flag) != 0) { 23661 return (EFAULT); 23662 } 23663 rval = sd_persistent_reservation_in_read_resv(un, &inresvs, flag); 23664 if (rval != 0) { 23665 return (rval); 23666 } 23667 if (ddi_copyout(&inresvs, arg, sizeof (mhioc_inresvs_t), flag)) { 23668 return (EFAULT); 23669 } 23670 23671 #endif /* ! _MULTI_DATAMODEL */ 23672 23673 return (rval); 23674 } 23675 23676 23677 /* 23678 * The following routines support the clustering functionality described below 23679 * and implement lost reservation reclaim functionality. 23680 * 23681 * Clustering 23682 * ---------- 23683 * The clustering code uses two different, independent forms of SCSI 23684 * reservation: traditional SCSI-2 Reserve/Release and the newer SCSI-3 23685 * Persistent Group Reservations. For any particular disk, it will use either 23686 * SCSI-2 or SCSI-3 PGR, but never both at the same time for the same disk. 23687 * 23688 * SCSI-2 23689 * The cluster software takes ownership of a multi-hosted disk by issuing the 23690 * MHIOCTKOWN ioctl to the disk driver. It releases ownership by issuing the 23691 * MHIOCRELEASE ioctl. Closely related is the MHIOCENFAILFAST ioctl -- a 23692 * cluster host, just after taking ownership of the disk with the MHIOCTKOWN 23693 * ioctl, then issues the MHIOCENFAILFAST ioctl. This ioctl "enables failfast" in the 23694 * driver. The meaning of failfast is that if the driver (on this host) ever 23695 * encounters the scsi error return code RESERVATION_CONFLICT from the device, 23696 * it should immediately panic the host. The motivation for this ioctl is that 23697 * if this host does encounter reservation conflict, the underlying cause is 23698 * that some other host of the cluster has decided that this host is no longer 23699 * in the cluster and has seized control of the disks for itself. Since this 23700 * host is no longer in the cluster, it ought to panic itself.
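 * In practice the take-ownership sequence looks roughly like this
 * (values illustrative; mh_time is in milliseconds):
 *
 *	(void) ioctl(fd, MHIOCTKOWN, &t);		-- seize the disk
 *	mh_time = 1000;
 *	(void) ioctl(fd, MHIOCENFAILFAST, &mh_time);	-- arm failfast
 *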
The 23701 * MHIOCENFAILFAST ioctl does two things: 23702 * (a) it sets a flag that will cause any returned RESERVATION_CONFLICT 23703 * error to panic the host 23704 * (b) it sets up a periodic timer to test whether this host still has 23705 * "access" (in that no other host has reserved the device): if the 23706 * periodic timer gets RESERVATION_CONFLICT, the host is panicked. The 23707 * purpose of that periodic timer is to handle scenarios where the host is 23708 * otherwise temporarily quiescent, temporarily doing no real i/o. 23709 * The MHIOCTKOWN ioctl will "break" a reservation that is held by another host, 23710 * by issuing a SCSI Bus Device Reset. It will then issue a SCSI Reserve for 23711 * the device itself. 23712 * 23713 * SCSI-3 PGR 23714 * A direct semantic implementation of the SCSI-3 Persistent Reservation 23715 * facility is supported through the shared multihost disk ioctls 23716 * (MHIOCGRP_INKEYS, MHIOCGRP_INRESV, MHIOCGRP_REGISTER, MHIOCGRP_RESERVE, 23717 * MHIOCGRP_PREEMPTANDABORT, MHIOCGRP_REGISTERANDIGNOREKEY) 23718 * 23719 * Reservation Reclaim: 23720 * -------------------- 23721 * To support the lost reservation reclaim operations, this driver creates a 23722 * single thread to handle reinstating reservations on all devices that have 23723 * lost reservations. sd_resv_reclaim_requests are logged for all devices that 23724 * have LOST RESERVATIONS when the scsi watch facility calls back 23725 * sd_mhd_watch_cb, and the reservation reclaim thread loops through the 23726 * requests to regain the lost reservations. 23727 */ 23728 23729 /* 23730 * Function: sd_check_mhd() 23731 * 23732 * Description: This function sets up and submits a scsi watch request or 23733 * terminates an existing watch request. This routine is used in 23734 * support of reservation reclaim. 23735 * 23736 * Arguments: dev - the device 'dev_t' is used for context to discriminate 23737 * among multiple watches that share the callback function 23738 * interval - the number of milliseconds specifying the watch 23739 * interval for issuing TEST UNIT READY commands. If 23740 * set to 0 the watch should be terminated. If the 23741 * interval is set to 0 and if the device is required 23742 * to hold reservation while disabling failfast, the 23743 * watch is restarted with an interval of 23744 * reinstate_resv_delay. 23745 * 23746 * Return Code: 0 - Successful submit/terminate of scsi watch request 23747 * ENXIO - Indicates an invalid device was specified 23748 * EAGAIN - Unable to submit the scsi watch request 23749 */ 23750 23751 static int 23752 sd_check_mhd(dev_t dev, int interval) 23753 { 23754 struct sd_lun *un; 23755 opaque_t token; 23756 23757 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 23758 return (ENXIO); 23759 } 23760 23761 /* is this a watch termination request? */ 23762 if (interval == 0) { 23763 mutex_enter(SD_MUTEX(un)); 23764 /* if there is an existing watch task then terminate it */ 23765 if (un->un_mhd_token) { 23766 token = un->un_mhd_token; 23767 un->un_mhd_token = NULL; 23768 mutex_exit(SD_MUTEX(un)); 23769 (void) scsi_watch_request_terminate(token, 23770 SCSI_WATCH_TERMINATE_ALL_WAIT); 23771 mutex_enter(SD_MUTEX(un)); 23772 } else { 23773 mutex_exit(SD_MUTEX(un)); 23774 /* 23775 * Note: If we return here we don't check for the 23776 * failfast case. This is the original legacy 23777 * implementation but perhaps we should be checking 23778 * the failfast case.
23779 */ 23780 return (0); 23781 } 23782 /* 23783 * If the device is required to hold reservation while 23784 * disabling failfast, we need to restart the scsi_watch 23785 * routine with an interval of reinstate_resv_delay. 23786 */ 23787 if (un->un_resvd_status & SD_RESERVE) { 23788 interval = sd_reinstate_resv_delay/1000; 23789 } else { 23790 /* no failfast so bail */ 23791 mutex_exit(SD_MUTEX(un)); 23792 return (0); 23793 } 23794 mutex_exit(SD_MUTEX(un)); 23795 } 23796 23797 /* 23798 * adjust minimum time interval to 1 second, 23799 * and convert from msecs to usecs 23800 */ 23801 if (interval > 0 && interval < 1000) { 23802 interval = 1000; 23803 } 23804 interval *= 1000; 23805 23806 /* 23807 * submit the request to the scsi_watch service 23808 */ 23809 token = scsi_watch_request_submit(SD_SCSI_DEVP(un), interval, 23810 SENSE_LENGTH, sd_mhd_watch_cb, (caddr_t)dev); 23811 if (token == NULL) { 23812 return (EAGAIN); 23813 } 23814 23815 /* 23816 * save token for termination later on 23817 */ 23818 mutex_enter(SD_MUTEX(un)); 23819 un->un_mhd_token = token; 23820 mutex_exit(SD_MUTEX(un)); 23821 return (0); 23822 } 23823 23824 23825 /* 23826 * Function: sd_mhd_watch_cb() 23827 * 23828 * Description: This function is the call back function used by the scsi watch 23829 * facility. The scsi watch facility sends the "Test Unit Ready" 23830 * and processes the status. If applicable (i.e. a "Unit Attention" 23831 * status and automatic "Request Sense" not used) the scsi watch 23832 * facility will send a "Request Sense" and retrieve the sense data 23833 * to be passed to this callback function. In either case the 23834 * automatic "Request Sense" or the facility submitting one, this 23835 * callback is passed the status and sense data. 23836 * 23837 * Arguments: arg - the device 'dev_t' is used for context to discriminate 23838 * among multiple watches that share this callback function 23839 * resultp - scsi watch facility result packet containing scsi 23840 * packet, status byte and sense data 23841 * 23842 * Return Code: 0 - continue the watch task 23843 * non-zero - terminate the watch task 23844 */ 23845 23846 static int 23847 sd_mhd_watch_cb(caddr_t arg, struct scsi_watch_result *resultp) 23848 { 23849 struct sd_lun *un; 23850 struct scsi_status *statusp; 23851 uint8_t *sensep; 23852 struct scsi_pkt *pkt; 23853 uchar_t actual_sense_length; 23854 dev_t dev = (dev_t)arg; 23855 23856 ASSERT(resultp != NULL); 23857 statusp = resultp->statusp; 23858 sensep = (uint8_t *)resultp->sensep; 23859 pkt = resultp->pkt; 23860 actual_sense_length = resultp->actual_sense_length; 23861 23862 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 23863 return (ENXIO); 23864 } 23865 23866 SD_TRACE(SD_LOG_IOCTL_MHD, un, 23867 "sd_mhd_watch_cb: reason '%s', status '%s'\n", 23868 scsi_rname(pkt->pkt_reason), sd_sname(*((unsigned char *)statusp))); 23869 23870 /* Begin processing of the status and/or sense data */ 23871 if (pkt->pkt_reason != CMD_CMPLT) { 23872 /* Handle the incomplete packet */ 23873 sd_mhd_watch_incomplete(un, pkt); 23874 return (0); 23875 } else if (*((unsigned char *)statusp) != STATUS_GOOD) { 23876 if (*((unsigned char *)statusp) 23877 == STATUS_RESERVATION_CONFLICT) { 23878 /* 23879 * Handle a reservation conflict by panicking if 23880 * configured for failfast or by logging the conflict 23881 * and updating the reservation status 23882 */ 23883 mutex_enter(SD_MUTEX(un)); 23884 if ((un->un_resvd_status & SD_FAILFAST) && 23885 (sd_failfast_enable)) { 23886 
sd_panic_for_res_conflict(un); 23887 /*NOTREACHED*/ 23888 } 23889 SD_INFO(SD_LOG_IOCTL_MHD, un, 23890 "sd_mhd_watch_cb: Reservation Conflict\n"); 23891 un->un_resvd_status |= SD_RESERVATION_CONFLICT; 23892 mutex_exit(SD_MUTEX(un)); 23893 } 23894 } 23895 23896 if (sensep != NULL) { 23897 if (actual_sense_length >= (SENSE_LENGTH - 2)) { 23898 mutex_enter(SD_MUTEX(un)); 23899 if ((scsi_sense_asc(sensep) == 23900 SD_SCSI_RESET_SENSE_CODE) && 23901 (un->un_resvd_status & SD_RESERVE)) { 23902 /* 23903 * The additional sense code indicates a power 23904 * on or bus device reset has occurred; update 23905 * the reservation status. 23906 */ 23907 un->un_resvd_status |= 23908 (SD_LOST_RESERVE | SD_WANT_RESERVE); 23909 SD_INFO(SD_LOG_IOCTL_MHD, un, 23910 "sd_mhd_watch_cb: Lost Reservation\n"); 23911 } 23912 } else { 23913 return (0); 23914 } 23915 } else { 23916 mutex_enter(SD_MUTEX(un)); 23917 } 23918 23919 if ((un->un_resvd_status & SD_RESERVE) && 23920 (un->un_resvd_status & SD_LOST_RESERVE)) { 23921 if (un->un_resvd_status & SD_WANT_RESERVE) { 23922 /* 23923 * A reset occurred in between the last probe and this 23924 * one so if a timeout is pending cancel it. 23925 */ 23926 if (un->un_resvd_timeid) { 23927 timeout_id_t temp_id = un->un_resvd_timeid; 23928 un->un_resvd_timeid = NULL; 23929 mutex_exit(SD_MUTEX(un)); 23930 (void) untimeout(temp_id); 23931 mutex_enter(SD_MUTEX(un)); 23932 } 23933 un->un_resvd_status &= ~SD_WANT_RESERVE; 23934 } 23935 if (un->un_resvd_timeid == 0) { 23936 /* Schedule a timeout to handle the lost reservation */ 23937 un->un_resvd_timeid = timeout(sd_mhd_resvd_recover, 23938 (void *)dev, 23939 drv_usectohz(sd_reinstate_resv_delay)); 23940 } 23941 } 23942 mutex_exit(SD_MUTEX(un)); 23943 return (0); 23944 } 23945 23946 23947 /* 23948 * Function: sd_mhd_watch_incomplete() 23949 * 23950 * Description: This function is used to find out why a scsi pkt sent by the 23951 * scsi watch facility was not completed. Under some scenarios this 23952 * routine will return. Otherwise it will send a bus reset to see 23953 * if the drive is still online. 23954 * 23955 * Arguments: un - driver soft state (unit) structure 23956 * pkt - incomplete scsi pkt 23957 */ 23958 23959 static void 23960 sd_mhd_watch_incomplete(struct sd_lun *un, struct scsi_pkt *pkt) 23961 { 23962 int be_chatty; 23963 int perr; 23964 23965 ASSERT(pkt != NULL); 23966 ASSERT(un != NULL); 23967 be_chatty = (!(pkt->pkt_flags & FLAG_SILENT)); 23968 perr = (pkt->pkt_statistics & STAT_PERR); 23969 23970 mutex_enter(SD_MUTEX(un)); 23971 if (un->un_state == SD_STATE_DUMPING) { 23972 mutex_exit(SD_MUTEX(un)); 23973 return; 23974 } 23975 23976 switch (pkt->pkt_reason) { 23977 case CMD_UNX_BUS_FREE: 23978 /* 23979 * If we had a parity error that caused the target to drop BSY*, 23980 * don't be chatty about it. 23981 */ 23982 if (perr && be_chatty) { 23983 be_chatty = 0; 23984 } 23985 break; 23986 case CMD_TAG_REJECT: 23987 /* 23988 * The SCSI-2 spec states that a tag reject will be sent by the 23989 * target if tagged queuing is not supported. A tag reject may 23990 * also be sent during certain initialization periods or to 23991 * control internal resources. For the latter case the target 23992 * may also return Queue Full. 23993 * 23994 * If this driver receives a tag reject from a target that is 23995 * going through an init period or controlling internal 23996 * resources tagged queuing will be disabled. 
This is less 23997 * than optimal behavior, but the driver is unable to determine 23998 * the target state and assumes tagged queueing is not supported. 23999 */ 24000 pkt->pkt_flags = 0; 24001 un->un_tagflags = 0; 24002 24003 if (un->un_f_opt_queueing == TRUE) { 24004 un->un_throttle = min(un->un_throttle, 3); 24005 } else { 24006 un->un_throttle = 1; 24007 } 24008 mutex_exit(SD_MUTEX(un)); 24009 (void) scsi_ifsetcap(SD_ADDRESS(un), "tagged-qing", 0, 1); 24010 mutex_enter(SD_MUTEX(un)); 24011 break; 24012 case CMD_INCOMPLETE: 24013 /* 24014 * The transport stopped with an abnormal state; fall through and 24015 * reset the target and/or bus unless selection did not complete 24016 * (indicated by STATE_GOT_BUS), in which case we don't want to 24017 * go through a target/bus reset. 24018 */ 24019 if (pkt->pkt_state == STATE_GOT_BUS) { 24020 break; 24021 } 24022 /*FALLTHROUGH*/ 24023 24024 case CMD_TIMEOUT: 24025 default: 24026 /* 24027 * The lun may still be running the command, so a lun reset 24028 * should be attempted. If the lun reset fails or cannot be 24029 * issued, then try a target reset. Lastly try a bus reset. 24030 */ 24031 if ((pkt->pkt_statistics & 24032 (STAT_BUS_RESET|STAT_DEV_RESET|STAT_ABORTED)) == 0) { 24033 int reset_retval = 0; 24034 mutex_exit(SD_MUTEX(un)); 24035 if (un->un_f_allow_bus_device_reset == TRUE) { 24036 if (un->un_f_lun_reset_enabled == TRUE) { 24037 reset_retval = 24038 scsi_reset(SD_ADDRESS(un), 24039 RESET_LUN); 24040 } 24041 if (reset_retval == 0) { 24042 reset_retval = 24043 scsi_reset(SD_ADDRESS(un), 24044 RESET_TARGET); 24045 } 24046 } 24047 if (reset_retval == 0) { 24048 (void) scsi_reset(SD_ADDRESS(un), RESET_ALL); 24049 } 24050 mutex_enter(SD_MUTEX(un)); 24051 } 24052 break; 24053 } 24054 24055 /* A device/bus reset has occurred; update the reservation status. */ 24056 if ((pkt->pkt_reason == CMD_RESET) || (pkt->pkt_statistics & 24057 (STAT_BUS_RESET | STAT_DEV_RESET))) { 24058 if ((un->un_resvd_status & SD_RESERVE) == SD_RESERVE) { 24059 un->un_resvd_status |= 24060 (SD_LOST_RESERVE | SD_WANT_RESERVE); 24061 SD_INFO(SD_LOG_IOCTL_MHD, un, 24062 "sd_mhd_watch_incomplete: Lost Reservation\n"); 24063 } 24064 } 24065 24066 /* 24067 * The disk has been turned off; update the device state. 24068 * 24069 * Note: Should we be offlining the disk here? 24070 */ 24071 if (pkt->pkt_state == STATE_GOT_BUS) { 24072 SD_INFO(SD_LOG_IOCTL_MHD, un, "sd_mhd_watch_incomplete: " 24073 "Disk not responding to selection\n"); 24074 if (un->un_state != SD_STATE_OFFLINE) { 24075 New_state(un, SD_STATE_OFFLINE); 24076 } 24077 } else if (be_chatty) { 24078 /* 24079 * suppress messages if they are all the same pkt reason; 24080 * with TQ, many (up to 256) are returned with the same 24081 * pkt_reason 24082 */ 24083 if (pkt->pkt_reason != un->un_last_pkt_reason) { 24084 SD_ERROR(SD_LOG_IOCTL_MHD, un, 24085 "sd_mhd_watch_incomplete: " 24086 "SCSI transport failed: reason '%s'\n", 24087 scsi_rname(pkt->pkt_reason)); 24088 } 24089 } 24090 un->un_last_pkt_reason = pkt->pkt_reason; 24091 mutex_exit(SD_MUTEX(un)); 24092 } 24093 24094 24095 /* 24096 * Function: sd_sname() 24097 * 24098 * Description: This is a simple little routine to return a string containing 24099 * a printable description of the command status byte for use in 24100 * logging. 24101 * 24102 * Arguments: status - the command status byte 24103 * 24104 * Return Code: char * - string containing the status description.
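 *
 *		Example: a status byte of STATUS_CHECK maps to the string
 *		"check condition"; values not in the table below map to
 *		"<unknown status>".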
24105 */ 24106 24107 static char * 24108 sd_sname(uchar_t status) 24109 { 24110 switch (status & STATUS_MASK) { 24111 case STATUS_GOOD: 24112 return ("good status"); 24113 case STATUS_CHECK: 24114 return ("check condition"); 24115 case STATUS_MET: 24116 return ("condition met"); 24117 case STATUS_BUSY: 24118 return ("busy"); 24119 case STATUS_INTERMEDIATE: 24120 return ("intermediate"); 24121 case STATUS_INTERMEDIATE_MET: 24122 return ("intermediate - condition met"); 24123 case STATUS_RESERVATION_CONFLICT: 24124 return ("reservation_conflict"); 24125 case STATUS_TERMINATED: 24126 return ("command terminated"); 24127 case STATUS_QFULL: 24128 return ("queue full"); 24129 default: 24130 return ("<unknown status>"); 24131 } 24132 } 24133 24134 24135 /* 24136 * Function: sd_mhd_resvd_recover() 24137 * 24138 * Description: This function adds a reservation entry to the 24139 * sd_resv_reclaim_request list and signals the reservation 24140 * reclaim thread that there is work pending. If the reservation 24141 * reclaim thread has not been previously created this function 24142 * will kick it off. 24143 * 24144 * Arguments: arg - the device 'dev_t' is used for context to discriminate 24145 * among multiple watches that share this callback function 24146 * 24147 * Context: This routine is called by timeout() and is run in interrupt 24148 * context. It must not sleep or call other functions which may 24149 * sleep. 24150 */ 24151 24152 static void 24153 sd_mhd_resvd_recover(void *arg) 24154 { 24155 dev_t dev = (dev_t)arg; 24156 struct sd_lun *un; 24157 struct sd_thr_request *sd_treq = NULL; 24158 struct sd_thr_request *sd_cur = NULL; 24159 struct sd_thr_request *sd_prev = NULL; 24160 int already_there = 0; 24161 24162 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 24163 return; 24164 } 24165 24166 mutex_enter(SD_MUTEX(un)); 24167 un->un_resvd_timeid = NULL; 24168 if (un->un_resvd_status & SD_WANT_RESERVE) { 24169 /* 24170 * There was a reset so don't issue the reserve, allow the 24171 * sd_mhd_watch_cb callback function to notice this and 24172 * reschedule the timeout for reservation. 24173 */ 24174 mutex_exit(SD_MUTEX(un)); 24175 return; 24176 } 24177 mutex_exit(SD_MUTEX(un)); 24178 24179 /* 24180 * Add this device to the sd_resv_reclaim_request list and the 24181 * sd_resv_reclaim_thread should take care of the rest. 24182 * 24183 * Note: We can't sleep in this context so if the memory allocation 24184 * fails allow the sd_mhd_watch_cb callback function to notice this and 24185 * reschedule the timeout for reservation. 
(4378460) 24186 */ 24187 sd_treq = (struct sd_thr_request *) 24188 kmem_zalloc(sizeof (struct sd_thr_request), KM_NOSLEEP); 24189 if (sd_treq == NULL) { 24190 return; 24191 } 24192 24193 sd_treq->sd_thr_req_next = NULL; 24194 sd_treq->dev = dev; 24195 mutex_enter(&sd_tr.srq_resv_reclaim_mutex); 24196 if (sd_tr.srq_thr_req_head == NULL) { 24197 sd_tr.srq_thr_req_head = sd_treq; 24198 } else { 24199 sd_cur = sd_prev = sd_tr.srq_thr_req_head; 24200 for (; sd_cur != NULL; sd_cur = sd_cur->sd_thr_req_next) { 24201 if (sd_cur->dev == dev) { 24202 /* 24203 * already in Queue so don't log 24204 * another request for the device 24205 */ 24206 already_there = 1; 24207 break; 24208 } 24209 sd_prev = sd_cur; 24210 } 24211 if (!already_there) { 24212 SD_INFO(SD_LOG_IOCTL_MHD, un, "sd_mhd_resvd_recover: " 24213 "logging request for %lx\n", dev); 24214 sd_prev->sd_thr_req_next = sd_treq; 24215 } else { 24216 kmem_free(sd_treq, sizeof (struct sd_thr_request)); 24217 } 24218 } 24219 24220 /* 24221 * Create a kernel thread to do the reservation reclaim and free up this 24222 * thread. We cannot block this thread while we go away to do the 24223 * reservation reclaim 24224 */ 24225 if (sd_tr.srq_resv_reclaim_thread == NULL) 24226 sd_tr.srq_resv_reclaim_thread = thread_create(NULL, 0, 24227 sd_resv_reclaim_thread, NULL, 24228 0, &p0, TS_RUN, v.v_maxsyspri - 2); 24229 24230 /* Tell the reservation reclaim thread that it has work to do */ 24231 cv_signal(&sd_tr.srq_resv_reclaim_cv); 24232 mutex_exit(&sd_tr.srq_resv_reclaim_mutex); 24233 } 24234 24235 /* 24236 * Function: sd_resv_reclaim_thread() 24237 * 24238 * Description: This function implements the reservation reclaim operations 24239 * 24240 * Arguments: arg - the device 'dev_t' is used for context to discriminate 24241 * among multiple watches that share this callback function 24242 */ 24243 24244 static void 24245 sd_resv_reclaim_thread() 24246 { 24247 struct sd_lun *un; 24248 struct sd_thr_request *sd_mhreq; 24249 24250 /* Wait for work */ 24251 mutex_enter(&sd_tr.srq_resv_reclaim_mutex); 24252 if (sd_tr.srq_thr_req_head == NULL) { 24253 cv_wait(&sd_tr.srq_resv_reclaim_cv, 24254 &sd_tr.srq_resv_reclaim_mutex); 24255 } 24256 24257 /* Loop while we have work */ 24258 while ((sd_tr.srq_thr_cur_req = sd_tr.srq_thr_req_head) != NULL) { 24259 un = ddi_get_soft_state(sd_state, 24260 SDUNIT(sd_tr.srq_thr_cur_req->dev)); 24261 if (un == NULL) { 24262 /* 24263 * softstate structure is NULL so just 24264 * dequeue the request and continue 24265 */ 24266 sd_tr.srq_thr_req_head = 24267 sd_tr.srq_thr_cur_req->sd_thr_req_next; 24268 kmem_free(sd_tr.srq_thr_cur_req, 24269 sizeof (struct sd_thr_request)); 24270 continue; 24271 } 24272 24273 /* dequeue the request */ 24274 sd_mhreq = sd_tr.srq_thr_cur_req; 24275 sd_tr.srq_thr_req_head = 24276 sd_tr.srq_thr_cur_req->sd_thr_req_next; 24277 mutex_exit(&sd_tr.srq_resv_reclaim_mutex); 24278 24279 /* 24280 * Reclaim reservation only if SD_RESERVE is still set. There 24281 * may have been a call to MHIOCRELEASE before we got here. 24282 */ 24283 mutex_enter(SD_MUTEX(un)); 24284 if ((un->un_resvd_status & SD_RESERVE) == SD_RESERVE) { 24285 /* 24286 * Note: The SD_LOST_RESERVE flag is cleared before 24287 * reclaiming the reservation. 
If this is done after the 24288 * call to sd_reserve_release a reservation loss in the 24289 * window between pkt completion of reserve cmd and 24290 * mutex_enter below may not be recognized 24291 */ 24292 un->un_resvd_status &= ~SD_LOST_RESERVE; 24293 mutex_exit(SD_MUTEX(un)); 24294 24295 if (sd_reserve_release(sd_mhreq->dev, 24296 SD_RESERVE) == 0) { 24297 mutex_enter(SD_MUTEX(un)); 24298 un->un_resvd_status |= SD_RESERVE; 24299 mutex_exit(SD_MUTEX(un)); 24300 SD_INFO(SD_LOG_IOCTL_MHD, un, 24301 "sd_resv_reclaim_thread: " 24302 "Reservation Recovered\n"); 24303 } else { 24304 mutex_enter(SD_MUTEX(un)); 24305 un->un_resvd_status |= SD_LOST_RESERVE; 24306 mutex_exit(SD_MUTEX(un)); 24307 SD_INFO(SD_LOG_IOCTL_MHD, un, 24308 "sd_resv_reclaim_thread: Failed " 24309 "Reservation Recovery\n"); 24310 } 24311 } else { 24312 mutex_exit(SD_MUTEX(un)); 24313 } 24314 mutex_enter(&sd_tr.srq_resv_reclaim_mutex); 24315 ASSERT(sd_mhreq == sd_tr.srq_thr_cur_req); 24316 kmem_free(sd_mhreq, sizeof (struct sd_thr_request)); 24317 sd_mhreq = sd_tr.srq_thr_cur_req = NULL; 24318 /* 24319 * wakeup the destroy thread if anyone is waiting on 24320 * us to complete. 24321 */ 24322 cv_signal(&sd_tr.srq_inprocess_cv); 24323 SD_TRACE(SD_LOG_IOCTL_MHD, un, 24324 "sd_resv_reclaim_thread: cv_signalling current request \n"); 24325 } 24326 24327 /* 24328 * cleanup the sd_tr structure now that this thread will not exist 24329 */ 24330 ASSERT(sd_tr.srq_thr_req_head == NULL); 24331 ASSERT(sd_tr.srq_thr_cur_req == NULL); 24332 sd_tr.srq_resv_reclaim_thread = NULL; 24333 mutex_exit(&sd_tr.srq_resv_reclaim_mutex); 24334 thread_exit(); 24335 } 24336 24337 24338 /* 24339 * Function: sd_rmv_resv_reclaim_req() 24340 * 24341 * Description: This function removes any pending reservation reclaim requests 24342 * for the specified device. 24343 * 24344 * Arguments: dev - the device 'dev_t' 24345 */ 24346 24347 static void 24348 sd_rmv_resv_reclaim_req(dev_t dev) 24349 { 24350 struct sd_thr_request *sd_mhreq; 24351 struct sd_thr_request *sd_prev; 24352 24353 /* Remove a reservation reclaim request from the list */ 24354 mutex_enter(&sd_tr.srq_resv_reclaim_mutex); 24355 if (sd_tr.srq_thr_cur_req && sd_tr.srq_thr_cur_req->dev == dev) { 24356 /* 24357 * We are attempting to reinstate reservation for 24358 * this device. We wait for sd_reserve_release() 24359 * to return before we return. 24360 */ 24361 cv_wait(&sd_tr.srq_inprocess_cv, 24362 &sd_tr.srq_resv_reclaim_mutex); 24363 } else { 24364 sd_prev = sd_mhreq = sd_tr.srq_thr_req_head; 24365 if (sd_mhreq && sd_mhreq->dev == dev) { 24366 sd_tr.srq_thr_req_head = sd_mhreq->sd_thr_req_next; 24367 kmem_free(sd_mhreq, sizeof (struct sd_thr_request)); 24368 mutex_exit(&sd_tr.srq_resv_reclaim_mutex); 24369 return; 24370 } 24371 for (; sd_mhreq != NULL; sd_mhreq = sd_mhreq->sd_thr_req_next) { 24372 if (sd_mhreq && sd_mhreq->dev == dev) { 24373 break; 24374 } 24375 sd_prev = sd_mhreq; 24376 } 24377 if (sd_mhreq != NULL) { 24378 sd_prev->sd_thr_req_next = sd_mhreq->sd_thr_req_next; 24379 kmem_free(sd_mhreq, sizeof (struct sd_thr_request)); 24380 } 24381 } 24382 mutex_exit(&sd_tr.srq_resv_reclaim_mutex); 24383 } 24384 24385 24386 /* 24387 * Function: sd_mhd_reset_notify_cb() 24388 * 24389 * Description: This is a call back function for scsi_reset_notify. This 24390 * function updates the softstate reserved status and logs the 24391 * reset. The driver scsi watch facility callback function 24392 * (sd_mhd_watch_cb) and reservation reclaim thread functionality 24393 * will reclaim the reservation. 
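 *
 *		The registration with the transport is assumed to be of the
 *		form (sketch only; the actual call site is elsewhere in this
 *		driver):
 *
 *		scsi_reset_notify(SD_ADDRESS(un), SCSI_RESET_NOTIFY,
 *		    sd_mhd_reset_notify_cb, (caddr_t)un);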
24394 * 24395 * Arguments: arg - driver soft state (unit) structure 24396 */ 24397 24398 static void 24399 sd_mhd_reset_notify_cb(caddr_t arg) 24400 { 24401 struct sd_lun *un = (struct sd_lun *)arg; 24402 24403 mutex_enter(SD_MUTEX(un)); 24404 if ((un->un_resvd_status & SD_RESERVE) == SD_RESERVE) { 24405 un->un_resvd_status |= (SD_LOST_RESERVE | SD_WANT_RESERVE); 24406 SD_INFO(SD_LOG_IOCTL_MHD, un, 24407 "sd_mhd_reset_notify_cb: Lost Reservation\n"); 24408 } 24409 mutex_exit(SD_MUTEX(un)); 24410 } 24411 24412 24413 /* 24414 * Function: sd_take_ownership() 24415 * 24416 * Description: This routine implements an algorithm to achieve a stable 24417 * reservation on disks which don't implement priority reserve, 24418 * and makes sure that re-reservation attempts by other hosts fail. 24419 * This algorithm consists of a loop that keeps issuing the RESERVE 24420 * for some period of time (min_ownership_delay, default 6 seconds). 24421 * During that loop, it looks to see if there has been a bus device 24422 * reset or bus reset (both of which cause an existing reservation 24423 * to be lost). If the reservation is lost, RESERVE is reissued until 24424 * a period of min_ownership_delay with no resets has gone by, or 24425 * until max_ownership_delay has expired. This loop ensures that 24426 * the host really did manage to reserve the device, in spite of 24427 * resets. The looping for min_ownership_delay (default six 24428 * seconds) is important to early generation clustering products, 24429 * Solstice HA 1.x and Sun Cluster 2.x. Those products use an 24430 * MHIOCENFAILFAST periodic timer of two seconds. By having 24431 * MHIOCTKOWN issue Reserves in a loop for six seconds, and having 24432 * MHIOCENFAILFAST poll every two seconds, the idea is that by the 24433 * time the MHIOCTKOWN ioctl returns, the other host (if any) will 24434 * have already noticed, via the MHIOCENFAILFAST polling, that it 24435 * no longer "owns" the disk and will have panicked itself. Thus, 24436 * the host issuing the MHIOCTKOWN is assured (with timing 24437 * dependencies) that by the time it actually starts to use the 24438 * disk for real work, the old owner is no longer accessing it. 24439 * 24440 * min_ownership_delay is the minimum amount of time for which the 24441 * disk must be reserved continuously devoid of resets before the 24442 * MHIOCTKOWN ioctl will return success. 24443 * 24444 * max_ownership_delay indicates the amount of time by which the 24445 * take ownership should succeed or time out with an error. 24446 * 24447 * Arguments: dev - the device 'dev_t' 24448 * *p - struct containing timing info. 24449 * 24450 * Return Code: 0 for success or error code 24451 */ 24452 24453 static int 24454 sd_take_ownership(dev_t dev, struct mhioctkown *p) 24455 { 24456 struct sd_lun *un; 24457 int rval; 24458 int err; 24459 int reservation_count = 0; 24460 int min_ownership_delay = 6000000; /* in usec */ 24461 int max_ownership_delay = 30000000; /* in usec */ 24462 clock_t start_time; /* starting time of this algorithm */ 24463 clock_t end_time; /* time limit for giving up */ 24464 clock_t ownership_time; /* time limit for stable ownership */ 24465 clock_t current_time; 24466 clock_t previous_current_time; 24467 24468 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 24469 return (ENXIO); 24470 } 24471 24472 /* 24473 * Attempt a device reservation. A priority reservation is requested.
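 *
 * With the defaults this works out to: one priority RESERVE up front,
 * then a plain RESERVE roughly every 500 msec; ownership is declared
 * stable only once the 6 second window has passed with no reset seen
 * and at least 4 consecutive conflict-free iterations, and EACCES is
 * returned if the 30 second limit expires first.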
24474 */ 24475 if ((rval = sd_reserve_release(dev, SD_PRIORITY_RESERVE)) 24476 != SD_SUCCESS) { 24477 SD_ERROR(SD_LOG_IOCTL_MHD, un, 24478 "sd_take_ownership: return(1)=%d\n", rval); 24479 return (rval); 24480 } 24481 24482 /* Update the softstate reserved status to indicate the reservation */ 24483 mutex_enter(SD_MUTEX(un)); 24484 un->un_resvd_status |= SD_RESERVE; 24485 un->un_resvd_status &= 24486 ~(SD_LOST_RESERVE | SD_WANT_RESERVE | SD_RESERVATION_CONFLICT); 24487 mutex_exit(SD_MUTEX(un)); 24488 24489 if (p != NULL) { 24490 if (p->min_ownership_delay != 0) { 24491 min_ownership_delay = p->min_ownership_delay * 1000; 24492 } 24493 if (p->max_ownership_delay != 0) { 24494 max_ownership_delay = p->max_ownership_delay * 1000; 24495 } 24496 } 24497 SD_INFO(SD_LOG_IOCTL_MHD, un, 24498 "sd_take_ownership: min, max delays: %d, %d\n", 24499 min_ownership_delay, max_ownership_delay); 24500 24501 start_time = ddi_get_lbolt(); 24502 current_time = start_time; 24503 ownership_time = current_time + drv_usectohz(min_ownership_delay); 24504 end_time = start_time + drv_usectohz(max_ownership_delay); 24505 24506 while (current_time - end_time < 0) { 24507 delay(drv_usectohz(500000)); 24508 24509 if ((err = sd_reserve_release(dev, SD_RESERVE)) != 0) { 24510 if ((sd_reserve_release(dev, SD_RESERVE)) != 0) { 24511 mutex_enter(SD_MUTEX(un)); 24512 rval = (un->un_resvd_status & 24513 SD_RESERVATION_CONFLICT) ? EACCES : EIO; 24514 mutex_exit(SD_MUTEX(un)); 24515 break; 24516 } 24517 } 24518 previous_current_time = current_time; 24519 current_time = ddi_get_lbolt(); 24520 mutex_enter(SD_MUTEX(un)); 24521 if (err || (un->un_resvd_status & SD_LOST_RESERVE)) { 24522 ownership_time = ddi_get_lbolt() + 24523 drv_usectohz(min_ownership_delay); 24524 reservation_count = 0; 24525 } else { 24526 reservation_count++; 24527 } 24528 un->un_resvd_status |= SD_RESERVE; 24529 un->un_resvd_status &= ~(SD_LOST_RESERVE | SD_WANT_RESERVE); 24530 mutex_exit(SD_MUTEX(un)); 24531 24532 SD_INFO(SD_LOG_IOCTL_MHD, un, 24533 "sd_take_ownership: ticks for loop iteration=%ld, " 24534 "reservation=%s\n", (current_time - previous_current_time), 24535 reservation_count ? 
"ok" : "reclaimed"); 24536 24537 if (current_time - ownership_time >= 0 && 24538 reservation_count >= 4) { 24539 rval = 0; /* Achieved a stable ownership */ 24540 break; 24541 } 24542 if (current_time - end_time >= 0) { 24543 rval = EACCES; /* No ownership in max possible time */ 24544 break; 24545 } 24546 } 24547 SD_TRACE(SD_LOG_IOCTL_MHD, un, 24548 "sd_take_ownership: return(2)=%d\n", rval); 24549 return (rval); 24550 } 24551 24552 24553 /* 24554 * Function: sd_reserve_release() 24555 * 24556 * Description: This function builds and sends scsi RESERVE, RELEASE, and 24557 * PRIORITY RESERVE commands based on a user specified command type 24558 * 24559 * Arguments: dev - the device 'dev_t' 24560 * cmd - user specified command type; one of SD_PRIORITY_RESERVE, 24561 * SD_RESERVE, SD_RELEASE 24562 * 24563 * Return Code: 0 or Error Code 24564 */ 24565 24566 static int 24567 sd_reserve_release(dev_t dev, int cmd) 24568 { 24569 struct uscsi_cmd *com = NULL; 24570 struct sd_lun *un = NULL; 24571 char cdb[CDB_GROUP0]; 24572 int rval; 24573 24574 ASSERT((cmd == SD_RELEASE) || (cmd == SD_RESERVE) || 24575 (cmd == SD_PRIORITY_RESERVE)); 24576 24577 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 24578 return (ENXIO); 24579 } 24580 24581 /* instantiate and initialize the command and cdb */ 24582 com = kmem_zalloc(sizeof (*com), KM_SLEEP); 24583 bzero(cdb, CDB_GROUP0); 24584 com->uscsi_flags = USCSI_SILENT; 24585 com->uscsi_timeout = un->un_reserve_release_time; 24586 com->uscsi_cdblen = CDB_GROUP0; 24587 com->uscsi_cdb = cdb; 24588 if (cmd == SD_RELEASE) { 24589 cdb[0] = SCMD_RELEASE; 24590 } else { 24591 cdb[0] = SCMD_RESERVE; 24592 } 24593 24594 /* Send the command. */ 24595 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_SYSSPACE, 24596 SD_PATH_STANDARD); 24597 24598 /* 24599 * "break" a reservation that is held by another host, by issuing a 24600 * reset if priority reserve is desired, and we could not get the 24601 * device. 24602 */ 24603 if ((cmd == SD_PRIORITY_RESERVE) && 24604 (rval != 0) && (com->uscsi_status == STATUS_RESERVATION_CONFLICT)) { 24605 /* 24606 * First try to reset the LUN. If we cannot, then try a target 24607 * reset, followed by a bus reset if the target reset fails. 24608 */ 24609 int reset_retval = 0; 24610 if (un->un_f_lun_reset_enabled == TRUE) { 24611 reset_retval = scsi_reset(SD_ADDRESS(un), RESET_LUN); 24612 } 24613 if (reset_retval == 0) { 24614 /* The LUN reset either failed or was not issued */ 24615 reset_retval = scsi_reset(SD_ADDRESS(un), RESET_TARGET); 24616 } 24617 if ((reset_retval == 0) && 24618 (scsi_reset(SD_ADDRESS(un), RESET_ALL) == 0)) { 24619 rval = EIO; 24620 kmem_free(com, sizeof (*com)); 24621 return (rval); 24622 } 24623 24624 bzero(com, sizeof (struct uscsi_cmd)); 24625 com->uscsi_flags = USCSI_SILENT; 24626 com->uscsi_cdb = cdb; 24627 com->uscsi_cdblen = CDB_GROUP0; 24628 com->uscsi_timeout = 5; 24629 24630 /* 24631 * Reissue the last reserve command, this time without request 24632 * sense. Assume that it is just a regular reserve command. 24633 */ 24634 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_SYSSPACE, 24635 SD_PATH_STANDARD); 24636 } 24637 24638 /* Return an error if still getting a reservation conflict. 
*/ 24639 if ((rval != 0) && (com->uscsi_status == STATUS_RESERVATION_CONFLICT)) { 24640 rval = EACCES; 24641 } 24642 24643 kmem_free(com, sizeof (*com)); 24644 return (rval); 24645 } 24646 24647 24648 #define SD_NDUMP_RETRIES 12 24649 /* 24650 * System Crash Dump routine 24651 */ 24652 24653 static int 24654 sddump(dev_t dev, caddr_t addr, daddr_t blkno, int nblk) 24655 { 24656 int instance; 24657 int partition; 24658 int i; 24659 int err; 24660 struct sd_lun *un; 24661 struct scsi_pkt *wr_pktp; 24662 struct buf *wr_bp; 24663 struct buf wr_buf; 24664 daddr_t tgt_byte_offset; /* rmw - byte offset for target */ 24665 daddr_t tgt_blkno; /* rmw - blkno for target */ 24666 size_t tgt_byte_count; /* rmw - # of bytes to xfer */ 24667 size_t tgt_nblk; /* rmw - # of tgt blks to xfer */ 24668 size_t io_start_offset; 24669 int doing_rmw = FALSE; 24670 int rval; 24671 ssize_t dma_resid; 24672 daddr_t oblkno; 24673 diskaddr_t nblks = 0; 24674 diskaddr_t start_block; 24675 24676 instance = SDUNIT(dev); 24677 if (((un = ddi_get_soft_state(sd_state, instance)) == NULL) || 24678 !SD_IS_VALID_LABEL(un) || ISCD(un)) { 24679 return (ENXIO); 24680 } 24681 24682 _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*un)) 24683 24684 SD_TRACE(SD_LOG_DUMP, un, "sddump: entry\n"); 24685 24686 partition = SDPART(dev); 24687 SD_INFO(SD_LOG_DUMP, un, "sddump: partition = %d\n", partition); 24688 24689 /* Validate the blocks to dump against the partition size. */ 24690 24691 (void) cmlb_partinfo(un->un_cmlbhandle, partition, 24692 &nblks, &start_block, NULL, NULL, (void *)SD_PATH_DIRECT); 24693 24694 if ((blkno + nblk) > nblks) { 24695 SD_TRACE(SD_LOG_DUMP, un, 24696 "sddump: dump range larger than partition: " 24697 "blkno = 0x%x, nblk = 0x%x, dkl_nblk = 0x%x\n", 24698 blkno, nblk, nblks); 24699 return (EINVAL); 24700 } 24701 24702 mutex_enter(&un->un_pm_mutex); 24703 if (SD_DEVICE_IS_IN_LOW_POWER(un)) { 24704 struct scsi_pkt *start_pktp; 24705 24706 mutex_exit(&un->un_pm_mutex); 24707 24708 /* 24709 * use pm framework to power on HBA 1st 24710 */ 24711 (void) pm_raise_power(SD_DEVINFO(un), 0, SD_SPINDLE_ON); 24712 24713 /* 24714 * Dump no longer uses sdpower to power on a device; it's 24715 * done in-line here so it can be run in polled mode. 24716 */ 24717 24718 SD_INFO(SD_LOG_DUMP, un, "sddump: starting device\n"); 24719 24720 start_pktp = scsi_init_pkt(SD_ADDRESS(un), NULL, NULL, 24721 CDB_GROUP0, un->un_status_len, 0, 0, NULL_FUNC, NULL); 24722 24723 if (start_pktp == NULL) { 24724 /* We were not given a SCSI packet, fail. */ 24725 return (EIO); 24726 } 24727 bzero(start_pktp->pkt_cdbp, CDB_GROUP0); 24728 start_pktp->pkt_cdbp[0] = SCMD_START_STOP; 24729 start_pktp->pkt_cdbp[4] = SD_TARGET_START; 24730 start_pktp->pkt_flags = FLAG_NOINTR; 24731 24732 mutex_enter(SD_MUTEX(un)); 24733 SD_FILL_SCSI1_LUN(un, start_pktp); 24734 mutex_exit(SD_MUTEX(un)); 24735 /* 24736 * Scsi_poll returns 0 (success) if the command completes and 24737 * the status block is STATUS_GOOD. 24738 */ 24739 if (sd_scsi_poll(un, start_pktp) != 0) { 24740 scsi_destroy_pkt(start_pktp); 24741 return (EIO); 24742 } 24743 scsi_destroy_pkt(start_pktp); 24744 (void) sd_ddi_pm_resume(un); 24745 } else { 24746 mutex_exit(&un->un_pm_mutex); 24747 } 24748 24749 mutex_enter(SD_MUTEX(un)); 24750 un->un_throttle = 0; 24751 24752 /* 24753 * The first time through, reset the specific target device. 24754 * However, when cpr calls sddump we know that sd is in 24755 * a good state so no bus reset is required. 24756 * Clear sense data via Request Sense cmd.
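 * (sddump() runs single-threaded at crash time, so every command
 * issued below is marked FLAG_NOINTR and completed by polling
 * through sd_scsi_poll() rather than by interrupt.)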
24757 * In sddump we don't care about allow_bus_device_reset anymore 24758 */ 24759 24760 if ((un->un_state != SD_STATE_SUSPENDED) && 24761 (un->un_state != SD_STATE_DUMPING)) { 24762 24763 New_state(un, SD_STATE_DUMPING); 24764 24765 if (un->un_f_is_fibre == FALSE) { 24766 mutex_exit(SD_MUTEX(un)); 24767 /* 24768 * Attempt a bus reset for parallel scsi. 24769 * 24770 * Note: A bus reset is required because on some host 24771 * systems (i.e. E420R) a bus device reset is 24772 * insufficient to reset the state of the target. 24773 * 24774 * Note: Don't issue the reset for fibre-channel, 24775 * because this tends to hang the bus (loop) for 24776 * too long while everyone is logging out and in 24777 * and the deadman timer for dumping will fire 24778 * before the dump is complete. 24779 */ 24780 if (scsi_reset(SD_ADDRESS(un), RESET_ALL) == 0) { 24781 mutex_enter(SD_MUTEX(un)); 24782 Restore_state(un); 24783 mutex_exit(SD_MUTEX(un)); 24784 return (EIO); 24785 } 24786 24787 /* Delay to give the device some recovery time. */ 24788 drv_usecwait(10000); 24789 24790 if (sd_send_polled_RQS(un) == SD_FAILURE) { 24791 SD_INFO(SD_LOG_DUMP, un, 24792 "sddump: sd_send_polled_RQS failed\n"); 24793 } 24794 mutex_enter(SD_MUTEX(un)); 24795 } 24796 } 24797 24798 /* 24799 * Convert the partition-relative block number to a 24800 * disk physical block number. 24801 */ 24802 blkno += start_block; 24803 24804 SD_INFO(SD_LOG_DUMP, un, "sddump: disk blkno = 0x%x\n", blkno); 24805 24806 24807 /* 24808 * Check if the device has a non-512 block size. 24809 */ 24810 wr_bp = NULL; 24811 if (NOT_DEVBSIZE(un)) { 24812 tgt_byte_offset = blkno * un->un_sys_blocksize; 24813 tgt_byte_count = nblk * un->un_sys_blocksize; 24814 if ((tgt_byte_offset % un->un_tgt_blocksize) || 24815 (tgt_byte_count % un->un_tgt_blocksize)) { 24816 doing_rmw = TRUE; 24817 /* 24818 * Calculate the block number and number of block 24819 * in terms of the media block size. 24820 */ 24821 tgt_blkno = tgt_byte_offset / un->un_tgt_blocksize; 24822 tgt_nblk = 24823 ((tgt_byte_offset + tgt_byte_count + 24824 (un->un_tgt_blocksize - 1)) / 24825 un->un_tgt_blocksize) - tgt_blkno; 24826 24827 /* 24828 * Invoke the routine which is going to do read part 24829 * of read-modify-write. 24830 * Note that this routine returns a pointer to 24831 * a valid bp in wr_bp. 24832 */ 24833 err = sddump_do_read_of_rmw(un, tgt_blkno, tgt_nblk, 24834 &wr_bp); 24835 if (err) { 24836 mutex_exit(SD_MUTEX(un)); 24837 return (err); 24838 } 24839 /* 24840 * Offset is being calculated as - 24841 * (original block # * system block size) - 24842 * (new block # * target block size) 24843 */ 24844 io_start_offset = 24845 ((uint64_t)(blkno * un->un_sys_blocksize)) - 24846 ((uint64_t)(tgt_blkno * un->un_tgt_blocksize)); 24847 24848 ASSERT((io_start_offset >= 0) && 24849 (io_start_offset < un->un_tgt_blocksize)); 24850 /* 24851 * Do the modify portion of read modify write. 
24852 */ 24853 bcopy(addr, &wr_bp->b_un.b_addr[io_start_offset], 24854 (size_t)nblk * un->un_sys_blocksize); 24855 } else { 24856 doing_rmw = FALSE; 24857 tgt_blkno = tgt_byte_offset / un->un_tgt_blocksize; 24858 tgt_nblk = tgt_byte_count / un->un_tgt_blocksize; 24859 } 24860 24861 /* Convert blkno and nblk to target blocks */ 24862 blkno = tgt_blkno; 24863 nblk = tgt_nblk; 24864 } else { 24865 wr_bp = &wr_buf; 24866 bzero(wr_bp, sizeof (struct buf)); 24867 wr_bp->b_flags = B_BUSY; 24868 wr_bp->b_un.b_addr = addr; 24869 wr_bp->b_bcount = nblk << DEV_BSHIFT; 24870 wr_bp->b_resid = 0; 24871 } 24872 24873 mutex_exit(SD_MUTEX(un)); 24874 24875 /* 24876 * Obtain a SCSI packet for the write command. 24877 * It should be safe to call the allocator here without 24878 * worrying about being locked for DVMA mapping because 24879 * the address we're passed is already a DVMA mapping 24880 * 24881 * We are also not going to worry about semaphore ownership 24882 * in the dump buffer. Dumping is single threaded at present. 24883 */ 24884 24885 wr_pktp = NULL; 24886 24887 dma_resid = wr_bp->b_bcount; 24888 oblkno = blkno; 24889 24890 while (dma_resid != 0) { 24891 24892 for (i = 0; i < SD_NDUMP_RETRIES; i++) { 24893 wr_bp->b_flags &= ~B_ERROR; 24894 24895 if (un->un_partial_dma_supported == 1) { 24896 blkno = oblkno + 24897 ((wr_bp->b_bcount - dma_resid) / 24898 un->un_tgt_blocksize); 24899 nblk = dma_resid / un->un_tgt_blocksize; 24900 24901 if (wr_pktp) { 24902 /* 24903 * Partial DMA transfers after initial transfer 24904 */ 24905 rval = sd_setup_next_rw_pkt(un, wr_pktp, wr_bp, 24906 blkno, nblk); 24907 } else { 24908 /* Initial transfer */ 24909 rval = sd_setup_rw_pkt(un, &wr_pktp, wr_bp, 24910 un->un_pkt_flags, NULL_FUNC, NULL, 24911 blkno, nblk); 24912 } 24913 } else { 24914 rval = sd_setup_rw_pkt(un, &wr_pktp, wr_bp, 24915 0, NULL_FUNC, NULL, blkno, nblk); 24916 } 24917 24918 if (rval == 0) { 24919 /* We were given a SCSI packet, continue. 
*/ 24920 break; 24921 } 24922 24923 if (i == 0) { 24924 if (wr_bp->b_flags & B_ERROR) { 24925 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 24926 "no resources for dumping; " 24927 "error code: 0x%x, retrying", 24928 geterror(wr_bp)); 24929 } else { 24930 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 24931 "no resources for dumping; retrying"); 24932 } 24933 } else if (i != (SD_NDUMP_RETRIES - 1)) { 24934 if (wr_bp->b_flags & B_ERROR) { 24935 scsi_log(SD_DEVINFO(un), sd_label, CE_CONT, 24936 "no resources for dumping; error code: " 24937 "0x%x, retrying\n", geterror(wr_bp)); 24938 } 24939 } else { 24940 if (wr_bp->b_flags & B_ERROR) { 24941 scsi_log(SD_DEVINFO(un), sd_label, CE_CONT, 24942 "no resources for dumping; " 24943 "error code: 0x%x, retries failed, " 24944 "giving up.\n", geterror(wr_bp)); 24945 } else { 24946 scsi_log(SD_DEVINFO(un), sd_label, CE_CONT, 24947 "no resources for dumping; " 24948 "retries failed, giving up.\n"); 24949 } 24950 mutex_enter(SD_MUTEX(un)); 24951 Restore_state(un); 24952 if (NOT_DEVBSIZE(un) && (doing_rmw == TRUE)) { 24953 mutex_exit(SD_MUTEX(un)); 24954 scsi_free_consistent_buf(wr_bp); 24955 } else { 24956 mutex_exit(SD_MUTEX(un)); 24957 } 24958 return (EIO); 24959 } 24960 drv_usecwait(10000); 24961 } 24962 24963 if (un->un_partial_dma_supported == 1) { 24964 /* 24965 * save the resid from PARTIAL_DMA 24966 */ 24967 dma_resid = wr_pktp->pkt_resid; 24968 if (dma_resid != 0) 24969 nblk -= SD_BYTES2TGTBLOCKS(un, dma_resid); 24970 wr_pktp->pkt_resid = 0; 24971 } else { 24972 dma_resid = 0; 24973 } 24974 24975 /* SunBug 1222170 */ 24976 wr_pktp->pkt_flags = FLAG_NOINTR; 24977 24978 err = EIO; 24979 for (i = 0; i < SD_NDUMP_RETRIES; i++) { 24980 24981 /* 24982 * Scsi_poll returns 0 (success) if the command completes and 24983 * the status block is STATUS_GOOD. We should only check 24984 * errors if this condition is not true. Even then we should 24985 * send our own request sense packet only if we have a check 24986 * condition and auto request sense has not been performed by 24987 * the hba. 24988 */ 24989 SD_TRACE(SD_LOG_DUMP, un, "sddump: sending write\n"); 24990 24991 if ((sd_scsi_poll(un, wr_pktp) == 0) && 24992 (wr_pktp->pkt_resid == 0)) { 24993 err = SD_SUCCESS; 24994 break; 24995 } 24996 24997 /* 24998 * Check CMD_DEV_GONE 1st, give up if device is gone. 
24999 */ 25000 if (wr_pktp->pkt_reason == CMD_DEV_GONE) { 25001 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 25002 "Error while dumping state...Device is gone\n"); 25003 break; 25004 } 25005 25006 if (SD_GET_PKT_STATUS(wr_pktp) == STATUS_CHECK) { 25007 SD_INFO(SD_LOG_DUMP, un, 25008 "sddump: write failed with CHECK, try # %d\n", i); 25009 if (((wr_pktp->pkt_state & STATE_ARQ_DONE) == 0)) { 25010 (void) sd_send_polled_RQS(un); 25011 } 25012 25013 continue; 25014 } 25015 25016 if (SD_GET_PKT_STATUS(wr_pktp) == STATUS_BUSY) { 25017 int reset_retval = 0; 25018 25019 SD_INFO(SD_LOG_DUMP, un, 25020 "sddump: write failed with BUSY, try # %d\n", i); 25021 25022 if (un->un_f_lun_reset_enabled == TRUE) { 25023 reset_retval = scsi_reset(SD_ADDRESS(un), 25024 RESET_LUN); 25025 } 25026 if (reset_retval == 0) { 25027 (void) scsi_reset(SD_ADDRESS(un), RESET_TARGET); 25028 } 25029 (void) sd_send_polled_RQS(un); 25030 25031 } else { 25032 SD_INFO(SD_LOG_DUMP, un, 25033 "sddump: write failed with 0x%x, try # %d\n", 25034 SD_GET_PKT_STATUS(wr_pktp), i); 25035 mutex_enter(SD_MUTEX(un)); 25036 sd_reset_target(un, wr_pktp); 25037 mutex_exit(SD_MUTEX(un)); 25038 } 25039 25040 /* 25041 * If we are not getting anywhere with lun/target resets, 25042 * let's reset the bus. 25043 */ 25044 if (i == SD_NDUMP_RETRIES/2) { 25045 (void) scsi_reset(SD_ADDRESS(un), RESET_ALL); 25046 (void) sd_send_polled_RQS(un); 25047 } 25048 } 25049 } 25050 25051 scsi_destroy_pkt(wr_pktp); 25052 mutex_enter(SD_MUTEX(un)); 25053 if ((NOT_DEVBSIZE(un)) && (doing_rmw == TRUE)) { 25054 mutex_exit(SD_MUTEX(un)); 25055 scsi_free_consistent_buf(wr_bp); 25056 } else { 25057 mutex_exit(SD_MUTEX(un)); 25058 } 25059 SD_TRACE(SD_LOG_DUMP, un, "sddump: exit: err = %d\n", err); 25060 return (err); 25061 } 25062 25063 /* 25064 * Function: sd_scsi_poll() 25065 * 25066 * Description: This is a wrapper for the scsi_poll call. 25067 * 25068 * Arguments: sd_lun - The unit structure 25069 * scsi_pkt - The scsi packet being sent to the device. 25070 * 25071 * Return Code: 0 - Command completed successfully with good status 25072 * -1 - Command failed. This could indicate a check condition 25073 * or other status value requiring recovery action. 25074 * 25075 * NOTE: This code is only called off sddump(). 25076 */ 25077 25078 static int 25079 sd_scsi_poll(struct sd_lun *un, struct scsi_pkt *pktp) 25080 { 25081 int status; 25082 25083 ASSERT(un != NULL); 25084 ASSERT(!mutex_owned(SD_MUTEX(un))); 25085 ASSERT(pktp != NULL); 25086 25087 status = SD_SUCCESS; 25088 25089 if (scsi_ifgetcap(&pktp->pkt_address, "tagged-qing", 1) == 1) { 25090 pktp->pkt_flags |= un->un_tagflags; 25091 pktp->pkt_flags &= ~FLAG_NODISCON; 25092 } 25093 25094 status = sd_ddi_scsi_poll(pktp); 25095 /* 25096 * Scsi_poll returns 0 (success) if the command completes and the 25097 * status block is STATUS_GOOD. We should only check errors if this 25098 * condition is not true. Even then we should send our own request 25099 * sense packet only if we have a check condition and auto 25100 * request sense has not been performed by the hba. 25101 * Don't get RQS data if pkt_reason is CMD_DEV_GONE. 25102 */ 25103 if ((status != SD_SUCCESS) && 25104 (SD_GET_PKT_STATUS(pktp) == STATUS_CHECK) && 25105 (pktp->pkt_state & STATE_ARQ_DONE) == 0 && 25106 (pktp->pkt_reason != CMD_DEV_GONE)) 25107 (void) sd_send_polled_RQS(un); 25108 25109 return (status); 25110 } 25111 25112 /* 25113 * Function: sd_send_polled_RQS() 25114 * 25115 * Description: This sends the request sense command to a device. 
25116 * 25117 * Arguments: sd_lun - The unit structure 25118 * 25119 * Return Code: 0 - Command completed successfully with good status 25120 * -1 - Command failed. 25121 * 25122 */ 25123 25124 static int 25125 sd_send_polled_RQS(struct sd_lun *un) 25126 { 25127 int ret_val; 25128 struct scsi_pkt *rqs_pktp; 25129 struct buf *rqs_bp; 25130 25131 ASSERT(un != NULL); 25132 ASSERT(!mutex_owned(SD_MUTEX(un))); 25133 25134 ret_val = SD_SUCCESS; 25135 25136 rqs_pktp = un->un_rqs_pktp; 25137 rqs_bp = un->un_rqs_bp; 25138 25139 mutex_enter(SD_MUTEX(un)); 25140 25141 if (un->un_sense_isbusy) { 25142 ret_val = SD_FAILURE; 25143 mutex_exit(SD_MUTEX(un)); 25144 return (ret_val); 25145 } 25146 25147 /* 25148 * If the request sense buffer (and packet) is not in use, 25149 * let's set the un_sense_isbusy and send our packet 25150 */ 25151 un->un_sense_isbusy = 1; 25152 rqs_pktp->pkt_resid = 0; 25153 rqs_pktp->pkt_reason = 0; 25154 rqs_pktp->pkt_flags |= FLAG_NOINTR; 25155 bzero(rqs_bp->b_un.b_addr, SENSE_LENGTH); 25156 25157 mutex_exit(SD_MUTEX(un)); 25158 25159 SD_INFO(SD_LOG_COMMON, un, "sd_send_polled_RQS: req sense buf at" 25160 " 0x%p\n", rqs_bp->b_un.b_addr); 25161 25162 /* 25163 * Can't send this to sd_scsi_poll, we wrap ourselves around the 25164 * axle - it has a call into us! 25165 */ 25166 if ((ret_val = sd_ddi_scsi_poll(rqs_pktp)) != 0) { 25167 SD_INFO(SD_LOG_COMMON, un, 25168 "sd_send_polled_RQS: RQS failed\n"); 25169 } 25170 25171 SD_DUMP_MEMORY(un, SD_LOG_COMMON, "sd_send_polled_RQS:", 25172 (uchar_t *)rqs_bp->b_un.b_addr, SENSE_LENGTH, SD_LOG_HEX); 25173 25174 mutex_enter(SD_MUTEX(un)); 25175 un->un_sense_isbusy = 0; 25176 mutex_exit(SD_MUTEX(un)); 25177 25178 return (ret_val); 25179 } 25180 25181 /* 25182 * Defines needed for localized version of the scsi_poll routine. 25183 */ 25184 #define CSEC 10000 /* usecs */ 25185 #define SEC_TO_CSEC (1000000/CSEC) 25186 25187 /* 25188 * Function: sd_ddi_scsi_poll() 25189 * 25190 * Description: Localized version of the scsi_poll routine. The purpose is to 25191 * send a scsi_pkt to a device as a polled command. This version 25192 * is to ensure more robust handling of transport errors. 25193 * Specifically this routine cures not ready, coming ready 25194 * transition for power up and reset of sonoma's. This can take 25195 * up to 45 seconds for power-on and 20 seconds for reset of a 25196 * sonoma lun. 25197 * 25198 * Arguments: scsi_pkt - The scsi_pkt being sent to a device 25199 * 25200 * Return Code: 0 - Command completed successfully with good status 25201 * -1 - Command failed. 25202 * 25203 * NOTE: This code is almost identical to scsi_poll, however before 6668774 can 25204 * be fixed (removing this code), we need to determine how to handle the 25205 * KEY_UNIT_ATTENTION condition below in conditions not as limited as sddump(). 25206 * 25207 * NOTE: This code is only called off sddump(). 25208 */ 25209 static int 25210 sd_ddi_scsi_poll(struct scsi_pkt *pkt) 25211 { 25212 int rval = -1; 25213 int savef; 25214 long savet; 25215 void (*savec)(); 25216 int timeout; 25217 int busy_count; 25218 int poll_delay; 25219 int rc; 25220 uint8_t *sensep; 25221 struct scsi_arq_status *arqstat; 25222 extern int do_polled_io; 25223 25224 ASSERT(pkt->pkt_scbp); 25225 25226 /* 25227 * save old flags.. 
25228 */ 25229 savef = pkt->pkt_flags; 25230 savec = pkt->pkt_comp; 25231 savet = pkt->pkt_time; 25232 25233 pkt->pkt_flags |= FLAG_NOINTR; 25234 25235 /* 25236 * XXX there is nothing in the SCSA spec that states that we should not 25237 * do a callback for polled cmds; however, removing this will break sd 25238 * and probably other target drivers 25239 */ 25240 pkt->pkt_comp = NULL; 25241 25242 /* 25243 * we don't like a polled command without timeout. 25244 * 60 seconds seems long enough. 25245 */ 25246 if (pkt->pkt_time == 0) 25247 pkt->pkt_time = SCSI_POLL_TIMEOUT; 25248 25249 /* 25250 * Send polled cmd. 25251 * 25252 * We do some error recovery for various errors. Tran_busy, 25253 * queue full, and non-dispatched commands are retried every 10 msec. 25254 * as they are typically transient failures. Busy status and Not 25255 * Ready are retried every second as this status takes a while to 25256 * change. 25257 */ 25258 timeout = pkt->pkt_time * SEC_TO_CSEC; 25259 25260 for (busy_count = 0; busy_count < timeout; busy_count++) { 25261 /* 25262 * Initialize pkt status variables. 25263 */ 25264 *pkt->pkt_scbp = pkt->pkt_reason = pkt->pkt_state = 0; 25265 25266 if ((rc = scsi_transport(pkt)) != TRAN_ACCEPT) { 25267 if (rc != TRAN_BUSY) { 25268 /* Transport failed - give up. */ 25269 break; 25270 } else { 25271 /* Transport busy - try again. */ 25272 poll_delay = 1 * CSEC; /* 10 msec. */ 25273 } 25274 } else { 25275 /* 25276 * Transport accepted - check pkt status. 25277 */ 25278 rc = (*pkt->pkt_scbp) & STATUS_MASK; 25279 if ((pkt->pkt_reason == CMD_CMPLT) && 25280 (rc == STATUS_CHECK) && 25281 (pkt->pkt_state & STATE_ARQ_DONE)) { 25282 arqstat = 25283 (struct scsi_arq_status *)(pkt->pkt_scbp); 25284 sensep = (uint8_t *)&arqstat->sts_sensedata; 25285 } else { 25286 sensep = NULL; 25287 } 25288 25289 if ((pkt->pkt_reason == CMD_CMPLT) && 25290 (rc == STATUS_GOOD)) { 25291 /* No error - we're done */ 25292 rval = 0; 25293 break; 25294 25295 } else if (pkt->pkt_reason == CMD_DEV_GONE) { 25296 /* Lost connection - give up */ 25297 break; 25298 25299 } else if ((pkt->pkt_reason == CMD_INCOMPLETE) && 25300 (pkt->pkt_state == 0)) { 25301 /* Pkt not dispatched - try again. */ 25302 poll_delay = 1 * CSEC; /* 10 msec. */ 25303 25304 } else if ((pkt->pkt_reason == CMD_CMPLT) && 25305 (rc == STATUS_QFULL)) { 25306 /* Queue full - try again. */ 25307 poll_delay = 1 * CSEC; /* 10 msec. */ 25308 25309 } else if ((pkt->pkt_reason == CMD_CMPLT) && 25310 (rc == STATUS_BUSY)) { 25311 /* Busy - try again. */ 25312 poll_delay = 100 * CSEC; /* 1 sec. */ 25313 busy_count += (SEC_TO_CSEC - 1); 25314 25315 } else if ((sensep != NULL) && 25316 (scsi_sense_key(sensep) == KEY_UNIT_ATTENTION)) { 25317 /* 25318 * Unit Attention - try again. 25319 * Pretend it took 1 sec. 25320 * NOTE: 'continue' avoids poll_delay 25321 */ 25322 busy_count += (SEC_TO_CSEC - 1); 25323 continue; 25324 25325 } else if ((sensep != NULL) && 25326 (scsi_sense_key(sensep) == KEY_NOT_READY) && 25327 (scsi_sense_asc(sensep) == 0x04) && 25328 (scsi_sense_ascq(sensep) == 0x01)) { 25329 /* 25330 * Not ready -> ready - try again. 25331 * 04h/01h: LUN IS IN PROCESS OF BECOMING READY 25332 * ...same as STATUS_BUSY 25333 */ 25334 poll_delay = 100 * CSEC; /* 1 sec. */ 25335 busy_count += (SEC_TO_CSEC - 1); 25336 25337 } else { 25338 /* BAD status - give up. 
*/ 25339 break; 25340 } 25341 } 25342 25343 if (((curthread->t_flag & T_INTR_THREAD) == 0) && 25344 !do_polled_io) { 25345 delay(drv_usectohz(poll_delay)); 25346 } else { 25347 /* we busy wait during cpr_dump or interrupt threads */ 25348 drv_usecwait(poll_delay); 25349 } 25350 } 25351 25352 pkt->pkt_flags = savef; 25353 pkt->pkt_comp = savec; 25354 pkt->pkt_time = savet; 25355 25356 /* return on error */ 25357 if (rval) 25358 return (rval); 25359 25360 /* 25361 * This is not a performance critical code path. 25362 * 25363 * As an accommodation for scsi_poll callers, to avoid ddi_dma_sync() 25364 * issues associated with looking at DMA memory prior to 25365 * scsi_pkt_destroy(), we scsi_sync_pkt() prior to return. 25366 */ 25367 scsi_sync_pkt(pkt); 25368 return (0); 25369 } 25370 25371 25372 25373 /* 25374 * Function: sd_persistent_reservation_in_read_keys 25375 * 25376 * Description: This routine is the driver entry point for handling CD-ROM 25377 * multi-host persistent reservation requests (MHIOCGRP_INKEYS) 25378 * by sending the SCSI-3 PRIN commands to the device. 25379 * Processes the read keys command response by copying the 25380 * reservation key information into the user provided buffer. 25381 * Support for the 32/64 bit _MULTI_DATAMODEL is implemented. 25382 * 25383 * Arguments: un - Pointer to soft state struct for the target. 25384 * usrp - user provided pointer to multihost Persistent In Read 25385 * Keys structure (mhioc_inkeys_t) 25386 * flag - this argument is a pass through to ddi_copyxxx() 25387 * directly from the mode argument of ioctl(). 25388 * 25389 * Return Code: 0 - Success 25390 * EACCES 25391 * ENOTSUP 25392 * errno return code from sd_send_scsi_cmd() 25393 * 25394 * Context: Can sleep. Does not return until command is completed. 25395 */ 25396 25397 static int 25398 sd_persistent_reservation_in_read_keys(struct sd_lun *un, 25399 mhioc_inkeys_t *usrp, int flag) 25400 { 25401 #ifdef _MULTI_DATAMODEL 25402 struct mhioc_key_list32 li32; 25403 #endif 25404 sd_prin_readkeys_t *in; 25405 mhioc_inkeys_t *ptr; 25406 mhioc_key_list_t li; 25407 uchar_t *data_bufp; 25408 int data_len; 25409 int rval = 0; 25410 size_t copysz; 25411 sd_ssc_t *ssc; 25412 25413 if ((ptr = (mhioc_inkeys_t *)usrp) == NULL) { 25414 return (EINVAL); 25415 } 25416 bzero(&li, sizeof (mhioc_key_list_t)); 25417 25418 ssc = sd_ssc_init(un); 25419 25420 /* 25421 * Get the listsize from user 25422 */ 25423 #ifdef _MULTI_DATAMODEL 25424 25425 switch (ddi_model_convert_from(flag & FMODELS)) { 25426 case DDI_MODEL_ILP32: 25427 copysz = sizeof (struct mhioc_key_list32); 25428 if (ddi_copyin(ptr->li, &li32, copysz, flag)) { 25429 SD_ERROR(SD_LOG_IOCTL_MHD, un, 25430 "sd_persistent_reservation_in_read_keys: " 25431 "failed ddi_copyin: mhioc_key_list32_t\n"); 25432 rval = EFAULT; 25433 goto done; 25434 } 25435 li.listsize = li32.listsize; 25436 li.list = (mhioc_resv_key_t *)(uintptr_t)li32.list; 25437 break; 25438 25439 case DDI_MODEL_NONE: 25440 copysz = sizeof (mhioc_key_list_t); 25441 if (ddi_copyin(ptr->li, &li, copysz, flag)) { 25442 SD_ERROR(SD_LOG_IOCTL_MHD, un, 25443 "sd_persistent_reservation_in_read_keys: " 25444 "failed ddi_copyin: mhioc_key_list_t\n"); 25445 rval = EFAULT; 25446 goto done; 25447 } 25448 break; 25449 } 25450 25451 #else /* ! 
_MULTI_DATAMODEL */ 25452 copysz = sizeof (mhioc_key_list_t); 25453 if (ddi_copyin(ptr->li, &li, copysz, flag)) { 25454 SD_ERROR(SD_LOG_IOCTL_MHD, un, 25455 "sd_persistent_reservation_in_read_keys: " 25456 "failed ddi_copyin: mhioc_key_list_t\n"); 25457 rval = EFAULT; 25458 goto done; 25459 } 25460 #endif 25461 25462 data_len = li.listsize * MHIOC_RESV_KEY_SIZE; 25463 data_len += (sizeof (sd_prin_readkeys_t) - sizeof (caddr_t)); 25464 data_bufp = kmem_zalloc(data_len, KM_SLEEP); 25465 25466 rval = sd_send_scsi_PERSISTENT_RESERVE_IN(ssc, SD_READ_KEYS, 25467 data_len, data_bufp); 25468 if (rval != 0) { 25469 if (rval == EIO) 25470 sd_ssc_assessment(ssc, SD_FMT_IGNORE_COMPROMISE); 25471 else 25472 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 25473 goto done; 25474 } 25475 in = (sd_prin_readkeys_t *)data_bufp; 25476 ptr->generation = BE_32(in->generation); 25477 li.listlen = BE_32(in->len) / MHIOC_RESV_KEY_SIZE; 25478 25479 /* 25480 * Return the min(listsize, listlen) keys 25481 */ 25482 #ifdef _MULTI_DATAMODEL 25483 25484 switch (ddi_model_convert_from(flag & FMODELS)) { 25485 case DDI_MODEL_ILP32: 25486 li32.listlen = li.listlen; 25487 if (ddi_copyout(&li32, ptr->li, copysz, flag)) { 25488 SD_ERROR(SD_LOG_IOCTL_MHD, un, 25489 "sd_persistent_reservation_in_read_keys: " 25490 "failed ddi_copyout: mhioc_key_list32_t\n"); 25491 rval = EFAULT; 25492 goto done; 25493 } 25494 break; 25495 25496 case DDI_MODEL_NONE: 25497 if (ddi_copyout(&li, ptr->li, copysz, flag)) { 25498 SD_ERROR(SD_LOG_IOCTL_MHD, un, 25499 "sd_persistent_reservation_in_read_keys: " 25500 "failed ddi_copyout: mhioc_key_list_t\n"); 25501 rval = EFAULT; 25502 goto done; 25503 } 25504 break; 25505 } 25506 25507 #else /* ! _MULTI_DATAMODEL */ 25508 25509 if (ddi_copyout(&li, ptr->li, copysz, flag)) { 25510 SD_ERROR(SD_LOG_IOCTL_MHD, un, 25511 "sd_persistent_reservation_in_read_keys: " 25512 "failed ddi_copyout: mhioc_key_list_t\n"); 25513 rval = EFAULT; 25514 goto done; 25515 } 25516 25517 #endif /* _MULTI_DATAMODEL */ 25518 25519 copysz = min(li.listlen * MHIOC_RESV_KEY_SIZE, 25520 li.listsize * MHIOC_RESV_KEY_SIZE); 25521 if (ddi_copyout(&in->keylist, li.list, copysz, flag)) { 25522 SD_ERROR(SD_LOG_IOCTL_MHD, un, 25523 "sd_persistent_reservation_in_read_keys: " 25524 "failed ddi_copyout: keylist\n"); 25525 rval = EFAULT; 25526 } 25527 done: 25528 sd_ssc_fini(ssc); 25529 kmem_free(data_bufp, data_len); 25530 return (rval); 25531 } 25532 25533 25534 /* 25535 * Function: sd_persistent_reservation_in_read_resv 25536 * 25537 * Description: This routine is the driver entry point for handling CD-ROM 25538 * multi-host persistent reservation requests (MHIOCGRP_INRESV) 25539 * by sending the SCSI-3 PRIN commands to the device. 25540 * Process the read persistent reservations command response by 25541 * copying the reservation information into the user provided 25542 * buffer. Support for the 32/64 _MULTI_DATAMODEL is implemented. 25543 * 25544 * Arguments: un - Pointer to soft state struct for the target. 25545 * usrp - user provided pointer to multihost Persistent In Read 25546 * Keys structure (mhioc_inkeys_t) 25547 * flag - this argument is a pass through to ddi_copyxxx() 25548 * directly from the mode argument of ioctl(). 25549 * 25550 * Return Code: 0 - Success 25551 * EACCES 25552 * ENOTSUP 25553 * errno return code from sd_send_scsi_cmd() 25554 * 25555 * Context: Can sleep. Does not return until command is completed. 
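 *
 *		Note: as in sd_persistent_reservation_in_read_keys() above,
 *		a 32-bit caller's mhioc_resv_desc_list32 (ILP32 layout with
 *		32-bit pointers) is converted to the native form on copyin
 *		and converted back on copyout.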
25556 */ 25557 25558 static int 25559 sd_persistent_reservation_in_read_resv(struct sd_lun *un, 25560 mhioc_inresvs_t *usrp, int flag) 25561 { 25562 #ifdef _MULTI_DATAMODEL 25563 struct mhioc_resv_desc_list32 resvlist32; 25564 #endif 25565 sd_prin_readresv_t *in; 25566 mhioc_inresvs_t *ptr; 25567 sd_readresv_desc_t *readresv_ptr; 25568 mhioc_resv_desc_list_t resvlist; 25569 mhioc_resv_desc_t resvdesc; 25570 uchar_t *data_bufp = NULL; 25571 int data_len; 25572 int rval = 0; 25573 int i; 25574 size_t copysz; 25575 mhioc_resv_desc_t *bufp; 25576 sd_ssc_t *ssc; 25577 25578 if ((ptr = usrp) == NULL) { 25579 return (EINVAL); 25580 } 25581 25582 ssc = sd_ssc_init(un); 25583 25584 /* 25585 * Get the listsize from user 25586 */ 25587 #ifdef _MULTI_DATAMODEL 25588 switch (ddi_model_convert_from(flag & FMODELS)) { 25589 case DDI_MODEL_ILP32: 25590 copysz = sizeof (struct mhioc_resv_desc_list32); 25591 if (ddi_copyin(ptr->li, &resvlist32, copysz, flag)) { 25592 SD_ERROR(SD_LOG_IOCTL_MHD, un, 25593 "sd_persistent_reservation_in_read_resv: " 25594 "failed ddi_copyin: mhioc_resv_desc_list_t\n"); 25595 rval = EFAULT; 25596 goto done; 25597 } 25598 resvlist.listsize = resvlist32.listsize; 25599 resvlist.list = (mhioc_resv_desc_t *)(uintptr_t)resvlist32.list; 25600 break; 25601 25602 case DDI_MODEL_NONE: 25603 copysz = sizeof (mhioc_resv_desc_list_t); 25604 if (ddi_copyin(ptr->li, &resvlist, copysz, flag)) { 25605 SD_ERROR(SD_LOG_IOCTL_MHD, un, 25606 "sd_persistent_reservation_in_read_resv: " 25607 "failed ddi_copyin: mhioc_resv_desc_list_t\n"); 25608 rval = EFAULT; 25609 goto done; 25610 } 25611 break; 25612 } 25613 #else /* ! _MULTI_DATAMODEL */ 25614 copysz = sizeof (mhioc_resv_desc_list_t); 25615 if (ddi_copyin(ptr->li, &resvlist, copysz, flag)) { 25616 SD_ERROR(SD_LOG_IOCTL_MHD, un, 25617 "sd_persistent_reservation_in_read_resv: " 25618 "failed ddi_copyin: mhioc_resv_desc_list_t\n"); 25619 rval = EFAULT; 25620 goto done; 25621 } 25622 #endif /* ! _MULTI_DATAMODEL */ 25623 25624 data_len = resvlist.listsize * SCSI3_RESV_DESC_LEN; 25625 data_len += (sizeof (sd_prin_readresv_t) - sizeof (caddr_t)); 25626 data_bufp = kmem_zalloc(data_len, KM_SLEEP); 25627 25628 rval = sd_send_scsi_PERSISTENT_RESERVE_IN(ssc, SD_READ_RESV, 25629 data_len, data_bufp); 25630 if (rval != 0) { 25631 if (rval == EIO) 25632 sd_ssc_assessment(ssc, SD_FMT_IGNORE_COMPROMISE); 25633 else 25634 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 25635 goto done; 25636 } 25637 in = (sd_prin_readresv_t *)data_bufp; 25638 ptr->generation = BE_32(in->generation); 25639 resvlist.listlen = BE_32(in->len) / SCSI3_RESV_DESC_LEN; 25640 25641 /* 25642 * Return the min(listsize, listlen( keys 25643 */ 25644 #ifdef _MULTI_DATAMODEL 25645 25646 switch (ddi_model_convert_from(flag & FMODELS)) { 25647 case DDI_MODEL_ILP32: 25648 resvlist32.listlen = resvlist.listlen; 25649 if (ddi_copyout(&resvlist32, ptr->li, copysz, flag)) { 25650 SD_ERROR(SD_LOG_IOCTL_MHD, un, 25651 "sd_persistent_reservation_in_read_resv: " 25652 "failed ddi_copyout: mhioc_resv_desc_list_t\n"); 25653 rval = EFAULT; 25654 goto done; 25655 } 25656 break; 25657 25658 case DDI_MODEL_NONE: 25659 if (ddi_copyout(&resvlist, ptr->li, copysz, flag)) { 25660 SD_ERROR(SD_LOG_IOCTL_MHD, un, 25661 "sd_persistent_reservation_in_read_resv: " 25662 "failed ddi_copyout: mhioc_resv_desc_list_t\n"); 25663 rval = EFAULT; 25664 goto done; 25665 } 25666 break; 25667 } 25668 25669 #else /* ! 
_MULTI_DATAMODEL */ 25670 25671 if (ddi_copyout(&resvlist, ptr->li, copysz, flag)) { 25672 SD_ERROR(SD_LOG_IOCTL_MHD, un, 25673 "sd_persistent_reservation_in_read_resv: " 25674 "failed ddi_copyout: mhioc_resv_desc_list_t\n"); 25675 rval = EFAULT; 25676 goto done; 25677 } 25678 25679 #endif /* ! _MULTI_DATAMODEL */ 25680 25681 readresv_ptr = (sd_readresv_desc_t *)&in->readresv_desc; 25682 bufp = resvlist.list; 25683 copysz = sizeof (mhioc_resv_desc_t); 25684 for (i = 0; i < min(resvlist.listlen, resvlist.listsize); 25685 i++, readresv_ptr++, bufp++) { 25686 25687 bcopy(&readresv_ptr->resvkey, &resvdesc.key, 25688 MHIOC_RESV_KEY_SIZE); 25689 resvdesc.type = readresv_ptr->type; 25690 resvdesc.scope = readresv_ptr->scope; 25691 resvdesc.scope_specific_addr = 25692 BE_32(readresv_ptr->scope_specific_addr); 25693 25694 if (ddi_copyout(&resvdesc, bufp, copysz, flag)) { 25695 SD_ERROR(SD_LOG_IOCTL_MHD, un, 25696 "sd_persistent_reservation_in_read_resv: " 25697 "failed ddi_copyout: resvlist\n"); 25698 rval = EFAULT; 25699 goto done; 25700 } 25701 } 25702 done: 25703 sd_ssc_fini(ssc); 25704 /* only if data_bufp is allocated, we need to free it */ 25705 if (data_bufp) { 25706 kmem_free(data_bufp, data_len); 25707 } 25708 return (rval); 25709 } 25710 25711 25712 /* 25713 * Function: sr_change_blkmode() 25714 * 25715 * Description: This routine is the driver entry point for handling CD-ROM 25716 * block mode ioctl requests. Support for returning and changing 25717 * the current block size in use by the device is implemented. The 25718 * LBA size is changed via a MODE SELECT Block Descriptor. 25719 * 25720 * This routine issues a mode sense with an allocation length of 25721 * 12 bytes for the mode page header and a single block descriptor. 25722 * 25723 * Arguments: dev - the device 'dev_t' 25724 * cmd - the request type; one of CDROMGBLKMODE (get) or 25725 * CDROMSBLKMODE (set) 25726 * data - current block size or requested block size 25727 * flag - this argument is a pass through to ddi_copyxxx() directly 25728 * from the mode argument of ioctl(). 25729 * 25730 * Return Code: the code returned by sd_send_scsi_cmd() 25731 * EINVAL if invalid arguments are provided 25732 * EFAULT if ddi_copyxxx() fails 25733 * ENXIO if fail ddi_get_soft_state 25734 * EIO if invalid mode sense block descriptor length 25735 * 25736 */ 25737 25738 static int 25739 sr_change_blkmode(dev_t dev, int cmd, intptr_t data, int flag) 25740 { 25741 struct sd_lun *un = NULL; 25742 struct mode_header *sense_mhp, *select_mhp; 25743 struct block_descriptor *sense_desc, *select_desc; 25744 int current_bsize; 25745 int rval = EINVAL; 25746 uchar_t *sense = NULL; 25747 uchar_t *select = NULL; 25748 sd_ssc_t *ssc; 25749 25750 ASSERT((cmd == CDROMGBLKMODE) || (cmd == CDROMSBLKMODE)); 25751 25752 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 25753 return (ENXIO); 25754 } 25755 25756 /* 25757 * The block length is changed via the Mode Select block descriptor, the 25758 * "Read/Write Error Recovery" mode page (0x1) contents are not actually 25759 * required as part of this routine. Therefore the mode sense allocation 25760 * length is specified to be the length of a mode page header and a 25761 * block descriptor. 
25762 */ 25763 sense = kmem_zalloc(BUFLEN_CHG_BLK_MODE, KM_SLEEP); 25764 25765 ssc = sd_ssc_init(un); 25766 rval = sd_send_scsi_MODE_SENSE(ssc, CDB_GROUP0, sense, 25767 BUFLEN_CHG_BLK_MODE, MODEPAGE_ERR_RECOV, SD_PATH_STANDARD); 25768 sd_ssc_fini(ssc); 25769 if (rval != 0) { 25770 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 25771 "sr_change_blkmode: Mode Sense Failed\n"); 25772 kmem_free(sense, BUFLEN_CHG_BLK_MODE); 25773 return (rval); 25774 } 25775 25776 /* Check the block descriptor len to handle only 1 block descriptor */ 25777 sense_mhp = (struct mode_header *)sense; 25778 if ((sense_mhp->bdesc_length == 0) || 25779 (sense_mhp->bdesc_length > MODE_BLK_DESC_LENGTH)) { 25780 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 25781 "sr_change_blkmode: Mode Sense returned invalid block" 25782 " descriptor length\n"); 25783 kmem_free(sense, BUFLEN_CHG_BLK_MODE); 25784 return (EIO); 25785 } 25786 sense_desc = (struct block_descriptor *)(sense + MODE_HEADER_LENGTH); 25787 current_bsize = ((sense_desc->blksize_hi << 16) | 25788 (sense_desc->blksize_mid << 8) | sense_desc->blksize_lo); 25789 25790 /* Process command */ 25791 switch (cmd) { 25792 case CDROMGBLKMODE: 25793 /* Return the block size obtained during the mode sense */ 25794 if (ddi_copyout(&current_bsize, (void *)data, 25795 sizeof (int), flag) != 0) 25796 rval = EFAULT; 25797 break; 25798 case CDROMSBLKMODE: 25799 /* Validate the requested block size */ 25800 switch (data) { 25801 case CDROM_BLK_512: 25802 case CDROM_BLK_1024: 25803 case CDROM_BLK_2048: 25804 case CDROM_BLK_2056: 25805 case CDROM_BLK_2336: 25806 case CDROM_BLK_2340: 25807 case CDROM_BLK_2352: 25808 case CDROM_BLK_2368: 25809 case CDROM_BLK_2448: 25810 case CDROM_BLK_2646: 25811 case CDROM_BLK_2647: 25812 break; 25813 default: 25814 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 25815 "sr_change_blkmode: " 25816 "Block Size '%ld' Not Supported\n", data); 25817 kmem_free(sense, BUFLEN_CHG_BLK_MODE); 25818 return (EINVAL); 25819 } 25820 25821 /* 25822 * The current block size matches the requested block size so 25823 * there is no need to send the mode select to change the size 25824 */ 25825 if (current_bsize == data) { 25826 break; 25827 } 25828 25829 /* Build the select data for the requested block size */ 25830 select = kmem_zalloc(BUFLEN_CHG_BLK_MODE, KM_SLEEP); 25831 select_mhp = (struct mode_header *)select; 25832 select_desc = 25833 (struct block_descriptor *)(select + MODE_HEADER_LENGTH); 25834 /* 25835 * The LBA size is changed via the block descriptor, so the 25836 * descriptor is built according to the user data 25837 */ 25838 select_mhp->bdesc_length = MODE_BLK_DESC_LENGTH; 25839 select_desc->blksize_hi = (char)(((data) & 0x00ff0000) >> 16); 25840 select_desc->blksize_mid = (char)(((data) & 0x0000ff00) >> 8); 25841 select_desc->blksize_lo = (char)((data) & 0x000000ff); 25842 25843 /* Send the mode select for the requested block size */ 25844 ssc = sd_ssc_init(un); 25845 rval = sd_send_scsi_MODE_SELECT(ssc, CDB_GROUP0, 25846 select, BUFLEN_CHG_BLK_MODE, SD_DONTSAVE_PAGE, 25847 SD_PATH_STANDARD); 25848 sd_ssc_fini(ssc); 25849 if (rval != 0) { 25850 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 25851 "sr_change_blkmode: Mode Select Failed\n"); 25852 /* 25853 * The mode select failed for the requested block size, 25854 * so reset the data for the original block size and 25855 * send it to the target. The error is indicated by the 25856 * return value for the failed mode select. 
25857 */ 25858 select_desc->blksize_hi = sense_desc->blksize_hi; 25859 select_desc->blksize_mid = sense_desc->blksize_mid; 25860 select_desc->blksize_lo = sense_desc->blksize_lo; 25861 ssc = sd_ssc_init(un); 25862 (void) sd_send_scsi_MODE_SELECT(ssc, CDB_GROUP0, 25863 select, BUFLEN_CHG_BLK_MODE, SD_DONTSAVE_PAGE, 25864 SD_PATH_STANDARD); 25865 sd_ssc_fini(ssc); 25866 } else { 25867 ASSERT(!mutex_owned(SD_MUTEX(un))); 25868 mutex_enter(SD_MUTEX(un)); 25869 sd_update_block_info(un, (uint32_t)data, 0); 25870 mutex_exit(SD_MUTEX(un)); 25871 } 25872 break; 25873 default: 25874 /* should not reach here, but check anyway */ 25875 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 25876 "sr_change_blkmode: Command '%x' Not Supported\n", cmd); 25877 rval = EINVAL; 25878 break; 25879 } 25880 25881 if (select) { 25882 kmem_free(select, BUFLEN_CHG_BLK_MODE); 25883 } 25884 if (sense) { 25885 kmem_free(sense, BUFLEN_CHG_BLK_MODE); 25886 } 25887 return (rval); 25888 } 25889 25890 25891 /* 25892 * Note: The following sr_change_speed() and sr_atapi_change_speed() routines 25893 * implement driver support for getting and setting the CD speed. The command 25894 * set used will be based on the device type. If the device has not been 25895 * identified as MMC the Toshiba vendor specific mode page will be used. If 25896 * the device is MMC but does not support the Real Time Streaming feature 25897 * the SET CD SPEED command will be used to set speed and mode page 0x2A will 25898 * be used to read the speed. 25899 */ 25900 25901 /* 25902 * Function: sr_change_speed() 25903 * 25904 * Description: This routine is the driver entry point for handling CD-ROM 25905 * drive speed ioctl requests for devices supporting the Toshiba 25906 * vendor specific drive speed mode page. Support for returning 25907 * and changing the current drive speed in use by the device is 25908 * implemented. 25909 * 25910 * Arguments: dev - the device 'dev_t' 25911 * cmd - the request type; one of CDROMGDRVSPEED (get) or 25912 * CDROMSDRVSPEED (set) 25913 * data - current drive speed or requested drive speed 25914 * flag - this argument is a pass through to ddi_copyxxx() directly 25915 * from the mode argument of ioctl(). 25916 * 25917 * Return Code: the code returned by sd_send_scsi_cmd() 25918 * EINVAL if invalid arguments are provided 25919 * EFAULT if ddi_copyxxx() fails 25920 * ENXIO if fail ddi_get_soft_state 25921 * EIO if invalid mode sense block descriptor length 25922 */ 25923 25924 static int 25925 sr_change_speed(dev_t dev, int cmd, intptr_t data, int flag) 25926 { 25927 struct sd_lun *un = NULL; 25928 struct mode_header *sense_mhp, *select_mhp; 25929 struct mode_speed *sense_page, *select_page; 25930 int current_speed; 25931 int rval = EINVAL; 25932 int bd_len; 25933 uchar_t *sense = NULL; 25934 uchar_t *select = NULL; 25935 sd_ssc_t *ssc; 25936 25937 ASSERT((cmd == CDROMGDRVSPEED) || (cmd == CDROMSDRVSPEED)); 25938 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 25939 return (ENXIO); 25940 } 25941 25942 /* 25943 * Note: The drive speed is being modified here according to a Toshiba 25944 * vendor specific mode page (0x31). 
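 *
 * Editorial sketch (illustrative only, not from the original source):
 * the returned data is parsed as a mode header, an optional block
 * descriptor, and then the speed page itself, roughly
 *
 *	page = (struct mode_speed *)
 *	    (sense + MODE_HEADER_LENGTH + sense_mhp->bdesc_length);
 *	speed = page->speed;
 *
 * Note the quirk handled below: a raw speed value of 0x2 is reported
 * back to CDROMGDRVSPEED callers as CDROM_TWELVE_SPEED.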
25945 */ 25946 sense = kmem_zalloc(BUFLEN_MODE_CDROM_SPEED, KM_SLEEP); 25947 25948 ssc = sd_ssc_init(un); 25949 rval = sd_send_scsi_MODE_SENSE(ssc, CDB_GROUP0, sense, 25950 BUFLEN_MODE_CDROM_SPEED, CDROM_MODE_SPEED, 25951 SD_PATH_STANDARD); 25952 sd_ssc_fini(ssc); 25953 if (rval != 0) { 25954 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 25955 "sr_change_speed: Mode Sense Failed\n"); 25956 kmem_free(sense, BUFLEN_MODE_CDROM_SPEED); 25957 return (rval); 25958 } 25959 sense_mhp = (struct mode_header *)sense; 25960 25961 /* Check the block descriptor len to handle only 1 block descriptor */ 25962 bd_len = sense_mhp->bdesc_length; 25963 if (bd_len > MODE_BLK_DESC_LENGTH) { 25964 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 25965 "sr_change_speed: Mode Sense returned invalid block " 25966 "descriptor length\n"); 25967 kmem_free(sense, BUFLEN_MODE_CDROM_SPEED); 25968 return (EIO); 25969 } 25970 25971 sense_page = (struct mode_speed *) 25972 (sense + MODE_HEADER_LENGTH + sense_mhp->bdesc_length); 25973 current_speed = sense_page->speed; 25974 25975 /* Process command */ 25976 switch (cmd) { 25977 case CDROMGDRVSPEED: 25978 /* Return the drive speed obtained during the mode sense */ 25979 if (current_speed == 0x2) { 25980 current_speed = CDROM_TWELVE_SPEED; 25981 } 25982 if (ddi_copyout(&current_speed, (void *)data, 25983 sizeof (int), flag) != 0) { 25984 rval = EFAULT; 25985 } 25986 break; 25987 case CDROMSDRVSPEED: 25988 /* Validate the requested drive speed */ 25989 switch ((uchar_t)data) { 25990 case CDROM_TWELVE_SPEED: 25991 data = 0x2; 25992 /*FALLTHROUGH*/ 25993 case CDROM_NORMAL_SPEED: 25994 case CDROM_DOUBLE_SPEED: 25995 case CDROM_QUAD_SPEED: 25996 case CDROM_MAXIMUM_SPEED: 25997 break; 25998 default: 25999 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 26000 "sr_change_speed: " 26001 "Drive Speed '%d' Not Supported\n", (uchar_t)data); 26002 kmem_free(sense, BUFLEN_MODE_CDROM_SPEED); 26003 return (EINVAL); 26004 } 26005 26006 /* 26007 * The current drive speed matches the requested drive speed so 26008 * there is no need to send the mode select to change the speed 26009 */ 26010 if (current_speed == data) { 26011 break; 26012 } 26013 26014 /* Build the select data for the requested drive speed */ 26015 select = kmem_zalloc(BUFLEN_MODE_CDROM_SPEED, KM_SLEEP); 26016 select_mhp = (struct mode_header *)select; 26017 select_mhp->bdesc_length = 0; 26018 select_page = 26019 (struct mode_speed *)(select + MODE_HEADER_LENGTH); 26022 select_page->mode_page.code = CDROM_MODE_SPEED; 26023 select_page->mode_page.length = 2; 26024 select_page->speed = (uchar_t)data; 26025 26026 /* Send the mode select for the requested drive speed */ 26027 ssc = sd_ssc_init(un); 26028 rval = sd_send_scsi_MODE_SELECT(ssc, CDB_GROUP0, select, 26029 MODEPAGE_CDROM_SPEED_LEN + MODE_HEADER_LENGTH, 26030 SD_DONTSAVE_PAGE, SD_PATH_STANDARD); 26031 sd_ssc_fini(ssc); 26032 if (rval != 0) { 26033 /* 26034 * The mode select failed for the requested drive speed, 26035 * so reset the data for the original drive speed and 26036 * send it to the target. The error is indicated by the 26037 * return value for the failed mode select. 
26038 */ 26039 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 26040 "sr_change_speed: Mode Select Failed\n"); 26041 select_page->speed = sense_page->speed; 26042 ssc = sd_ssc_init(un); 26043 (void) sd_send_scsi_MODE_SELECT(ssc, CDB_GROUP0, select, 26044 MODEPAGE_CDROM_SPEED_LEN + MODE_HEADER_LENGTH, 26045 SD_DONTSAVE_PAGE, SD_PATH_STANDARD); 26046 sd_ssc_fini(ssc); 26047 } 26048 break; 26049 default: 26050 /* should not reach here, but check anyway */ 26051 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 26052 "sr_change_speed: Command '%x' Not Supported\n", cmd); 26053 rval = EINVAL; 26054 break; 26055 } 26056 26057 if (select) { 26058 kmem_free(select, BUFLEN_MODE_CDROM_SPEED); 26059 } 26060 if (sense) { 26061 kmem_free(sense, BUFLEN_MODE_CDROM_SPEED); 26062 } 26063 26064 return (rval); 26065 } 26066 26067 26068 /* 26069 * Function: sr_atapi_change_speed() 26070 * 26071 * Description: This routine is the driver entry point for handling CD-ROM 26072 * drive speed ioctl requests for MMC devices that do not support 26073 * the Real Time Streaming feature (0x107). 26074 * 26075 * Note: This routine will use the SET SPEED command which may not 26076 * be supported by all devices. 26077 * 26078 * Arguments: dev - the device 'dev_t' 26079 * cmd - the request type; one of CDROMGDRVSPEED (get) or 26080 * CDROMSDRVSPEED (set) 26081 * data - current drive speed or requested drive speed 26082 * flag - this argument is a pass through to ddi_copyxxx() directly 26083 * from the mode argument of ioctl(). 26084 * 26085 * Return Code: the code returned by sd_send_scsi_cmd() 26086 * EINVAL if invalid arguments are provided 26087 * EFAULT if ddi_copyxxx() fails 26088 * ENXIO if fail ddi_get_soft_state 26089 * EIO if invalid mode sense block descriptor length 26090 */ 26091 26092 static int 26093 sr_atapi_change_speed(dev_t dev, int cmd, intptr_t data, int flag) 26094 { 26095 struct sd_lun *un; 26096 struct uscsi_cmd *com = NULL; 26097 struct mode_header_grp2 *sense_mhp; 26098 uchar_t *sense_page; 26099 uchar_t *sense = NULL; 26100 char cdb[CDB_GROUP5]; 26101 int bd_len; 26102 int current_speed = 0; 26103 int max_speed = 0; 26104 int rval; 26105 sd_ssc_t *ssc; 26106 26107 ASSERT((cmd == CDROMGDRVSPEED) || (cmd == CDROMSDRVSPEED)); 26108 26109 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 26110 return (ENXIO); 26111 } 26112 26113 sense = kmem_zalloc(BUFLEN_MODE_CDROM_CAP, KM_SLEEP); 26114 26115 ssc = sd_ssc_init(un); 26116 rval = sd_send_scsi_MODE_SENSE(ssc, CDB_GROUP1, sense, 26117 BUFLEN_MODE_CDROM_CAP, MODEPAGE_CDROM_CAP, 26118 SD_PATH_STANDARD); 26119 sd_ssc_fini(ssc); 26120 if (rval != 0) { 26121 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 26122 "sr_atapi_change_speed: Mode Sense Failed\n"); 26123 kmem_free(sense, BUFLEN_MODE_CDROM_CAP); 26124 return (rval); 26125 } 26126 26127 /* Check the block descriptor len to handle only 1 block descriptor */ 26128 sense_mhp = (struct mode_header_grp2 *)sense; 26129 bd_len = (sense_mhp->bdesc_length_hi << 8) | sense_mhp->bdesc_length_lo; 26130 if (bd_len > MODE_BLK_DESC_LENGTH) { 26131 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 26132 "sr_atapi_change_speed: Mode Sense returned invalid " 26133 "block descriptor length\n"); 26134 kmem_free(sense, BUFLEN_MODE_CDROM_CAP); 26135 return (EIO); 26136 } 26137 26138 /* Calculate the current and maximum drive speeds */ 26139 sense_page = (uchar_t *)(sense + MODE_HEADER_LENGTH_GRP2 + bd_len); 26140 current_speed = (sense_page[14] << 8) | sense_page[15]; 26141 max_speed = (sense_page[8] << 8) | sense_page[9]; 26142 26143 
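	/*
	 * Editorial note (illustrative, not from the original source):
	 * within mode page 0x2A these speeds are 16-bit big-endian
	 * values in KB/sec. Assuming SD_SPEED_1X is the 1x CD data rate
	 * of 176 KB/sec, a 4x drive would report 704 here, i.e.
	 * sense_page[8] == 0x02 and sense_page[9] == 0xC0.
	 */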
/* Process the command */ 26144 switch (cmd) { 26145 case CDROMGDRVSPEED: 26146 current_speed /= SD_SPEED_1X; 26147 if (ddi_copyout(&current_speed, (void *)data, 26148 sizeof (int), flag) != 0) 26149 rval = EFAULT; 26150 break; 26151 case CDROMSDRVSPEED: 26152 /* Convert the speed code to KB/sec */ 26153 switch ((uchar_t)data) { 26154 case CDROM_NORMAL_SPEED: 26155 current_speed = SD_SPEED_1X; 26156 break; 26157 case CDROM_DOUBLE_SPEED: 26158 current_speed = 2 * SD_SPEED_1X; 26159 break; 26160 case CDROM_QUAD_SPEED: 26161 current_speed = 4 * SD_SPEED_1X; 26162 break; 26163 case CDROM_TWELVE_SPEED: 26164 current_speed = 12 * SD_SPEED_1X; 26165 break; 26166 case CDROM_MAXIMUM_SPEED: 26167 current_speed = 0xffff; 26168 break; 26169 default: 26170 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 26171 "sr_atapi_change_speed: invalid drive speed %d\n", 26172 (uchar_t)data); 26173 kmem_free(sense, BUFLEN_MODE_CDROM_CAP); 26174 return (EINVAL); 26175 } 26176 26177 /* Check the request against the drive's max speed. */ 26178 if (current_speed != 0xffff) { 26179 if (current_speed > max_speed) { 26180 kmem_free(sense, BUFLEN_MODE_CDROM_CAP); 26181 return (EINVAL); 26182 } 26183 } 26184 26185 /* 26186 * Build and send the SET SPEED command 26187 * 26188 * Note: The SET SPEED (0xBB) command used in this routine is 26189 * obsolete per the SCSI MMC spec but still supported in the 26190 * MT FUJI vendor spec. Most equipment is adhering to MT FUJI; 26191 * therefore the command is still implemented in this routine. 26192 */ 26193 bzero(cdb, sizeof (cdb)); 26194 cdb[0] = (char)SCMD_SET_CDROM_SPEED; 26195 cdb[2] = (uchar_t)(current_speed >> 8); 26196 cdb[3] = (uchar_t)current_speed; 26197 com = kmem_zalloc(sizeof (*com), KM_SLEEP); 26198 com->uscsi_cdb = (caddr_t)cdb; 26199 com->uscsi_cdblen = CDB_GROUP5; 26200 com->uscsi_bufaddr = NULL; 26201 com->uscsi_buflen = 0; 26202 com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT; 26203 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, 0, SD_PATH_STANDARD); 26204 break; 26205 default: 26206 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 26207 "sr_atapi_change_speed: Command '%x' Not Supported\n", cmd); 26208 rval = EINVAL; 26209 } 26210 26211 if (sense) { 26212 kmem_free(sense, BUFLEN_MODE_CDROM_CAP); 26213 } 26214 if (com) { 26215 kmem_free(com, sizeof (*com)); 26216 } 26217 return (rval); 26218 } 26219 26220 26221 /* 26222 * Function: sr_pause_resume() 26223 * 26224 * Description: This routine is the driver entry point for handling CD-ROM 26225 * pause/resume ioctl requests. This only affects the audio play 26226 * operation. 26227 * 26228 * Arguments: dev - the device 'dev_t' 26229 * cmd - the request type; one of CDROMPAUSE or CDROMRESUME, used 26230 * for setting the resume bit of the cdb. 
26231 * 26232 * Return Code: the code returned by sd_send_scsi_cmd() 26233 * EINVAL if invalid mode specified 26234 * 26235 */ 26236 26237 static int 26238 sr_pause_resume(dev_t dev, int cmd) 26239 { 26240 struct sd_lun *un; 26241 struct uscsi_cmd *com; 26242 char cdb[CDB_GROUP1]; 26243 int rval; 26244 26245 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 26246 return (ENXIO); 26247 } 26248 26249 com = kmem_zalloc(sizeof (*com), KM_SLEEP); 26250 bzero(cdb, CDB_GROUP1); 26251 cdb[0] = SCMD_PAUSE_RESUME; 26252 switch (cmd) { 26253 case CDROMRESUME: 26254 cdb[8] = 1; 26255 break; 26256 case CDROMPAUSE: 26257 cdb[8] = 0; 26258 break; 26259 default: 26260 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, "sr_pause_resume:" 26261 " Command '%x' Not Supported\n", cmd); 26262 rval = EINVAL; 26263 goto done; 26264 } 26265 26266 com->uscsi_cdb = cdb; 26267 com->uscsi_cdblen = CDB_GROUP1; 26268 com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT; 26269 26270 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_SYSSPACE, 26271 SD_PATH_STANDARD); 26272 26273 done: 26274 kmem_free(com, sizeof (*com)); 26275 return (rval); 26276 } 26277 26278 26279 /* 26280 * Function: sr_play_msf() 26281 * 26282 * Description: This routine is the driver entry point for handling CD-ROM 26283 * ioctl requests to output the audio signals at the specified 26284 * starting address and continue the audio play until the specified 26285 * ending address (CDROMPLAYMSF) The address is in Minute Second 26286 * Frame (MSF) format. 26287 * 26288 * Arguments: dev - the device 'dev_t' 26289 * data - pointer to user provided audio msf structure, 26290 * specifying start/end addresses. 26291 * flag - this argument is a pass through to ddi_copyxxx() 26292 * directly from the mode argument of ioctl(). 
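 *
 * Worked illustration (editorial addition): a starting address of
 * 2 minutes, 30 seconds, frame 15 is placed in CDB bytes 3-5 as
 * 2/30/15; on drives flagged with un_f_cfg_playmsf_bcd the same
 * values are BCD-encoded as 0x02/0x30/0x15 instead.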
26293 * 26294 * Return Code: the code returned by sd_send_scsi_cmd() 26295 * EFAULT if ddi_copyxxx() fails 26296 * ENXIO if fail ddi_get_soft_state 26297 * EINVAL if data pointer is NULL 26298 */ 26299 26300 static int 26301 sr_play_msf(dev_t dev, caddr_t data, int flag) 26302 { 26303 struct sd_lun *un; 26304 struct uscsi_cmd *com; 26305 struct cdrom_msf msf_struct; 26306 struct cdrom_msf *msf = &msf_struct; 26307 char cdb[CDB_GROUP1]; 26308 int rval; 26309 26310 if (data == NULL) { 26311 return (EINVAL); 26312 } 26313 26314 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 26315 return (ENXIO); 26316 } 26317 26318 if (ddi_copyin(data, msf, sizeof (struct cdrom_msf), flag)) { 26319 return (EFAULT); 26320 } 26321 26322 com = kmem_zalloc(sizeof (*com), KM_SLEEP); 26323 bzero(cdb, CDB_GROUP1); 26324 cdb[0] = SCMD_PLAYAUDIO_MSF; 26325 if (un->un_f_cfg_playmsf_bcd == TRUE) { 26326 cdb[3] = BYTE_TO_BCD(msf->cdmsf_min0); 26327 cdb[4] = BYTE_TO_BCD(msf->cdmsf_sec0); 26328 cdb[5] = BYTE_TO_BCD(msf->cdmsf_frame0); 26329 cdb[6] = BYTE_TO_BCD(msf->cdmsf_min1); 26330 cdb[7] = BYTE_TO_BCD(msf->cdmsf_sec1); 26331 cdb[8] = BYTE_TO_BCD(msf->cdmsf_frame1); 26332 } else { 26333 cdb[3] = msf->cdmsf_min0; 26334 cdb[4] = msf->cdmsf_sec0; 26335 cdb[5] = msf->cdmsf_frame0; 26336 cdb[6] = msf->cdmsf_min1; 26337 cdb[7] = msf->cdmsf_sec1; 26338 cdb[8] = msf->cdmsf_frame1; 26339 } 26340 com->uscsi_cdb = cdb; 26341 com->uscsi_cdblen = CDB_GROUP1; 26342 com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT; 26343 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_SYSSPACE, 26344 SD_PATH_STANDARD); 26345 kmem_free(com, sizeof (*com)); 26346 return (rval); 26347 } 26348 26349 26350 /* 26351 * Function: sr_play_trkind() 26352 * 26353 * Description: This routine is the driver entry point for handling CD-ROM 26354 * ioctl requests to output the audio signals at the specified 26355 * starting address and continue the audio play until the specified 26356 * ending address (CDROMPLAYTRKIND). The address is in Track Index 26357 * format. 26358 * 26359 * Arguments: dev - the device 'dev_t' 26360 * data - pointer to user provided audio track/index structure, 26361 * specifying start/end addresses. 26362 * flag - this argument is a pass through to ddi_copyxxx() 26363 * directly from the mode argument of ioctl(). 
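 *
 * Worked illustration (editorial addition): to play track 3 index 1
 * through track 7 index 1, the PLAY AUDIO TRACK/INDEX CDB built below
 * carries cdb[4] = 3, cdb[5] = 1, cdb[7] = 7, cdb[8] = 1.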
26364 * 26365 * Return Code: the code returned by sd_send_scsi_cmd() 26366 * EFAULT if ddi_copyxxx() fails 26367 * ENXIO if fail ddi_get_soft_state 26368 * EINVAL if data pointer is NULL 26369 */ 26370 26371 static int 26372 sr_play_trkind(dev_t dev, caddr_t data, int flag) 26373 { 26374 struct cdrom_ti ti_struct; 26375 struct cdrom_ti *ti = &ti_struct; 26376 struct uscsi_cmd *com = NULL; 26377 char cdb[CDB_GROUP1]; 26378 int rval; 26379 26380 if (data == NULL) { 26381 return (EINVAL); 26382 } 26383 26384 if (ddi_copyin(data, ti, sizeof (struct cdrom_ti), flag)) { 26385 return (EFAULT); 26386 } 26387 26388 com = kmem_zalloc(sizeof (*com), KM_SLEEP); 26389 bzero(cdb, CDB_GROUP1); 26390 cdb[0] = SCMD_PLAYAUDIO_TI; 26391 cdb[4] = ti->cdti_trk0; 26392 cdb[5] = ti->cdti_ind0; 26393 cdb[7] = ti->cdti_trk1; 26394 cdb[8] = ti->cdti_ind1; 26395 com->uscsi_cdb = cdb; 26396 com->uscsi_cdblen = CDB_GROUP1; 26397 com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT; 26398 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_SYSSPACE, 26399 SD_PATH_STANDARD); 26400 kmem_free(com, sizeof (*com)); 26401 return (rval); 26402 } 26403 26404 26405 /* 26406 * Function: sr_read_all_subcodes() 26407 * 26408 * Description: This routine is the driver entry point for handling CD-ROM 26409 * ioctl requests to return raw subcode data while the target is 26410 * playing audio (CDROMSUBCODE). 26411 * 26412 * Arguments: dev - the device 'dev_t' 26413 * data - pointer to user provided cdrom subcode structure, 26414 * specifying the transfer length and address. 26415 * flag - this argument is a pass through to ddi_copyxxx() 26416 * directly from the mode argument of ioctl(). 26417 * 26418 * Return Code: the code returned by sd_send_scsi_cmd() 26419 * EFAULT if ddi_copyxxx() fails 26420 * ENXIO if fail ddi_get_soft_state 26421 * EINVAL if data pointer is NULL 26422 */ 26423 26424 static int 26425 sr_read_all_subcodes(dev_t dev, caddr_t data, int flag) 26426 { 26427 struct sd_lun *un = NULL; 26428 struct uscsi_cmd *com = NULL; 26429 struct cdrom_subcode *subcode = NULL; 26430 int rval; 26431 size_t buflen; 26432 char cdb[CDB_GROUP5]; 26433 26434 #ifdef _MULTI_DATAMODEL 26435 /* To support ILP32 applications in an LP64 world */ 26436 struct cdrom_subcode32 cdrom_subcode32; 26437 struct cdrom_subcode32 *cdsc32 = &cdrom_subcode32; 26438 #endif 26439 if (data == NULL) { 26440 return (EINVAL); 26441 } 26442 26443 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 26444 return (ENXIO); 26445 } 26446 26447 subcode = kmem_zalloc(sizeof (struct cdrom_subcode), KM_SLEEP); 26448 26449 #ifdef _MULTI_DATAMODEL 26450 switch (ddi_model_convert_from(flag & FMODELS)) { 26451 case DDI_MODEL_ILP32: 26452 if (ddi_copyin(data, cdsc32, sizeof (*cdsc32), flag)) { 26453 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 26454 "sr_read_all_subcodes: ddi_copyin Failed\n"); 26455 kmem_free(subcode, sizeof (struct cdrom_subcode)); 26456 return (EFAULT); 26457 } 26458 /* Convert the ILP32 uscsi data from the application to LP64 */ 26459 cdrom_subcode32tocdrom_subcode(cdsc32, subcode); 26460 break; 26461 case DDI_MODEL_NONE: 26462 if (ddi_copyin(data, subcode, 26463 sizeof (struct cdrom_subcode), flag)) { 26464 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 26465 "sr_read_all_subcodes: ddi_copyin Failed\n"); 26466 kmem_free(subcode, sizeof (struct cdrom_subcode)); 26467 return (EFAULT); 26468 } 26469 break; 26470 } 26471 #else /* ! 
_MULTI_DATAMODEL */ 26472 if (ddi_copyin(data, subcode, sizeof (struct cdrom_subcode), flag)) { 26473 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 26474 "sr_read_all_subcodes: ddi_copyin Failed\n"); 26475 kmem_free(subcode, sizeof (struct cdrom_subcode)); 26476 return (EFAULT); 26477 } 26478 #endif /* _MULTI_DATAMODEL */ 26479 26480 /* 26481 * Since MMC-2 expects max 3 bytes for length, check if the 26482 * length input is greater than 3 bytes 26483 */ 26484 if ((subcode->cdsc_length & 0xFF000000) != 0) { 26485 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 26486 "sr_read_all_subcodes: " 26487 "cdrom transfer length too large: %d (limit %d)\n", 26488 subcode->cdsc_length, 0xFFFFFF); 26489 kmem_free(subcode, sizeof (struct cdrom_subcode)); 26490 return (EINVAL); 26491 } 26492 26493 buflen = CDROM_BLK_SUBCODE * subcode->cdsc_length; 26494 com = kmem_zalloc(sizeof (*com), KM_SLEEP); 26495 bzero(cdb, CDB_GROUP5); 26496 26497 if (un->un_f_mmc_cap == TRUE) { 26498 cdb[0] = (char)SCMD_READ_CD; 26499 cdb[2] = (char)0xff; 26500 cdb[3] = (char)0xff; 26501 cdb[4] = (char)0xff; 26502 cdb[5] = (char)0xff; 26503 cdb[6] = (((subcode->cdsc_length) & 0x00ff0000) >> 16); 26504 cdb[7] = (((subcode->cdsc_length) & 0x0000ff00) >> 8); 26505 cdb[8] = ((subcode->cdsc_length) & 0x000000ff); 26506 cdb[10] = 1; 26507 } else { 26508 /* 26509 * Note: A vendor specific command (0xDF) is being used here to 26510 * request a read of all subcodes. 26511 */ 26512 cdb[0] = (char)SCMD_READ_ALL_SUBCODES; 26513 cdb[6] = (((subcode->cdsc_length) & 0xff000000) >> 24); 26514 cdb[7] = (((subcode->cdsc_length) & 0x00ff0000) >> 16); 26515 cdb[8] = (((subcode->cdsc_length) & 0x0000ff00) >> 8); 26516 cdb[9] = ((subcode->cdsc_length) & 0x000000ff); 26517 } 26518 com->uscsi_cdb = cdb; 26519 com->uscsi_cdblen = CDB_GROUP5; 26520 com->uscsi_bufaddr = (caddr_t)subcode->cdsc_addr; 26521 com->uscsi_buflen = buflen; 26522 com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT|USCSI_READ; 26523 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_USERSPACE, 26524 SD_PATH_STANDARD); 26525 kmem_free(subcode, sizeof (struct cdrom_subcode)); 26526 kmem_free(com, sizeof (*com)); 26527 return (rval); 26528 } 26529 26530 26531 /* 26532 * Function: sr_read_subchannel() 26533 * 26534 * Description: This routine is the driver entry point for handling CD-ROM 26535 * ioctl requests to return the Q sub-channel data of the CD 26536 * current position block. (CDROMSUBCHNL) The data includes the 26537 * track number, index number, absolute CD-ROM address (LBA or MSF 26538 * format per the user), track relative CD-ROM address (LBA or MSF 26539 * format per the user), control data, and audio status. 26540 * 26541 * Arguments: dev - the device 'dev_t' 26542 * data - pointer to user provided cdrom sub-channel structure 26543 * flag - this argument is a pass through to ddi_copyxxx() 26544 * directly from the mode argument of ioctl(). 
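 *
 * Editorial sketch (an illustrative assumption): the routine issues
 * READ SUB-CHANNEL with the SUBQ bit (0x40) in CDB byte 2 and data
 * format 0x01 (CD-ROM current position) in byte 3, then decodes the
 * 16-byte response: audio status from byte 1, ADR/control from
 * byte 5, track and index from bytes 6-7, and the absolute and
 * relative addresses from bytes 8-15.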
26545 * 26546 * Return Code: the code returned by sd_send_scsi_cmd() 26547 * EFAULT if ddi_copyxxx() fails 26548 * ENXIO if fail ddi_get_soft_state 26549 * EINVAL if data pointer is NULL 26550 */ 26551 26552 static int 26553 sr_read_subchannel(dev_t dev, caddr_t data, int flag) 26554 { 26555 struct sd_lun *un; 26556 struct uscsi_cmd *com; 26557 struct cdrom_subchnl subchanel; 26558 struct cdrom_subchnl *subchnl = &subchanel; 26559 char cdb[CDB_GROUP1]; 26560 caddr_t buffer; 26561 int rval; 26562 26563 if (data == NULL) { 26564 return (EINVAL); 26565 } 26566 26567 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL || 26568 (un->un_state == SD_STATE_OFFLINE)) { 26569 return (ENXIO); 26570 } 26571 26572 if (ddi_copyin(data, subchnl, sizeof (struct cdrom_subchnl), flag)) { 26573 return (EFAULT); 26574 } 26575 26576 buffer = kmem_zalloc((size_t)16, KM_SLEEP); 26577 bzero(cdb, CDB_GROUP1); 26578 cdb[0] = SCMD_READ_SUBCHANNEL; 26579 /* Set the MSF bit based on the user requested address format */ 26580 cdb[1] = (subchnl->cdsc_format & CDROM_LBA) ? 0 : 0x02; 26581 /* 26582 * Set the Q bit in byte 2 to indicate that Q sub-channel data be 26583 * returned 26584 */ 26585 cdb[2] = 0x40; 26586 /* 26587 * Set byte 3 to specify the return data format. A value of 0x01 26588 * indicates that the CD-ROM current position should be returned. 26589 */ 26590 cdb[3] = 0x01; 26591 cdb[8] = 0x10; 26592 com = kmem_zalloc(sizeof (*com), KM_SLEEP); 26593 com->uscsi_cdb = cdb; 26594 com->uscsi_cdblen = CDB_GROUP1; 26595 com->uscsi_bufaddr = buffer; 26596 com->uscsi_buflen = 16; 26597 com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT|USCSI_READ; 26598 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_SYSSPACE, 26599 SD_PATH_STANDARD); 26600 if (rval != 0) { 26601 kmem_free(buffer, 16); 26602 kmem_free(com, sizeof (*com)); 26603 return (rval); 26604 } 26605 26606 /* Process the returned Q sub-channel data */ 26607 subchnl->cdsc_audiostatus = buffer[1]; 26608 subchnl->cdsc_adr = (buffer[5] & 0xF0); 26609 subchnl->cdsc_ctrl = (buffer[5] & 0x0F); 26610 subchnl->cdsc_trk = buffer[6]; 26611 subchnl->cdsc_ind = buffer[7]; 26612 if (subchnl->cdsc_format & CDROM_LBA) { 26613 subchnl->cdsc_absaddr.lba = 26614 ((uchar_t)buffer[8] << 24) + ((uchar_t)buffer[9] << 16) + 26615 ((uchar_t)buffer[10] << 8) + ((uchar_t)buffer[11]); 26616 subchnl->cdsc_reladdr.lba = 26617 ((uchar_t)buffer[12] << 24) + ((uchar_t)buffer[13] << 16) + 26618 ((uchar_t)buffer[14] << 8) + ((uchar_t)buffer[15]); 26619 } else if (un->un_f_cfg_readsub_bcd == TRUE) { 26620 subchnl->cdsc_absaddr.msf.minute = BCD_TO_BYTE(buffer[9]); 26621 subchnl->cdsc_absaddr.msf.second = BCD_TO_BYTE(buffer[10]); 26622 subchnl->cdsc_absaddr.msf.frame = BCD_TO_BYTE(buffer[11]); 26623 subchnl->cdsc_reladdr.msf.minute = BCD_TO_BYTE(buffer[13]); 26624 subchnl->cdsc_reladdr.msf.second = BCD_TO_BYTE(buffer[14]); 26625 subchnl->cdsc_reladdr.msf.frame = BCD_TO_BYTE(buffer[15]); 26626 } else { 26627 subchnl->cdsc_absaddr.msf.minute = buffer[9]; 26628 subchnl->cdsc_absaddr.msf.second = buffer[10]; 26629 subchnl->cdsc_absaddr.msf.frame = buffer[11]; 26630 subchnl->cdsc_reladdr.msf.minute = buffer[13]; 26631 subchnl->cdsc_reladdr.msf.second = buffer[14]; 26632 subchnl->cdsc_reladdr.msf.frame = buffer[15]; 26633 } 26634 kmem_free(buffer, 16); 26635 kmem_free(com, sizeof (*com)); 26636 if (ddi_copyout(subchnl, data, sizeof (struct cdrom_subchnl), flag) 26637 != 0) { 26638 return (EFAULT); 26639 } 26640 return (rval); 26641 } 26642 26643 26644 /* 26645 * Function: sr_read_tocentry() 26646 * 
26647 * Description: This routine is the driver entry point for handling CD-ROM 26648 * ioctl requests to read from the Table of Contents (TOC) 26649 * (CDROMREADTOCENTRY). This routine provides the ADR and CTRL 26650 * fields, the starting address (LBA or MSF format per the user), 26651 * and the data mode if the user specified track is a data track. 26652 * 26653 * Note: The READ HEADER (0x44) command used in this routine is 26654 * obsolete per the SCSI MMC spec but still supported in the 26655 * MT FUJI vendor spec. Most equipment is adhering to MT FUJI; 26656 * therefore the command is still implemented in this routine. 26657 * 26658 * Arguments: dev - the device 'dev_t' 26659 * data - pointer to user provided toc entry structure, 26660 * specifying the track # and the address format 26661 * (LBA or MSF). 26662 * flag - this argument is a pass through to ddi_copyxxx() 26663 * directly from the mode argument of ioctl(). 26664 * 26665 * Return Code: the code returned by sd_send_scsi_cmd() 26666 * EFAULT if ddi_copyxxx() fails 26667 * ENXIO if fail ddi_get_soft_state 26668 * EINVAL if data pointer is NULL 26669 */ 26670 26671 static int 26672 sr_read_tocentry(dev_t dev, caddr_t data, int flag) 26673 { 26674 struct sd_lun *un = NULL; 26675 struct uscsi_cmd *com; 26676 struct cdrom_tocentry toc_entry; 26677 struct cdrom_tocentry *entry = &toc_entry; 26678 caddr_t buffer; 26679 int rval; 26680 char cdb[CDB_GROUP1]; 26681 26682 if (data == NULL) { 26683 return (EINVAL); 26684 } 26685 26686 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL || 26687 (un->un_state == SD_STATE_OFFLINE)) { 26688 return (ENXIO); 26689 } 26690 26691 if (ddi_copyin(data, entry, sizeof (struct cdrom_tocentry), flag)) { 26692 return (EFAULT); 26693 } 26694 26695 /* Validate the requested track and address format */ 26696 if (!(entry->cdte_format & (CDROM_LBA | CDROM_MSF))) { 26697 return (EINVAL); 26698 } 26699 26700 if (entry->cdte_track == 0) { 26701 return (EINVAL); 26702 } 26703 26704 buffer = kmem_zalloc((size_t)12, KM_SLEEP); 26705 com = kmem_zalloc(sizeof (*com), KM_SLEEP); 26706 bzero(cdb, CDB_GROUP1); 26707 26708 cdb[0] = SCMD_READ_TOC; 26709 /* Set the MSF bit based on the user requested address format */ 26710 cdb[1] = ((entry->cdte_format & CDROM_LBA) ? 0 : 2); 26711 if (un->un_f_cfg_read_toc_trk_bcd == TRUE) { 26712 cdb[6] = BYTE_TO_BCD(entry->cdte_track); 26713 } else { 26714 cdb[6] = entry->cdte_track; 26715 } 26716 26717 /* 26718 * Bytes 7 & 8 are the 12 byte allocation length for a single entry. 
* (4 byte TOC response header + 8 byte track descriptor) 26720 */ 26721 cdb[8] = 12; 26722 com->uscsi_cdb = cdb; 26723 com->uscsi_cdblen = CDB_GROUP1; 26724 com->uscsi_bufaddr = buffer; 26725 com->uscsi_buflen = 0x0C; 26726 com->uscsi_flags = (USCSI_DIAGNOSE | USCSI_SILENT | USCSI_READ); 26727 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_SYSSPACE, 26728 SD_PATH_STANDARD); 26729 if (rval != 0) { 26730 kmem_free(buffer, 12); 26731 kmem_free(com, sizeof (*com)); 26732 return (rval); 26733 } 26734 26735 /* Process the toc entry */ 26736 entry->cdte_adr = (buffer[5] & 0xF0) >> 4; 26737 entry->cdte_ctrl = (buffer[5] & 0x0F); 26738 if (entry->cdte_format & CDROM_LBA) { 26739 entry->cdte_addr.lba = 26740 ((uchar_t)buffer[8] << 24) + ((uchar_t)buffer[9] << 16) + 26741 ((uchar_t)buffer[10] << 8) + ((uchar_t)buffer[11]); 26742 } else if (un->un_f_cfg_read_toc_addr_bcd == TRUE) { 26743 entry->cdte_addr.msf.minute = BCD_TO_BYTE(buffer[9]); 26744 entry->cdte_addr.msf.second = BCD_TO_BYTE(buffer[10]); 26745 entry->cdte_addr.msf.frame = BCD_TO_BYTE(buffer[11]); 26746 /* 26747 * Send a READ TOC command using the LBA address format to get 26748 * the LBA for the track requested so it can be used in the 26749 * READ HEADER request 26750 * 26751 * Note: The MSF bit of the READ HEADER command specifies the 26752 * output format. The block address specified in that command 26753 * must be in LBA format. 26754 */ 26755 cdb[1] = 0; 26756 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_SYSSPACE, 26757 SD_PATH_STANDARD); 26758 if (rval != 0) { 26759 kmem_free(buffer, 12); 26760 kmem_free(com, sizeof (*com)); 26761 return (rval); 26762 } 26763 } else { 26764 entry->cdte_addr.msf.minute = buffer[9]; 26765 entry->cdte_addr.msf.second = buffer[10]; 26766 entry->cdte_addr.msf.frame = buffer[11]; 26767 /* 26768 * Send a READ TOC command using the LBA address format to get 26769 * the LBA for the track requested so it can be used in the 26770 * READ HEADER request 26771 * 26772 * Note: The MSF bit of the READ HEADER command specifies the 26773 * output format. The block address specified in that command 26774 * must be in LBA format. 26775 */ 26776 cdb[1] = 0; 26777 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_SYSSPACE, 26778 SD_PATH_STANDARD); 26779 if (rval != 0) { 26780 kmem_free(buffer, 12); 26781 kmem_free(com, sizeof (*com)); 26782 return (rval); 26783 } 26784 } 26785 26786 /* 26787 * Build and send the READ HEADER command to determine the data mode of 26788 * the user specified track. 26789 */ 26790 if ((entry->cdte_ctrl & CDROM_DATA_TRACK) && 26791 (entry->cdte_track != CDROM_LEADOUT)) { 26792 bzero(cdb, CDB_GROUP1); 26793 cdb[0] = SCMD_READ_HEADER; 26794 cdb[2] = buffer[8]; 26795 cdb[3] = buffer[9]; 26796 cdb[4] = buffer[10]; 26797 cdb[5] = buffer[11]; 26798 cdb[8] = 0x08; 26799 com->uscsi_buflen = 0x08; 26800 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_SYSSPACE, 26801 SD_PATH_STANDARD); 26802 if (rval == 0) { 26803 entry->cdte_datamode = buffer[0]; 26804 } else { 26805 /* 26806 * The READ HEADER command failed; since it is 26807 * obsoleted in one spec, it's better to return 26808 * -1 for an invalid track so that we can still 26809 * receive the rest of the TOC data. 
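 *
 * Editorial note (assumption for illustration): when READ HEADER
 * does succeed, byte 0 of its 8-byte response carries the CD data
 * mode, so a Yellow Book mode 1 data track would yield
 * cdte_datamode == 0x01.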
26810 */ 26811 entry->cdte_datamode = (uchar_t)-1; 26812 } 26813 } else { 26814 entry->cdte_datamode = (uchar_t)-1; 26815 } 26816 26817 kmem_free(buffer, 12); 26818 kmem_free(com, sizeof (*com)); 26819 if (ddi_copyout(entry, data, sizeof (struct cdrom_tocentry), flag) != 0) 26820 return (EFAULT); 26821 26822 return (rval); 26823 } 26824 26825 26826 /* 26827 * Function: sr_read_tochdr() 26828 * 26829 * Description: This routine is the driver entry point for handling CD-ROM 26830 * ioctl requests to read the Table of Contents (TOC) header 26831 * (CDROMREADTOCHDR). The TOC header consists of the disk starting 26832 * and ending track numbers. 26833 * 26834 * Arguments: dev - the device 'dev_t' 26835 * data - pointer to user provided toc header structure, 26836 * specifying the starting and ending track numbers. 26837 * flag - this argument is a pass through to ddi_copyxxx() 26838 * directly from the mode argument of ioctl(). 26839 * 26840 * Return Code: the code returned by sd_send_scsi_cmd() 26841 * EFAULT if ddi_copyxxx() fails 26842 * ENXIO if fail ddi_get_soft_state 26843 * EINVAL if data pointer is NULL 26844 */ 26845 26846 static int 26847 sr_read_tochdr(dev_t dev, caddr_t data, int flag) 26848 { 26849 struct sd_lun *un; 26850 struct uscsi_cmd *com; 26851 struct cdrom_tochdr toc_header; 26852 struct cdrom_tochdr *hdr = &toc_header; 26853 char cdb[CDB_GROUP1]; 26854 int rval; 26855 caddr_t buffer; 26856 26857 if (data == NULL) { 26858 return (EINVAL); 26859 } 26860 26861 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL || 26862 (un->un_state == SD_STATE_OFFLINE)) { 26863 return (ENXIO); 26864 } 26865 26866 buffer = kmem_zalloc(4, KM_SLEEP); 26867 bzero(cdb, CDB_GROUP1); 26868 cdb[0] = SCMD_READ_TOC; 26869 /* 26870 * Specifying a track number of 0x00 in the READ TOC command indicates 26871 * that the TOC header should be returned 26872 */ 26873 cdb[6] = 0x00; 26874 /* 26875 * Bytes 7 & 8 are the 4 byte allocation length for TOC header. 26876 * (2 byte data len + 1 byte starting track # + 1 byte ending track #) 26877 */ 26878 cdb[8] = 0x04; 26879 com = kmem_zalloc(sizeof (*com), KM_SLEEP); 26880 com->uscsi_cdb = cdb; 26881 com->uscsi_cdblen = CDB_GROUP1; 26882 com->uscsi_bufaddr = buffer; 26883 com->uscsi_buflen = 0x04; 26884 com->uscsi_timeout = 300; 26885 com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT|USCSI_READ; 26886 26887 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_SYSSPACE, 26888 SD_PATH_STANDARD); 26889 if (un->un_f_cfg_read_toc_trk_bcd == TRUE) { 26890 hdr->cdth_trk0 = BCD_TO_BYTE(buffer[2]); 26891 hdr->cdth_trk1 = BCD_TO_BYTE(buffer[3]); 26892 } else { 26893 hdr->cdth_trk0 = buffer[2]; 26894 hdr->cdth_trk1 = buffer[3]; 26895 } 26896 kmem_free(buffer, 4); 26897 kmem_free(com, sizeof (*com)); 26898 if (ddi_copyout(hdr, data, sizeof (struct cdrom_tochdr), flag) != 0) { 26899 return (EFAULT); 26900 } 26901 return (rval); 26902 } 26903 26904 26905 /* 26906 * Note: The following sr_read_mode1(), sr_read_cd_mode2(), sr_read_mode2(), 26907 * sr_read_cdda(), and sr_read_cdxa() routines implement driver support for 26908 * handling CDROMREAD ioctl requests for mode 1 user data, mode 2 user data, 26909 * digital audio, and extended architecture digital audio. These modes are 26910 * defined in the IEC908 (Red Book), ISO10149 (Yellow Book), and the SCSI3 26911 * MMC specs. 
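 *
 * For orientation (editorial addition), the per-frame transfer sizes
 * used by these routines are: 2048 bytes of mode 1 user data, 2336
 * bytes of mode 2 data, 2352 bytes of raw CD-DA, 2368 bytes of CD-DA
 * plus Q subcode, and 2448 bytes of CD-DA plus all subcodes.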
26912 * 26913 * In addition to support for the various data formats these routines also 26914 * include support for devices that implement only the direct access READ 26915 * commands (0x08, 0x28), devices that implement the READ_CD commands 26916 * (0xBE, 0xD4), and devices that implement the vendor unique READ CDDA and 26917 * READ CDXA commands (0xD8, 0xDB) 26918 */ 26919 26920 /* 26921 * Function: sr_read_mode1() 26922 * 26923 * Description: This routine is the driver entry point for handling CD-ROM 26924 * ioctl read mode1 requests (CDROMREADMODE1). 26925 * 26926 * Arguments: dev - the device 'dev_t' 26927 * data - pointer to user provided cd read structure specifying 26928 * the lba buffer address and length. 26929 * flag - this argument is a pass through to ddi_copyxxx() 26930 * directly from the mode argument of ioctl(). 26931 * 26932 * Return Code: the code returned by sd_send_scsi_cmd() 26933 * EFAULT if ddi_copyxxx() fails 26934 * ENXIO if fail ddi_get_soft_state 26935 * EINVAL if data pointer is NULL 26936 */ 26937 26938 static int 26939 sr_read_mode1(dev_t dev, caddr_t data, int flag) 26940 { 26941 struct sd_lun *un; 26942 struct cdrom_read mode1_struct; 26943 struct cdrom_read *mode1 = &mode1_struct; 26944 int rval; 26945 sd_ssc_t *ssc; 26946 26947 #ifdef _MULTI_DATAMODEL 26948 /* To support ILP32 applications in an LP64 world */ 26949 struct cdrom_read32 cdrom_read32; 26950 struct cdrom_read32 *cdrd32 = &cdrom_read32; 26951 #endif /* _MULTI_DATAMODEL */ 26952 26953 if (data == NULL) { 26954 return (EINVAL); 26955 } 26956 26957 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL || 26958 (un->un_state == SD_STATE_OFFLINE)) { 26959 return (ENXIO); 26960 } 26961 26962 SD_TRACE(SD_LOG_ATTACH_DETACH, un, 26963 "sd_read_mode1: entry: un:0x%p\n", un); 26964 26965 #ifdef _MULTI_DATAMODEL 26966 switch (ddi_model_convert_from(flag & FMODELS)) { 26967 case DDI_MODEL_ILP32: 26968 if (ddi_copyin(data, cdrd32, sizeof (*cdrd32), flag) != 0) { 26969 return (EFAULT); 26970 } 26971 /* Convert the ILP32 uscsi data from the application to LP64 */ 26972 cdrom_read32tocdrom_read(cdrd32, mode1); 26973 break; 26974 case DDI_MODEL_NONE: 26975 if (ddi_copyin(data, mode1, sizeof (struct cdrom_read), flag)) { 26976 return (EFAULT); 26977 } 26978 } 26979 #else /* ! _MULTI_DATAMODEL */ 26980 if (ddi_copyin(data, mode1, sizeof (struct cdrom_read), flag)) { 26981 return (EFAULT); 26982 } 26983 #endif /* _MULTI_DATAMODEL */ 26984 26985 ssc = sd_ssc_init(un); 26986 rval = sd_send_scsi_READ(ssc, mode1->cdread_bufaddr, 26987 mode1->cdread_buflen, mode1->cdread_lba, SD_PATH_STANDARD); 26988 sd_ssc_fini(ssc); 26989 26990 SD_TRACE(SD_LOG_ATTACH_DETACH, un, 26991 "sd_read_mode1: exit: un:0x%p\n", un); 26992 26993 return (rval); 26994 } 26995 26996 26997 /* 26998 * Function: sr_read_cd_mode2() 26999 * 27000 * Description: This routine is the driver entry point for handling CD-ROM 27001 * ioctl read mode2 requests (CDROMREADMODE2) for devices that 27002 * support the READ CD (0xBE) command or the 1st generation 27003 * READ CD (0xD4) command. 27004 * 27005 * Arguments: dev - the device 'dev_t' 27006 * data - pointer to user provided cd read structure specifying 27007 * the lba buffer address and length. 27008 * flag - this argument is a pass through to ddi_copyxxx() 27009 * directly from the mode argument of ioctl(). 
27010 * 27011 * Return Code: the code returned by sd_send_scsi_cmd() 27012 * EFAULT if ddi_copyxxx() fails 27013 * ENXIO if fail ddi_get_soft_state 27014 * EINVAL if data pointer is NULL 27015 */ 27016 27017 static int 27018 sr_read_cd_mode2(dev_t dev, caddr_t data, int flag) 27019 { 27020 struct sd_lun *un; 27021 struct uscsi_cmd *com; 27022 struct cdrom_read mode2_struct; 27023 struct cdrom_read *mode2 = &mode2_struct; 27024 uchar_t cdb[CDB_GROUP5]; 27025 int nblocks; 27026 int rval; 27027 #ifdef _MULTI_DATAMODEL 27028 /* To support ILP32 applications in an LP64 world */ 27029 struct cdrom_read32 cdrom_read32; 27030 struct cdrom_read32 *cdrd32 = &cdrom_read32; 27031 #endif /* _MULTI_DATAMODEL */ 27032 27033 if (data == NULL) { 27034 return (EINVAL); 27035 } 27036 27037 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL || 27038 (un->un_state == SD_STATE_OFFLINE)) { 27039 return (ENXIO); 27040 } 27041 27042 #ifdef _MULTI_DATAMODEL 27043 switch (ddi_model_convert_from(flag & FMODELS)) { 27044 case DDI_MODEL_ILP32: 27045 if (ddi_copyin(data, cdrd32, sizeof (*cdrd32), flag) != 0) { 27046 return (EFAULT); 27047 } 27048 /* Convert the ILP32 uscsi data from the application to LP64 */ 27049 cdrom_read32tocdrom_read(cdrd32, mode2); 27050 break; 27051 case DDI_MODEL_NONE: 27052 if (ddi_copyin(data, mode2, sizeof (*mode2), flag) != 0) { 27053 return (EFAULT); 27054 } 27055 break; 27056 } 27057 27058 #else /* ! _MULTI_DATAMODEL */ 27059 if (ddi_copyin(data, mode2, sizeof (*mode2), flag) != 0) { 27060 return (EFAULT); 27061 } 27062 #endif /* _MULTI_DATAMODEL */ 27063 27064 bzero(cdb, sizeof (cdb)); 27065 if (un->un_f_cfg_read_cd_xd4 == TRUE) { 27066 /* Read command supported by 1st generation atapi drives */ 27067 cdb[0] = SCMD_READ_CDD4; 27068 } else { 27069 /* Universal CD Access Command */ 27070 cdb[0] = SCMD_READ_CD; 27071 } 27072 27073 /* 27074 * Set expected sector type to: 2336s byte, Mode 2 Yellow Book 27075 */ 27076 cdb[1] = CDROM_SECTOR_TYPE_MODE2; 27077 27078 /* set the start address */ 27079 cdb[2] = (uchar_t)((mode2->cdread_lba >> 24) & 0XFF); 27080 cdb[3] = (uchar_t)((mode2->cdread_lba >> 16) & 0XFF); 27081 cdb[4] = (uchar_t)((mode2->cdread_lba >> 8) & 0xFF); 27082 cdb[5] = (uchar_t)(mode2->cdread_lba & 0xFF); 27083 27084 /* set the transfer length */ 27085 nblocks = mode2->cdread_buflen / 2336; 27086 cdb[6] = (uchar_t)(nblocks >> 16); 27087 cdb[7] = (uchar_t)(nblocks >> 8); 27088 cdb[8] = (uchar_t)nblocks; 27089 27090 /* set the filter bits */ 27091 cdb[9] = CDROM_READ_CD_USERDATA; 27092 27093 com = kmem_zalloc(sizeof (*com), KM_SLEEP); 27094 com->uscsi_cdb = (caddr_t)cdb; 27095 com->uscsi_cdblen = sizeof (cdb); 27096 com->uscsi_bufaddr = mode2->cdread_bufaddr; 27097 com->uscsi_buflen = mode2->cdread_buflen; 27098 com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT|USCSI_READ; 27099 27100 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_USERSPACE, 27101 SD_PATH_STANDARD); 27102 kmem_free(com, sizeof (*com)); 27103 return (rval); 27104 } 27105 27106 27107 /* 27108 * Function: sr_read_mode2() 27109 * 27110 * Description: This routine is the driver entry point for handling CD-ROM 27111 * ioctl read mode2 requests (CDROMREADMODE2) for devices that 27112 * do not support the READ CD (0xBE) command. 27113 * 27114 * Arguments: dev - the device 'dev_t' 27115 * data - pointer to user provided cd read structure specifying 27116 * the lba buffer address and length. 27117 * flag - this argument is a pass through to ddi_copyxxx() 27118 * directly from the mode argument of ioctl(). 
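 *
 * Worked illustration (editorial addition): the caller's cdread_lba
 * arrives in 512-byte units and is converted to the drive's 2-Kbyte
 * based addressing by a right shift of two, so an input LBA of 100
 * becomes 25 in the CDB built below.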
27119 * 27120 * Return Code: the code returned by sd_send_scsi_cmd() 27121 * EFAULT if ddi_copyxxx() fails 27122 * ENXIO if fail ddi_get_soft_state 27123 * EINVAL if data pointer is NULL 27124 * EIO if fail to reset block size 27125 * EAGAIN if commands are in progress in the driver 27126 */ 27127 27128 static int 27129 sr_read_mode2(dev_t dev, caddr_t data, int flag) 27130 { 27131 struct sd_lun *un; 27132 struct cdrom_read mode2_struct; 27133 struct cdrom_read *mode2 = &mode2_struct; 27134 int rval; 27135 uint32_t restore_blksize; 27136 struct uscsi_cmd *com; 27137 uchar_t cdb[CDB_GROUP0]; 27138 int nblocks; 27139 27140 #ifdef _MULTI_DATAMODEL 27141 /* To support ILP32 applications in an LP64 world */ 27142 struct cdrom_read32 cdrom_read32; 27143 struct cdrom_read32 *cdrd32 = &cdrom_read32; 27144 #endif /* _MULTI_DATAMODEL */ 27145 27146 if (data == NULL) { 27147 return (EINVAL); 27148 } 27149 27150 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL || 27151 (un->un_state == SD_STATE_OFFLINE)) { 27152 return (ENXIO); 27153 } 27154 27155 /* 27156 * Because this routine will update the device and driver block size 27157 * being used we want to make sure there are no commands in progress. 27158 * If commands are in progress the user will have to try again. 27159 * 27160 * We check for 1 instead of 0 because we increment un_ncmds_in_driver 27161 * in sdioctl to protect commands from sdioctl through to the top of 27162 * sd_uscsi_strategy. See sdioctl for details. 27163 */ 27164 mutex_enter(SD_MUTEX(un)); 27165 if (un->un_ncmds_in_driver != 1) { 27166 mutex_exit(SD_MUTEX(un)); 27167 return (EAGAIN); 27168 } 27169 mutex_exit(SD_MUTEX(un)); 27170 27171 SD_TRACE(SD_LOG_ATTACH_DETACH, un, 27172 "sd_read_mode2: entry: un:0x%p\n", un); 27173 27174 #ifdef _MULTI_DATAMODEL 27175 switch (ddi_model_convert_from(flag & FMODELS)) { 27176 case DDI_MODEL_ILP32: 27177 if (ddi_copyin(data, cdrd32, sizeof (*cdrd32), flag) != 0) { 27178 return (EFAULT); 27179 } 27180 /* Convert the ILP32 uscsi data from the application to LP64 */ 27181 cdrom_read32tocdrom_read(cdrd32, mode2); 27182 break; 27183 case DDI_MODEL_NONE: 27184 if (ddi_copyin(data, mode2, sizeof (*mode2), flag) != 0) { 27185 return (EFAULT); 27186 } 27187 break; 27188 } 27189 #else /* ! 
_MULTI_DATAMODEL */ 27190 if (ddi_copyin(data, mode2, sizeof (*mode2), flag)) { 27191 return (EFAULT); 27192 } 27193 #endif /* _MULTI_DATAMODEL */ 27194 27195 /* Store the current target block size for restoration later */ 27196 restore_blksize = un->un_tgt_blocksize; 27197 27198 /* Change the device and soft state target block size to 2336 */ 27199 if (sr_sector_mode(dev, SD_MODE2_BLKSIZE) != 0) { 27200 rval = EIO; 27201 goto done; 27202 } 27203 27204 27205 bzero(cdb, sizeof (cdb)); 27206 27207 /* set READ operation */ 27208 cdb[0] = SCMD_READ; 27209 27210 /* adjust lba for 2kbyte blocks from 512 byte blocks */ 27211 mode2->cdread_lba >>= 2; 27212 27213 /* set the start address */ 27214 cdb[1] = (uchar_t)((mode2->cdread_lba >> 16) & 0X1F); 27215 cdb[2] = (uchar_t)((mode2->cdread_lba >> 8) & 0xFF); 27216 cdb[3] = (uchar_t)(mode2->cdread_lba & 0xFF); 27217 27218 /* set the transfer length */ 27219 nblocks = mode2->cdread_buflen / 2336; 27220 cdb[4] = (uchar_t)nblocks & 0xFF; 27221 27222 /* build command */ 27223 com = kmem_zalloc(sizeof (*com), KM_SLEEP); 27224 com->uscsi_cdb = (caddr_t)cdb; 27225 com->uscsi_cdblen = sizeof (cdb); 27226 com->uscsi_bufaddr = mode2->cdread_bufaddr; 27227 com->uscsi_buflen = mode2->cdread_buflen; 27228 com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT|USCSI_READ; 27229 27230 /* 27231 * Issue SCSI command with user space address for read buffer. 27232 * 27233 * This sends the command through main channel in the driver. 27234 * 27235 * Since this is accessed via an IOCTL call, we go through the 27236 * standard path, so that if the device was powered down, then 27237 * it would be 'awakened' to handle the command. 27238 */ 27239 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_USERSPACE, 27240 SD_PATH_STANDARD); 27241 27242 kmem_free(com, sizeof (*com)); 27243 27244 /* Restore the device and soft state target block size */ 27245 if (sr_sector_mode(dev, restore_blksize) != 0) { 27246 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 27247 "can't do switch back to mode 1\n"); 27248 /* 27249 * If the READ succeeded, we still need to report 27250 * an error because we failed to reset the block size 27251 */ 27252 if (rval == 0) { 27253 rval = EIO; 27254 } 27255 } 27256 27257 done: 27258 SD_TRACE(SD_LOG_ATTACH_DETACH, un, 27259 "sr_read_mode2: exit: un:0x%p\n", un); 27260 27261 return (rval); 27262 } 27263 27264 27265 /* 27266 * Function: sr_sector_mode() 27267 * 27268 * Description: This utility function is used by sr_read_mode2 to set the target 27269 * block size based on the user specified size. This is a legacy 27270 * implementation based upon a vendor specific mode page. 27271 * 27272 * Arguments: dev - the device 'dev_t' 27273 * blksize - flag indicating if block size is being set to 2336 or 27274 * 512. 
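 *
 * Worked illustration (editorial addition, keyed to the select buffer
 * built below): the requested size lands big-endian in select[10] and
 * select[11], so 2336 (0x920) is written as select[10] = 0x09,
 * select[11] = 0x20, and the low bit of select[14] is set only when
 * the size is SD_MODE2_BLKSIZE.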
27275 * 27276 * Return Code: the code returned by sd_send_scsi_cmd() 27277 * EFAULT if ddi_copyxxx() fails 27278 * ENXIO if fail ddi_get_soft_state 27279 * EINVAL if data pointer is NULL 27280 */ 27281 27282 static int 27283 sr_sector_mode(dev_t dev, uint32_t blksize) 27284 { 27285 struct sd_lun *un; 27286 uchar_t *sense; 27287 uchar_t *select; 27288 int rval; 27289 sd_ssc_t *ssc; 27290 27291 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL || 27292 (un->un_state == SD_STATE_OFFLINE)) { 27293 return (ENXIO); 27294 } 27295 27296 sense = kmem_zalloc(20, KM_SLEEP); 27297 27298 /* Note: This is a vendor specific mode page (0x81) */ 27299 ssc = sd_ssc_init(un); 27300 rval = sd_send_scsi_MODE_SENSE(ssc, CDB_GROUP0, sense, 20, 0x81, 27301 SD_PATH_STANDARD); 27302 sd_ssc_fini(ssc); 27303 if (rval != 0) { 27304 SD_ERROR(SD_LOG_IOCTL_RMMEDIA, un, 27305 "sr_sector_mode: Mode Sense failed\n"); 27306 kmem_free(sense, 20); 27307 return (rval); 27308 } 27309 select = kmem_zalloc(20, KM_SLEEP); 27310 select[3] = 0x08; 27311 select[10] = ((blksize >> 8) & 0xff); 27312 select[11] = (blksize & 0xff); 27313 select[12] = 0x01; 27314 select[13] = 0x06; 27315 select[14] = sense[14]; 27316 select[15] = sense[15]; 27317 if (blksize == SD_MODE2_BLKSIZE) { 27318 select[14] |= 0x01; 27319 } 27320 27321 ssc = sd_ssc_init(un); 27322 rval = sd_send_scsi_MODE_SELECT(ssc, CDB_GROUP0, select, 20, 27323 SD_DONTSAVE_PAGE, SD_PATH_STANDARD); 27324 sd_ssc_fini(ssc); 27325 if (rval != 0) { 27326 SD_ERROR(SD_LOG_IOCTL_RMMEDIA, un, 27327 "sr_sector_mode: Mode Select failed\n"); 27328 } else { 27329 /* 27330 * Only update the softstate block size if we successfully 27331 * changed the device block mode. 27332 */ 27333 mutex_enter(SD_MUTEX(un)); 27334 sd_update_block_info(un, blksize, 0); 27335 mutex_exit(SD_MUTEX(un)); 27336 } 27337 kmem_free(sense, 20); 27338 kmem_free(select, 20); 27339 return (rval); 27340 } 27341 27342 27343 /* 27344 * Function: sr_read_cdda() 27345 * 27346 * Description: This routine is the driver entry point for handling CD-ROM 27347 * ioctl requests to return CD-DA or subcode data. (CDROMCDDA) If 27348 * the target supports CDDA these requests are handled via a vendor 27349 * specific command (0xD8) If the target does not support CDDA 27350 * these requests are handled via the READ CD command (0xBE). 27351 * 27352 * Arguments: dev - the device 'dev_t' 27353 * data - pointer to user provided CD-DA structure specifying 27354 * the track starting address, transfer length, and 27355 * subcode options. 27356 * flag - this argument is a pass through to ddi_copyxxx() 27357 * directly from the mode argument of ioctl(). 
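 *
 * Worked illustration (editorial addition): the transfer length is
 * cdda_length frames times the per-frame size implied by the subcode
 * selection, so a request for 10 frames with CDROM_DA_SUBQ moves
 * 10 * 2368 = 23680 bytes.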
27358 * 27359 * Return Code: the code returned by sd_send_scsi_cmd() 27360 * EFAULT if ddi_copyxxx() fails 27361 * ENXIO if fail ddi_get_soft_state 27362 * EINVAL if invalid arguments are provided 27363 * ENOTTY 27364 */ 27365 27366 static int 27367 sr_read_cdda(dev_t dev, caddr_t data, int flag) 27368 { 27369 struct sd_lun *un; 27370 struct uscsi_cmd *com; 27371 struct cdrom_cdda *cdda; 27372 int rval; 27373 size_t buflen; 27374 char cdb[CDB_GROUP5]; 27375 27376 #ifdef _MULTI_DATAMODEL 27377 /* To support ILP32 applications in an LP64 world */ 27378 struct cdrom_cdda32 cdrom_cdda32; 27379 struct cdrom_cdda32 *cdda32 = &cdrom_cdda32; 27380 #endif /* _MULTI_DATAMODEL */ 27381 27382 if (data == NULL) { 27383 return (EINVAL); 27384 } 27385 27386 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 27387 return (ENXIO); 27388 } 27389 27390 cdda = kmem_zalloc(sizeof (struct cdrom_cdda), KM_SLEEP); 27391 27392 #ifdef _MULTI_DATAMODEL 27393 switch (ddi_model_convert_from(flag & FMODELS)) { 27394 case DDI_MODEL_ILP32: 27395 if (ddi_copyin(data, cdda32, sizeof (*cdda32), flag)) { 27396 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 27397 "sr_read_cdda: ddi_copyin Failed\n"); 27398 kmem_free(cdda, sizeof (struct cdrom_cdda)); 27399 return (EFAULT); 27400 } 27401 /* Convert the ILP32 uscsi data from the application to LP64 */ 27402 cdrom_cdda32tocdrom_cdda(cdda32, cdda); 27403 break; 27404 case DDI_MODEL_NONE: 27405 if (ddi_copyin(data, cdda, sizeof (struct cdrom_cdda), flag)) { 27406 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 27407 "sr_read_cdda: ddi_copyin Failed\n"); 27408 kmem_free(cdda, sizeof (struct cdrom_cdda)); 27409 return (EFAULT); 27410 } 27411 break; 27412 } 27413 #else /* ! _MULTI_DATAMODEL */ 27414 if (ddi_copyin(data, cdda, sizeof (struct cdrom_cdda), flag)) { 27415 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 27416 "sr_read_cdda: ddi_copyin Failed\n"); 27417 kmem_free(cdda, sizeof (struct cdrom_cdda)); 27418 return (EFAULT); 27419 } 27420 #endif /* _MULTI_DATAMODEL */ 27421 27422 /* 27423 * Since MMC-2 expects max 3 bytes for length, check if the 27424 * length input is greater than 3 bytes 27425 */ 27426 if ((cdda->cdda_length & 0xFF000000) != 0) { 27427 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, "sr_read_cdda: " 27428 "cdrom transfer length too large: %d (limit %d)\n", 27429 cdda->cdda_length, 0xFFFFFF); 27430 kmem_free(cdda, sizeof (struct cdrom_cdda)); 27431 return (EINVAL); 27432 } 27433 27434 switch (cdda->cdda_subcode) { 27435 case CDROM_DA_NO_SUBCODE: 27436 buflen = CDROM_BLK_2352 * cdda->cdda_length; 27437 break; 27438 case CDROM_DA_SUBQ: 27439 buflen = CDROM_BLK_2368 * cdda->cdda_length; 27440 break; 27441 case CDROM_DA_ALL_SUBCODE: 27442 buflen = CDROM_BLK_2448 * cdda->cdda_length; 27443 break; 27444 case CDROM_DA_SUBCODE_ONLY: 27445 buflen = CDROM_BLK_SUBCODE * cdda->cdda_length; 27446 break; 27447 default: 27448 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 27449 "sr_read_cdda: Subcode '0x%x' Not Supported\n", 27450 cdda->cdda_subcode); 27451 kmem_free(cdda, sizeof (struct cdrom_cdda)); 27452 return (EINVAL); 27453 } 27454 27455 /* Build and send the command */ 27456 com = kmem_zalloc(sizeof (*com), KM_SLEEP); 27457 bzero(cdb, CDB_GROUP5); 27458 27459 if (un->un_f_cfg_cdda == TRUE) { 27460 cdb[0] = (char)SCMD_READ_CD; 27461 cdb[1] = 0x04; 27462 cdb[2] = (((cdda->cdda_addr) & 0xff000000) >> 24); 27463 cdb[3] = (((cdda->cdda_addr) & 0x00ff0000) >> 16); 27464 cdb[4] = (((cdda->cdda_addr) & 0x0000ff00) >> 8); 27465 cdb[5] = ((cdda->cdda_addr) & 0x000000ff); 27466 
cdb[6] = (((cdda->cdda_length) & 0x00ff0000) >> 16); 27467 cdb[7] = (((cdda->cdda_length) & 0x0000ff00) >> 8); 27468 cdb[8] = ((cdda->cdda_length) & 0x000000ff); 27469 cdb[9] = 0x10; 27470 switch (cdda->cdda_subcode) { 27471 case CDROM_DA_NO_SUBCODE : 27472 cdb[10] = 0x0; 27473 break; 27474 case CDROM_DA_SUBQ : 27475 cdb[10] = 0x2; 27476 break; 27477 case CDROM_DA_ALL_SUBCODE : 27478 cdb[10] = 0x1; 27479 break; 27480 case CDROM_DA_SUBCODE_ONLY : 27481 /* FALLTHROUGH */ 27482 default : 27483 kmem_free(cdda, sizeof (struct cdrom_cdda)); 27484 kmem_free(com, sizeof (*com)); 27485 return (ENOTTY); 27486 } 27487 } else { 27488 cdb[0] = (char)SCMD_READ_CDDA; 27489 cdb[2] = (((cdda->cdda_addr) & 0xff000000) >> 24); 27490 cdb[3] = (((cdda->cdda_addr) & 0x00ff0000) >> 16); 27491 cdb[4] = (((cdda->cdda_addr) & 0x0000ff00) >> 8); 27492 cdb[5] = ((cdda->cdda_addr) & 0x000000ff); 27493 cdb[6] = (((cdda->cdda_length) & 0xff000000) >> 24); 27494 cdb[7] = (((cdda->cdda_length) & 0x00ff0000) >> 16); 27495 cdb[8] = (((cdda->cdda_length) & 0x0000ff00) >> 8); 27496 cdb[9] = ((cdda->cdda_length) & 0x000000ff); 27497 cdb[10] = cdda->cdda_subcode; 27498 } 27499 27500 com->uscsi_cdb = cdb; 27501 com->uscsi_cdblen = CDB_GROUP5; 27502 com->uscsi_bufaddr = (caddr_t)cdda->cdda_data; 27503 com->uscsi_buflen = buflen; 27504 com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT|USCSI_READ; 27505 27506 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_USERSPACE, 27507 SD_PATH_STANDARD); 27508 27509 kmem_free(cdda, sizeof (struct cdrom_cdda)); 27510 kmem_free(com, sizeof (*com)); 27511 return (rval); 27512 } 27513 27514 27515 /* 27516 * Function: sr_read_cdxa() 27517 * 27518 * Description: This routine is the driver entry point for handling CD-ROM 27519 * ioctl requests to return CD-XA (Extended Architecture) data. 27520 * (CDROMCDXA). 27521 * 27522 * Arguments: dev - the device 'dev_t' 27523 * data - pointer to user provided CD-XA structure specifying 27524 * the data starting address, transfer length, and format 27525 * flag - this argument is a pass through to ddi_copyxxx() 27526 * directly from the mode argument of ioctl(). 27527 * 27528 * Return Code: the code returned by sd_send_scsi_cmd() 27529 * EFAULT if ddi_copyxxx() fails 27530 * ENXIO if fail ddi_get_soft_state 27531 * EINVAL if data pointer is NULL 27532 */ 27533 27534 static int 27535 sr_read_cdxa(dev_t dev, caddr_t data, int flag) 27536 { 27537 struct sd_lun *un; 27538 struct uscsi_cmd *com; 27539 struct cdrom_cdxa *cdxa; 27540 int rval; 27541 size_t buflen; 27542 char cdb[CDB_GROUP5]; 27543 uchar_t read_flags; 27544 27545 #ifdef _MULTI_DATAMODEL 27546 /* To support ILP32 applications in an LP64 world */ 27547 struct cdrom_cdxa32 cdrom_cdxa32; 27548 struct cdrom_cdxa32 *cdxa32 = &cdrom_cdxa32; 27549 #endif /* _MULTI_DATAMODEL */ 27550 27551 if (data == NULL) { 27552 return (EINVAL); 27553 } 27554 27555 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 27556 return (ENXIO); 27557 } 27558 27559 cdxa = kmem_zalloc(sizeof (struct cdrom_cdxa), KM_SLEEP); 27560 27561 #ifdef _MULTI_DATAMODEL 27562 switch (ddi_model_convert_from(flag & FMODELS)) { 27563 case DDI_MODEL_ILP32: 27564 if (ddi_copyin(data, cdxa32, sizeof (*cdxa32), flag)) { 27565 kmem_free(cdxa, sizeof (struct cdrom_cdxa)); 27566 return (EFAULT); 27567 } 27568 /* 27569 * Convert the ILP32 uscsi data from the 27570 * application to LP64 for internal use. 
27571 */ 27572 cdrom_cdxa32tocdrom_cdxa(cdxa32, cdxa); 27573 break; 27574 case DDI_MODEL_NONE: 27575 if (ddi_copyin(data, cdxa, sizeof (struct cdrom_cdxa), flag)) { 27576 kmem_free(cdxa, sizeof (struct cdrom_cdxa)); 27577 return (EFAULT); 27578 } 27579 break; 27580 } 27581 #else /* ! _MULTI_DATAMODEL */ 27582 if (ddi_copyin(data, cdxa, sizeof (struct cdrom_cdxa), flag)) { 27583 kmem_free(cdxa, sizeof (struct cdrom_cdxa)); 27584 return (EFAULT); 27585 } 27586 #endif /* _MULTI_DATAMODEL */ 27587 27588 /* 27589 * Since MMC-2 expects max 3 bytes for length, check if the 27590 * length input is greater than 3 bytes 27591 */ 27592 if ((cdxa->cdxa_length & 0xFF000000) != 0) { 27593 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, "sr_read_cdxa: " 27594 "cdrom transfer length too large: %d (limit %d)\n", 27595 cdxa->cdxa_length, 0xFFFFFF); 27596 kmem_free(cdxa, sizeof (struct cdrom_cdxa)); 27597 return (EINVAL); 27598 } 27599 27600 switch (cdxa->cdxa_format) { 27601 case CDROM_XA_DATA: 27602 buflen = CDROM_BLK_2048 * cdxa->cdxa_length; 27603 read_flags = 0x10; 27604 break; 27605 case CDROM_XA_SECTOR_DATA: 27606 buflen = CDROM_BLK_2352 * cdxa->cdxa_length; 27607 read_flags = 0xf8; 27608 break; 27609 case CDROM_XA_DATA_W_ERROR: 27610 buflen = CDROM_BLK_2646 * cdxa->cdxa_length; 27611 read_flags = 0xfc; 27612 break; 27613 default: 27614 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 27615 "sr_read_cdxa: Format '0x%x' Not Supported\n", 27616 cdxa->cdxa_format); 27617 kmem_free(cdxa, sizeof (struct cdrom_cdxa)); 27618 return (EINVAL); 27619 } 27620 27621 com = kmem_zalloc(sizeof (*com), KM_SLEEP); 27622 bzero(cdb, CDB_GROUP5); 27623 if (un->un_f_mmc_cap == TRUE) { 27624 cdb[0] = (char)SCMD_READ_CD; 27625 cdb[2] = (((cdxa->cdxa_addr) & 0xff000000) >> 24); 27626 cdb[3] = (((cdxa->cdxa_addr) & 0x00ff0000) >> 16); 27627 cdb[4] = (((cdxa->cdxa_addr) & 0x0000ff00) >> 8); 27628 cdb[5] = ((cdxa->cdxa_addr) & 0x000000ff); 27629 cdb[6] = (((cdxa->cdxa_length) & 0x00ff0000) >> 16); 27630 cdb[7] = (((cdxa->cdxa_length) & 0x0000ff00) >> 8); 27631 cdb[8] = ((cdxa->cdxa_length) & 0x000000ff); 27632 cdb[9] = (char)read_flags; 27633 } else { 27634 /* 27635 * Note: A vendor specific command (0xDB) is being used her to 27636 * request a read of all subcodes. 
27637 */ 27638 cdb[0] = (char)SCMD_READ_CDXA; 27639 cdb[2] = (((cdxa->cdxa_addr) & 0xff000000) >> 24); 27640 cdb[3] = (((cdxa->cdxa_addr) & 0x00ff0000) >> 16); 27641 cdb[4] = (((cdxa->cdxa_addr) & 0x0000ff00) >> 8); 27642 cdb[5] = ((cdxa->cdxa_addr) & 0x000000ff); 27643 cdb[6] = (((cdxa->cdxa_length) & 0xff000000) >> 24); 27644 cdb[7] = (((cdxa->cdxa_length) & 0x00ff0000) >> 16); 27645 cdb[8] = (((cdxa->cdxa_length) & 0x0000ff00) >> 8); 27646 cdb[9] = ((cdxa->cdxa_length) & 0x000000ff); 27647 cdb[10] = cdxa->cdxa_format; 27648 } 27649 com->uscsi_cdb = cdb; 27650 com->uscsi_cdblen = CDB_GROUP5; 27651 com->uscsi_bufaddr = (caddr_t)cdxa->cdxa_data; 27652 com->uscsi_buflen = buflen; 27653 com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT|USCSI_READ; 27654 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_USERSPACE, 27655 SD_PATH_STANDARD); 27656 kmem_free(cdxa, sizeof (struct cdrom_cdxa)); 27657 kmem_free(com, sizeof (*com)); 27658 return (rval); 27659 } 27660 27661 27662 /* 27663 * Function: sr_eject() 27664 * 27665 * Description: This routine is the driver entry point for handling CD-ROM 27666 * eject ioctl requests (FDEJECT, DKIOCEJECT, CDROMEJECT) 27667 * 27668 * Arguments: dev - the device 'dev_t' 27669 * 27670 * Return Code: the code returned by sd_send_scsi_cmd() 27671 */ 27672 27673 static int 27674 sr_eject(dev_t dev) 27675 { 27676 struct sd_lun *un; 27677 int rval; 27678 sd_ssc_t *ssc; 27679 27680 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL || 27681 (un->un_state == SD_STATE_OFFLINE)) { 27682 return (ENXIO); 27683 } 27684 27685 /* 27686 * To prevent race conditions with the eject 27687 * command, keep track of an eject command as 27688 * it progresses. If we are already handling 27689 * an eject command in the driver for the given 27690 * unit and another request to eject is received 27691 * immediately return EAGAIN so we don't lose 27692 * the command if the current eject command fails. 27693 */ 27694 mutex_enter(SD_MUTEX(un)); 27695 if (un->un_f_ejecting == TRUE) { 27696 mutex_exit(SD_MUTEX(un)); 27697 return (EAGAIN); 27698 } 27699 un->un_f_ejecting = TRUE; 27700 mutex_exit(SD_MUTEX(un)); 27701 27702 ssc = sd_ssc_init(un); 27703 rval = sd_send_scsi_DOORLOCK(ssc, SD_REMOVAL_ALLOW, 27704 SD_PATH_STANDARD); 27705 sd_ssc_fini(ssc); 27706 27707 if (rval != 0) { 27708 mutex_enter(SD_MUTEX(un)); 27709 un->un_f_ejecting = FALSE; 27710 mutex_exit(SD_MUTEX(un)); 27711 return (rval); 27712 } 27713 27714 ssc = sd_ssc_init(un); 27715 rval = sd_send_scsi_START_STOP_UNIT(ssc, SD_TARGET_EJECT, 27716 SD_PATH_STANDARD); 27717 sd_ssc_fini(ssc); 27718 27719 if (rval == 0) { 27720 mutex_enter(SD_MUTEX(un)); 27721 sr_ejected(un); 27722 un->un_mediastate = DKIO_EJECTED; 27723 un->un_f_ejecting = FALSE; 27724 cv_broadcast(&un->un_state_cv); 27725 mutex_exit(SD_MUTEX(un)); 27726 } else { 27727 mutex_enter(SD_MUTEX(un)); 27728 un->un_f_ejecting = FALSE; 27729 mutex_exit(SD_MUTEX(un)); 27730 } 27731 return (rval); 27732 } 27733 27734 27735 /* 27736 * Function: sr_ejected() 27737 * 27738 * Description: This routine updates the soft state structure to invalidate the 27739 * geometry information after the media has been ejected or a 27740 * media eject has been detected. 
27741 * 27742 * Arguments: un - driver soft state (unit) structure 27743 */ 27744 27745 static void 27746 sr_ejected(struct sd_lun *un) 27747 { 27748 struct sd_errstats *stp; 27749 27750 ASSERT(un != NULL); 27751 ASSERT(mutex_owned(SD_MUTEX(un))); 27752 27753 un->un_f_blockcount_is_valid = FALSE; 27754 un->un_f_tgt_blocksize_is_valid = FALSE; 27755 mutex_exit(SD_MUTEX(un)); 27756 cmlb_invalidate(un->un_cmlbhandle, (void *)SD_PATH_DIRECT_PRIORITY); 27757 mutex_enter(SD_MUTEX(un)); 27758 27759 if (un->un_errstats != NULL) { 27760 stp = (struct sd_errstats *)un->un_errstats->ks_data; 27761 stp->sd_capacity.value.ui64 = 0; 27762 } 27763 } 27764 27765 27766 /* 27767 * Function: sr_check_wp() 27768 * 27769 * Description: This routine checks the write protection of a removable 27770 * media disk and hotpluggable devices via the write protect bit of 27771 * the Mode Page Header device specific field. Some devices choke 27772 * on unsupported mode page. In order to workaround this issue, 27773 * this routine has been implemented to use 0x3f mode page(request 27774 * for all pages) for all device types. 27775 * 27776 * Arguments: dev - the device 'dev_t' 27777 * 27778 * Return Code: int indicating if the device is write protected (1) or not (0) 27779 * 27780 * Context: Kernel thread. 27781 * 27782 */ 27783 27784 static int 27785 sr_check_wp(dev_t dev) 27786 { 27787 struct sd_lun *un; 27788 uchar_t device_specific; 27789 uchar_t *sense; 27790 int hdrlen; 27791 int rval = FALSE; 27792 int status; 27793 sd_ssc_t *ssc; 27794 27795 /* 27796 * Note: The return codes for this routine should be reworked to 27797 * properly handle the case of a NULL softstate. 27798 */ 27799 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 27800 return (FALSE); 27801 } 27802 27803 if (un->un_f_cfg_is_atapi == TRUE) { 27804 /* 27805 * The mode page contents are not required; set the allocation 27806 * length for the mode page header only 27807 */ 27808 hdrlen = MODE_HEADER_LENGTH_GRP2; 27809 sense = kmem_zalloc(hdrlen, KM_SLEEP); 27810 ssc = sd_ssc_init(un); 27811 status = sd_send_scsi_MODE_SENSE(ssc, CDB_GROUP1, sense, hdrlen, 27812 MODEPAGE_ALLPAGES, SD_PATH_STANDARD); 27813 sd_ssc_fini(ssc); 27814 if (status != 0) 27815 goto err_exit; 27816 device_specific = 27817 ((struct mode_header_grp2 *)sense)->device_specific; 27818 } else { 27819 hdrlen = MODE_HEADER_LENGTH; 27820 sense = kmem_zalloc(hdrlen, KM_SLEEP); 27821 ssc = sd_ssc_init(un); 27822 status = sd_send_scsi_MODE_SENSE(ssc, CDB_GROUP0, sense, hdrlen, 27823 MODEPAGE_ALLPAGES, SD_PATH_STANDARD); 27824 sd_ssc_fini(ssc); 27825 if (status != 0) 27826 goto err_exit; 27827 device_specific = 27828 ((struct mode_header *)sense)->device_specific; 27829 } 27830 27831 27832 /* 27833 * Write protect mode sense failed; not all disks 27834 * understand this query. Return FALSE assuming that 27835 * these devices are not writable. 27836 */ 27837 if (device_specific & WRITE_PROTECT) { 27838 rval = TRUE; 27839 } 27840 27841 err_exit: 27842 kmem_free(sense, hdrlen); 27843 return (rval); 27844 } 27845 27846 /* 27847 * Function: sr_volume_ctrl() 27848 * 27849 * Description: This routine is the driver entry point for handling CD-ROM 27850 * audio output volume ioctl requests. (CDROMVOLCTRL) 27851 * 27852 * Arguments: dev - the device 'dev_t' 27853 * data - pointer to user audio volume control structure 27854 * flag - this argument is a pass through to ddi_copyxxx() 27855 * directly from the mode argument of ioctl(). 
27856 * 27857 * Return Code: the code returned by sd_send_scsi_cmd() 27858 * EFAULT if ddi_copyxxx() fails 27859 * ENXIO if fail ddi_get_soft_state 27860 * EINVAL if data pointer is NULL 27861 * 27862 */ 27863 27864 static int 27865 sr_volume_ctrl(dev_t dev, caddr_t data, int flag) 27866 { 27867 struct sd_lun *un; 27868 struct cdrom_volctrl volume; 27869 struct cdrom_volctrl *vol = &volume; 27870 uchar_t *sense_page; 27871 uchar_t *select_page; 27872 uchar_t *sense; 27873 uchar_t *select; 27874 int sense_buflen; 27875 int select_buflen; 27876 int rval; 27877 sd_ssc_t *ssc; 27878 27879 if (data == NULL) { 27880 return (EINVAL); 27881 } 27882 27883 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL || 27884 (un->un_state == SD_STATE_OFFLINE)) { 27885 return (ENXIO); 27886 } 27887 27888 if (ddi_copyin(data, vol, sizeof (struct cdrom_volctrl), flag)) { 27889 return (EFAULT); 27890 } 27891 27892 if ((un->un_f_cfg_is_atapi == TRUE) || (un->un_f_mmc_cap == TRUE)) { 27893 struct mode_header_grp2 *sense_mhp; 27894 struct mode_header_grp2 *select_mhp; 27895 int bd_len; 27896 27897 sense_buflen = MODE_PARAM_LENGTH_GRP2 + MODEPAGE_AUDIO_CTRL_LEN; 27898 select_buflen = MODE_HEADER_LENGTH_GRP2 + 27899 MODEPAGE_AUDIO_CTRL_LEN; 27900 sense = kmem_zalloc(sense_buflen, KM_SLEEP); 27901 select = kmem_zalloc(select_buflen, KM_SLEEP); 27902 ssc = sd_ssc_init(un); 27903 rval = sd_send_scsi_MODE_SENSE(ssc, CDB_GROUP1, sense, 27904 sense_buflen, MODEPAGE_AUDIO_CTRL, 27905 SD_PATH_STANDARD); 27906 sd_ssc_fini(ssc); 27907 27908 if (rval != 0) { 27909 SD_ERROR(SD_LOG_IOCTL_RMMEDIA, un, 27910 "sr_volume_ctrl: Mode Sense Failed\n"); 27911 kmem_free(sense, sense_buflen); 27912 kmem_free(select, select_buflen); 27913 return (rval); 27914 } 27915 sense_mhp = (struct mode_header_grp2 *)sense; 27916 select_mhp = (struct mode_header_grp2 *)select; 27917 bd_len = (sense_mhp->bdesc_length_hi << 8) | 27918 sense_mhp->bdesc_length_lo; 27919 if (bd_len > MODE_BLK_DESC_LENGTH) { 27920 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 27921 "sr_volume_ctrl: Mode Sense returned invalid " 27922 "block descriptor length\n"); 27923 kmem_free(sense, sense_buflen); 27924 kmem_free(select, select_buflen); 27925 return (EIO); 27926 } 27927 sense_page = (uchar_t *) 27928 (sense + MODE_HEADER_LENGTH_GRP2 + bd_len); 27929 select_page = (uchar_t *)(select + MODE_HEADER_LENGTH_GRP2); 27930 select_mhp->length_msb = 0; 27931 select_mhp->length_lsb = 0; 27932 select_mhp->bdesc_length_hi = 0; 27933 select_mhp->bdesc_length_lo = 0; 27934 } else { 27935 struct mode_header *sense_mhp, *select_mhp; 27936 27937 sense_buflen = MODE_PARAM_LENGTH + MODEPAGE_AUDIO_CTRL_LEN; 27938 select_buflen = MODE_HEADER_LENGTH + MODEPAGE_AUDIO_CTRL_LEN; 27939 sense = kmem_zalloc(sense_buflen, KM_SLEEP); 27940 select = kmem_zalloc(select_buflen, KM_SLEEP); 27941 ssc = sd_ssc_init(un); 27942 rval = sd_send_scsi_MODE_SENSE(ssc, CDB_GROUP0, sense, 27943 sense_buflen, MODEPAGE_AUDIO_CTRL, 27944 SD_PATH_STANDARD); 27945 sd_ssc_fini(ssc); 27946 27947 if (rval != 0) { 27948 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 27949 "sr_volume_ctrl: Mode Sense Failed\n"); 27950 kmem_free(sense, sense_buflen); 27951 kmem_free(select, select_buflen); 27952 return (rval); 27953 } 27954 sense_mhp = (struct mode_header *)sense; 27955 select_mhp = (struct mode_header *)select; 27956 if (sense_mhp->bdesc_length > MODE_BLK_DESC_LENGTH) { 27957 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 27958 "sr_volume_ctrl: Mode Sense returned invalid " 27959 "block descriptor length\n"); 27960 
kmem_free(sense, sense_buflen); 27961 kmem_free(select, select_buflen); 27962 return (EIO); 27963 } 27964 sense_page = (uchar_t *) 27965 (sense + MODE_HEADER_LENGTH + sense_mhp->bdesc_length); 27966 select_page = (uchar_t *)(select + MODE_HEADER_LENGTH); 27967 select_mhp->length = 0; 27968 select_mhp->bdesc_length = 0; 27969 } 27970 /* 27971 * Note: An audio control data structure could be created and overlayed 27972 * on the following in place of the array indexing method implemented. 27973 */ 27974 27975 /* Build the select data for the user volume data */ 27976 select_page[0] = MODEPAGE_AUDIO_CTRL; 27977 select_page[1] = 0xE; 27978 /* Set the immediate bit */ 27979 select_page[2] = 0x04; 27980 /* Zero out reserved fields */ 27981 select_page[3] = 0x00; 27982 select_page[4] = 0x00; 27983 /* Return sense data for fields not to be modified */ 27984 select_page[5] = sense_page[5]; 27985 select_page[6] = sense_page[6]; 27986 select_page[7] = sense_page[7]; 27987 /* Set the user specified volume levels for channel 0 and 1 */ 27988 select_page[8] = 0x01; 27989 select_page[9] = vol->channel0; 27990 select_page[10] = 0x02; 27991 select_page[11] = vol->channel1; 27992 /* Channel 2 and 3 are currently unsupported so return the sense data */ 27993 select_page[12] = sense_page[12]; 27994 select_page[13] = sense_page[13]; 27995 select_page[14] = sense_page[14]; 27996 select_page[15] = sense_page[15]; 27997 27998 ssc = sd_ssc_init(un); 27999 if ((un->un_f_cfg_is_atapi == TRUE) || (un->un_f_mmc_cap == TRUE)) { 28000 rval = sd_send_scsi_MODE_SELECT(ssc, CDB_GROUP1, select, 28001 select_buflen, SD_DONTSAVE_PAGE, SD_PATH_STANDARD); 28002 } else { 28003 rval = sd_send_scsi_MODE_SELECT(ssc, CDB_GROUP0, select, 28004 select_buflen, SD_DONTSAVE_PAGE, SD_PATH_STANDARD); 28005 } 28006 sd_ssc_fini(ssc); 28007 28008 kmem_free(sense, sense_buflen); 28009 kmem_free(select, select_buflen); 28010 return (rval); 28011 } 28012 28013 28014 /* 28015 * Function: sr_read_sony_session_offset() 28016 * 28017 * Description: This routine is the driver entry point for handling CD-ROM 28018 * ioctl requests for session offset information. (CDROMREADOFFSET) 28019 * The address of the first track in the last session of a 28020 * multi-session CD-ROM is returned 28021 * 28022 * Note: This routine uses a vendor specific key value in the 28023 * command control field without implementing any vendor check here 28024 * or in the ioctl routine. 28025 * 28026 * Arguments: dev - the device 'dev_t' 28027 * data - pointer to an int to hold the requested address 28028 * flag - this argument is a pass through to ddi_copyxxx() 28029 * directly from the mode argument of ioctl(). 28030 * 28031 * Return Code: the code returned by sd_send_scsi_cmd() 28032 * EFAULT if ddi_copyxxx() fails 28033 * ENXIO if fail ddi_get_soft_state 28034 * EINVAL if data pointer is NULL 28035 */ 28036 28037 static int 28038 sr_read_sony_session_offset(dev_t dev, caddr_t data, int flag) 28039 { 28040 struct sd_lun *un; 28041 struct uscsi_cmd *com; 28042 caddr_t buffer; 28043 char cdb[CDB_GROUP1]; 28044 int session_offset = 0; 28045 int rval; 28046 28047 if (data == NULL) { 28048 return (EINVAL); 28049 } 28050 28051 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL || 28052 (un->un_state == SD_STATE_OFFLINE)) { 28053 return (ENXIO); 28054 } 28055 28056 buffer = kmem_zalloc((size_t)SONY_SESSION_OFFSET_LEN, KM_SLEEP); 28057 bzero(cdb, CDB_GROUP1); 28058 cdb[0] = SCMD_READ_TOC; 28059 /* 28060 * Bytes 7 & 8 are the 12 byte allocation length for a single entry. 
28061 * (4 byte TOC response header + 8 byte response data) 28062 */ 28063 cdb[8] = SONY_SESSION_OFFSET_LEN; 28064 /* Byte 9 is the control byte. A vendor specific value is used */ 28065 cdb[9] = SONY_SESSION_OFFSET_KEY; 28066 com = kmem_zalloc(sizeof (*com), KM_SLEEP); 28067 com->uscsi_cdb = cdb; 28068 com->uscsi_cdblen = CDB_GROUP1; 28069 com->uscsi_bufaddr = buffer; 28070 com->uscsi_buflen = SONY_SESSION_OFFSET_LEN; 28071 com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT|USCSI_READ; 28072 28073 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_SYSSPACE, 28074 SD_PATH_STANDARD); 28075 if (rval != 0) { 28076 kmem_free(buffer, SONY_SESSION_OFFSET_LEN); 28077 kmem_free(com, sizeof (*com)); 28078 return (rval); 28079 } 28080 if (buffer[1] == SONY_SESSION_OFFSET_VALID) { 28081 session_offset = 28082 ((uchar_t)buffer[8] << 24) + ((uchar_t)buffer[9] << 16) + 28083 ((uchar_t)buffer[10] << 8) + ((uchar_t)buffer[11]); 28084 /* 28085 * Offset returned offset in current lbasize block's. Convert to 28086 * 2k block's to return to the user 28087 */ 28088 if (un->un_tgt_blocksize == CDROM_BLK_512) { 28089 session_offset >>= 2; 28090 } else if (un->un_tgt_blocksize == CDROM_BLK_1024) { 28091 session_offset >>= 1; 28092 } 28093 } 28094 28095 if (ddi_copyout(&session_offset, data, sizeof (int), flag) != 0) { 28096 rval = EFAULT; 28097 } 28098 28099 kmem_free(buffer, SONY_SESSION_OFFSET_LEN); 28100 kmem_free(com, sizeof (*com)); 28101 return (rval); 28102 } 28103 28104 28105 /* 28106 * Function: sd_wm_cache_constructor() 28107 * 28108 * Description: Cache Constructor for the wmap cache for the read/modify/write 28109 * devices. 28110 * 28111 * Arguments: wm - A pointer to the sd_w_map to be initialized. 28112 * un - sd_lun structure for the device. 28113 * flag - the km flags passed to constructor 28114 * 28115 * Return Code: 0 on success. 28116 * -1 on failure. 28117 */ 28118 28119 /*ARGSUSED*/ 28120 static int 28121 sd_wm_cache_constructor(void *wm, void *un, int flags) 28122 { 28123 bzero(wm, sizeof (struct sd_w_map)); 28124 cv_init(&((struct sd_w_map *)wm)->wm_avail, NULL, CV_DRIVER, NULL); 28125 return (0); 28126 } 28127 28128 28129 /* 28130 * Function: sd_wm_cache_destructor() 28131 * 28132 * Description: Cache destructor for the wmap cache for the read/modify/write 28133 * devices. 28134 * 28135 * Arguments: wm - A pointer to the sd_w_map to be initialized. 28136 * un - sd_lun structure for the device. 28137 */ 28138 /*ARGSUSED*/ 28139 static void 28140 sd_wm_cache_destructor(void *wm, void *un) 28141 { 28142 cv_destroy(&((struct sd_w_map *)wm)->wm_avail); 28143 } 28144 28145 28146 /* 28147 * Function: sd_range_lock() 28148 * 28149 * Description: Lock the range of blocks specified as parameter to ensure 28150 * that read, modify write is atomic and no other i/o writes 28151 * to the same location. The range is specified in terms 28152 * of start and end blocks. Block numbers are the actual 28153 * media block numbers and not system. 28154 * 28155 * Arguments: un - sd_lun structure for the device. 28156 * startb - The starting block number 28157 * endb - The end block number 28158 * typ - type of i/o - simple/read_modify_write 28159 * 28160 * Return Code: wm - pointer to the wmap structure. 28161 * 28162 * Context: This routine can sleep. 
28163 */ 28164 28165 static struct sd_w_map * 28166 sd_range_lock(struct sd_lun *un, daddr_t startb, daddr_t endb, ushort_t typ) 28167 { 28168 struct sd_w_map *wmp = NULL; 28169 struct sd_w_map *sl_wmp = NULL; 28170 struct sd_w_map *tmp_wmp; 28171 wm_state state = SD_WM_CHK_LIST; 28172 28173 28174 ASSERT(un != NULL); 28175 ASSERT(!mutex_owned(SD_MUTEX(un))); 28176 28177 mutex_enter(SD_MUTEX(un)); 28178 28179 while (state != SD_WM_DONE) { 28180 28181 switch (state) { 28182 case SD_WM_CHK_LIST: 28183 /* 28184 * This is the starting state. Check the wmap list 28185 * to see if the range is currently available. 28186 */ 28187 if (!(typ & SD_WTYPE_RMW) && !(un->un_rmw_count)) { 28188 /* 28189 * If this is a simple write and no rmw 28190 * i/o is pending then try to lock the 28191 * range as the range should be available. 28192 */ 28193 state = SD_WM_LOCK_RANGE; 28194 } else { 28195 tmp_wmp = sd_get_range(un, startb, endb); 28196 if (tmp_wmp != NULL) { 28197 if ((wmp != NULL) && ONLIST(un, wmp)) { 28198 /* 28199 * Should not keep onlist wmps 28200 * while waiting this macro 28201 * will also do wmp = NULL; 28202 */ 28203 FREE_ONLIST_WMAP(un, wmp); 28204 } 28205 /* 28206 * sl_wmp is the wmap on which wait 28207 * is done, since the tmp_wmp points 28208 * to the inuse wmap, set sl_wmp to 28209 * tmp_wmp and change the state to sleep 28210 */ 28211 sl_wmp = tmp_wmp; 28212 state = SD_WM_WAIT_MAP; 28213 } else { 28214 state = SD_WM_LOCK_RANGE; 28215 } 28216 28217 } 28218 break; 28219 28220 case SD_WM_LOCK_RANGE: 28221 ASSERT(un->un_wm_cache); 28222 /* 28223 * The range need to be locked, try to get a wmap. 28224 * First attempt it with NO_SLEEP, want to avoid a sleep 28225 * if possible as we will have to release the sd mutex 28226 * if we have to sleep. 28227 */ 28228 if (wmp == NULL) 28229 wmp = kmem_cache_alloc(un->un_wm_cache, 28230 KM_NOSLEEP); 28231 if (wmp == NULL) { 28232 mutex_exit(SD_MUTEX(un)); 28233 _NOTE(DATA_READABLE_WITHOUT_LOCK 28234 (sd_lun::un_wm_cache)) 28235 wmp = kmem_cache_alloc(un->un_wm_cache, 28236 KM_SLEEP); 28237 mutex_enter(SD_MUTEX(un)); 28238 /* 28239 * we released the mutex so recheck and go to 28240 * check list state. 28241 */ 28242 state = SD_WM_CHK_LIST; 28243 } else { 28244 /* 28245 * We exit out of state machine since we 28246 * have the wmap. Do the housekeeping first. 28247 * place the wmap on the wmap list if it is not 28248 * on it already and then set the state to done. 28249 */ 28250 wmp->wm_start = startb; 28251 wmp->wm_end = endb; 28252 wmp->wm_flags = typ | SD_WM_BUSY; 28253 if (typ & SD_WTYPE_RMW) { 28254 un->un_rmw_count++; 28255 } 28256 /* 28257 * If not already on the list then link 28258 */ 28259 if (!ONLIST(un, wmp)) { 28260 wmp->wm_next = un->un_wm; 28261 wmp->wm_prev = NULL; 28262 if (wmp->wm_next) 28263 wmp->wm_next->wm_prev = wmp; 28264 un->un_wm = wmp; 28265 } 28266 state = SD_WM_DONE; 28267 } 28268 break; 28269 28270 case SD_WM_WAIT_MAP: 28271 ASSERT(sl_wmp->wm_flags & SD_WM_BUSY); 28272 /* 28273 * Wait is done on sl_wmp, which is set in the 28274 * check_list state. 28275 */ 28276 sl_wmp->wm_wanted_count++; 28277 cv_wait(&sl_wmp->wm_avail, SD_MUTEX(un)); 28278 sl_wmp->wm_wanted_count--; 28279 /* 28280 * We can reuse the memory from the completed sl_wmp 28281 * lock range for our new lock, but only if noone is 28282 * waiting for it. 
28283 */ 28284 ASSERT(!(sl_wmp->wm_flags & SD_WM_BUSY)); 28285 if (sl_wmp->wm_wanted_count == 0) { 28286 if (wmp != NULL) 28287 CHK_N_FREEWMP(un, wmp); 28288 wmp = sl_wmp; 28289 } 28290 sl_wmp = NULL; 28291 /* 28292 * After waking up, need to recheck for availability of 28293 * range. 28294 */ 28295 state = SD_WM_CHK_LIST; 28296 break; 28297 28298 default: 28299 panic("sd_range_lock: " 28300 "Unknown state %d in sd_range_lock", state); 28301 /*NOTREACHED*/ 28302 } /* switch(state) */ 28303 28304 } /* while(state != SD_WM_DONE) */ 28305 28306 mutex_exit(SD_MUTEX(un)); 28307 28308 ASSERT(wmp != NULL); 28309 28310 return (wmp); 28311 } 28312 28313 28314 /* 28315 * Function: sd_get_range() 28316 * 28317 * Description: Find if there any overlapping I/O to this one 28318 * Returns the write-map of 1st such I/O, NULL otherwise. 28319 * 28320 * Arguments: un - sd_lun structure for the device. 28321 * startb - The starting block number 28322 * endb - The end block number 28323 * 28324 * Return Code: wm - pointer to the wmap structure. 28325 */ 28326 28327 static struct sd_w_map * 28328 sd_get_range(struct sd_lun *un, daddr_t startb, daddr_t endb) 28329 { 28330 struct sd_w_map *wmp; 28331 28332 ASSERT(un != NULL); 28333 28334 for (wmp = un->un_wm; wmp != NULL; wmp = wmp->wm_next) { 28335 if (!(wmp->wm_flags & SD_WM_BUSY)) { 28336 continue; 28337 } 28338 if ((startb >= wmp->wm_start) && (startb <= wmp->wm_end)) { 28339 break; 28340 } 28341 if ((endb >= wmp->wm_start) && (endb <= wmp->wm_end)) { 28342 break; 28343 } 28344 } 28345 28346 return (wmp); 28347 } 28348 28349 28350 /* 28351 * Function: sd_free_inlist_wmap() 28352 * 28353 * Description: Unlink and free a write map struct. 28354 * 28355 * Arguments: un - sd_lun structure for the device. 28356 * wmp - sd_w_map which needs to be unlinked. 28357 */ 28358 28359 static void 28360 sd_free_inlist_wmap(struct sd_lun *un, struct sd_w_map *wmp) 28361 { 28362 ASSERT(un != NULL); 28363 28364 if (un->un_wm == wmp) { 28365 un->un_wm = wmp->wm_next; 28366 } else { 28367 wmp->wm_prev->wm_next = wmp->wm_next; 28368 } 28369 28370 if (wmp->wm_next) { 28371 wmp->wm_next->wm_prev = wmp->wm_prev; 28372 } 28373 28374 wmp->wm_next = wmp->wm_prev = NULL; 28375 28376 kmem_cache_free(un->un_wm_cache, wmp); 28377 } 28378 28379 28380 /* 28381 * Function: sd_range_unlock() 28382 * 28383 * Description: Unlock the range locked by wm. 28384 * Free write map if nobody else is waiting on it. 28385 * 28386 * Arguments: un - sd_lun structure for the device. 28387 * wmp - sd_w_map which needs to be unlinked. 28388 */ 28389 28390 static void 28391 sd_range_unlock(struct sd_lun *un, struct sd_w_map *wm) 28392 { 28393 ASSERT(un != NULL); 28394 ASSERT(wm != NULL); 28395 ASSERT(!mutex_owned(SD_MUTEX(un))); 28396 28397 mutex_enter(SD_MUTEX(un)); 28398 28399 if (wm->wm_flags & SD_WTYPE_RMW) { 28400 un->un_rmw_count--; 28401 } 28402 28403 if (wm->wm_wanted_count) { 28404 wm->wm_flags = 0; 28405 /* 28406 * Broadcast that the wmap is available now. 28407 */ 28408 cv_broadcast(&wm->wm_avail); 28409 } else { 28410 /* 28411 * If no one is waiting on the map, it should be free'ed. 28412 */ 28413 sd_free_inlist_wmap(un, wm); 28414 } 28415 28416 mutex_exit(SD_MUTEX(un)); 28417 } 28418 28419 28420 /* 28421 * Function: sd_read_modify_write_task 28422 * 28423 * Description: Called from a taskq thread to initiate the write phase of 28424 * a read-modify-write request. This is used for targets where 28425 * un->un_sys_blocksize != un->un_tgt_blocksize. 
28426 * 28427 * Arguments: arg - a pointer to the buf(9S) struct for the write command. 28428 * 28429 * Context: Called under taskq thread context. 28430 */ 28431 28432 static void 28433 sd_read_modify_write_task(void *arg) 28434 { 28435 struct sd_mapblocksize_info *bsp; 28436 struct buf *bp; 28437 struct sd_xbuf *xp; 28438 struct sd_lun *un; 28439 28440 bp = arg; /* The bp is given in arg */ 28441 ASSERT(bp != NULL); 28442 28443 /* Get the pointer to the layer-private data struct */ 28444 xp = SD_GET_XBUF(bp); 28445 ASSERT(xp != NULL); 28446 bsp = xp->xb_private; 28447 ASSERT(bsp != NULL); 28448 28449 un = SD_GET_UN(bp); 28450 ASSERT(un != NULL); 28451 ASSERT(!mutex_owned(SD_MUTEX(un))); 28452 28453 SD_TRACE(SD_LOG_IO_RMMEDIA, un, 28454 "sd_read_modify_write_task: entry: buf:0x%p\n", bp); 28455 28456 /* 28457 * This is the write phase of a read-modify-write request, called 28458 * under the context of a taskq thread in response to the completion 28459 * of the read portion of the rmw request completing under interrupt 28460 * context. The write request must be sent from here down the iostart 28461 * chain as if it were being sent from sd_mapblocksize_iostart(), so 28462 * we use the layer index saved in the layer-private data area. 28463 */ 28464 SD_NEXT_IOSTART(bsp->mbs_layer_index, un, bp); 28465 28466 SD_TRACE(SD_LOG_IO_RMMEDIA, un, 28467 "sd_read_modify_write_task: exit: buf:0x%p\n", bp); 28468 } 28469 28470 28471 /* 28472 * Function: sddump_do_read_of_rmw() 28473 * 28474 * Description: This routine will be called from sddump, If sddump is called 28475 * with an I/O which not aligned on device blocksize boundary 28476 * then the write has to be converted to read-modify-write. 28477 * Do the read part here in order to keep sddump simple. 28478 * Note - That the sd_mutex is held across the call to this 28479 * routine. 28480 * 28481 * Arguments: un - sd_lun 28482 * blkno - block number in terms of media block size. 28483 * nblk - number of blocks. 28484 * bpp - pointer to pointer to the buf structure. On return 28485 * from this function, *bpp points to the valid buffer 28486 * to which the write has to be done. 28487 * 28488 * Return Code: 0 for success or errno-type return code 28489 */ 28490 28491 static int 28492 sddump_do_read_of_rmw(struct sd_lun *un, uint64_t blkno, uint64_t nblk, 28493 struct buf **bpp) 28494 { 28495 int err; 28496 int i; 28497 int rval; 28498 struct buf *bp; 28499 struct scsi_pkt *pkt = NULL; 28500 uint32_t target_blocksize; 28501 28502 ASSERT(un != NULL); 28503 ASSERT(mutex_owned(SD_MUTEX(un))); 28504 28505 target_blocksize = un->un_tgt_blocksize; 28506 28507 mutex_exit(SD_MUTEX(un)); 28508 28509 bp = scsi_alloc_consistent_buf(SD_ADDRESS(un), (struct buf *)NULL, 28510 (size_t)(nblk * target_blocksize), B_READ, NULL_FUNC, NULL); 28511 if (bp == NULL) { 28512 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 28513 "no resources for dumping; giving up"); 28514 err = ENOMEM; 28515 goto done; 28516 } 28517 28518 rval = sd_setup_rw_pkt(un, &pkt, bp, 0, NULL_FUNC, NULL, 28519 blkno, nblk); 28520 if (rval != 0) { 28521 scsi_free_consistent_buf(bp); 28522 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 28523 "no resources for dumping; giving up"); 28524 err = ENOMEM; 28525 goto done; 28526 } 28527 28528 pkt->pkt_flags |= FLAG_NOINTR; 28529 28530 err = EIO; 28531 for (i = 0; i < SD_NDUMP_RETRIES; i++) { 28532 28533 /* 28534 * Scsi_poll returns 0 (success) if the command completes and 28535 * the status block is STATUS_GOOD. 
We should only check 28536 * errors if this condition is not true. Even then we should 28537 * send our own request sense packet only if we have a check 28538 * condition and auto request sense has not been performed by 28539 * the hba. 28540 */ 28541 SD_TRACE(SD_LOG_DUMP, un, "sddump: sending read\n"); 28542 28543 if ((sd_scsi_poll(un, pkt) == 0) && (pkt->pkt_resid == 0)) { 28544 err = 0; 28545 break; 28546 } 28547 28548 /* 28549 * Check CMD_DEV_GONE 1st, give up if device is gone, 28550 * no need to read RQS data. 28551 */ 28552 if (pkt->pkt_reason == CMD_DEV_GONE) { 28553 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 28554 "Error while dumping state with rmw..." 28555 "Device is gone\n"); 28556 break; 28557 } 28558 28559 if (SD_GET_PKT_STATUS(pkt) == STATUS_CHECK) { 28560 SD_INFO(SD_LOG_DUMP, un, 28561 "sddump: read failed with CHECK, try # %d\n", i); 28562 if (((pkt->pkt_state & STATE_ARQ_DONE) == 0)) { 28563 (void) sd_send_polled_RQS(un); 28564 } 28565 28566 continue; 28567 } 28568 28569 if (SD_GET_PKT_STATUS(pkt) == STATUS_BUSY) { 28570 int reset_retval = 0; 28571 28572 SD_INFO(SD_LOG_DUMP, un, 28573 "sddump: read failed with BUSY, try # %d\n", i); 28574 28575 if (un->un_f_lun_reset_enabled == TRUE) { 28576 reset_retval = scsi_reset(SD_ADDRESS(un), 28577 RESET_LUN); 28578 } 28579 if (reset_retval == 0) { 28580 (void) scsi_reset(SD_ADDRESS(un), RESET_TARGET); 28581 } 28582 (void) sd_send_polled_RQS(un); 28583 28584 } else { 28585 SD_INFO(SD_LOG_DUMP, un, 28586 "sddump: read failed with 0x%x, try # %d\n", 28587 SD_GET_PKT_STATUS(pkt), i); 28588 mutex_enter(SD_MUTEX(un)); 28589 sd_reset_target(un, pkt); 28590 mutex_exit(SD_MUTEX(un)); 28591 } 28592 28593 /* 28594 * If we are not getting anywhere with lun/target resets, 28595 * let's reset the bus. 28596 */ 28597 if (i > SD_NDUMP_RETRIES/2) { 28598 (void) scsi_reset(SD_ADDRESS(un), RESET_ALL); 28599 (void) sd_send_polled_RQS(un); 28600 } 28601 28602 } 28603 scsi_destroy_pkt(pkt); 28604 28605 if (err != 0) { 28606 scsi_free_consistent_buf(bp); 28607 *bpp = NULL; 28608 } else { 28609 *bpp = bp; 28610 } 28611 28612 done: 28613 mutex_enter(SD_MUTEX(un)); 28614 return (err); 28615 } 28616 28617 28618 /* 28619 * Function: sd_failfast_flushq 28620 * 28621 * Description: Take all bp's on the wait queue that have B_FAILFAST set 28622 * in b_flags and move them onto the failfast queue, then kick 28623 * off a thread to return all bp's on the failfast queue to 28624 * their owners with an error set. 28625 * 28626 * Arguments: un - pointer to the soft state struct for the instance. 28627 * 28628 * Context: may execute in interrupt context. 28629 */ 28630 28631 static void 28632 sd_failfast_flushq(struct sd_lun *un) 28633 { 28634 struct buf *bp; 28635 struct buf *next_waitq_bp; 28636 struct buf *prev_waitq_bp = NULL; 28637 28638 ASSERT(un != NULL); 28639 ASSERT(mutex_owned(SD_MUTEX(un))); 28640 ASSERT(un->un_failfast_state == SD_FAILFAST_ACTIVE); 28641 ASSERT(un->un_failfast_bp == NULL); 28642 28643 SD_TRACE(SD_LOG_IO_FAILFAST, un, 28644 "sd_failfast_flushq: entry: un:0x%p\n", un); 28645 28646 /* 28647 * Check if we should flush all bufs when entering failfast state, or 28648 * just those with B_FAILFAST set. 28649 */ 28650 if (sd_failfast_flushctl & SD_FAILFAST_FLUSH_ALL_BUFS) { 28651 /* 28652 * Move *all* bp's on the wait queue to the failfast flush 28653 * queue, including those that do NOT have B_FAILFAST set. 
28654 */ 28655 if (un->un_failfast_headp == NULL) { 28656 ASSERT(un->un_failfast_tailp == NULL); 28657 un->un_failfast_headp = un->un_waitq_headp; 28658 } else { 28659 ASSERT(un->un_failfast_tailp != NULL); 28660 un->un_failfast_tailp->av_forw = un->un_waitq_headp; 28661 } 28662 28663 un->un_failfast_tailp = un->un_waitq_tailp; 28664 28665 /* update kstat for each bp moved out of the waitq */ 28666 for (bp = un->un_waitq_headp; bp != NULL; bp = bp->av_forw) { 28667 SD_UPDATE_KSTATS(un, kstat_waitq_exit, bp); 28668 } 28669 28670 /* empty the waitq */ 28671 un->un_waitq_headp = un->un_waitq_tailp = NULL; 28672 28673 } else { 28674 /* 28675 * Go thru the wait queue, pick off all entries with 28676 * B_FAILFAST set, and move these onto the failfast queue. 28677 */ 28678 for (bp = un->un_waitq_headp; bp != NULL; bp = next_waitq_bp) { 28679 /* 28680 * Save the pointer to the next bp on the wait queue, 28681 * so we get to it on the next iteration of this loop. 28682 */ 28683 next_waitq_bp = bp->av_forw; 28684 28685 /* 28686 * If this bp from the wait queue does NOT have 28687 * B_FAILFAST set, just move on to the next element 28688 * in the wait queue. Note, this is the only place 28689 * where it is correct to set prev_waitq_bp. 28690 */ 28691 if ((bp->b_flags & B_FAILFAST) == 0) { 28692 prev_waitq_bp = bp; 28693 continue; 28694 } 28695 28696 /* 28697 * Remove the bp from the wait queue. 28698 */ 28699 if (bp == un->un_waitq_headp) { 28700 /* The bp is the first element of the waitq. */ 28701 un->un_waitq_headp = next_waitq_bp; 28702 if (un->un_waitq_headp == NULL) { 28703 /* The wait queue is now empty */ 28704 un->un_waitq_tailp = NULL; 28705 } 28706 } else { 28707 /* 28708 * The bp is either somewhere in the middle 28709 * or at the end of the wait queue. 28710 */ 28711 ASSERT(un->un_waitq_headp != NULL); 28712 ASSERT(prev_waitq_bp != NULL); 28713 ASSERT((prev_waitq_bp->b_flags & B_FAILFAST) 28714 == 0); 28715 if (bp == un->un_waitq_tailp) { 28716 /* bp is the last entry on the waitq. */ 28717 ASSERT(next_waitq_bp == NULL); 28718 un->un_waitq_tailp = prev_waitq_bp; 28719 } 28720 prev_waitq_bp->av_forw = next_waitq_bp; 28721 } 28722 bp->av_forw = NULL; 28723 28724 /* 28725 * update kstat since the bp is moved out of 28726 * the waitq 28727 */ 28728 SD_UPDATE_KSTATS(un, kstat_waitq_exit, bp); 28729 28730 /* 28731 * Now put the bp onto the failfast queue. 28732 */ 28733 if (un->un_failfast_headp == NULL) { 28734 /* failfast queue is currently empty */ 28735 ASSERT(un->un_failfast_tailp == NULL); 28736 un->un_failfast_headp = 28737 un->un_failfast_tailp = bp; 28738 } else { 28739 /* Add the bp to the end of the failfast q */ 28740 ASSERT(un->un_failfast_tailp != NULL); 28741 ASSERT(un->un_failfast_tailp->b_flags & 28742 B_FAILFAST); 28743 un->un_failfast_tailp->av_forw = bp; 28744 un->un_failfast_tailp = bp; 28745 } 28746 } 28747 } 28748 28749 /* 28750 * Now return all bp's on the failfast queue to their owners. 28751 */ 28752 while ((bp = un->un_failfast_headp) != NULL) { 28753 28754 un->un_failfast_headp = bp->av_forw; 28755 if (un->un_failfast_headp == NULL) { 28756 un->un_failfast_tailp = NULL; 28757 } 28758 28759 /* 28760 * We want to return the bp with a failure error code, but 28761 * we do not want a call to sd_start_cmds() to occur here, 28762 * so use sd_return_failed_command_no_restart() instead of 28763 * sd_return_failed_command(). 28764 */ 28765 sd_return_failed_command_no_restart(un, bp, EIO); 28766 } 28767 28768 /* Flush the xbuf queues if required. 
*/ 28769 if (sd_failfast_flushctl & SD_FAILFAST_FLUSH_ALL_QUEUES) { 28770 ddi_xbuf_flushq(un->un_xbuf_attr, sd_failfast_flushq_callback); 28771 } 28772 28773 SD_TRACE(SD_LOG_IO_FAILFAST, un, 28774 "sd_failfast_flushq: exit: un:0x%p\n", un); 28775 } 28776 28777 28778 /* 28779 * Function: sd_failfast_flushq_callback 28780 * 28781 * Description: Return TRUE if the given bp meets the criteria for failfast 28782 * flushing. Used with ddi_xbuf_flushq(9F). 28783 * 28784 * Arguments: bp - ptr to buf struct to be examined. 28785 * 28786 * Context: Any 28787 */ 28788 28789 static int 28790 sd_failfast_flushq_callback(struct buf *bp) 28791 { 28792 /* 28793 * Return TRUE if (1) we want to flush ALL bufs when the failfast 28794 * state is entered; OR (2) the given bp has B_FAILFAST set. 28795 */ 28796 return (((sd_failfast_flushctl & SD_FAILFAST_FLUSH_ALL_BUFS) || 28797 (bp->b_flags & B_FAILFAST)) ? TRUE : FALSE); 28798 } 28799 28800 28801 28802 /* 28803 * Function: sd_setup_next_xfer 28804 * 28805 * Description: Prepare next I/O operation using DMA_PARTIAL 28806 * 28807 */ 28808 28809 static int 28810 sd_setup_next_xfer(struct sd_lun *un, struct buf *bp, 28811 struct scsi_pkt *pkt, struct sd_xbuf *xp) 28812 { 28813 ssize_t num_blks_not_xfered; 28814 daddr_t strt_blk_num; 28815 ssize_t bytes_not_xfered; 28816 int rval; 28817 28818 ASSERT(pkt->pkt_resid == 0); 28819 28820 /* 28821 * Calculate next block number and amount to be transferred. 28822 * 28823 * How much data NOT transfered to the HBA yet. 28824 */ 28825 bytes_not_xfered = xp->xb_dma_resid; 28826 28827 /* 28828 * figure how many blocks NOT transfered to the HBA yet. 28829 */ 28830 num_blks_not_xfered = SD_BYTES2TGTBLOCKS(un, bytes_not_xfered); 28831 28832 /* 28833 * set starting block number to the end of what WAS transfered. 28834 */ 28835 strt_blk_num = xp->xb_blkno + 28836 SD_BYTES2TGTBLOCKS(un, bp->b_bcount - bytes_not_xfered); 28837 28838 /* 28839 * Move pkt to the next portion of the xfer. sd_setup_next_rw_pkt 28840 * will call scsi_initpkt with NULL_FUNC so we do not have to release 28841 * the disk mutex here. 28842 */ 28843 rval = sd_setup_next_rw_pkt(un, pkt, bp, 28844 strt_blk_num, num_blks_not_xfered); 28845 28846 if (rval == 0) { 28847 28848 /* 28849 * Success. 28850 * 28851 * Adjust things if there are still more blocks to be 28852 * transfered. 28853 */ 28854 xp->xb_dma_resid = pkt->pkt_resid; 28855 pkt->pkt_resid = 0; 28856 28857 return (1); 28858 } 28859 28860 /* 28861 * There's really only one possible return value from 28862 * sd_setup_next_rw_pkt which occurs when scsi_init_pkt 28863 * returns NULL. 28864 */ 28865 ASSERT(rval == SD_PKT_ALLOC_FAILURE); 28866 28867 bp->b_resid = bp->b_bcount; 28868 bp->b_flags |= B_ERROR; 28869 28870 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 28871 "Error setting up next portion of DMA transfer\n"); 28872 28873 return (0); 28874 } 28875 28876 /* 28877 * Function: sd_panic_for_res_conflict 28878 * 28879 * Description: Call panic with a string formatted with "Reservation Conflict" 28880 * and a human readable identifier indicating the SD instance 28881 * that experienced the reservation conflict. 28882 * 28883 * Arguments: un - pointer to the soft state struct for the instance. 28884 * 28885 * Context: may execute in interrupt context. 
28886 */ 28887 28888 #define SD_RESV_CONFLICT_FMT_LEN 40 28889 void 28890 sd_panic_for_res_conflict(struct sd_lun *un) 28891 { 28892 char panic_str[SD_RESV_CONFLICT_FMT_LEN+MAXPATHLEN]; 28893 char path_str[MAXPATHLEN]; 28894 28895 (void) snprintf(panic_str, sizeof (panic_str), 28896 "Reservation Conflict\nDisk: %s", 28897 ddi_pathname(SD_DEVINFO(un), path_str)); 28898 28899 panic(panic_str); 28900 } 28901 28902 /* 28903 * Note: The following sd_faultinjection_ioctl( ) routines implement 28904 * driver support for handling fault injection for error analysis 28905 * causing faults in multiple layers of the driver. 28906 * 28907 */ 28908 28909 #ifdef SD_FAULT_INJECTION 28910 static uint_t sd_fault_injection_on = 0; 28911 28912 /* 28913 * Function: sd_faultinjection_ioctl() 28914 * 28915 * Description: This routine is the driver entry point for handling 28916 * faultinjection ioctls to inject errors into the 28917 * layer model 28918 * 28919 * Arguments: cmd - the ioctl cmd received 28920 * arg - the arguments from user and returns 28921 */ 28922 28923 static void 28924 sd_faultinjection_ioctl(int cmd, intptr_t arg, struct sd_lun *un) { 28925 28926 uint_t i = 0; 28927 uint_t rval; 28928 28929 SD_TRACE(SD_LOG_IOERR, un, "sd_faultinjection_ioctl: entry\n"); 28930 28931 mutex_enter(SD_MUTEX(un)); 28932 28933 switch (cmd) { 28934 case SDIOCRUN: 28935 /* Allow pushed faults to be injected */ 28936 SD_INFO(SD_LOG_SDTEST, un, 28937 "sd_faultinjection_ioctl: Injecting Fault Run\n"); 28938 28939 sd_fault_injection_on = 1; 28940 28941 SD_INFO(SD_LOG_IOERR, un, 28942 "sd_faultinjection_ioctl: run finished\n"); 28943 break; 28944 28945 case SDIOCSTART: 28946 /* Start Injection Session */ 28947 SD_INFO(SD_LOG_SDTEST, un, 28948 "sd_faultinjection_ioctl: Injecting Fault Start\n"); 28949 28950 sd_fault_injection_on = 0; 28951 un->sd_injection_mask = 0xFFFFFFFF; 28952 for (i = 0; i < SD_FI_MAX_ERROR; i++) { 28953 un->sd_fi_fifo_pkt[i] = NULL; 28954 un->sd_fi_fifo_xb[i] = NULL; 28955 un->sd_fi_fifo_un[i] = NULL; 28956 un->sd_fi_fifo_arq[i] = NULL; 28957 } 28958 un->sd_fi_fifo_start = 0; 28959 un->sd_fi_fifo_end = 0; 28960 28961 mutex_enter(&(un->un_fi_mutex)); 28962 un->sd_fi_log[0] = '\0'; 28963 un->sd_fi_buf_len = 0; 28964 mutex_exit(&(un->un_fi_mutex)); 28965 28966 SD_INFO(SD_LOG_IOERR, un, 28967 "sd_faultinjection_ioctl: start finished\n"); 28968 break; 28969 28970 case SDIOCSTOP: 28971 /* Stop Injection Session */ 28972 SD_INFO(SD_LOG_SDTEST, un, 28973 "sd_faultinjection_ioctl: Injecting Fault Stop\n"); 28974 sd_fault_injection_on = 0; 28975 un->sd_injection_mask = 0x0; 28976 28977 /* Empty stray or unuseds structs from fifo */ 28978 for (i = 0; i < SD_FI_MAX_ERROR; i++) { 28979 if (un->sd_fi_fifo_pkt[i] != NULL) { 28980 kmem_free(un->sd_fi_fifo_pkt[i], 28981 sizeof (struct sd_fi_pkt)); 28982 } 28983 if (un->sd_fi_fifo_xb[i] != NULL) { 28984 kmem_free(un->sd_fi_fifo_xb[i], 28985 sizeof (struct sd_fi_xb)); 28986 } 28987 if (un->sd_fi_fifo_un[i] != NULL) { 28988 kmem_free(un->sd_fi_fifo_un[i], 28989 sizeof (struct sd_fi_un)); 28990 } 28991 if (un->sd_fi_fifo_arq[i] != NULL) { 28992 kmem_free(un->sd_fi_fifo_arq[i], 28993 sizeof (struct sd_fi_arq)); 28994 } 28995 un->sd_fi_fifo_pkt[i] = NULL; 28996 un->sd_fi_fifo_un[i] = NULL; 28997 un->sd_fi_fifo_xb[i] = NULL; 28998 un->sd_fi_fifo_arq[i] = NULL; 28999 } 29000 un->sd_fi_fifo_start = 0; 29001 un->sd_fi_fifo_end = 0; 29002 29003 SD_INFO(SD_LOG_IOERR, un, 29004 "sd_faultinjection_ioctl: stop finished\n"); 29005 break; 29006 29007 case SDIOCINSERTPKT: 29008 /* 
Store a packet struct to be pushed onto fifo */ 29009 SD_INFO(SD_LOG_SDTEST, un, 29010 "sd_faultinjection_ioctl: Injecting Fault Insert Pkt\n"); 29011 29012 i = un->sd_fi_fifo_end % SD_FI_MAX_ERROR; 29013 29014 sd_fault_injection_on = 0; 29015 29016 /* No more that SD_FI_MAX_ERROR allowed in Queue */ 29017 if (un->sd_fi_fifo_pkt[i] != NULL) { 29018 kmem_free(un->sd_fi_fifo_pkt[i], 29019 sizeof (struct sd_fi_pkt)); 29020 } 29021 if (arg != NULL) { 29022 un->sd_fi_fifo_pkt[i] = 29023 kmem_alloc(sizeof (struct sd_fi_pkt), KM_NOSLEEP); 29024 if (un->sd_fi_fifo_pkt[i] == NULL) { 29025 /* Alloc failed don't store anything */ 29026 break; 29027 } 29028 rval = ddi_copyin((void *)arg, un->sd_fi_fifo_pkt[i], 29029 sizeof (struct sd_fi_pkt), 0); 29030 if (rval == -1) { 29031 kmem_free(un->sd_fi_fifo_pkt[i], 29032 sizeof (struct sd_fi_pkt)); 29033 un->sd_fi_fifo_pkt[i] = NULL; 29034 } 29035 } else { 29036 SD_INFO(SD_LOG_IOERR, un, 29037 "sd_faultinjection_ioctl: pkt null\n"); 29038 } 29039 break; 29040 29041 case SDIOCINSERTXB: 29042 /* Store a xb struct to be pushed onto fifo */ 29043 SD_INFO(SD_LOG_SDTEST, un, 29044 "sd_faultinjection_ioctl: Injecting Fault Insert XB\n"); 29045 29046 i = un->sd_fi_fifo_end % SD_FI_MAX_ERROR; 29047 29048 sd_fault_injection_on = 0; 29049 29050 if (un->sd_fi_fifo_xb[i] != NULL) { 29051 kmem_free(un->sd_fi_fifo_xb[i], 29052 sizeof (struct sd_fi_xb)); 29053 un->sd_fi_fifo_xb[i] = NULL; 29054 } 29055 if (arg != NULL) { 29056 un->sd_fi_fifo_xb[i] = 29057 kmem_alloc(sizeof (struct sd_fi_xb), KM_NOSLEEP); 29058 if (un->sd_fi_fifo_xb[i] == NULL) { 29059 /* Alloc failed don't store anything */ 29060 break; 29061 } 29062 rval = ddi_copyin((void *)arg, un->sd_fi_fifo_xb[i], 29063 sizeof (struct sd_fi_xb), 0); 29064 29065 if (rval == -1) { 29066 kmem_free(un->sd_fi_fifo_xb[i], 29067 sizeof (struct sd_fi_xb)); 29068 un->sd_fi_fifo_xb[i] = NULL; 29069 } 29070 } else { 29071 SD_INFO(SD_LOG_IOERR, un, 29072 "sd_faultinjection_ioctl: xb null\n"); 29073 } 29074 break; 29075 29076 case SDIOCINSERTUN: 29077 /* Store a un struct to be pushed onto fifo */ 29078 SD_INFO(SD_LOG_SDTEST, un, 29079 "sd_faultinjection_ioctl: Injecting Fault Insert UN\n"); 29080 29081 i = un->sd_fi_fifo_end % SD_FI_MAX_ERROR; 29082 29083 sd_fault_injection_on = 0; 29084 29085 if (un->sd_fi_fifo_un[i] != NULL) { 29086 kmem_free(un->sd_fi_fifo_un[i], 29087 sizeof (struct sd_fi_un)); 29088 un->sd_fi_fifo_un[i] = NULL; 29089 } 29090 if (arg != NULL) { 29091 un->sd_fi_fifo_un[i] = 29092 kmem_alloc(sizeof (struct sd_fi_un), KM_NOSLEEP); 29093 if (un->sd_fi_fifo_un[i] == NULL) { 29094 /* Alloc failed don't store anything */ 29095 break; 29096 } 29097 rval = ddi_copyin((void *)arg, un->sd_fi_fifo_un[i], 29098 sizeof (struct sd_fi_un), 0); 29099 if (rval == -1) { 29100 kmem_free(un->sd_fi_fifo_un[i], 29101 sizeof (struct sd_fi_un)); 29102 un->sd_fi_fifo_un[i] = NULL; 29103 } 29104 29105 } else { 29106 SD_INFO(SD_LOG_IOERR, un, 29107 "sd_faultinjection_ioctl: un null\n"); 29108 } 29109 29110 break; 29111 29112 case SDIOCINSERTARQ: 29113 /* Store a arq struct to be pushed onto fifo */ 29114 SD_INFO(SD_LOG_SDTEST, un, 29115 "sd_faultinjection_ioctl: Injecting Fault Insert ARQ\n"); 29116 i = un->sd_fi_fifo_end % SD_FI_MAX_ERROR; 29117 29118 sd_fault_injection_on = 0; 29119 29120 if (un->sd_fi_fifo_arq[i] != NULL) { 29121 kmem_free(un->sd_fi_fifo_arq[i], 29122 sizeof (struct sd_fi_arq)); 29123 un->sd_fi_fifo_arq[i] = NULL; 29124 } 29125 if (arg != NULL) { 29126 un->sd_fi_fifo_arq[i] = 29127 kmem_alloc(sizeof (struct 
sd_fi_arq), KM_NOSLEEP); 29128 if (un->sd_fi_fifo_arq[i] == NULL) { 29129 /* Alloc failed don't store anything */ 29130 break; 29131 } 29132 rval = ddi_copyin((void *)arg, un->sd_fi_fifo_arq[i], 29133 sizeof (struct sd_fi_arq), 0); 29134 if (rval == -1) { 29135 kmem_free(un->sd_fi_fifo_arq[i], 29136 sizeof (struct sd_fi_arq)); 29137 un->sd_fi_fifo_arq[i] = NULL; 29138 } 29139 29140 } else { 29141 SD_INFO(SD_LOG_IOERR, un, 29142 "sd_faultinjection_ioctl: arq null\n"); 29143 } 29144 29145 break; 29146 29147 case SDIOCPUSH: 29148 /* Push stored xb, pkt, un, and arq onto fifo */ 29149 sd_fault_injection_on = 0; 29150 29151 if (arg != NULL) { 29152 rval = ddi_copyin((void *)arg, &i, sizeof (uint_t), 0); 29153 if (rval != -1 && 29154 un->sd_fi_fifo_end + i < SD_FI_MAX_ERROR) { 29155 un->sd_fi_fifo_end += i; 29156 } 29157 } else { 29158 SD_INFO(SD_LOG_IOERR, un, 29159 "sd_faultinjection_ioctl: push arg null\n"); 29160 if (un->sd_fi_fifo_end + i < SD_FI_MAX_ERROR) { 29161 un->sd_fi_fifo_end++; 29162 } 29163 } 29164 SD_INFO(SD_LOG_IOERR, un, 29165 "sd_faultinjection_ioctl: push to end=%d\n", 29166 un->sd_fi_fifo_end); 29167 break; 29168 29169 case SDIOCRETRIEVE: 29170 /* Return buffer of log from Injection session */ 29171 SD_INFO(SD_LOG_SDTEST, un, 29172 "sd_faultinjection_ioctl: Injecting Fault Retreive"); 29173 29174 sd_fault_injection_on = 0; 29175 29176 mutex_enter(&(un->un_fi_mutex)); 29177 rval = ddi_copyout(un->sd_fi_log, (void *)arg, 29178 un->sd_fi_buf_len+1, 0); 29179 mutex_exit(&(un->un_fi_mutex)); 29180 29181 if (rval == -1) { 29182 /* 29183 * arg is possibly invalid setting 29184 * it to NULL for return 29185 */ 29186 arg = NULL; 29187 } 29188 break; 29189 } 29190 29191 mutex_exit(SD_MUTEX(un)); 29192 SD_TRACE(SD_LOG_IOERR, un, "sd_faultinjection_ioctl:" 29193 " exit\n"); 29194 } 29195 29196 29197 /* 29198 * Function: sd_injection_log() 29199 * 29200 * Description: This routine adds buff to the already existing injection log 29201 * for retrieval via faultinjection_ioctl for use in fault 29202 * detection and recovery 29203 * 29204 * Arguments: buf - the string to add to the log 29205 */ 29206 29207 static void 29208 sd_injection_log(char *buf, struct sd_lun *un) 29209 { 29210 uint_t len; 29211 29212 ASSERT(un != NULL); 29213 ASSERT(buf != NULL); 29214 29215 mutex_enter(&(un->un_fi_mutex)); 29216 29217 len = min(strlen(buf), 255); 29218 /* Add logged value to Injection log to be returned later */ 29219 if (len + un->sd_fi_buf_len < SD_FI_MAX_BUF) { 29220 uint_t offset = strlen((char *)un->sd_fi_log); 29221 char *destp = (char *)un->sd_fi_log + offset; 29222 int i; 29223 for (i = 0; i < len; i++) { 29224 *destp++ = *buf++; 29225 } 29226 un->sd_fi_buf_len += len; 29227 un->sd_fi_log[un->sd_fi_buf_len] = '\0'; 29228 } 29229 29230 mutex_exit(&(un->un_fi_mutex)); 29231 } 29232 29233 29234 /* 29235 * Function: sd_faultinjection() 29236 * 29237 * Description: This routine takes the pkt and changes its 29238 * content based on error injection scenerio. 
 *
 * Arguments: pktp - packet to be changed
 */

static void
sd_faultinjection(struct scsi_pkt *pktp)
{
	uint_t i;
	struct sd_fi_pkt *fi_pkt;
	struct sd_fi_xb *fi_xb;
	struct sd_fi_un *fi_un;
	struct sd_fi_arq *fi_arq;
	struct buf *bp;
	struct sd_xbuf *xb;
	struct sd_lun *un;

	ASSERT(pktp != NULL);

	/* pull bp, xb and un from pktp */
	bp = (struct buf *)pktp->pkt_private;
	xb = SD_GET_XBUF(bp);
	un = SD_GET_UN(bp);

	ASSERT(un != NULL);

	mutex_enter(SD_MUTEX(un));

	SD_TRACE(SD_LOG_SDTEST, un,
	    "sd_faultinjection: entry Injection from sdintr\n");

	/* if injection is off, return */
	if (sd_fault_injection_on == 0 ||
	    un->sd_fi_fifo_start == un->sd_fi_fifo_end) {
		mutex_exit(SD_MUTEX(un));
		return;
	}

	SD_INFO(SD_LOG_SDTEST, un,
	    "sd_faultinjection: is working for copying\n");

	/* take next set off fifo */
	i = un->sd_fi_fifo_start % SD_FI_MAX_ERROR;

	fi_pkt = un->sd_fi_fifo_pkt[i];
	fi_xb = un->sd_fi_fifo_xb[i];
	fi_un = un->sd_fi_fifo_un[i];
	fi_arq = un->sd_fi_fifo_arq[i];


	/* set variables accordingly */
	/* set pkt if it was on fifo */
	if (fi_pkt != NULL) {
		SD_CONDSET(pktp, pkt, pkt_flags, "pkt_flags");
		SD_CONDSET(*pktp, pkt, pkt_scbp, "pkt_scbp");
		if (fi_pkt->pkt_cdbp != 0xff)
			SD_CONDSET(*pktp, pkt, pkt_cdbp, "pkt_cdbp");
		SD_CONDSET(pktp, pkt, pkt_state, "pkt_state");
		SD_CONDSET(pktp, pkt, pkt_statistics, "pkt_statistics");
		SD_CONDSET(pktp, pkt, pkt_reason, "pkt_reason");

	}
	/* set xb if it was on fifo */
	if (fi_xb != NULL) {
		SD_CONDSET(xb, xb, xb_blkno, "xb_blkno");
		SD_CONDSET(xb, xb, xb_dma_resid, "xb_dma_resid");
		if (fi_xb->xb_retry_count != 0)
			SD_CONDSET(xb, xb, xb_retry_count, "xb_retry_count");
		SD_CONDSET(xb, xb, xb_victim_retry_count,
		    "xb_victim_retry_count");
		SD_CONDSET(xb, xb, xb_sense_status, "xb_sense_status");
		SD_CONDSET(xb, xb, xb_sense_state, "xb_sense_state");
		SD_CONDSET(xb, xb, xb_sense_resid, "xb_sense_resid");

		/* copy in block data from sense */
		/*
		 * if (fi_xb->xb_sense_data[0] != -1) {
		 *	bcopy(fi_xb->xb_sense_data, xb->xb_sense_data,
		 *	    SENSE_LENGTH);
		 * }
		 */
		bcopy(fi_xb->xb_sense_data, xb->xb_sense_data, SENSE_LENGTH);

		/* copy in extended sense codes */
		SD_CONDSET(((struct scsi_extended_sense *)xb->xb_sense_data),
		    xb, es_code, "es_code");
		SD_CONDSET(((struct scsi_extended_sense *)xb->xb_sense_data),
		    xb, es_key, "es_key");
		SD_CONDSET(((struct scsi_extended_sense *)xb->xb_sense_data),
		    xb, es_add_code, "es_add_code");
		SD_CONDSET(((struct scsi_extended_sense *)xb->xb_sense_data),
		    xb, es_qual_code, "es_qual_code");
		struct scsi_extended_sense *esp;
		esp = (struct scsi_extended_sense *)xb->xb_sense_data;
		esp->es_class = CLASS_EXTENDED_SENSE;
	}

	/* set un if it was on fifo */
	if (fi_un != NULL) {
		SD_CONDSET(un->un_sd->sd_inq, un, inq_rmb, "inq_rmb");
		SD_CONDSET(un, un, un_ctype, "un_ctype");
		SD_CONDSET(un, un, un_reset_retry_count,
		    "un_reset_retry_count");
		SD_CONDSET(un, un, un_reservation_type, "un_reservation_type");
		SD_CONDSET(un, un, un_resvd_status, "un_resvd_status");
		SD_CONDSET(un, un, un_f_arq_enabled, "un_f_arq_enabled");
		SD_CONDSET(un, un, un_f_allow_bus_device_reset,
		    "un_f_allow_bus_device_reset");
		SD_CONDSET(un, un, un_f_opt_queueing, "un_f_opt_queueing");

	}

	/* copy in auto request sense if it was on fifo */
	if (fi_arq != NULL) {
		bcopy(fi_arq, pktp->pkt_scbp, sizeof (struct sd_fi_arq));
	}

	/* free structs */
	if (un->sd_fi_fifo_pkt[i] != NULL) {
		kmem_free(un->sd_fi_fifo_pkt[i], sizeof (struct sd_fi_pkt));
	}
	if (un->sd_fi_fifo_xb[i] != NULL) {
		kmem_free(un->sd_fi_fifo_xb[i], sizeof (struct sd_fi_xb));
	}
	if (un->sd_fi_fifo_un[i] != NULL) {
		kmem_free(un->sd_fi_fifo_un[i], sizeof (struct sd_fi_un));
	}
	if (un->sd_fi_fifo_arq[i] != NULL) {
		kmem_free(un->sd_fi_fifo_arq[i], sizeof (struct sd_fi_arq));
	}

	/*
	 * kmem_free() does not guarantee that the pointer is set to NULL.
	 * Since we use these pointers to determine whether values were
	 * set, make sure they are always NULL after the free.
	 */
	un->sd_fi_fifo_pkt[i] = NULL;
	un->sd_fi_fifo_un[i] = NULL;
	un->sd_fi_fifo_xb[i] = NULL;
	un->sd_fi_fifo_arq[i] = NULL;

	un->sd_fi_fifo_start++;

	mutex_exit(SD_MUTEX(un));

	SD_INFO(SD_LOG_SDTEST, un, "sd_faultinjection: exit\n");
}

#endif /* SD_FAULT_INJECTION */

/*
 * This routine is invoked in sd_unit_attach(). Before calling it, the
 * properties in the conf file should already have been processed,
 * including the "hotpluggable" property.
 *
 * The sd driver distinguishes 3 different types of devices: removable
 * media, non-removable media, and hotpluggable. The differences are
 * defined below:
 *
 * 1. Device ID
 *
 *    The device ID of a device is used to identify this device. Refer to
 *    ddi_devid_register(9F).
 *
 *    For a non-removable media disk device that can provide a 0x80 or 0x83
 *    VPD page (refer to the INQUIRY command in the SCSI SPC specification),
 *    a unique device ID is created to identify this device. For other
 *    non-removable media devices, a default device ID is created only if
 *    this device has at least 2 alternate cylinders. Otherwise, this
 *    device has no devid.
 *
 *	-------------------------------------------------------
 *	removable media   hotpluggable  |  Can Have Device ID
 *	-------------------------------------------------------
 *	    false             false     |        Yes
 *	    false             true      |        Yes
 *	    true               x        |        No
 *	-------------------------------------------------------
 *
 *
 * 2. SCSI group 4 commands
 *
 *    In the SCSI specs, only some commands in the group 4 command set can
 *    use 8-byte addresses, which are needed to access storage beyond 2TB.
 *    Other commands have no such capability. Without group 4 support, it
 *    is impossible to make full use of the storage space of a disk with a
 *    capacity larger than 2TB.
 *
 *	-----------------------------------------------
 *	removable media   hotpluggable   LP64  |  Group
 *	-----------------------------------------------
 *	    false             false     false  |   1
 *	    false             false     true   |   4
 *	    false             true      false  |   1
 *	    false             true      true   |   4
 *	    true               x          x    |   5
 *	-----------------------------------------------
 *
 *
 * 3. Check for VTOC Label
 *
 *    If a direct-access disk has no EFI label, sd will check if it has a
 *    valid VTOC label. Now, sd also does that check for removable media
 *    and hotpluggable devices.
 *
 *	--------------------------------------------------------------
 *	Direct-Access   removable media   hotpluggable  |  Check Label
 *	--------------------------------------------------------------
 *	    false           false             false     |      No
 *	    false           false             true      |      No
 *	    false           true              false     |      Yes
 *	    false           true              true      |      Yes
 *	    true             x                  x       |      Yes
 *	--------------------------------------------------------------
 *
 *
 * 4. Building default VTOC label
 *
 *    As section 3 says, sd checks whether some kinds of devices have a
 *    VTOC label. If those devices have no valid VTOC label, sd(7d) will
 *    attempt to create a default VTOC label for them. Currently sd creates
 *    a default VTOC label for all devices on the x86 platform (VTOC_16),
 *    but only for removable media devices on SPARC (VTOC_8).
 *
 *	-----------------------------------------------------------
 *	removable media   hotpluggable   platform  |  Default Label
 *	-----------------------------------------------------------
 *	    false             false       sparc    |      No
 *	    false             true        x86      |      Yes
 *	    false             true        sparc    |      Yes
 *	    true               x            x      |      Yes
 *	-----------------------------------------------------------
 *
 *
 * 5. Supported blocksizes of target devices
 *
 *    Sd supports non-512-byte blocksizes for removable media devices only.
 *    For other devices, only a 512-byte blocksize is supported. This may
 *    change in the near future because some RAID devices require a
 *    non-512-byte blocksize.
 *
 *	-----------------------------------------------------------
 *	removable media   hotpluggable  |  non-512-byte blocksize
 *	-----------------------------------------------------------
 *	    false             false     |          No
 *	    false             true      |          No
 *	    true               x        |          Yes
 *	-----------------------------------------------------------
 *
 *
 * 6. Automatic mount & unmount
 *
 *    The sd(7d) driver provides the DKIOCREMOVABLE ioctl, which is used to
 *    query whether a device is a removable media device. It returns 1 for
 *    removable media devices, and 0 for others.
 *
 *    The automatic mounting subsystem should distinguish between the types
 *    of devices and apply automounting policies to each.
 *
 *
 * 7. fdisk partition management
 *
 *    fdisk is the traditional partitioning method on the x86 platform.
 *    The sd(7d) driver supports fdisk partitions only on x86; on SPARC,
 *    sd doesn't support fdisk partitions at all. Note: pcfs(7fs) can
 *    recognize fdisk partitions on both the x86 and SPARC platforms.
 *
 *	-----------------------------------------------------------
 *	platform   removable media   USB/1394  |  fdisk supported
 *	-----------------------------------------------------------
 *	  x86            X              X      |      true
 *	-----------------------------------------------------------
 *	  sparc          X              X      |      false
 *	-----------------------------------------------------------
 *
 *
 * 8. MBOOT/MBR
 *
 *    Although sd(7d) doesn't support fdisk on the SPARC platform, it does
 *    support reading/writing the mboot for removable media devices on
 *    SPARC.
 *
 *	-----------------------------------------------------------
 *	platform   removable media   USB/1394  |  mboot supported
 *	-----------------------------------------------------------
 *	  x86            X              X      |      true
 *	-----------------------------------------------------------
 *	  sparc        false          false    |      false
 *	  sparc        false          true     |      true
 *	  sparc        true           false    |      true
 *	  sparc        true           true     |      true
 *	-----------------------------------------------------------
 *
 *
 * 9. Error handling during opening a device
 *
 *    If opening a disk device fails, an errno is returned. For some kinds
 *    of errors, a different errno is returned depending on whether the
 *    device is a removable media device. This brings USB/1394 hard disks
 *    in line with expected hard disk behavior. It is not expected that
 *    this breaks any application.
 *
 *	------------------------------------------------------
 *	removable media   hotpluggable  |  errno
 *	------------------------------------------------------
 *	    false             false     |  EIO
 *	    false             true      |  EIO
 *	    true               x        |  ENXIO
 *	------------------------------------------------------
 *
 *
 * 10. ioctls: DKIOCEJECT, CDROMEJECT
 *
 *    These ioctls are applicable only to removable media devices.
 *
 *	-----------------------------------------------------------
 *	removable media   hotpluggable  |  DKIOCEJECT, CDROMEJECT
 *	-----------------------------------------------------------
 *	    false             false     |      No
 *	    false             true      |      No
 *	    true               x        |      Yes
 *	-----------------------------------------------------------
 *
 *
 * 11. Kstats for partitions
 *
 *    sd creates partition kstats for non-removable media devices. USB and
 *    Firewire hard disks now have partition kstats.
 *
 *	------------------------------------------------------
 *	removable media   hotpluggable  |  kstat
 *	------------------------------------------------------
 *	    false             false     |  Yes
 *	    false             true      |  Yes
 *	    true               x        |  No
 *	------------------------------------------------------
 *
 *
 * 12. Removable media & hotpluggable properties
 *
 *    The sd driver creates a "removable-media" property for removable
 *    media devices. A parent nexus driver creates a "hotpluggable"
 *    property if it supports hotplugging.
 *
 *	---------------------------------------------------------------------
 *	removable media   hotpluggable  |  "removable-media"  "hotpluggable"
 *	---------------------------------------------------------------------
 *	    false             false     |        No                No
 *	    false             true      |        No                Yes
 *	    true              false     |        Yes               No
 *	    true              true      |        Yes               Yes
 *	---------------------------------------------------------------------
 *
 *
 * 13. Power Management
 *
 *    sd only power manages removable media devices or devices that support
 *    LOG_SENSE or have a "pm-capable" property (PSARC/2002/250).
 *
 *    A parent nexus that supports hotplugging can also set "pm-capable"
 *    if the disk can be power managed.
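 *
 *    As an illustrative sketch only (the child dip name is hypothetical;
 *    ddi_prop_update_int(9F) is the standard property interface), a parent
 *    nexus might advertise this with:
 *
 *	(void) ddi_prop_update_int(DDI_DEV_T_NONE, child_dip,
 *	    "pm-capable", 1);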
 *
 *	------------------------------------------------------------
 *	removable media   hotpluggable   pm-capable  |  power manage
 *	------------------------------------------------------------
 *	    false             false        false     |      No
 *	    false             false        true      |      Yes
 *	    false             true         false     |      No
 *	    false             true         true      |      Yes
 *	    true               x             x       |      Yes
 *	------------------------------------------------------------
 *
 *    USB and Firewire hard disks can now be power managed independently
 *    of the framebuffer.
 *
 *
 * 14. Support for USB disks with capacity larger than 1TB
 *
 *    Currently, sd doesn't permit a fixed disk device with a capacity
 *    larger than 1TB to be used in a 32-bit operating system environment.
 *    However, sd doesn't apply that check to removable media devices;
 *    instead, it assumes that removable media devices cannot have a
 *    capacity larger than 1TB. Therefore, using those devices on a 32-bit
 *    system is only partially supported, which can cause some unexpected
 *    results.
 *
 *	---------------------------------------------------------------------
 *	removable media   USB/1394  |  Capacity > 1TB  |  Used in 32-bit env
 *	---------------------------------------------------------------------
 *	    false            false  |       true       |        No
 *	    false            true   |       true       |        No
 *	    true             false  |       true       |        Yes
 *	    true             true   |       true       |        Yes
 *	---------------------------------------------------------------------
 *
 *
 * 15. Check write-protection at open time
 *
 *    When a removable media device is opened for writing without the
 *    NDELAY flag, sd checks whether the device is writable. An attempt to
 *    open a write-protected device for writing without the NDELAY flag is
 *    aborted.
 *
 *	------------------------------------------------------------
 *	removable media   USB/1394  |  WP Check
 *	------------------------------------------------------------
 *	    false            false  |    No
 *	    false            true   |    No
 *	    true             false  |    Yes
 *	    true             true   |    Yes
 *	------------------------------------------------------------
 *
 *
 * 16. syslog when a corrupted VTOC is encountered
 *
 *    Currently, if an invalid VTOC is encountered, sd prints a syslog
 *    message only for fixed SCSI disks.
 *
 *	------------------------------------------------------------
 *	removable media   USB/1394  |  print syslog
 *	------------------------------------------------------------
 *	    false            false  |     Yes
 *	    false            true   |     No
 *	    true             false  |     No
 *	    true             true   |     No
 *	------------------------------------------------------------
 */
static void
sd_set_unit_attributes(struct sd_lun *un, dev_info_t *devi)
{
	int	pm_capable_prop;

	ASSERT(un->un_sd);
	ASSERT(un->un_sd->sd_inq);

	/*
	 * Enable SYNC CACHE support for all devices.
	 */
	un->un_f_sync_cache_supported = TRUE;

	/*
	 * Set the sync cache required flag to false. This ensures
	 * that no SYNC CACHE is sent when there have been no writes.
	 */
	un->un_f_sync_cache_required = FALSE;

	if (un->un_sd->sd_inq->inq_rmb) {
		/*
		 * The media of this device is removable, and for this kind
		 * of device it is possible to change the medium after
		 * opening it. Thus we must support this operation.
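		 *
		 * (Userland can discover this via the DKIOCREMOVABLE ioctl
		 * described in section 6 of the comment above; a minimal,
		 * hypothetical sketch:
		 *
		 *     int rm;
		 *     ioctl(fd, DKIOCREMOVABLE, &rm);
		 *
		 * where rm != 0 indicates removable media.)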
		 */
		un->un_f_has_removable_media = TRUE;

		/*
		 * Support non-512-byte blocksizes for removable media
		 * devices.
		 */
		un->un_f_non_devbsize_supported = TRUE;

		/*
		 * Assume that all removable media devices support DOOR_LOCK.
		 */
		un->un_f_doorlock_supported = TRUE;

		/*
		 * A removable media device may be opened with the NDELAY
		 * flag when there is no media in the drive, in which case
		 * we don't care whether the device is writable. Without
		 * the NDELAY flag, we need to check whether the media is
		 * write-protected.
		 */
		un->un_f_chk_wp_open = TRUE;

		/*
		 * Need to start a SCSI watch thread to monitor media state;
		 * when media is being inserted or ejected, notify syseventd.
		 */
		un->un_f_monitor_media_state = TRUE;

		/*
		 * Some devices don't support the START_STOP_UNIT command,
		 * so we should check whether a device supports it before
		 * sending it.
		 */
		un->un_f_check_start_stop = TRUE;

		/*
		 * Support the eject media ioctls:
		 * FDEJECT, DKIOCEJECT, CDROMEJECT
		 */
		un->un_f_eject_media_supported = TRUE;

		/*
		 * Because many removable-media devices don't support
		 * LOG_SENSE, we cannot use that command to check whether
		 * a removable media device supports power management.
		 * We assume that they support power management via the
		 * START_STOP_UNIT command and can be spun up and down
		 * without limitations.
		 */
		un->un_f_pm_supported = TRUE;

		/*
		 * Need to create a zero-length (boolean) property
		 * "removable-media" for removable media devices.
		 * Note that the return value of the property is not being
		 * checked: if the property cannot be created, we do not
		 * want the attach to fail altogether. This is consistent
		 * with other property creation in attach.
		 */
		(void) ddi_prop_create(DDI_DEV_T_NONE, devi,
		    DDI_PROP_CANSLEEP, "removable-media", NULL, 0);

	} else {
		/*
		 * Create a device ID for the device.
		 */
		un->un_f_devid_supported = TRUE;

		/*
		 * Spin up non-removable-media devices once they are
		 * attached.
		 */
		un->un_f_attach_spinup = TRUE;

		/*
		 * According to the SCSI specification, sense data comes in
		 * two formats: fixed format and descriptor format. At
		 * present, we don't support descriptor format sense data
		 * for removable media.
		 */
		if (SD_INQUIRY(un)->inq_dtype == DTYPE_DIRECT) {
			un->un_f_descr_format_supported = TRUE;
		}

		/*
		 * kstats are created only for non-removable media devices.
		 *
		 * Set this in sd.conf to 0 in order to disable kstats.
		 * The default is 1, so they are enabled by default.
		 */
		un->un_f_pkstats_enabled = (ddi_prop_get_int(DDI_DEV_T_ANY,
		    SD_DEVINFO(un), DDI_PROP_DONTPASS,
		    "enable-partition-kstats", 1));

		/*
		 * Check whether the HBA has set the "pm-capable" property.
		 * If "pm-capable" exists and is non-zero then we can
		 * power manage the device without checking the start/stop
		 * cycle count log sense page.
		 *
		 * If "pm-capable" exists and is SD_PM_CAPABLE_FALSE (0),
		 * then we should not power manage the device.
		 *
		 * If "pm-capable" doesn't exist then pm_capable_prop will
		 * be set to SD_PM_CAPABLE_UNDEFINED (-1). In this case,
		 * sd will check the start/stop cycle count log sense page
		 * and power manage the device if the cycle count limit has
		 * not been exceeded.
		 */
		pm_capable_prop = ddi_prop_get_int(DDI_DEV_T_ANY, devi,
		    DDI_PROP_DONTPASS, "pm-capable", SD_PM_CAPABLE_UNDEFINED);
		if (pm_capable_prop == SD_PM_CAPABLE_UNDEFINED) {
			un->un_f_log_sense_supported = TRUE;
		} else {
			/*
			 * The "pm-capable" property exists.
			 *
			 * Convert "TRUE" values of pm_capable_prop to
			 * SD_PM_CAPABLE_TRUE (1) to make them easier to
			 * check later. "TRUE" values are any values except
			 * SD_PM_CAPABLE_FALSE (0) and
			 * SD_PM_CAPABLE_UNDEFINED (-1).
			 */
			if (pm_capable_prop == SD_PM_CAPABLE_FALSE) {
				un->un_f_log_sense_supported = FALSE;
			} else {
				un->un_f_pm_supported = TRUE;
			}

			SD_INFO(SD_LOG_ATTACH_DETACH, un,
			    "sd_unit_attach: un:0x%p pm-capable "
			    "property set to %d.\n", un, un->un_f_pm_supported);
		}
	}

	if (un->un_f_is_hotpluggable) {

		/*
		 * Have to watch hotpluggable devices as well, since
		 * that's the only way for userland applications to
		 * detect hot removal while the device is busy/mounted.
		 */
		un->un_f_monitor_media_state = TRUE;

		un->un_f_check_start_stop = TRUE;

	}
}

/*
 * sd_tg_rdwr:
 *	Provides rdwr access for cmlb via sd_tgops. start_block is in
 *	system block size units; reqlength is in bytes.
 */
static int
sd_tg_rdwr(dev_info_t *devi, uchar_t cmd, void *bufaddr,
    diskaddr_t start_block, size_t reqlength, void *tg_cookie)
{
	struct sd_lun *un;
	int path_flag = (int)(uintptr_t)tg_cookie;
	char *dkl = NULL;
	diskaddr_t real_addr = start_block;
	diskaddr_t first_byte, end_block;

	size_t	buffer_size = reqlength;
	int rval = 0;
	diskaddr_t	cap;
	uint32_t	lbasize;
	sd_ssc_t	*ssc;

	un = ddi_get_soft_state(sd_state, ddi_get_instance(devi));
	if (un == NULL)
		return (ENXIO);

	if (cmd != TG_READ && cmd != TG_WRITE)
		return (EINVAL);

	ssc = sd_ssc_init(un);
	mutex_enter(SD_MUTEX(un));
	if (un->un_f_tgt_blocksize_is_valid == FALSE) {
		mutex_exit(SD_MUTEX(un));
		rval = sd_send_scsi_READ_CAPACITY(ssc, (uint64_t *)&cap,
		    &lbasize, path_flag);
		if (rval != 0)
			goto done1;
		mutex_enter(SD_MUTEX(un));
		sd_update_block_info(un, lbasize, cap);
		if (un->un_f_tgt_blocksize_is_valid == FALSE) {
			mutex_exit(SD_MUTEX(un));
			rval = EIO;
			goto done;
		}
	}

	if (NOT_DEVBSIZE(un)) {
		/*
		 * sys_blocksize != tgt_blocksize: need to re-adjust
		 * blkno and save the index to the beginning of dk_label.
		 */
		first_byte = SD_SYSBLOCKS2BYTES(un, start_block);
		real_addr = first_byte / un->un_tgt_blocksize;

		end_block = (first_byte + reqlength +
		    un->un_tgt_blocksize - 1) / un->un_tgt_blocksize;

		/* round up buffer size to multiple of target block size */
		buffer_size = (end_block - real_addr) * un->un_tgt_blocksize;

		SD_TRACE(SD_LOG_IO_PARTITION, un, "sd_tg_rdwr",
		    "label_addr: 0x%x allocation size: 0x%x\n",
		    real_addr, buffer_size);

		if (((first_byte % un->un_tgt_blocksize) != 0) ||
		    (reqlength % un->un_tgt_blocksize) != 0)
			/* the request is not aligned */
			dkl = kmem_zalloc(buffer_size, KM_SLEEP);
	}

	/*
	 * The MMC standard allows READ CAPACITY to be
	 * inaccurate by a bounded amount (in the interest of
	 * response latency). As a result, failed READs are
	 * commonplace (due to the reading of metadata and not
	 * data). Depending on the per-Vendor/drive Sense data,
	 * the failed READ can cause many (unnecessary) retries.
	 */

	if (ISCD(un) && (cmd == TG_READ) &&
	    (un->un_f_blockcount_is_valid == TRUE) &&
	    ((start_block == (un->un_blockcount - 1)) ||
	    (start_block == (un->un_blockcount - 2)))) {
		path_flag = SD_PATH_DIRECT_PRIORITY;
	}

	mutex_exit(SD_MUTEX(un));
	if (cmd == TG_READ) {
		rval = sd_send_scsi_READ(ssc, (dkl != NULL) ? dkl : bufaddr,
		    buffer_size, real_addr, path_flag);
		if (dkl != NULL)
			bcopy(dkl + SD_TGTBYTEOFFSET(un, start_block,
			    real_addr), bufaddr, reqlength);
	} else {
		if (dkl) {
			rval = sd_send_scsi_READ(ssc, dkl, buffer_size,
			    real_addr, path_flag);
			if (rval) {
				goto done1;
			}
			bcopy(bufaddr, dkl + SD_TGTBYTEOFFSET(un, start_block,
			    real_addr), reqlength);
		}
		rval = sd_send_scsi_WRITE(ssc, (dkl != NULL) ? dkl : bufaddr,
		    buffer_size, real_addr, path_flag);
	}

done1:
	if (dkl != NULL)
		kmem_free(dkl, buffer_size);

	if (rval != 0) {
		if (rval == EIO)
			sd_ssc_assessment(ssc, SD_FMT_STATUS_CHECK);
		else
			sd_ssc_assessment(ssc, SD_FMT_IGNORE);
	}
done:
	sd_ssc_fini(ssc);
	return (rval);
}


static int
sd_tg_getinfo(dev_info_t *devi, int cmd, void *arg, void *tg_cookie)
{

	struct sd_lun *un;
	diskaddr_t	cap;
	uint32_t	lbasize;
	int		path_flag = (int)(uintptr_t)tg_cookie;
	int		ret = 0;

	un = ddi_get_soft_state(sd_state, ddi_get_instance(devi));
	if (un == NULL)
		return (ENXIO);

	switch (cmd) {
	case TG_GETPHYGEOM:
	case TG_GETVIRTGEOM:
	case TG_GETCAPACITY:
	case TG_GETBLOCKSIZE:
		mutex_enter(SD_MUTEX(un));

		if ((un->un_f_blockcount_is_valid == TRUE) &&
		    (un->un_f_tgt_blocksize_is_valid == TRUE)) {
			cap = un->un_blockcount;
			lbasize = un->un_tgt_blocksize;
			mutex_exit(SD_MUTEX(un));
		} else {
			sd_ssc_t	*ssc;
			mutex_exit(SD_MUTEX(un));
			ssc = sd_ssc_init(un);
			ret = sd_send_scsi_READ_CAPACITY(ssc, (uint64_t *)&cap,
			    &lbasize, path_flag);
			if (ret != 0) {
				if (ret == EIO)
					sd_ssc_assessment(ssc,
					    SD_FMT_STATUS_CHECK);
				else
					sd_ssc_assessment(ssc,
					    SD_FMT_IGNORE);
				sd_ssc_fini(ssc);
				return (ret);
			}
			sd_ssc_fini(ssc);
			mutex_enter(SD_MUTEX(un));
			sd_update_block_info(un, lbasize, cap);
			if ((un->un_f_blockcount_is_valid == FALSE) ||
			    (un->un_f_tgt_blocksize_is_valid == FALSE)) {
				mutex_exit(SD_MUTEX(un));
				return (EIO);
			}
			mutex_exit(SD_MUTEX(un));
		}

		if (cmd == TG_GETCAPACITY) {
			*(diskaddr_t *)arg = cap;
			return (0);
		}

		if (cmd == TG_GETBLOCKSIZE) {
			*(uint32_t *)arg = lbasize;
			return (0);
		}

		if (cmd == TG_GETPHYGEOM)
			ret = sd_get_physical_geometry(un, (cmlb_geom_t *)arg,
			    cap, lbasize, path_flag);
		else
			/* TG_GETVIRTGEOM */
			ret = sd_get_virtual_geometry(un,
			    (cmlb_geom_t *)arg, cap, lbasize);

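		/*
		 * The geometry helpers above fill in the caller-supplied
		 * cmlb_geom_t at arg using the cap/lbasize values computed
		 * earlier; their status is handed straight back to cmlb.
		 */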
		return (ret);

	case TG_GETATTR:
		mutex_enter(SD_MUTEX(un));
		((tg_attribute_t *)arg)->media_is_writable =
		    un->un_f_mmc_writable_media;
		mutex_exit(SD_MUTEX(un));
		return (0);
	default:
		return (ENOTTY);

	}
}

/*
 * Function: sd_ssc_ereport_post
 *
 * Description: Called when the sd driver needs to post an ereport.
 *
 * Context: Kernel thread or interrupt context.
 */
static void
sd_ssc_ereport_post(sd_ssc_t *ssc, enum sd_driver_assessment drv_assess)
{
	int uscsi_path_instance = 0;
	uchar_t	uscsi_pkt_reason;
	uint32_t uscsi_pkt_state;
	uint32_t uscsi_pkt_statistics;
	uint64_t uscsi_ena;
	uchar_t op_code;
	uint8_t *sensep;
	union scsi_cdb *cdbp;
	uint_t cdblen = 0;
	uint_t senlen = 0;
	struct sd_lun *un;
	dev_info_t *dip;
	char *devid;
	int ssc_invalid_flags = SSC_FLAGS_INVALID_PKT_REASON |
	    SSC_FLAGS_INVALID_STATUS |
	    SSC_FLAGS_INVALID_SENSE |
	    SSC_FLAGS_INVALID_DATA;
	char assessment[16];

	ASSERT(ssc != NULL);
	ASSERT(ssc->ssc_uscsi_cmd != NULL);
	ASSERT(ssc->ssc_uscsi_info != NULL);

	un = ssc->ssc_un;
	ASSERT(un != NULL);

	dip = un->un_sd->sd_dev;

	/*
	 * Get the devid:
	 *	devid will only be passed to non-transport error reports.
	 */
	devid = DEVI(dip)->devi_devid_str;

	/*
	 * If we are syncing or dumping, the command will not be executed,
	 * so we bypass this situation.
	 */
	if (ddi_in_panic() || (un->un_state == SD_STATE_SUSPENDED) ||
	    (un->un_state == SD_STATE_DUMPING))
		return;

	uscsi_pkt_reason = ssc->ssc_uscsi_info->ui_pkt_reason;
	uscsi_path_instance = ssc->ssc_uscsi_cmd->uscsi_path_instance;
	uscsi_pkt_state = ssc->ssc_uscsi_info->ui_pkt_state;
	uscsi_pkt_statistics = ssc->ssc_uscsi_info->ui_pkt_statistics;
	uscsi_ena = ssc->ssc_uscsi_info->ui_ena;

	sensep = (uint8_t *)ssc->ssc_uscsi_cmd->uscsi_rqbuf;
	cdbp = (union scsi_cdb *)ssc->ssc_uscsi_cmd->uscsi_cdb;

	/* In rare cases, e.g. DOORLOCK, the cdb could be NULL */
	if (cdbp == NULL) {
		scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
		    "sd_ssc_ereport_post: empty cdb\n");
		return;
	}

	op_code = cdbp->scc_cmd;

	cdblen = (int)ssc->ssc_uscsi_cmd->uscsi_cdblen;
	senlen = (int)(ssc->ssc_uscsi_cmd->uscsi_rqlen -
	    ssc->ssc_uscsi_cmd->uscsi_rqresid);

	if (senlen > 0)
		ASSERT(sensep != NULL);

	/*
	 * Map drv_assess to the corresponding assessment string.
	 * SD_FM_DRV_FATAL will be mapped to "fail" or "fatal" depending
	 * on the sense key returned.
	 */
	switch (drv_assess) {
	case SD_FM_DRV_RECOVERY:
		(void) sprintf(assessment, "%s", "recovered");
		break;
	case SD_FM_DRV_RETRY:
		(void) sprintf(assessment, "%s", "retry");
		break;
	case SD_FM_DRV_NOTICE:
		(void) sprintf(assessment, "%s", "info");
		break;
	case SD_FM_DRV_FATAL:
	default:
		(void) sprintf(assessment, "%s", "unknown");
	}
	/*
	 * If drv_assess == SD_FM_DRV_RECOVERY, this is a recovered
	 * command; we will post ereport.io.scsi.cmd.disk.recovered.
	 * driver-assessment will always be "recovered" here.
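	 *
	 * As a rough sketch (field values below are illustrative only,
	 * not taken from a real event), the posted ereport then looks
	 * like:
	 *
	 *	ereport.io.scsi.cmd.disk.recovered
	 *	    driver-assessment = "recovered"
	 *	    op-code = 0x28  (e.g. a READ(10))
	 *	    cdb, pkt-reason, pkt-state, pkt-stats = ...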
	 */
	if (drv_assess == SD_FM_DRV_RECOVERY) {
		scsi_fm_ereport_post(un->un_sd, uscsi_path_instance,
		    "cmd.disk.recovered", uscsi_ena, devid, DDI_NOSLEEP,
		    FM_VERSION, DATA_TYPE_UINT8, FM_EREPORT_VERS0,
		    "driver-assessment", DATA_TYPE_STRING, assessment,
		    "op-code", DATA_TYPE_UINT8, op_code,
		    "cdb", DATA_TYPE_UINT8_ARRAY,
		    cdblen, ssc->ssc_uscsi_cmd->uscsi_cdb,
		    "pkt-reason", DATA_TYPE_UINT8, uscsi_pkt_reason,
		    "pkt-state", DATA_TYPE_UINT32, uscsi_pkt_state,
		    "pkt-stats", DATA_TYPE_UINT32, uscsi_pkt_statistics,
		    NULL);
		return;
	}

	/*
	 * If there is unexpected/undecodable data, we should post
	 * ereport.io.scsi.cmd.disk.dev.uderr.
	 * driver-assessment will be set based on the parameter drv_assess.
	 * SSC_FLAGS_INVALID_SENSE - invalid sense data sent back.
	 * SSC_FLAGS_INVALID_PKT_REASON - invalid pkt-reason encountered.
	 * SSC_FLAGS_INVALID_STATUS - invalid stat-code encountered.
	 * SSC_FLAGS_INVALID_DATA - invalid data sent back.
	 */
	if (ssc->ssc_flags & ssc_invalid_flags) {
		if (ssc->ssc_flags & SSC_FLAGS_INVALID_SENSE) {
			scsi_fm_ereport_post(un->un_sd, uscsi_path_instance,
			    "cmd.disk.dev.uderr", uscsi_ena, devid, DDI_NOSLEEP,
			    FM_VERSION, DATA_TYPE_UINT8, FM_EREPORT_VERS0,
			    "driver-assessment", DATA_TYPE_STRING,
			    drv_assess == SD_FM_DRV_FATAL ?
			    "fail" : assessment,
			    "op-code", DATA_TYPE_UINT8, op_code,
			    "cdb", DATA_TYPE_UINT8_ARRAY,
			    cdblen, ssc->ssc_uscsi_cmd->uscsi_cdb,
			    "pkt-reason", DATA_TYPE_UINT8, uscsi_pkt_reason,
			    "pkt-state", DATA_TYPE_UINT32, uscsi_pkt_state,
			    "pkt-stats", DATA_TYPE_UINT32,
			    uscsi_pkt_statistics,
			    "stat-code", DATA_TYPE_UINT8,
			    ssc->ssc_uscsi_cmd->uscsi_status,
			    "un-decode-info", DATA_TYPE_STRING,
			    ssc->ssc_info,
			    "un-decode-value", DATA_TYPE_UINT8_ARRAY,
			    senlen, sensep,
			    NULL);
		} else {
			/*
			 * For other types of invalid data, the
			 * un-decode-value field is left empty because the
			 * undecodable content can be seen from the upper
			 * level payload or inside un-decode-info.
			 */
			scsi_fm_ereport_post(un->un_sd, uscsi_path_instance,
			    "cmd.disk.dev.uderr", uscsi_ena, devid, DDI_NOSLEEP,
			    FM_VERSION, DATA_TYPE_UINT8, FM_EREPORT_VERS0,
			    "driver-assessment", DATA_TYPE_STRING,
			    drv_assess == SD_FM_DRV_FATAL ?
			    "fail" : assessment,
			    "op-code", DATA_TYPE_UINT8, op_code,
			    "cdb", DATA_TYPE_UINT8_ARRAY,
			    cdblen, ssc->ssc_uscsi_cmd->uscsi_cdb,
			    "pkt-reason", DATA_TYPE_UINT8, uscsi_pkt_reason,
			    "pkt-state", DATA_TYPE_UINT32, uscsi_pkt_state,
			    "pkt-stats", DATA_TYPE_UINT32,
			    uscsi_pkt_statistics,
			    "stat-code", DATA_TYPE_UINT8,
			    ssc->ssc_uscsi_cmd->uscsi_status,
			    "un-decode-info", DATA_TYPE_STRING,
			    ssc->ssc_info,
			    "un-decode-value", DATA_TYPE_UINT8_ARRAY,
			    0, NULL,
			    NULL);
		}
		ssc->ssc_flags &= ~ssc_invalid_flags;
		return;
	}

	if (uscsi_pkt_reason != CMD_CMPLT ||
	    (ssc->ssc_flags & SSC_FLAGS_TRAN_ABORT)) {
		/*
		 * pkt-reason != CMD_CMPLT, or SSC_FLAGS_TRAN_ABORT was
		 * set inside sd_start_cmds due to errors (bad packet or
		 * fatal transport error); we treat it as a transport
		 * error and post ereport.io.scsi.cmd.disk.tran.
		 * driver-assessment will be set based on drv_assess.
		 * We set devid to NULL because it is a transport error.
		 */
		if (ssc->ssc_flags & SSC_FLAGS_TRAN_ABORT)
			ssc->ssc_flags &= ~SSC_FLAGS_TRAN_ABORT;

		scsi_fm_ereport_post(un->un_sd, uscsi_path_instance,
		    "cmd.disk.tran", uscsi_ena, NULL, DDI_NOSLEEP, FM_VERSION,
		    DATA_TYPE_UINT8, FM_EREPORT_VERS0,
		    "driver-assessment", DATA_TYPE_STRING,
		    drv_assess == SD_FM_DRV_FATAL ? "fail" : assessment,
		    "op-code", DATA_TYPE_UINT8, op_code,
		    "cdb", DATA_TYPE_UINT8_ARRAY,
		    cdblen, ssc->ssc_uscsi_cmd->uscsi_cdb,
		    "pkt-reason", DATA_TYPE_UINT8, uscsi_pkt_reason,
		    "pkt-state", DATA_TYPE_UINT32, uscsi_pkt_state,
		    "pkt-stats", DATA_TYPE_UINT32, uscsi_pkt_statistics,
		    NULL);
	} else {
		/*
		 * If we got here, we have a completed command, and we need
		 * to further investigate the sense data to see what kind
		 * of ereport we should post.
		 * Post ereport.io.scsi.cmd.disk.dev.rqs.merr
		 * if sense-key == 0x3.
		 * Post ereport.io.scsi.cmd.disk.dev.rqs.derr otherwise.
		 * driver-assessment will be set based on the parameter
		 * drv_assess.
		 */
		if (senlen > 0) {
			/*
			 * Here we have sense data available.
			 */
			uint8_t sense_key;
			sense_key = scsi_sense_key(sensep);
			if (sense_key == 0x3) {
				/*
				 * sense-key == 0x3 (medium error);
				 * driver-assessment should be "fatal" if
				 * drv_assess is SD_FM_DRV_FATAL.
				 */
				scsi_fm_ereport_post(un->un_sd,
				    uscsi_path_instance,
				    "cmd.disk.dev.rqs.merr",
				    uscsi_ena, devid, DDI_NOSLEEP, FM_VERSION,
				    DATA_TYPE_UINT8, FM_EREPORT_VERS0,
				    "driver-assessment",
				    DATA_TYPE_STRING,
				    drv_assess == SD_FM_DRV_FATAL ?
				    "fatal" : assessment,
				    "op-code",
				    DATA_TYPE_UINT8, op_code,
				    "cdb",
				    DATA_TYPE_UINT8_ARRAY, cdblen,
				    ssc->ssc_uscsi_cmd->uscsi_cdb,
				    "pkt-reason",
				    DATA_TYPE_UINT8, uscsi_pkt_reason,
				    "pkt-state",
				    DATA_TYPE_UINT32, uscsi_pkt_state,
				    "pkt-stats",
				    DATA_TYPE_UINT32,
				    uscsi_pkt_statistics,
				    "stat-code",
				    DATA_TYPE_UINT8,
				    ssc->ssc_uscsi_cmd->uscsi_status,
				    "key",
				    DATA_TYPE_UINT8,
				    scsi_sense_key(sensep),
				    "asc",
				    DATA_TYPE_UINT8,
				    scsi_sense_asc(sensep),
				    "ascq",
				    DATA_TYPE_UINT8,
				    scsi_sense_ascq(sensep),
				    "sense-data",
				    DATA_TYPE_UINT8_ARRAY,
				    senlen, sensep,
				    "lba",
				    DATA_TYPE_UINT64,
				    ssc->ssc_uscsi_info->ui_lba,
				    NULL);
			} else {
				/*
				 * If sense-key == 0x4 (hardware
				 * error), driver-assessment should
				 * be "fatal" if drv_assess is
				 * SD_FM_DRV_FATAL.
				 */
				scsi_fm_ereport_post(un->un_sd,
				    uscsi_path_instance,
				    "cmd.disk.dev.rqs.derr",
				    uscsi_ena, devid, DDI_NOSLEEP,
				    FM_VERSION,
				    DATA_TYPE_UINT8, FM_EREPORT_VERS0,
				    "driver-assessment",
				    DATA_TYPE_STRING,
				    drv_assess == SD_FM_DRV_FATAL ?
				    (sense_key == 0x4 ?
				    "fatal" : "fail") : assessment,
				    "op-code",
				    DATA_TYPE_UINT8, op_code,
				    "cdb",
				    DATA_TYPE_UINT8_ARRAY, cdblen,
				    ssc->ssc_uscsi_cmd->uscsi_cdb,
				    "pkt-reason",
				    DATA_TYPE_UINT8, uscsi_pkt_reason,
				    "pkt-state",
				    DATA_TYPE_UINT32, uscsi_pkt_state,
				    "pkt-stats",
				    DATA_TYPE_UINT32,
				    uscsi_pkt_statistics,
				    "stat-code",
				    DATA_TYPE_UINT8,
				    ssc->ssc_uscsi_cmd->uscsi_status,
				    "key",
				    DATA_TYPE_UINT8,
				    scsi_sense_key(sensep),
				    "asc",
				    DATA_TYPE_UINT8,
				    scsi_sense_asc(sensep),
				    "ascq",
				    DATA_TYPE_UINT8,
				    scsi_sense_ascq(sensep),
				    "sense-data",
				    DATA_TYPE_UINT8_ARRAY,
				    senlen, sensep,
				    NULL);
			}
		} else {
			/*
			 * For stat_code == STATUS_GOOD, this is not a
			 * hardware error.
			 */
			if (ssc->ssc_uscsi_cmd->uscsi_status == STATUS_GOOD)
				return;

			/*
			 * Post ereport.io.scsi.cmd.disk.dev.serr if we got
			 * the stat-code but sense data is unavailable.
			 * driver-assessment will be set based on the
			 * parameter drv_assess.
			 */
			scsi_fm_ereport_post(un->un_sd,
			    uscsi_path_instance, "cmd.disk.dev.serr", uscsi_ena,
			    devid, DDI_NOSLEEP, FM_VERSION, DATA_TYPE_UINT8,
			    FM_EREPORT_VERS0,
			    "driver-assessment", DATA_TYPE_STRING,
			    drv_assess == SD_FM_DRV_FATAL ? "fail" : assessment,
			    "op-code", DATA_TYPE_UINT8, op_code,
			    "cdb",
			    DATA_TYPE_UINT8_ARRAY,
			    cdblen, ssc->ssc_uscsi_cmd->uscsi_cdb,
			    "pkt-reason",
			    DATA_TYPE_UINT8, uscsi_pkt_reason,
			    "pkt-state",
			    DATA_TYPE_UINT32, uscsi_pkt_state,
			    "pkt-stats",
			    DATA_TYPE_UINT32, uscsi_pkt_statistics,
			    "stat-code",
			    DATA_TYPE_UINT8,
			    ssc->ssc_uscsi_cmd->uscsi_status,
			    NULL);
		}
	}
}

/*
 * Function: sd_ssc_extract_info
 *
 * Description: Extract information available to help generate ereports.
 *
 * Context: Kernel thread or interrupt context.
 */
static void
sd_ssc_extract_info(sd_ssc_t *ssc, struct sd_lun *un, struct scsi_pkt *pktp,
    struct buf *bp, struct sd_xbuf *xp)
{
	size_t senlen = 0;
	union scsi_cdb *cdbp;
	int path_instance;
	/*
	 * Need the scsi_cdb_size array to determine the cdb length.
	 */
	extern uchar_t scsi_cdb_size[];

	ASSERT(un != NULL);
	ASSERT(pktp != NULL);
	ASSERT(bp != NULL);
	ASSERT(xp != NULL);
	ASSERT(ssc != NULL);
	ASSERT(mutex_owned(SD_MUTEX(un)));

	/*
	 * Transfer the cdb buffer pointer here.
	 */
	cdbp = (union scsi_cdb *)pktp->pkt_cdbp;

	ssc->ssc_uscsi_cmd->uscsi_cdblen = scsi_cdb_size[GETGROUP(cdbp)];
	ssc->ssc_uscsi_cmd->uscsi_cdb = (caddr_t)cdbp;

	/*
	 * Transfer the sense data buffer pointer if sense data is
	 * available; calculate the sense data length first.
	 */
	if ((xp->xb_sense_state & STATE_XARQ_DONE) ||
	    (xp->xb_sense_state & STATE_ARQ_DONE)) {
		/*
		 * For the arq case, we will enter here.
		 */
		if (xp->xb_sense_state & STATE_XARQ_DONE) {
			senlen = MAX_SENSE_LENGTH - xp->xb_sense_resid;
		} else {
			senlen = SENSE_LENGTH;
		}
	} else {
		/*
		 * For the non-arq case, we will enter this branch.
		 */
		if (SD_GET_PKT_STATUS(pktp) == STATUS_CHECK &&
		    (xp->xb_sense_state & STATE_XFERRED_DATA)) {
			senlen = SENSE_LENGTH - xp->xb_sense_resid;
		}

	}

	ssc->ssc_uscsi_cmd->uscsi_rqlen = (senlen & 0xff);
	ssc->ssc_uscsi_cmd->uscsi_rqresid = 0;
	ssc->ssc_uscsi_cmd->uscsi_rqbuf = (caddr_t)xp->xb_sense_data;

	ssc->ssc_uscsi_cmd->uscsi_status = ((*(pktp)->pkt_scbp) & STATUS_MASK);

	/*
	 * Only transfer path_instance when the scsi_pkt was properly
	 * allocated.
	 */
	path_instance = pktp->pkt_path_instance;
	if (scsi_pkt_allocated_correctly(pktp) && path_instance)
		ssc->ssc_uscsi_cmd->uscsi_path_instance = path_instance;
	else
		ssc->ssc_uscsi_cmd->uscsi_path_instance = 0;

	/*
	 * Copy in the other fields we may need when posting an ereport.
	 */
	ssc->ssc_uscsi_info->ui_pkt_reason = pktp->pkt_reason;
	ssc->ssc_uscsi_info->ui_pkt_state = pktp->pkt_state;
	ssc->ssc_uscsi_info->ui_pkt_statistics = pktp->pkt_statistics;
	ssc->ssc_uscsi_info->ui_lba = (uint64_t)SD_GET_BLKNO(bp);

	/*
	 * Do not create an ena for a command that completed successfully
	 * with no sense data, so that a successful command is not wrongly
	 * recognized as recovered.
	 */
	if ((pktp->pkt_reason == CMD_CMPLT) &&
	    (ssc->ssc_uscsi_cmd->uscsi_status == STATUS_GOOD) &&
	    (senlen == 0)) {
		return;
	}

	/*
	 * To associate the ereports of a single command execution flow, we
	 * need a shared ena for a specific command.
	 */
	if (xp->xb_ena == 0)
		xp->xb_ena = fm_ena_generate(0, FM_ENA_FMT1);
	ssc->ssc_uscsi_info->ui_ena = xp->xb_ena;
}
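
/*
 * For orientation, a minimal sketch (drawn from the callers in this file,
 * e.g. sd_tg_rdwr() above; not a new interface) of the sd_ssc_t lifecycle
 * that feeds sd_ssc_extract_info() and sd_ssc_ereport_post():
 *
 *	sd_ssc_t *ssc = sd_ssc_init(un);
 *	rval = sd_send_scsi_READ_CAPACITY(ssc, (uint64_t *)&cap,
 *	    &lbasize, path_flag);
 *	if (rval == EIO)
 *		sd_ssc_assessment(ssc, SD_FMT_STATUS_CHECK);
 *	else if (rval != 0)
 *		sd_ssc_assessment(ssc, SD_FMT_IGNORE);
 *	sd_ssc_fini(ssc);
 *
 * sd_ssc_extract_info() records the cdb, sense data and pkt state of a
 * completed command, and sd_ssc_ereport_post() turns that state into the
 * ereport.io.scsi.cmd.disk.* classes described above.
 */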