/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

/*
 * SCSI disk target driver.
 */
#include <sys/scsi/scsi.h>
#include <sys/dkbad.h>
#include <sys/dklabel.h>
#include <sys/dkio.h>
#include <sys/fdio.h>
#include <sys/cdio.h>
#include <sys/mhd.h>
#include <sys/vtoc.h>
#include <sys/dktp/fdisk.h>
#include <sys/kstat.h>
#include <sys/vtrace.h>
#include <sys/note.h>
#include <sys/thread.h>
#include <sys/proc.h>
#include <sys/efi_partition.h>
#include <sys/var.h>
#include <sys/aio_req.h>

#ifdef __lock_lint
#define	_LP64
#define	__amd64
#endif

#if (defined(__fibre))
/* Note: is there a leadville version of the following? */
#include <sys/fc4/fcal_linkapp.h>
#endif
#include <sys/taskq.h>
#include <sys/uuid.h>
#include <sys/byteorder.h>
#include <sys/sdt.h>

#include "sd_xbuf.h"

#include <sys/scsi/targets/sddef.h>
#include <sys/cmlb.h>
#include <sys/sysevent/eventdefs.h>
#include <sys/sysevent/dev.h>

#include <sys/fm/protocol.h>

/*
 * Loadable module info.
 */
#if (defined(__fibre))
#define	SD_MODULE_NAME	"SCSI SSA/FCAL Disk Driver"
char _depends_on[]	= "misc/scsi misc/cmlb drv/fcp";
#else
#define	SD_MODULE_NAME	"SCSI Disk Driver"
char _depends_on[]	= "misc/scsi misc/cmlb";
#endif

/*
 * Define the interconnect type, to allow the driver to distinguish
 * between parallel SCSI (sd) and fibre channel (ssd) behaviors.
 *
 * This is really for backward compatibility. In the future, the driver
 * should actually check the "interconnect-type" property as reported by
 * the HBA; however at present this property is not defined by all HBAs,
 * so we will use this #define (1) to permit the driver to run in
 * backward-compatibility mode; and (2) to print a notification message
 * if an FC HBA does not support the "interconnect-type" property. The
 * behavior of the driver will be to assume parallel SCSI behaviors unless
 * the "interconnect-type" property is defined by the HBA **AND** has a
 * value of either INTERCONNECT_FIBRE, INTERCONNECT_SSA, or
 * INTERCONNECT_FABRIC, in which case the driver will assume Fibre
 * Channel behaviors (as per the old ssd). (Note that the
 * INTERCONNECT_1394 and INTERCONNECT_USB types are not supported and
 * will result in the driver assuming parallel SCSI behaviors.)
 *
 * (see common/sys/scsi/impl/services.h)
 *
 * Note: For ssd semantics, don't use INTERCONNECT_FABRIC as the default,
 * since some FC HBAs may already support that, and there is some code in
 * the driver that already looks for it. Using INTERCONNECT_FABRIC as the
 * default would confuse that code, and besides, things should work fine
 * anyway if the FC HBA already reports INTERCONNECT_FABRIC for the
 * "interconnect-type" property.
 */
#if (defined(__fibre))
#define	SD_DEFAULT_INTERCONNECT_TYPE	SD_INTERCONNECT_FIBRE
#else
#define	SD_DEFAULT_INTERCONNECT_TYPE	SD_INTERCONNECT_PARALLEL
#endif
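
#if 0
/*
 * Illustrative sketch only (never compiled): the shape of the
 * "interconnect-type" check described above, assuming the
 * scsi_ifgetcap(9F) interface and the INTERCONNECT_* values from
 * <sys/scsi/impl/services.h>. The authoritative logic lives in
 * sd_unit_attach(); the function name and the collapsed return values
 * here are purely for illustration (the SD_INTERCONNECT_* constants
 * are defined further down in this file).
 */
static int
sd_interconnect_sketch(struct sd_lun *un)
{
	switch (scsi_ifgetcap(SD_ADDRESS(un), "interconnect-type", -1)) {
	case INTERCONNECT_FIBRE:
	case INTERCONNECT_SSA:
	case INTERCONNECT_FABRIC:
		/* Fibre Channel semantics, as per the old ssd. */
		return (SD_INTERCONNECT_FIBRE);
	default:
		/* Includes HBAs that do not define the property at all. */
		return (SD_DEFAULT_INTERCONNECT_TYPE);
	}
}
#endif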

/*
 * The name of the driver, established from the module name in _init.
 */
static char *sd_label = NULL;

/*
 * Driver name is unfortunately prefixed on some driver.conf properties.
 */
#if (defined(__fibre))
#define	sd_max_xfer_size	ssd_max_xfer_size
#define	sd_config_list	ssd_config_list
static char *sd_max_xfer_size = "ssd_max_xfer_size";
static char *sd_config_list = "ssd-config-list";
#else
static char *sd_max_xfer_size = "sd_max_xfer_size";
static char *sd_config_list = "sd-config-list";
#endif
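
/*
 * A minimal driver.conf sketch (illustrative only; the exact property
 * names and values are whatever sd_set_properties() below accepts in
 * this version), showing how per-device overrides are supplied through
 * the list named above:
 *
 *	sd-config-list = "SEAGATE ST39103FC", "throttle-max:64, disksort:false";
 *
 * The first string is matched against the inquiry vid/pid as described
 * for sd_disk_table below.
 */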

/*
 * Driver global variables
 */

#if (defined(__fibre))
/*
 * These #defines are to avoid namespace collisions that occur because this
 * code is currently used to compile two separate driver modules: sd and ssd.
 * All global variables need to be treated this way (even if declared static)
 * in order to allow the debugger to resolve the names properly.
 * It is anticipated that in the near future the ssd module will be obsoleted,
 * at which time this namespace issue should go away.
 */
#define	sd_state	ssd_state
#define	sd_io_time	ssd_io_time
#define	sd_failfast_enable	ssd_failfast_enable
#define	sd_ua_retry_count	ssd_ua_retry_count
#define	sd_report_pfa	ssd_report_pfa
#define	sd_max_throttle	ssd_max_throttle
#define	sd_min_throttle	ssd_min_throttle
#define	sd_rot_delay	ssd_rot_delay

#define	sd_retry_on_reservation_conflict	\
	ssd_retry_on_reservation_conflict
#define	sd_reinstate_resv_delay	ssd_reinstate_resv_delay
#define	sd_resv_conflict_name	ssd_resv_conflict_name

#define	sd_component_mask	ssd_component_mask
#define	sd_level_mask	ssd_level_mask
#define	sd_debug_un	ssd_debug_un
#define	sd_error_level	ssd_error_level

#define	sd_xbuf_active_limit	ssd_xbuf_active_limit
#define	sd_xbuf_reserve_limit	ssd_xbuf_reserve_limit

#define	sd_tr	ssd_tr
#define	sd_reset_throttle_timeout	ssd_reset_throttle_timeout
#define	sd_qfull_throttle_timeout	ssd_qfull_throttle_timeout
#define	sd_qfull_throttle_enable	ssd_qfull_throttle_enable
#define	sd_check_media_time	ssd_check_media_time
#define	sd_wait_cmds_complete	ssd_wait_cmds_complete
#define	sd_label_mutex	ssd_label_mutex
#define	sd_detach_mutex	ssd_detach_mutex
#define	sd_log_buf	ssd_log_buf
#define	sd_log_mutex	ssd_log_mutex

#define	sd_disk_table	ssd_disk_table
#define	sd_disk_table_size	ssd_disk_table_size
#define	sd_sense_mutex	ssd_sense_mutex
#define	sd_cdbtab	ssd_cdbtab

#define	sd_cb_ops	ssd_cb_ops
#define	sd_ops	ssd_ops
#define	sd_additional_codes	ssd_additional_codes
#define	sd_tgops	ssd_tgops

#define	sd_minor_data	ssd_minor_data
#define	sd_minor_data_efi	ssd_minor_data_efi

#define	sd_tq	ssd_tq
#define	sd_wmr_tq	ssd_wmr_tq
#define	sd_taskq_name	ssd_taskq_name
#define	sd_wmr_taskq_name	ssd_wmr_taskq_name
#define	sd_taskq_minalloc	ssd_taskq_minalloc
#define	sd_taskq_maxalloc	ssd_taskq_maxalloc

#define	sd_dump_format_string	ssd_dump_format_string

#define	sd_iostart_chain	ssd_iostart_chain
#define	sd_iodone_chain	ssd_iodone_chain

#define	sd_pm_idletime	ssd_pm_idletime

#define	sd_force_pm_supported	ssd_force_pm_supported

#define	sd_dtype_optical_bind	ssd_dtype_optical_bind

#define	sd_ssc_init	ssd_ssc_init
#define	sd_ssc_send	ssd_ssc_send
#define	sd_ssc_fini	ssd_ssc_fini
#define	sd_ssc_assessment	ssd_ssc_assessment
#define	sd_ssc_post	ssd_ssc_post
#define	sd_ssc_print	ssd_ssc_print
#define	sd_ssc_ereport_post	ssd_ssc_ereport_post
#define	sd_ssc_set_info	ssd_ssc_set_info
#define	sd_ssc_extract_info	ssd_ssc_extract_info

#endif

#ifdef SDDEBUG
int sd_force_pm_supported = 0;
#endif /* SDDEBUG */

void *sd_state = NULL;
int sd_io_time = SD_IO_TIME;
int sd_failfast_enable = 1;
int sd_ua_retry_count = SD_UA_RETRY_COUNT;
int sd_report_pfa = 1;
int sd_max_throttle = SD_MAX_THROTTLE;
int sd_min_throttle = SD_MIN_THROTTLE;
int sd_rot_delay = 4; /* Default 4ms Rotation delay */
int sd_qfull_throttle_enable = TRUE;

int sd_retry_on_reservation_conflict = 1;
int sd_reinstate_resv_delay = SD_REINSTATE_RESV_DELAY;
_NOTE(SCHEME_PROTECTS_DATA("safe sharing", sd_reinstate_resv_delay))
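
/*
 * Since the tunables above are plain kernel variables, they can
 * typically be overridden from /etc/system on a running system
 * (illustrative; use the prefix of the module actually in use):
 *
 *	set sd:sd_max_throttle = 32
 *	set ssd:ssd_max_throttle = 32
 */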

static int sd_dtype_optical_bind = -1;

/* Note: the following is not a bug, it really is "sd_" and not "ssd_" */
static char *sd_resv_conflict_name = "sd_retry_on_reservation_conflict";

/*
 * Global data for debug logging. To enable debug printing, sd_component_mask
 * and sd_level_mask should be set to the desired bit patterns as outlined in
 * sddef.h.
 */
uint_t sd_component_mask = 0x0;
uint_t sd_level_mask = 0x0;
struct sd_lun *sd_debug_un = NULL;
uint_t sd_error_level = SCSI_ERR_RETRYABLE;

/* Note: these may go away in the future... */
static uint32_t sd_xbuf_active_limit = 512;
static uint32_t sd_xbuf_reserve_limit = 16;

static struct sd_resv_reclaim_request sd_tr = { NULL, NULL, NULL, 0, 0, 0 };

/*
 * Timer value used to reset the throttle after it has been reduced
 * (typically in response to TRAN_BUSY or STATUS_QFULL)
 */
static int sd_reset_throttle_timeout = SD_RESET_THROTTLE_TIMEOUT;
static int sd_qfull_throttle_timeout = SD_QFULL_THROTTLE_TIMEOUT;

/*
 * Interval value associated with the media change scsi watch.
 */
static int sd_check_media_time = 3000000;

/*
 * Wait value used for in-progress operations during a DDI_SUSPEND
 */
static int sd_wait_cmds_complete = SD_WAIT_CMDS_COMPLETE;

/*
 * sd_label_mutex protects a static buffer used in the disk label
 * component of the driver
 */
static kmutex_t sd_label_mutex;

/*
 * sd_detach_mutex protects un_layer_count, un_detach_count, and
 * un_opens_in_progress in the sd_lun structure.
 */
static kmutex_t sd_detach_mutex;

_NOTE(MUTEX_PROTECTS_DATA(sd_detach_mutex,
	sd_lun::{un_layer_count un_detach_count un_opens_in_progress}))

/*
 * Global buffer and mutex for debug logging
 */
static char sd_log_buf[1024];
static kmutex_t sd_log_mutex;

/*
 * Structs and globals for recording attached lun information.
 * This maintains a chain; each node in the chain represents a SCSI
 * controller, and records the number of luns attached to each target
 * connected to that controller.
 * For parallel SCSI devices only.
 */
struct sd_scsi_hba_tgt_lun {
	struct sd_scsi_hba_tgt_lun	*next;
	dev_info_t			*pdip;
	int				nlun[NTARGETS_WIDE];
};

/*
 * Flags to indicate whether a lun is attached or detached
 */
#define	SD_SCSI_LUN_ATTACH	0
#define	SD_SCSI_LUN_DETACH	1

static kmutex_t sd_scsi_target_lun_mutex;
static struct sd_scsi_hba_tgt_lun *sd_scsi_target_lun_head = NULL;

_NOTE(MUTEX_PROTECTS_DATA(sd_scsi_target_lun_mutex,
    sd_scsi_hba_tgt_lun::next sd_scsi_hba_tgt_lun::pdip))

_NOTE(MUTEX_PROTECTS_DATA(sd_scsi_target_lun_mutex,
    sd_scsi_target_lun_head))

/*
 * "Smart" Probe Caching structs, globals, #defines, etc.
 * For parallel SCSI and non-self-identifying devices only.
 */

/*
 * The following resources and routines are implemented to support
 * "smart" probing, which caches the scsi_probe() results in an array,
 * in order to help avoid long probe times.
 */
struct sd_scsi_probe_cache {
	struct sd_scsi_probe_cache	*next;
	dev_info_t			*pdip;
	int				cache[NTARGETS_WIDE];
};

static kmutex_t sd_scsi_probe_cache_mutex;
static struct sd_scsi_probe_cache *sd_scsi_probe_cache_head = NULL;

/*
 * Really we only need protection on the head of the linked list, but
 * better safe than sorry.
 */
_NOTE(MUTEX_PROTECTS_DATA(sd_scsi_probe_cache_mutex,
    sd_scsi_probe_cache::next sd_scsi_probe_cache::pdip))

_NOTE(MUTEX_PROTECTS_DATA(sd_scsi_probe_cache_mutex,
    sd_scsi_probe_cache_head))
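
#if 0
/*
 * Illustrative sketch only (never compiled): how a cached scsi_probe()
 * result might be looked up for a given parent HBA node and target
 * before probing again. The real logic, including entry allocation and
 * invalidation, lives in sd_scsi_probe_with_cache() and
 * sd_scsi_clear_probe_cache(); the function name here is hypothetical.
 */
static int
sd_probe_cache_lookup_sketch(dev_info_t *pdip, int tgt)
{
	struct sd_scsi_probe_cache *cp;
	int rval = -1;	/* no entry; caller falls back to scsi_probe() */

	mutex_enter(&sd_scsi_probe_cache_mutex);
	for (cp = sd_scsi_probe_cache_head; cp != NULL; cp = cp->next) {
		if (cp->pdip == pdip) {
			rval = cp->cache[tgt];	/* cached probe result */
			break;
		}
	}
	mutex_exit(&sd_scsi_probe_cache_mutex);
	return (rval);
}
#endif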

/*
 * Vendor specific data name property declarations
 */
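
/*
 * Note that the sd_tunables initializers below are positional; see the
 * sd_tunables definition in sddef.h for the authoritative field list.
 * The leading fields are, in order: throttle, controller type, not-ready
 * retries, busy retries, reset retries, reservation release time,
 * minimum throttle, disksort-disabled flag, and lun-reset-enabled flag.
 * A zero placeholder is ignored unless the corresponding SD_CONF_BSET_*
 * flag is set in the sd_disk_table entry that references the struct.
 */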
#if defined(__fibre) || defined(__i386) || defined(__amd64)

static sd_tunables seagate_properties = {
	SEAGATE_THROTTLE_VALUE, 0, 0, 0, 0, 0, 0, 0, 0
};

static sd_tunables fujitsu_properties = {
	FUJITSU_THROTTLE_VALUE, 0, 0, 0, 0, 0, 0, 0, 0
};

static sd_tunables ibm_properties = {
	IBM_THROTTLE_VALUE, 0, 0, 0, 0, 0, 0, 0, 0
};

static sd_tunables purple_properties = {
	PURPLE_THROTTLE_VALUE, 0, 0,
	PURPLE_BUSY_RETRIES,
	PURPLE_RESET_RETRY_COUNT,
	PURPLE_RESERVE_RELEASE_TIME,
	0, 0, 0
};

static sd_tunables sve_properties = {
	SVE_THROTTLE_VALUE, 0, 0,
	SVE_BUSY_RETRIES,
	SVE_RESET_RETRY_COUNT,
	SVE_RESERVE_RELEASE_TIME,
	SVE_MIN_THROTTLE_VALUE,
	SVE_DISKSORT_DISABLED_FLAG,
	0
};

static sd_tunables maserati_properties = {
	0, 0, 0, 0, 0, 0, 0,
	MASERATI_DISKSORT_DISABLED_FLAG,
	MASERATI_LUN_RESET_ENABLED_FLAG
};

static sd_tunables pirus_properties = {
	PIRUS_THROTTLE_VALUE, 0,
	PIRUS_NRR_COUNT,
	PIRUS_BUSY_RETRIES,
	PIRUS_RESET_RETRY_COUNT,
	0,
	PIRUS_MIN_THROTTLE_VALUE,
	PIRUS_DISKSORT_DISABLED_FLAG,
	PIRUS_LUN_RESET_ENABLED_FLAG
};

#endif

#if (defined(__sparc) && !defined(__fibre)) || \
	(defined(__i386) || defined(__amd64))

static sd_tunables elite_properties = {
	ELITE_THROTTLE_VALUE, 0, 0, 0, 0, 0, 0, 0, 0
};

static sd_tunables st31200n_properties = {
	ST31200N_THROTTLE_VALUE, 0, 0, 0, 0, 0, 0, 0, 0
};

#endif /* Fibre or not */

static sd_tunables lsi_properties_scsi = {
	LSI_THROTTLE_VALUE, 0,
	LSI_NOTREADY_RETRIES,
	0, 0, 0, 0, 0, 0
};

static sd_tunables symbios_properties = {
	SYMBIOS_THROTTLE_VALUE, 0,
	SYMBIOS_NOTREADY_RETRIES,
	0, 0, 0, 0, 0, 0
};

static sd_tunables lsi_properties = {
	0, 0,
	LSI_NOTREADY_RETRIES,
	0, 0, 0, 0, 0, 0
};

static sd_tunables lsi_oem_properties = {
	0, 0,
	LSI_OEM_NOTREADY_RETRIES,
	0, 0, 0, 0, 0, 0,
	1
};


#if (defined(SD_PROP_TST))

#define	SD_TST_CTYPE_VAL	CTYPE_CDROM
#define	SD_TST_THROTTLE_VAL	16
#define	SD_TST_NOTREADY_VAL	12
#define	SD_TST_BUSY_VAL		60
#define	SD_TST_RST_RETRY_VAL	36
#define	SD_TST_RSV_REL_TIME	60

static sd_tunables tst_properties = {
	SD_TST_THROTTLE_VAL,
	SD_TST_CTYPE_VAL,
	SD_TST_NOTREADY_VAL,
	SD_TST_BUSY_VAL,
	SD_TST_RST_RETRY_VAL,
	SD_TST_RSV_REL_TIME,
	0, 0, 0
};
#endif

/* This is similar to the ANSI toupper implementation */
#define	SD_TOUPPER(C)	(((C) >= 'a' && (C) <= 'z') ? (C) - 'a' + 'A' : (C))

/*
 * Static Driver Configuration Table
 *
 * This is the table of disks which need throttle adjustment (or, perhaps
 * something else as defined by the flags at a future time). device_id
 * is a string consisting of concatenated vid (vendor), pid (product/model)
 * and revision strings as defined in the scsi_inquiry structure. Offsets of
 * the parts of the string are as defined by the sizes in the scsi_inquiry
 * structure. Device type is searched as far as the device_id string is
 * defined. Flags defines which values are to be set in the driver from the
 * properties list.
 *
 * Entries below which begin and end with a "*" are a special case.
 * These do not have a specific vendor, and the string which follows
 * can appear anywhere in the 16 byte PID portion of the inquiry data.
 *
 * Entries below which begin and end with a " " (blank) are a special
 * case. The comparison function will treat multiple consecutive blanks
 * as equivalent to a single blank. For example, this causes a
 * sd_disk_table entry of " NEC CDROM " to match a device's id string
 * of "NEC       CDROM".
 *
 * Note: The MD21 controller type has been obsoleted.
 *	 ST318202F is a Legacy device
 *	 MAM3182FC, MAM3364FC, MAM3738FC do not appear to have ever been
 *	 made with an FC connection. The entries here are a legacy.
 */
static sd_disk_config_t sd_disk_table[] = {
#if defined(__fibre) || defined(__i386) || defined(__amd64)
	{ "SEAGATE ST34371FC", SD_CONF_BSET_THROTTLE, &seagate_properties },
	{ "SEAGATE ST19171FC", SD_CONF_BSET_THROTTLE, &seagate_properties },
	{ "SEAGATE ST39102FC", SD_CONF_BSET_THROTTLE, &seagate_properties },
	{ "SEAGATE ST39103FC", SD_CONF_BSET_THROTTLE, &seagate_properties },
	{ "SEAGATE ST118273F", SD_CONF_BSET_THROTTLE, &seagate_properties },
	{ "SEAGATE ST318202F", SD_CONF_BSET_THROTTLE, &seagate_properties },
	{ "SEAGATE ST318203F", SD_CONF_BSET_THROTTLE, &seagate_properties },
	{ "SEAGATE ST136403F", SD_CONF_BSET_THROTTLE, &seagate_properties },
	{ "SEAGATE ST318304F", SD_CONF_BSET_THROTTLE, &seagate_properties },
	{ "SEAGATE ST336704F", SD_CONF_BSET_THROTTLE, &seagate_properties },
	{ "SEAGATE ST373405F", SD_CONF_BSET_THROTTLE, &seagate_properties },
	{ "SEAGATE ST336605F", SD_CONF_BSET_THROTTLE, &seagate_properties },
	{ "SEAGATE ST336752F", SD_CONF_BSET_THROTTLE, &seagate_properties },
	{ "SEAGATE ST318452F", SD_CONF_BSET_THROTTLE, &seagate_properties },
	{ "FUJITSU MAG3091F",  SD_CONF_BSET_THROTTLE, &fujitsu_properties },
	{ "FUJITSU MAG3182F",  SD_CONF_BSET_THROTTLE, &fujitsu_properties },
	{ "FUJITSU MAA3182F",  SD_CONF_BSET_THROTTLE, &fujitsu_properties },
	{ "FUJITSU MAF3364F",  SD_CONF_BSET_THROTTLE, &fujitsu_properties },
	{ "FUJITSU MAL3364F",  SD_CONF_BSET_THROTTLE, &fujitsu_properties },
	{ "FUJITSU MAL3738F",  SD_CONF_BSET_THROTTLE, &fujitsu_properties },
	{ "FUJITSU MAM3182FC", SD_CONF_BSET_THROTTLE, &fujitsu_properties },
	{ "FUJITSU MAM3364FC", SD_CONF_BSET_THROTTLE, &fujitsu_properties },
	{ "FUJITSU MAM3738FC", SD_CONF_BSET_THROTTLE, &fujitsu_properties },
	{ "IBM     DDYFT1835", SD_CONF_BSET_THROTTLE, &ibm_properties },
	{ "IBM     DDYFT3695", SD_CONF_BSET_THROTTLE, &ibm_properties },
	{ "IBM     IC35LF2D2", SD_CONF_BSET_THROTTLE, &ibm_properties },
	{ "IBM     IC35LF2PR", SD_CONF_BSET_THROTTLE, &ibm_properties },
	{ "IBM     1724-100",  SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "IBM     1726-2xx",  SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "IBM     1726-22x",  SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "IBM     1726-4xx",  SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "IBM     1726-42x",  SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "IBM     1726-3xx",  SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "IBM     3526",      SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "IBM     3542",      SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "IBM     3552",      SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "IBM     1722",      SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "IBM     1742",      SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "IBM     1815",      SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "IBM     FAStT",     SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "IBM     1814",      SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "IBM     1814-200",  SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "IBM     1818",      SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "DELL    MD3000",    SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "DELL    MD3000i",   SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "LSI     INF",       SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "ENGENIO INF",       SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "SGI     TP",        SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "SGI     IS",        SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "*CSM100_*",         SD_CONF_BSET_NRR_COUNT |
			SD_CONF_BSET_CACHE_IS_NV, &lsi_oem_properties },
	{ "*CSM200_*",         SD_CONF_BSET_NRR_COUNT |
			SD_CONF_BSET_CACHE_IS_NV, &lsi_oem_properties },
	{ "Fujitsu SX300",     SD_CONF_BSET_THROTTLE,  &lsi_oem_properties },
	{ "LSI",               SD_CONF_BSET_NRR_COUNT, &lsi_properties },
	{ "SUN     T3", SD_CONF_BSET_THROTTLE |
			SD_CONF_BSET_BSY_RETRY_COUNT|
			SD_CONF_BSET_RST_RETRIES|
			SD_CONF_BSET_RSV_REL_TIME,
		&purple_properties },
	{ "SUN     SESS01", SD_CONF_BSET_THROTTLE |
		SD_CONF_BSET_BSY_RETRY_COUNT|
		SD_CONF_BSET_RST_RETRIES|
		SD_CONF_BSET_RSV_REL_TIME|
		SD_CONF_BSET_MIN_THROTTLE|
		SD_CONF_BSET_DISKSORT_DISABLED,
		&sve_properties },
	{ "SUN     T4", SD_CONF_BSET_THROTTLE |
			SD_CONF_BSET_BSY_RETRY_COUNT|
			SD_CONF_BSET_RST_RETRIES|
			SD_CONF_BSET_RSV_REL_TIME,
		&purple_properties },
	{ "SUN     SVE01", SD_CONF_BSET_DISKSORT_DISABLED |
		SD_CONF_BSET_LUN_RESET_ENABLED,
		&maserati_properties },
	{ "SUN     SE6920", SD_CONF_BSET_THROTTLE |
		SD_CONF_BSET_NRR_COUNT|
		SD_CONF_BSET_BSY_RETRY_COUNT|
		SD_CONF_BSET_RST_RETRIES|
		SD_CONF_BSET_MIN_THROTTLE|
		SD_CONF_BSET_DISKSORT_DISABLED|
		SD_CONF_BSET_LUN_RESET_ENABLED,
		&pirus_properties },
	{ "SUN     SE6940", SD_CONF_BSET_THROTTLE |
		SD_CONF_BSET_NRR_COUNT|
		SD_CONF_BSET_BSY_RETRY_COUNT|
		SD_CONF_BSET_RST_RETRIES|
		SD_CONF_BSET_MIN_THROTTLE|
		SD_CONF_BSET_DISKSORT_DISABLED|
		SD_CONF_BSET_LUN_RESET_ENABLED,
		&pirus_properties },
	{ "SUN     StorageTek 6920", SD_CONF_BSET_THROTTLE |
		SD_CONF_BSET_NRR_COUNT|
		SD_CONF_BSET_BSY_RETRY_COUNT|
		SD_CONF_BSET_RST_RETRIES|
		SD_CONF_BSET_MIN_THROTTLE|
		SD_CONF_BSET_DISKSORT_DISABLED|
		SD_CONF_BSET_LUN_RESET_ENABLED,
		&pirus_properties },
	{ "SUN     StorageTek 6940", SD_CONF_BSET_THROTTLE |
		SD_CONF_BSET_NRR_COUNT|
		SD_CONF_BSET_BSY_RETRY_COUNT|
		SD_CONF_BSET_RST_RETRIES|
		SD_CONF_BSET_MIN_THROTTLE|
		SD_CONF_BSET_DISKSORT_DISABLED|
		SD_CONF_BSET_LUN_RESET_ENABLED,
		&pirus_properties },
	{ "SUN     PSX1000", SD_CONF_BSET_THROTTLE |
		SD_CONF_BSET_NRR_COUNT|
		SD_CONF_BSET_BSY_RETRY_COUNT|
		SD_CONF_BSET_RST_RETRIES|
		SD_CONF_BSET_MIN_THROTTLE|
		SD_CONF_BSET_DISKSORT_DISABLED|
		SD_CONF_BSET_LUN_RESET_ENABLED,
		&pirus_properties },
	{ "SUN     SE6330", SD_CONF_BSET_THROTTLE |
		SD_CONF_BSET_NRR_COUNT|
		SD_CONF_BSET_BSY_RETRY_COUNT|
		SD_CONF_BSET_RST_RETRIES|
		SD_CONF_BSET_MIN_THROTTLE|
		SD_CONF_BSET_DISKSORT_DISABLED|
		SD_CONF_BSET_LUN_RESET_ENABLED,
		&pirus_properties },
	{ "SUN     STK6580_6780", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "STK     OPENstorage", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "STK     OpenStorage", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "STK     BladeCtlr",   SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "STK     FLEXLINE",    SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "SYMBIOS", SD_CONF_BSET_NRR_COUNT, &symbios_properties },
#endif /* fibre or NON-sparc platforms */
#if ((defined(__sparc) && !defined(__fibre)) || \
	(defined(__i386) || defined(__amd64)))
	{ "SEAGATE ST42400N", SD_CONF_BSET_THROTTLE, &elite_properties },
	{ "SEAGATE ST31200N", SD_CONF_BSET_THROTTLE, &st31200n_properties },
	{ "SEAGATE ST41600N", SD_CONF_BSET_TUR_CHECK, NULL },
	{ "CONNER  CP30540",  SD_CONF_BSET_NOCACHE,  NULL },
	{ "*SUN0104*", SD_CONF_BSET_FAB_DEVID, NULL },
	{ "*SUN0207*", SD_CONF_BSET_FAB_DEVID, NULL },
	{ "*SUN0327*", SD_CONF_BSET_FAB_DEVID, NULL },
	{ "*SUN0340*", SD_CONF_BSET_FAB_DEVID, NULL },
	{ "*SUN0424*", SD_CONF_BSET_FAB_DEVID, NULL },
	{ "*SUN0669*", SD_CONF_BSET_FAB_DEVID, NULL },
	{ "*SUN1.0G*", SD_CONF_BSET_FAB_DEVID, NULL },
	{ "SYMBIOS INF-01-00 ", SD_CONF_BSET_FAB_DEVID, NULL },
	{ "SYMBIOS", SD_CONF_BSET_THROTTLE|SD_CONF_BSET_NRR_COUNT,
		&symbios_properties },
	{ "LSI", SD_CONF_BSET_THROTTLE | SD_CONF_BSET_NRR_COUNT,
		&lsi_properties_scsi },
#if defined(__i386) || defined(__amd64)
	{ " NEC CD-ROM DRIVE:260 ", (SD_CONF_BSET_PLAYMSF_BCD
				| SD_CONF_BSET_READSUB_BCD
				| SD_CONF_BSET_READ_TOC_ADDR_BCD
				| SD_CONF_BSET_NO_READ_HEADER
				| SD_CONF_BSET_READ_CD_XD4), NULL },

	{ " NEC CD-ROM DRIVE:270 ", (SD_CONF_BSET_PLAYMSF_BCD
				| SD_CONF_BSET_READSUB_BCD
				| SD_CONF_BSET_READ_TOC_ADDR_BCD
				| SD_CONF_BSET_NO_READ_HEADER
				| SD_CONF_BSET_READ_CD_XD4), NULL },
#endif /* __i386 || __amd64 */
#endif /* sparc NON-fibre or NON-sparc platforms */

#if (defined(SD_PROP_TST))
	{ "VENDOR  PRODUCT ", (SD_CONF_BSET_THROTTLE
				| SD_CONF_BSET_CTYPE
				| SD_CONF_BSET_NRR_COUNT
				| SD_CONF_BSET_FAB_DEVID
				| SD_CONF_BSET_NOCACHE
				| SD_CONF_BSET_BSY_RETRY_COUNT
				| SD_CONF_BSET_PLAYMSF_BCD
				| SD_CONF_BSET_READSUB_BCD
				| SD_CONF_BSET_READ_TOC_TRK_BCD
				| SD_CONF_BSET_READ_TOC_ADDR_BCD
				| SD_CONF_BSET_NO_READ_HEADER
				| SD_CONF_BSET_READ_CD_XD4
				| SD_CONF_BSET_RST_RETRIES
				| SD_CONF_BSET_RSV_REL_TIME
				| SD_CONF_BSET_TUR_CHECK), &tst_properties },
#endif
};

static const int sd_disk_table_size =
	sizeof (sd_disk_table) / sizeof (sd_disk_config_t);


#define	SD_INTERCONNECT_PARALLEL	0
#define	SD_INTERCONNECT_FABRIC		1
#define	SD_INTERCONNECT_FIBRE		2
#define	SD_INTERCONNECT_SSA		3
#define	SD_INTERCONNECT_SATA		4
#define	SD_IS_PARALLEL_SCSI(un)		\
	((un)->un_interconnect_type == SD_INTERCONNECT_PARALLEL)
#define	SD_IS_SERIAL(un)		\
	((un)->un_interconnect_type == SD_INTERCONNECT_SATA)

/*
 * Definitions used by device id registration routines
 */
#define	VPD_HEAD_OFFSET		3	/* size of head for vpd page */
#define	VPD_PAGE_LENGTH		3	/* offset for page length data */
#define	VPD_MODE_PAGE		1	/* offset into vpd pg for "page code" */

static kmutex_t sd_sense_mutex = {0};

/*
 * Macros for updates of the driver state
 */
#define	New_state(un, s)	\
	(un)->un_last_state = (un)->un_state, (un)->un_state = (s)
#define	Restore_state(un)	\
	{ uchar_t tmp = (un)->un_last_state; New_state((un), tmp); }
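
/*
 * For example (illustrative), a suspend path can record the transition
 * with New_state(un, SD_STATE_SUSPENDED) and undo it later with
 * Restore_state(un). Note that only the single previous state is kept
 * in un_last_state, so nested transitions do not unwind.
 */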

static struct sd_cdbinfo sd_cdbtab[] = {
	{ CDB_GROUP0, 0x00,        0x1FFFFF,   0xFF,       },
	{ CDB_GROUP1, SCMD_GROUP1, 0xFFFFFFFF, 0xFFFF,     },
	{ CDB_GROUP5, SCMD_GROUP5, 0xFFFFFFFF, 0xFFFFFFFF, },
	{ CDB_GROUP4, SCMD_GROUP4, 0xFFFFFFFFFFFFFFFF, 0xFFFFFFFF, },
};
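
/*
 * The rows above describe, per CDB group (6-, 10-, 12- and 16-byte CDBs
 * respectively): the CDB size, the group-code bits for the opcode, the
 * largest addressable LBA (21, 32, 32 and 64 bits wide), and the largest
 * transfer length in blocks (8, 16, 32 and 32 bits wide). See the
 * sd_cdbinfo definition in sddef.h for the field names.
 */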

/*
 * Specifies the number of seconds that must have elapsed since the last
 * command completed for a device to be declared idle to the PM framework.
 */
static int sd_pm_idletime = 1;

/*
 * Internal function prototypes
 */

#if (defined(__fibre))
/*
 * These #defines are to avoid namespace collisions that occur because this
 * code is currently used to compile two separate driver modules: sd and ssd.
 * All function names need to be treated this way (even if declared static)
 * in order to allow the debugger to resolve the names properly.
 * It is anticipated that in the near future the ssd module will be obsoleted,
 * at which time this ugliness should go away.
 */
#define	sd_log_trace	ssd_log_trace
#define	sd_log_info	ssd_log_info
#define	sd_log_err	ssd_log_err
#define	sdprobe	ssdprobe
#define	sdinfo	ssdinfo
#define	sd_prop_op	ssd_prop_op
#define	sd_scsi_probe_cache_init	ssd_scsi_probe_cache_init
#define	sd_scsi_probe_cache_fini	ssd_scsi_probe_cache_fini
#define	sd_scsi_clear_probe_cache	ssd_scsi_clear_probe_cache
#define	sd_scsi_probe_with_cache	ssd_scsi_probe_with_cache
#define	sd_scsi_target_lun_init	ssd_scsi_target_lun_init
#define	sd_scsi_target_lun_fini	ssd_scsi_target_lun_fini
#define	sd_scsi_get_target_lun_count	ssd_scsi_get_target_lun_count
#define	sd_scsi_update_lun_on_target	ssd_scsi_update_lun_on_target
#define	sd_spin_up_unit	ssd_spin_up_unit
#define	sd_enable_descr_sense	ssd_enable_descr_sense
#define	sd_reenable_dsense_task	ssd_reenable_dsense_task
#define	sd_set_mmc_caps	ssd_set_mmc_caps
#define	sd_read_unit_properties	ssd_read_unit_properties
#define	sd_process_sdconf_file	ssd_process_sdconf_file
#define	sd_process_sdconf_table	ssd_process_sdconf_table
#define	sd_sdconf_id_match	ssd_sdconf_id_match
#define	sd_blank_cmp	ssd_blank_cmp
#define	sd_chk_vers1_data	ssd_chk_vers1_data
#define	sd_set_vers1_properties	ssd_set_vers1_properties

#define	sd_get_physical_geometry	ssd_get_physical_geometry
#define	sd_get_virtual_geometry	ssd_get_virtual_geometry
#define	sd_update_block_info	ssd_update_block_info
#define	sd_register_devid	ssd_register_devid
#define	sd_get_devid	ssd_get_devid
#define	sd_create_devid	ssd_create_devid
#define	sd_write_deviceid	ssd_write_deviceid
#define	sd_check_vpd_page_support	ssd_check_vpd_page_support
#define	sd_setup_pm	ssd_setup_pm
#define	sd_create_pm_components	ssd_create_pm_components
#define	sd_ddi_suspend	ssd_ddi_suspend
#define	sd_ddi_pm_suspend	ssd_ddi_pm_suspend
#define	sd_ddi_resume	ssd_ddi_resume
#define	sd_ddi_pm_resume	ssd_ddi_pm_resume
#define	sdpower	ssdpower
#define	sdattach	ssdattach
#define	sddetach	ssddetach
#define	sd_unit_attach	ssd_unit_attach
#define	sd_unit_detach	ssd_unit_detach
#define	sd_set_unit_attributes	ssd_set_unit_attributes
#define	sd_create_errstats	ssd_create_errstats
#define	sd_set_errstats	ssd_set_errstats
#define	sd_set_pstats	ssd_set_pstats
#define	sddump	ssddump
#define	sd_scsi_poll	ssd_scsi_poll
#define	sd_send_polled_RQS	ssd_send_polled_RQS
#define	sd_ddi_scsi_poll	ssd_ddi_scsi_poll
#define	sd_init_event_callbacks	ssd_init_event_callbacks
#define	sd_event_callback	ssd_event_callback
#define	sd_cache_control	ssd_cache_control
#define	sd_get_write_cache_enabled	ssd_get_write_cache_enabled
#define	sd_get_nv_sup	ssd_get_nv_sup
#define	sd_make_device	ssd_make_device
#define	sdopen	ssdopen
#define	sdclose	ssdclose
#define	sd_ready_and_valid	ssd_ready_and_valid
#define	sdmin	ssdmin
#define	sdread	ssdread
#define	sdwrite	ssdwrite
#define	sdaread	ssdaread
#define	sdawrite	ssdawrite
#define	sdstrategy	ssdstrategy
#define	sdioctl	ssdioctl
#define	sd_mapblockaddr_iostart	ssd_mapblockaddr_iostart
#define	sd_mapblocksize_iostart	ssd_mapblocksize_iostart
#define	sd_checksum_iostart	ssd_checksum_iostart
#define	sd_checksum_uscsi_iostart	ssd_checksum_uscsi_iostart
#define	sd_pm_iostart	ssd_pm_iostart
#define	sd_core_iostart	ssd_core_iostart
#define	sd_mapblockaddr_iodone	ssd_mapblockaddr_iodone
#define	sd_mapblocksize_iodone	ssd_mapblocksize_iodone
#define	sd_checksum_iodone	ssd_checksum_iodone
#define	sd_checksum_uscsi_iodone	ssd_checksum_uscsi_iodone
#define	sd_pm_iodone	ssd_pm_iodone
#define	sd_initpkt_for_buf	ssd_initpkt_for_buf
#define	sd_destroypkt_for_buf	ssd_destroypkt_for_buf
#define	sd_setup_rw_pkt	ssd_setup_rw_pkt
#define	sd_setup_next_rw_pkt	ssd_setup_next_rw_pkt
#define	sd_buf_iodone	ssd_buf_iodone
#define	sd_uscsi_strategy	ssd_uscsi_strategy
#define	sd_initpkt_for_uscsi	ssd_initpkt_for_uscsi
#define	sd_destroypkt_for_uscsi	ssd_destroypkt_for_uscsi
#define	sd_uscsi_iodone	ssd_uscsi_iodone
#define	sd_xbuf_strategy	ssd_xbuf_strategy
#define	sd_xbuf_init	ssd_xbuf_init
#define	sd_pm_entry	ssd_pm_entry
#define	sd_pm_exit	ssd_pm_exit

#define	sd_pm_idletimeout_handler	ssd_pm_idletimeout_handler
#define	sd_pm_timeout_handler	ssd_pm_timeout_handler

#define	sd_add_buf_to_waitq	ssd_add_buf_to_waitq
#define	sdintr	ssdintr
#define	sd_start_cmds	ssd_start_cmds
#define	sd_send_scsi_cmd	ssd_send_scsi_cmd
#define	sd_bioclone_alloc	ssd_bioclone_alloc
#define	sd_bioclone_free	ssd_bioclone_free
#define	sd_shadow_buf_alloc	ssd_shadow_buf_alloc
#define	sd_shadow_buf_free	ssd_shadow_buf_free
#define	sd_print_transport_rejected_message	\
	ssd_print_transport_rejected_message
#define	sd_retry_command	ssd_retry_command
#define	sd_set_retry_bp	ssd_set_retry_bp
#define	sd_send_request_sense_command	ssd_send_request_sense_command
#define	sd_start_retry_command	ssd_start_retry_command
#define	sd_start_direct_priority_command	\
	ssd_start_direct_priority_command
#define	sd_return_failed_command	ssd_return_failed_command
#define	sd_return_failed_command_no_restart	\
	ssd_return_failed_command_no_restart
#define	sd_return_command	ssd_return_command
#define	sd_sync_with_callback	ssd_sync_with_callback
#define	sdrunout	ssdrunout
#define	sd_mark_rqs_busy	ssd_mark_rqs_busy
#define	sd_mark_rqs_idle	ssd_mark_rqs_idle
#define	sd_reduce_throttle	ssd_reduce_throttle
#define	sd_restore_throttle	ssd_restore_throttle
#define	sd_print_incomplete_msg	ssd_print_incomplete_msg
#define	sd_init_cdb_limits	ssd_init_cdb_limits
#define	sd_pkt_status_good	ssd_pkt_status_good
#define	sd_pkt_status_check_condition	ssd_pkt_status_check_condition
#define	sd_pkt_status_busy	ssd_pkt_status_busy
#define	sd_pkt_status_reservation_conflict	\
	ssd_pkt_status_reservation_conflict
#define	sd_pkt_status_qfull	ssd_pkt_status_qfull
#define	sd_handle_request_sense	ssd_handle_request_sense
#define	sd_handle_auto_request_sense	ssd_handle_auto_request_sense
#define	sd_print_sense_failed_msg	ssd_print_sense_failed_msg
#define	sd_validate_sense_data	ssd_validate_sense_data
#define	sd_decode_sense	ssd_decode_sense
#define	sd_print_sense_msg	ssd_print_sense_msg
#define	sd_sense_key_no_sense	ssd_sense_key_no_sense
#define	sd_sense_key_recoverable_error	ssd_sense_key_recoverable_error
#define	sd_sense_key_not_ready	ssd_sense_key_not_ready
#define	sd_sense_key_medium_or_hardware_error	\
	ssd_sense_key_medium_or_hardware_error
#define	sd_sense_key_illegal_request	ssd_sense_key_illegal_request
#define	sd_sense_key_unit_attention	ssd_sense_key_unit_attention
#define	sd_sense_key_fail_command	ssd_sense_key_fail_command
#define	sd_sense_key_blank_check	ssd_sense_key_blank_check
#define	sd_sense_key_aborted_command	ssd_sense_key_aborted_command
#define	sd_sense_key_default	ssd_sense_key_default
#define	sd_print_retry_msg	ssd_print_retry_msg
#define	sd_print_cmd_incomplete_msg	ssd_print_cmd_incomplete_msg
#define	sd_pkt_reason_cmd_incomplete	ssd_pkt_reason_cmd_incomplete
#define	sd_pkt_reason_cmd_tran_err	ssd_pkt_reason_cmd_tran_err
#define	sd_pkt_reason_cmd_reset	ssd_pkt_reason_cmd_reset
#define	sd_pkt_reason_cmd_aborted	ssd_pkt_reason_cmd_aborted
#define	sd_pkt_reason_cmd_timeout	ssd_pkt_reason_cmd_timeout
#define	sd_pkt_reason_cmd_unx_bus_free	ssd_pkt_reason_cmd_unx_bus_free
#define	sd_pkt_reason_cmd_tag_reject	ssd_pkt_reason_cmd_tag_reject
#define	sd_pkt_reason_default	ssd_pkt_reason_default
#define	sd_reset_target	ssd_reset_target
#define	sd_start_stop_unit_callback	ssd_start_stop_unit_callback
#define	sd_start_stop_unit_task	ssd_start_stop_unit_task
#define	sd_taskq_create	ssd_taskq_create
#define	sd_taskq_delete	ssd_taskq_delete
#define	sd_target_change_task	ssd_target_change_task
#define	sd_log_lun_expansion_event	ssd_log_lun_expansion_event
#define	sd_media_change_task	ssd_media_change_task
#define	sd_handle_mchange	ssd_handle_mchange
#define	sd_send_scsi_DOORLOCK	ssd_send_scsi_DOORLOCK
#define	sd_send_scsi_READ_CAPACITY	ssd_send_scsi_READ_CAPACITY
#define	sd_send_scsi_READ_CAPACITY_16	ssd_send_scsi_READ_CAPACITY_16
#define	sd_send_scsi_GET_CONFIGURATION	ssd_send_scsi_GET_CONFIGURATION
#define	sd_send_scsi_feature_GET_CONFIGURATION	\
	ssd_send_scsi_feature_GET_CONFIGURATION
#define	sd_send_scsi_START_STOP_UNIT	ssd_send_scsi_START_STOP_UNIT
#define	sd_send_scsi_INQUIRY	ssd_send_scsi_INQUIRY
#define	sd_send_scsi_TEST_UNIT_READY	ssd_send_scsi_TEST_UNIT_READY
#define	sd_send_scsi_PERSISTENT_RESERVE_IN	\
	ssd_send_scsi_PERSISTENT_RESERVE_IN
#define	sd_send_scsi_PERSISTENT_RESERVE_OUT	\
	ssd_send_scsi_PERSISTENT_RESERVE_OUT
#define	sd_send_scsi_SYNCHRONIZE_CACHE	ssd_send_scsi_SYNCHRONIZE_CACHE
#define	sd_send_scsi_SYNCHRONIZE_CACHE_biodone	\
	ssd_send_scsi_SYNCHRONIZE_CACHE_biodone
#define	sd_send_scsi_MODE_SENSE	ssd_send_scsi_MODE_SENSE
#define	sd_send_scsi_MODE_SELECT	ssd_send_scsi_MODE_SELECT
#define	sd_send_scsi_RDWR	ssd_send_scsi_RDWR
#define	sd_send_scsi_LOG_SENSE	ssd_send_scsi_LOG_SENSE
#define	sd_alloc_rqs	ssd_alloc_rqs
#define	sd_free_rqs	ssd_free_rqs
#define	sd_dump_memory	ssd_dump_memory
#define	sd_get_media_info	ssd_get_media_info
#define	sd_dkio_ctrl_info	ssd_dkio_ctrl_info
#define	sd_nvpair_str_decode	ssd_nvpair_str_decode
#define	sd_strtok_r	ssd_strtok_r
#define	sd_set_properties	ssd_set_properties
#define	sd_get_tunables_from_conf	ssd_get_tunables_from_conf
#define	sd_setup_next_xfer	ssd_setup_next_xfer
#define	sd_dkio_get_temp	ssd_dkio_get_temp
#define	sd_check_mhd	ssd_check_mhd
#define	sd_mhd_watch_cb	ssd_mhd_watch_cb
#define	sd_mhd_watch_incomplete	ssd_mhd_watch_incomplete
#define	sd_sname	ssd_sname
#define	sd_mhd_resvd_recover	ssd_mhd_resvd_recover
#define	sd_resv_reclaim_thread	ssd_resv_reclaim_thread
#define	sd_take_ownership	ssd_take_ownership
#define	sd_reserve_release	ssd_reserve_release
#define	sd_rmv_resv_reclaim_req	ssd_rmv_resv_reclaim_req
#define	sd_mhd_reset_notify_cb	ssd_mhd_reset_notify_cb
#define	sd_persistent_reservation_in_read_keys	\
	ssd_persistent_reservation_in_read_keys
#define	sd_persistent_reservation_in_read_resv	\
	ssd_persistent_reservation_in_read_resv
#define	sd_mhdioc_takeown	ssd_mhdioc_takeown
#define	sd_mhdioc_failfast	ssd_mhdioc_failfast
#define	sd_mhdioc_release	ssd_mhdioc_release
#define	sd_mhdioc_register_devid	ssd_mhdioc_register_devid
#define	sd_mhdioc_inkeys	ssd_mhdioc_inkeys
#define	sd_mhdioc_inresv	ssd_mhdioc_inresv
#define	sr_change_blkmode	ssr_change_blkmode
#define	sr_change_speed	ssr_change_speed
#define	sr_atapi_change_speed	ssr_atapi_change_speed
#define	sr_pause_resume	ssr_pause_resume
#define	sr_play_msf	ssr_play_msf
#define	sr_play_trkind	ssr_play_trkind
#define	sr_read_all_subcodes	ssr_read_all_subcodes
#define	sr_read_subchannel	ssr_read_subchannel
#define	sr_read_tocentry	ssr_read_tocentry
#define	sr_read_tochdr	ssr_read_tochdr
#define	sr_read_cdda	ssr_read_cdda
#define	sr_read_cdxa	ssr_read_cdxa
#define	sr_read_mode1	ssr_read_mode1
#define	sr_read_mode2	ssr_read_mode2
#define	sr_read_cd_mode2	ssr_read_cd_mode2
#define	sr_sector_mode	ssr_sector_mode
#define	sr_eject	ssr_eject
#define	sr_ejected	ssr_ejected
#define	sr_check_wp	ssr_check_wp
#define	sd_check_media	ssd_check_media
#define	sd_media_watch_cb	ssd_media_watch_cb
#define	sd_delayed_cv_broadcast	ssd_delayed_cv_broadcast
#define	sr_volume_ctrl	ssr_volume_ctrl
#define	sr_read_sony_session_offset	ssr_read_sony_session_offset
#define	sd_log_page_supported	ssd_log_page_supported
#define	sd_check_for_writable_cd	ssd_check_for_writable_cd
#define	sd_wm_cache_constructor	ssd_wm_cache_constructor
#define	sd_wm_cache_destructor	ssd_wm_cache_destructor
#define	sd_range_lock	ssd_range_lock
#define	sd_get_range	ssd_get_range
#define	sd_free_inlist_wmap	ssd_free_inlist_wmap
#define	sd_range_unlock	ssd_range_unlock
#define	sd_read_modify_write_task	ssd_read_modify_write_task
#define	sddump_do_read_of_rmw	ssddump_do_read_of_rmw

#define	sd_iostart_chain	ssd_iostart_chain
#define	sd_iodone_chain	ssd_iodone_chain
#define	sd_initpkt_map	ssd_initpkt_map
#define	sd_destroypkt_map	ssd_destroypkt_map
#define	sd_chain_type_map	ssd_chain_type_map
#define	sd_chain_index_map	ssd_chain_index_map

#define	sd_failfast_flushctl	ssd_failfast_flushctl
#define	sd_failfast_flushq	ssd_failfast_flushq
#define	sd_failfast_flushq_callback	ssd_failfast_flushq_callback

#define	sd_is_lsi	ssd_is_lsi
#define	sd_tg_rdwr	ssd_tg_rdwr
#define	sd_tg_getinfo	ssd_tg_getinfo

#endif	/* #if (defined(__fibre)) */


int _init(void);
int _fini(void);
int _info(struct modinfo *modinfop);

/*PRINTFLIKE3*/
static void sd_log_trace(uint_t comp, struct sd_lun *un, const char *fmt, ...);
/*PRINTFLIKE3*/
static void sd_log_info(uint_t comp, struct sd_lun *un, const char *fmt, ...);
/*PRINTFLIKE3*/
static void sd_log_err(uint_t comp, struct sd_lun *un, const char *fmt, ...);

static int sdprobe(dev_info_t *devi);
static int sdinfo(dev_info_t *dip, ddi_info_cmd_t infocmd, void *arg,
    void **result);
static int sd_prop_op(dev_t dev, dev_info_t *dip, ddi_prop_op_t prop_op,
    int mod_flags, char *name, caddr_t valuep, int *lengthp);

/*
 * Smart probe for parallel scsi
 */
static void sd_scsi_probe_cache_init(void);
static void sd_scsi_probe_cache_fini(void);
static void sd_scsi_clear_probe_cache(void);
static int sd_scsi_probe_with_cache(struct scsi_device *devp, int (*fn)());

/*
 * Attached luns on target for parallel scsi
 */
static void sd_scsi_target_lun_init(void);
static void sd_scsi_target_lun_fini(void);
static int sd_scsi_get_target_lun_count(dev_info_t *dip, int target);
static void sd_scsi_update_lun_on_target(dev_info_t *dip, int target,
    int flag);

static int sd_spin_up_unit(sd_ssc_t *ssc);

/*
 * Use sd_ssc_init() to establish an sd_ssc_t struct, sd_ssc_send() to
 * issue an internal uscsi command, and sd_ssc_fini() to free the struct.
 */
static sd_ssc_t *sd_ssc_init(struct sd_lun *un);
static int sd_ssc_send(sd_ssc_t *ssc, struct uscsi_cmd *incmd,
    int flag, enum uio_seg dataspace, int path_flag);
static void sd_ssc_fini(sd_ssc_t *ssc);
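
#if 0
/*
 * Illustrative fragment only (never compiled): the init/send/fini
 * pattern described above, assuming a struct sd_lun *un in scope and
 * the SD_FMT_IGNORE assessment value from the enum sd_type_assessment
 * in sddef.h; error handling is elided.
 */
	sd_ssc_t	*ssc;
	int		rval;

	ssc = sd_ssc_init(un);
	rval = sd_send_scsi_TEST_UNIT_READY(ssc, 0);
	if (rval != 0)
		sd_ssc_assessment(ssc, SD_FMT_IGNORE);
	sd_ssc_fini(ssc);
#endif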

/*
 * Use sd_ssc_assessment() to set the correct type of assessment, and
 * sd_ssc_post() to post an ereport and a system log entry; sd_ssc_post()
 * calls sd_ssc_print() to print the system log and sd_ssc_ereport_post()
 * to post the ereport.
 */
static void sd_ssc_assessment(sd_ssc_t *ssc,
    enum sd_type_assessment tp_assess);

static void sd_ssc_post(sd_ssc_t *ssc, enum sd_driver_assessment sd_assess);
static void sd_ssc_print(sd_ssc_t *ssc, int sd_severity);
static void sd_ssc_ereport_post(sd_ssc_t *ssc,
    enum sd_driver_assessment drv_assess);

/*
 * Use sd_ssc_set_info() to mark an un-decodable-data error, and
 * sd_ssc_extract_info() to transfer information from internal
 * data structures to the sd_ssc_t.
 */
static void sd_ssc_set_info(sd_ssc_t *ssc, int ssc_flags,
    const char *fmt, ...);
static void sd_ssc_extract_info(sd_ssc_t *ssc, struct sd_lun *un,
    struct scsi_pkt *pktp, struct buf *bp, struct sd_xbuf *xp);

static int sd_send_scsi_cmd(dev_t dev, struct uscsi_cmd *incmd, int flag,
    enum uio_seg dataspace, int path_flag);

#ifdef _LP64
static void sd_enable_descr_sense(sd_ssc_t *ssc);
static void sd_reenable_dsense_task(void *arg);
#endif /* _LP64 */

static void sd_set_mmc_caps(sd_ssc_t *ssc);

static void sd_read_unit_properties(struct sd_lun *un);
static int sd_process_sdconf_file(struct sd_lun *un);
static void sd_nvpair_str_decode(struct sd_lun *un, char *nvpair_str);
static char *sd_strtok_r(char *string, const char *sepset, char **lasts);
static void sd_set_properties(struct sd_lun *un, char *name, char *value);
static void sd_get_tunables_from_conf(struct sd_lun *un, int flags,
    int *data_list, sd_tunables *values);
static void sd_process_sdconf_table(struct sd_lun *un);
static int sd_sdconf_id_match(struct sd_lun *un, char *id, int idlen);
static int sd_blank_cmp(struct sd_lun *un, char *id, int idlen);
static int sd_chk_vers1_data(struct sd_lun *un, int flags, int *prop_list,
    int list_len, char *dataname_ptr);
static void sd_set_vers1_properties(struct sd_lun *un, int flags,
    sd_tunables *prop_list);

static void sd_register_devid(sd_ssc_t *ssc, dev_info_t *devi,
    int reservation_flag);
static int sd_get_devid(sd_ssc_t *ssc);
static ddi_devid_t sd_create_devid(sd_ssc_t *ssc);
static int sd_write_deviceid(sd_ssc_t *ssc);
static int sd_get_devid_page(struct sd_lun *un, uchar_t *wwn, int *len);
static int sd_check_vpd_page_support(sd_ssc_t *ssc);

static void sd_setup_pm(sd_ssc_t *ssc, dev_info_t *devi);
static void sd_create_pm_components(dev_info_t *devi, struct sd_lun *un);

static int sd_ddi_suspend(dev_info_t *devi);
static int sd_ddi_pm_suspend(struct sd_lun *un);
static int sd_ddi_resume(dev_info_t *devi);
static int sd_ddi_pm_resume(struct sd_lun *un);
static int sdpower(dev_info_t *devi, int component, int level);

static int sdattach(dev_info_t *devi, ddi_attach_cmd_t cmd);
static int sddetach(dev_info_t *devi, ddi_detach_cmd_t cmd);
static int sd_unit_attach(dev_info_t *devi);
static int sd_unit_detach(dev_info_t *devi);

static void sd_set_unit_attributes(struct sd_lun *un, dev_info_t *devi);
static void sd_create_errstats(struct sd_lun *un, int instance);
static void sd_set_errstats(struct sd_lun *un);
static void sd_set_pstats(struct sd_lun *un);

static int sddump(dev_t dev, caddr_t addr, daddr_t blkno, int nblk);
static int sd_scsi_poll(struct sd_lun *un, struct scsi_pkt *pkt);
static int sd_send_polled_RQS(struct sd_lun *un);
static int sd_ddi_scsi_poll(struct scsi_pkt *pkt);

#if (defined(__fibre))
/*
 * Event callbacks (photon)
 */
static void sd_init_event_callbacks(struct sd_lun *un);
static void sd_event_callback(dev_info_t *, ddi_eventcookie_t, void *, void *);
#endif

/*
 * Defines for sd_cache_control
 */

#define	SD_CACHE_ENABLE		1
#define	SD_CACHE_DISABLE	0
#define	SD_CACHE_NOCHANGE	-1

static int sd_cache_control(sd_ssc_t *ssc, int rcd_flag, int wce_flag);
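
/*
 * For example (illustrative), the write cache can be enabled without
 * disturbing the read cache setting by calling
 * sd_cache_control(ssc, SD_CACHE_NOCHANGE, SD_CACHE_ENABLE); the two
 * arguments drive the RCD and WCE bits of the caching mode page.
 */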
static int sd_get_write_cache_enabled(sd_ssc_t *ssc, int *is_enabled);
static void sd_get_nv_sup(sd_ssc_t *ssc);
static dev_t sd_make_device(dev_info_t *devi);

static void sd_update_block_info(struct sd_lun *un, uint32_t lbasize,
    uint64_t capacity);

/*
 * Driver entry point functions.
 */
static int sdopen(dev_t *dev_p, int flag, int otyp, cred_t *cred_p);
static int sdclose(dev_t dev, int flag, int otyp, cred_t *cred_p);
static int sd_ready_and_valid(sd_ssc_t *ssc, int part);

static void sdmin(struct buf *bp);
static int sdread(dev_t dev, struct uio *uio, cred_t *cred_p);
static int sdwrite(dev_t dev, struct uio *uio, cred_t *cred_p);
static int sdaread(dev_t dev, struct aio_req *aio, cred_t *cred_p);
static int sdawrite(dev_t dev, struct aio_req *aio, cred_t *cred_p);

static int sdstrategy(struct buf *bp);
static int sdioctl(dev_t, int, intptr_t, int, cred_t *, int *);

/*
 * Function prototypes for layering functions in the iostart chain.
 */
static void sd_mapblockaddr_iostart(int index, struct sd_lun *un,
    struct buf *bp);
static void sd_mapblocksize_iostart(int index, struct sd_lun *un,
    struct buf *bp);
static void sd_checksum_iostart(int index, struct sd_lun *un, struct buf *bp);
static void sd_checksum_uscsi_iostart(int index, struct sd_lun *un,
    struct buf *bp);
static void sd_pm_iostart(int index, struct sd_lun *un, struct buf *bp);
static void sd_core_iostart(int index, struct sd_lun *un, struct buf *bp);

/*
 * Function prototypes for layering functions in the iodone chain.
 */
static void sd_buf_iodone(int index, struct sd_lun *un, struct buf *bp);
static void sd_uscsi_iodone(int index, struct sd_lun *un, struct buf *bp);
static void sd_mapblockaddr_iodone(int index, struct sd_lun *un,
    struct buf *bp);
static void sd_mapblocksize_iodone(int index, struct sd_lun *un,
    struct buf *bp);
static void sd_checksum_iodone(int index, struct sd_lun *un, struct buf *bp);
static void sd_checksum_uscsi_iodone(int index, struct sd_lun *un,
    struct buf *bp);
static void sd_pm_iodone(int index, struct sd_lun *un, struct buf *bp);
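
/*
 * Roughly speaking (see the chain definitions later in this file), the
 * iostart/iodone functions above are layered: each is invoked with its
 * own index into the sd_iostart_chain[]/sd_iodone_chain[] function
 * pointer arrays and passes the buf along by calling the entry at the
 * adjacent index, so a given I/O traverses only the layers that its
 * chain type selects.
 */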

/*
 * Prototypes for functions to support buf(9S) based IO.
 */
static void sd_xbuf_strategy(struct buf *bp, ddi_xbuf_t xp, void *arg);
static int sd_initpkt_for_buf(struct buf *, struct scsi_pkt **);
static void sd_destroypkt_for_buf(struct buf *);
static int sd_setup_rw_pkt(struct sd_lun *un, struct scsi_pkt **pktpp,
    struct buf *bp, int flags,
    int (*callback)(caddr_t), caddr_t callback_arg,
    diskaddr_t lba, uint32_t blockcount);
static int sd_setup_next_rw_pkt(struct sd_lun *un, struct scsi_pkt *pktp,
    struct buf *bp, diskaddr_t lba, uint32_t blockcount);

/*
 * Prototypes for functions to support USCSI IO.
 */
static int sd_uscsi_strategy(struct buf *bp);
static int sd_initpkt_for_uscsi(struct buf *, struct scsi_pkt **);
static void sd_destroypkt_for_uscsi(struct buf *);

static void sd_xbuf_init(struct sd_lun *un, struct buf *bp, struct sd_xbuf *xp,
    uchar_t chain_type, void *pktinfop);

static int sd_pm_entry(struct sd_lun *un);
static void sd_pm_exit(struct sd_lun *un);

static void sd_pm_idletimeout_handler(void *arg);

/*
 * sd_core internal functions (used at the sd_core_io layer).
 */
static void sd_add_buf_to_waitq(struct sd_lun *un, struct buf *bp);
static void sdintr(struct scsi_pkt *pktp);
static void sd_start_cmds(struct sd_lun *un, struct buf *immed_bp);

static int sd_send_scsi_cmd(dev_t dev, struct uscsi_cmd *incmd, int flag,
    enum uio_seg dataspace, int path_flag);

static struct buf *sd_bioclone_alloc(struct buf *bp, size_t datalen,
    daddr_t blkno, int (*func)(struct buf *));
static struct buf *sd_shadow_buf_alloc(struct buf *bp, size_t datalen,
    uint_t bflags, daddr_t blkno, int (*func)(struct buf *));
static void sd_bioclone_free(struct buf *bp);
static void sd_shadow_buf_free(struct buf *bp);

static void sd_print_transport_rejected_message(struct sd_lun *un,
    struct sd_xbuf *xp, int code);
static void sd_print_incomplete_msg(struct sd_lun *un, struct buf *bp,
    void *arg, int code);
static void sd_print_sense_failed_msg(struct sd_lun *un, struct buf *bp,
    void *arg, int code);
static void sd_print_cmd_incomplete_msg(struct sd_lun *un, struct buf *bp,
    void *arg, int code);

static void sd_retry_command(struct sd_lun *un, struct buf *bp,
    int retry_check_flag,
    void (*user_funcp)(struct sd_lun *un, struct buf *bp, void *argp,
	int c),
    void *user_arg, int failure_code, clock_t retry_delay,
    void (*statp)(kstat_io_t *));

static void sd_set_retry_bp(struct sd_lun *un, struct buf *bp,
    clock_t retry_delay, void (*statp)(kstat_io_t *));

static void sd_send_request_sense_command(struct sd_lun *un, struct buf *bp,
    struct scsi_pkt *pktp);
static void sd_start_retry_command(void *arg);
static void sd_start_direct_priority_command(void *arg);
static void sd_return_failed_command(struct sd_lun *un, struct buf *bp,
    int errcode);
static void sd_return_failed_command_no_restart(struct sd_lun *un,
    struct buf *bp, int errcode);
static void sd_return_command(struct sd_lun *un, struct buf *bp);
static void sd_sync_with_callback(struct sd_lun *un);
static int sdrunout(caddr_t arg);

static void sd_mark_rqs_busy(struct sd_lun *un, struct buf *bp);
static struct buf *sd_mark_rqs_idle(struct sd_lun *un, struct sd_xbuf *xp);

static void sd_reduce_throttle(struct sd_lun *un, int throttle_type);
static void sd_restore_throttle(void *arg);

static void sd_init_cdb_limits(struct sd_lun *un);

static void sd_pkt_status_good(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);

/*
 * Error handling functions
 */
static void sd_pkt_status_check_condition(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_pkt_status_busy(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_pkt_status_reservation_conflict(struct sd_lun *un,
    struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_pkt_status_qfull(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);

static void sd_handle_request_sense(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_handle_auto_request_sense(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);
static int sd_validate_sense_data(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, size_t actual_len);
static void sd_decode_sense(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);

static void sd_print_sense_msg(struct sd_lun *un, struct buf *bp,
    void *arg, int code);

static void sd_sense_key_no_sense(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_sense_key_recoverable_error(struct sd_lun *un,
    uint8_t *sense_datap,
    struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_sense_key_not_ready(struct sd_lun *un,
    uint8_t *sense_datap,
    struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_sense_key_medium_or_hardware_error(struct sd_lun *un,
    uint8_t *sense_datap,
    struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_sense_key_illegal_request(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_sense_key_unit_attention(struct sd_lun *un,
    uint8_t *sense_datap,
    struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_sense_key_fail_command(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_sense_key_blank_check(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_sense_key_aborted_command(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_sense_key_default(struct sd_lun *un,
    uint8_t *sense_datap,
    struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp);

static void sd_print_retry_msg(struct sd_lun *un, struct buf *bp,
    void *arg, int flag);

static void sd_pkt_reason_cmd_incomplete(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_pkt_reason_cmd_tran_err(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_pkt_reason_cmd_reset(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_pkt_reason_cmd_aborted(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_pkt_reason_cmd_timeout(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_pkt_reason_cmd_unx_bus_free(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_pkt_reason_cmd_tag_reject(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_pkt_reason_default(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);

static void sd_reset_target(struct sd_lun *un, struct scsi_pkt *pktp);

static void sd_start_stop_unit_callback(void *arg);
static void sd_start_stop_unit_task(void *arg);

static void sd_taskq_create(void);
static void sd_taskq_delete(void);
static void sd_target_change_task(void *arg);
static void sd_log_lun_expansion_event(struct sd_lun *un, int km_flag);
static void sd_media_change_task(void *arg);

static int sd_handle_mchange(struct sd_lun *un);
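
/*
 * In the sd_send_scsi_* prototypes below, path_flag selects how the
 * command is routed (assumed from the SD_PATH_* definitions in sddef.h):
 * SD_PATH_STANDARD goes through the normal command waitq, while
 * SD_PATH_DIRECT and SD_PATH_DIRECT_PRIORITY bypass the waitq for
 * commands that must not be queued behind normal I/O.
 */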
*capp, 1463 uint32_t *lbap, int path_flag); 1464 static int sd_send_scsi_START_STOP_UNIT(sd_ssc_t *ssc, int flag, 1465 int path_flag); 1466 static int sd_send_scsi_INQUIRY(sd_ssc_t *ssc, uchar_t *bufaddr, 1467 size_t buflen, uchar_t evpd, uchar_t page_code, size_t *residp); 1468 static int sd_send_scsi_TEST_UNIT_READY(sd_ssc_t *ssc, int flag); 1469 static int sd_send_scsi_PERSISTENT_RESERVE_IN(sd_ssc_t *ssc, 1470 uchar_t usr_cmd, uint16_t data_len, uchar_t *data_bufp); 1471 static int sd_send_scsi_PERSISTENT_RESERVE_OUT(sd_ssc_t *ssc, 1472 uchar_t usr_cmd, uchar_t *usr_bufp); 1473 static int sd_send_scsi_SYNCHRONIZE_CACHE(struct sd_lun *un, 1474 struct dk_callback *dkc); 1475 static int sd_send_scsi_SYNCHRONIZE_CACHE_biodone(struct buf *bp); 1476 static int sd_send_scsi_GET_CONFIGURATION(sd_ssc_t *ssc, 1477 struct uscsi_cmd *ucmdbuf, uchar_t *rqbuf, uint_t rqbuflen, 1478 uchar_t *bufaddr, uint_t buflen, int path_flag); 1479 static int sd_send_scsi_feature_GET_CONFIGURATION(sd_ssc_t *ssc, 1480 struct uscsi_cmd *ucmdbuf, uchar_t *rqbuf, uint_t rqbuflen, 1481 uchar_t *bufaddr, uint_t buflen, char feature, int path_flag); 1482 static int sd_send_scsi_MODE_SENSE(sd_ssc_t *ssc, int cdbsize, 1483 uchar_t *bufaddr, size_t buflen, uchar_t page_code, int path_flag); 1484 static int sd_send_scsi_MODE_SELECT(sd_ssc_t *ssc, int cdbsize, 1485 uchar_t *bufaddr, size_t buflen, uchar_t save_page, int path_flag); 1486 static int sd_send_scsi_RDWR(sd_ssc_t *ssc, uchar_t cmd, void *bufaddr, 1487 size_t buflen, daddr_t start_block, int path_flag); 1488 #define sd_send_scsi_READ(ssc, bufaddr, buflen, start_block, path_flag) \ 1489 sd_send_scsi_RDWR(ssc, SCMD_READ, bufaddr, buflen, start_block, \ 1490 path_flag) 1491 #define sd_send_scsi_WRITE(ssc, bufaddr, buflen, start_block, path_flag)\ 1492 sd_send_scsi_RDWR(ssc, SCMD_WRITE, bufaddr, buflen, start_block,\ 1493 path_flag) 1494 1495 static int sd_send_scsi_LOG_SENSE(sd_ssc_t *ssc, uchar_t *bufaddr, 1496 uint16_t buflen, uchar_t page_code, uchar_t page_control, 1497 uint16_t param_ptr, int path_flag); 1498 1499 static int sd_alloc_rqs(struct scsi_device *devp, struct sd_lun *un); 1500 static void sd_free_rqs(struct sd_lun *un); 1501 1502 static void sd_dump_memory(struct sd_lun *un, uint_t comp, char *title, 1503 uchar_t *data, int len, int fmt); 1504 static void sd_panic_for_res_conflict(struct sd_lun *un); 1505 1506 /* 1507 * Disk Ioctl Function Prototypes 1508 */ 1509 static int sd_get_media_info(dev_t dev, caddr_t arg, int flag); 1510 static int sd_dkio_ctrl_info(dev_t dev, caddr_t arg, int flag); 1511 static int sd_dkio_get_temp(dev_t dev, caddr_t arg, int flag); 1512 1513 /* 1514 * Multi-host Ioctl Prototypes 1515 */ 1516 static int sd_check_mhd(dev_t dev, int interval); 1517 static int sd_mhd_watch_cb(caddr_t arg, struct scsi_watch_result *resultp); 1518 static void sd_mhd_watch_incomplete(struct sd_lun *un, struct scsi_pkt *pkt); 1519 static char *sd_sname(uchar_t status); 1520 static void sd_mhd_resvd_recover(void *arg); 1521 static void sd_resv_reclaim_thread(); 1522 static int sd_take_ownership(dev_t dev, struct mhioctkown *p); 1523 static int sd_reserve_release(dev_t dev, int cmd); 1524 static void sd_rmv_resv_reclaim_req(dev_t dev); 1525 static void sd_mhd_reset_notify_cb(caddr_t arg); 1526 static int sd_persistent_reservation_in_read_keys(struct sd_lun *un, 1527 mhioc_inkeys_t *usrp, int flag); 1528 static int sd_persistent_reservation_in_read_resv(struct sd_lun *un, 1529 mhioc_inresvs_t *usrp, int flag); 1530 static int sd_mhdioc_takeown(dev_t 
dev, caddr_t arg, int flag);
1531 static int sd_mhdioc_failfast(dev_t dev, caddr_t arg, int flag);
1532 static int sd_mhdioc_release(dev_t dev);
1533 static int sd_mhdioc_register_devid(dev_t dev);
1534 static int sd_mhdioc_inkeys(dev_t dev, caddr_t arg, int flag);
1535 static int sd_mhdioc_inresv(dev_t dev, caddr_t arg, int flag);
1536
1537 /*
1538 * SCSI removable prototypes
1539 */
1540 static int sr_change_blkmode(dev_t dev, int cmd, intptr_t data, int flag);
1541 static int sr_change_speed(dev_t dev, int cmd, intptr_t data, int flag);
1542 static int sr_atapi_change_speed(dev_t dev, int cmd, intptr_t data, int flag);
1543 static int sr_pause_resume(dev_t dev, int mode);
1544 static int sr_play_msf(dev_t dev, caddr_t data, int flag);
1545 static int sr_play_trkind(dev_t dev, caddr_t data, int flag);
1546 static int sr_read_all_subcodes(dev_t dev, caddr_t data, int flag);
1547 static int sr_read_subchannel(dev_t dev, caddr_t data, int flag);
1548 static int sr_read_tocentry(dev_t dev, caddr_t data, int flag);
1549 static int sr_read_tochdr(dev_t dev, caddr_t data, int flag);
1550 static int sr_read_cdda(dev_t dev, caddr_t data, int flag);
1551 static int sr_read_cdxa(dev_t dev, caddr_t data, int flag);
1552 static int sr_read_mode1(dev_t dev, caddr_t data, int flag);
1553 static int sr_read_mode2(dev_t dev, caddr_t data, int flag);
1554 static int sr_read_cd_mode2(dev_t dev, caddr_t data, int flag);
1555 static int sr_sector_mode(dev_t dev, uint32_t blksize);
1556 static int sr_eject(dev_t dev);
1557 static void sr_ejected(register struct sd_lun *un);
1558 static int sr_check_wp(dev_t dev);
1559 static int sd_check_media(dev_t dev, enum dkio_state state);
1560 static int sd_media_watch_cb(caddr_t arg, struct scsi_watch_result *resultp);
1561 static void sd_delayed_cv_broadcast(void *arg);
1562 static int sr_volume_ctrl(dev_t dev, caddr_t data, int flag);
1563 static int sr_read_sony_session_offset(dev_t dev, caddr_t data, int flag);
1564
1565 static int sd_log_page_supported(sd_ssc_t *ssc, int log_page);
1566
1567 /*
1568 * Function prototypes for the non-512 support (DVDRAM, MO, etc.) functions.
1569 */
1570 static void sd_check_for_writable_cd(sd_ssc_t *ssc, int path_flag);
1571 static int sd_wm_cache_constructor(void *wm, void *un, int flags);
1572 static void sd_wm_cache_destructor(void *wm, void *un);
1573 static struct sd_w_map *sd_range_lock(struct sd_lun *un, daddr_t startb,
1574 daddr_t endb, ushort_t typ);
1575 static struct sd_w_map *sd_get_range(struct sd_lun *un, daddr_t startb,
1576 daddr_t endb);
1577 static void sd_free_inlist_wmap(struct sd_lun *un, struct sd_w_map *wmp);
1578 static void sd_range_unlock(struct sd_lun *un, struct sd_w_map *wm);
1579 static void sd_read_modify_write_task(void *arg);
1580 static int
1581 sddump_do_read_of_rmw(struct sd_lun *un, uint64_t blkno, uint64_t nblk,
1582 struct buf **bpp);
1583
1584
1585 /*
1586 * Function prototypes for failfast support.
1587 */ 1588 static void sd_failfast_flushq(struct sd_lun *un); 1589 static int sd_failfast_flushq_callback(struct buf *bp); 1590 1591 /* 1592 * Function prototypes to check for lsi devices 1593 */ 1594 static void sd_is_lsi(struct sd_lun *un); 1595 1596 /* 1597 * Function prototypes for partial DMA support 1598 */ 1599 static int sd_setup_next_xfer(struct sd_lun *un, struct buf *bp, 1600 struct scsi_pkt *pkt, struct sd_xbuf *xp); 1601 1602 1603 /* Function prototypes for cmlb */ 1604 static int sd_tg_rdwr(dev_info_t *devi, uchar_t cmd, void *bufaddr, 1605 diskaddr_t start_block, size_t reqlength, void *tg_cookie); 1606 1607 static int sd_tg_getinfo(dev_info_t *devi, int cmd, void *arg, void *tg_cookie); 1608 1609 /* 1610 * Constants for failfast support: 1611 * 1612 * SD_FAILFAST_INACTIVE: Instance is currently in a normal state, with NO 1613 * failfast processing being performed. 1614 * 1615 * SD_FAILFAST_ACTIVE: Instance is in the failfast state and is performing 1616 * failfast processing on all bufs with B_FAILFAST set. 1617 */ 1618 1619 #define SD_FAILFAST_INACTIVE 0 1620 #define SD_FAILFAST_ACTIVE 1 1621 1622 /* 1623 * Bitmask to control behavior of buf(9S) flushes when a transition to 1624 * the failfast state occurs. Optional bits include: 1625 * 1626 * SD_FAILFAST_FLUSH_ALL_BUFS: When set, flush ALL bufs including those that 1627 * do NOT have B_FAILFAST set. When clear, only bufs with B_FAILFAST will 1628 * be flushed. 1629 * 1630 * SD_FAILFAST_FLUSH_ALL_QUEUES: When set, flush any/all other queues in the 1631 * driver, in addition to the regular wait queue. This includes the xbuf 1632 * queues. When clear, only the driver's wait queue will be flushed. 1633 */ 1634 #define SD_FAILFAST_FLUSH_ALL_BUFS 0x01 1635 #define SD_FAILFAST_FLUSH_ALL_QUEUES 0x02 1636 1637 /* 1638 * The default behavior is to only flush bufs that have B_FAILFAST set, but 1639 * to flush all queues within the driver. 1640 */ 1641 static int sd_failfast_flushctl = SD_FAILFAST_FLUSH_ALL_QUEUES; 1642 1643 1644 /* 1645 * SD Testing Fault Injection 1646 */ 1647 #ifdef SD_FAULT_INJECTION 1648 static void sd_faultinjection_ioctl(int cmd, intptr_t arg, struct sd_lun *un); 1649 static void sd_faultinjection(struct scsi_pkt *pktp); 1650 static void sd_injection_log(char *buf, struct sd_lun *un); 1651 #endif 1652 1653 /* 1654 * Device driver ops vector 1655 */ 1656 static struct cb_ops sd_cb_ops = { 1657 sdopen, /* open */ 1658 sdclose, /* close */ 1659 sdstrategy, /* strategy */ 1660 nodev, /* print */ 1661 sddump, /* dump */ 1662 sdread, /* read */ 1663 sdwrite, /* write */ 1664 sdioctl, /* ioctl */ 1665 nodev, /* devmap */ 1666 nodev, /* mmap */ 1667 nodev, /* segmap */ 1668 nochpoll, /* poll */ 1669 sd_prop_op, /* cb_prop_op */ 1670 0, /* streamtab */ 1671 D_64BIT | D_MP | D_NEW | D_HOTPLUG, /* Driver compatibility flags */ 1672 CB_REV, /* cb_rev */ 1673 sdaread, /* async I/O read entry point */ 1674 sdawrite /* async I/O write entry point */ 1675 }; 1676 1677 static struct dev_ops sd_ops = { 1678 DEVO_REV, /* devo_rev, */ 1679 0, /* refcnt */ 1680 sdinfo, /* info */ 1681 nulldev, /* identify */ 1682 sdprobe, /* probe */ 1683 sdattach, /* attach */ 1684 sddetach, /* detach */ 1685 nodev, /* reset */ 1686 &sd_cb_ops, /* driver operations */ 1687 NULL, /* bus operations */ 1688 sdpower, /* power */ 1689 ddi_quiesce_not_needed, /* quiesce */ 1690 }; 1691 1692 1693 /* 1694 * This is the loadable module wrapper. 
1695 */
1696 #include <sys/modctl.h>
1697
1698 static struct modldrv modldrv = {
1699 &mod_driverops, /* Type of module. This one is a driver */
1700 SD_MODULE_NAME, /* Module name. */
1701 &sd_ops /* driver ops */
1702 };
1703
1704
1705 static struct modlinkage modlinkage = {
1706 MODREV_1,
1707 &modldrv,
1708 NULL
1709 };
1710
1711 static cmlb_tg_ops_t sd_tgops = {
1712 TG_DK_OPS_VERSION_1,
1713 sd_tg_rdwr,
1714 sd_tg_getinfo
1715 };
1716
1717 static struct scsi_asq_key_strings sd_additional_codes[] = {
1718 0x81, 0, "Logical Unit is Reserved",
1719 0x85, 0, "Audio Address Not Valid",
1720 0xb6, 0, "Media Load Mechanism Failed",
1721 0xB9, 0, "Audio Play Operation Aborted",
1722 0xbf, 0, "Buffer Overflow for Read All Subcodes Command",
1723 0x53, 2, "Medium removal prevented",
1724 0x6f, 0, "Authentication failed during key exchange",
1725 0x6f, 1, "Key not present",
1726 0x6f, 2, "Key not established",
1727 0x6f, 3, "Read without proper authentication",
1728 0x6f, 4, "Mismatched region to this logical unit",
1729 0x6f, 5, "Region reset count error",
1730 0xffff, 0x0, NULL
1731 };
1732
1733
1734 /*
1735 * Struct for passing printing information for sense data messages
1736 */
1737 struct sd_sense_info {
1738 int ssi_severity;
1739 int ssi_pfa_flag;
1740 };
1741
1742 /*
1743 * Table of function pointers for iostart-side routines. Separate "chains"
1744 * of layered function calls are formed by placing the function pointers
1745 * sequentially in the desired order. Functions are called according to an
1746 * incrementing table index ordering. The last function in each chain must
1747 * be sd_core_iostart(). The corresponding iodone-side routines are expected
1748 * in the sd_iodone_chain[] array.
1749 *
1750 * Note: It may seem more natural to organize both the iostart and iodone
1751 * functions together, into an array of structures (or some similar
1752 * organization) with a common index, rather than two separate arrays which
1753 * must be maintained in synchronization. The purpose of this division is
1754 * to achieve improved performance: individual arrays allow for more
1755 * effective cache line utilization on certain platforms.
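 *
 * Illustrative sketch (editorial note, not driver code): dispatch through
 * these tables is an indexed indirect call; see the SD_BEGIN_IOSTART() and
 * SD_NEXT_IOSTART() macros defined further below. A buf entering the
 * PM-enabled disk chain starts at SD_CHAIN_DISK_IOSTART (index 0):
 *
 *	index = SD_CHAIN_DISK_IOSTART;
 *	(*(sd_iostart_chain[index]))(index, un, bp);
 *
 * which runs sd_mapblockaddr_iostart(); each layer then advances with
 * SD_NEXT_IOSTART(index, un, bp), reaching sd_pm_iostart() at index 1
 * and finally sd_core_iostart() at index 2.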
1756 */ 1757 1758 typedef void (*sd_chain_t)(int index, struct sd_lun *un, struct buf *bp); 1759 1760 1761 static sd_chain_t sd_iostart_chain[] = { 1762 1763 /* Chain for buf IO for disk drive targets (PM enabled) */ 1764 sd_mapblockaddr_iostart, /* Index: 0 */ 1765 sd_pm_iostart, /* Index: 1 */ 1766 sd_core_iostart, /* Index: 2 */ 1767 1768 /* Chain for buf IO for disk drive targets (PM disabled) */ 1769 sd_mapblockaddr_iostart, /* Index: 3 */ 1770 sd_core_iostart, /* Index: 4 */ 1771 1772 /* Chain for buf IO for removable-media targets (PM enabled) */ 1773 sd_mapblockaddr_iostart, /* Index: 5 */ 1774 sd_mapblocksize_iostart, /* Index: 6 */ 1775 sd_pm_iostart, /* Index: 7 */ 1776 sd_core_iostart, /* Index: 8 */ 1777 1778 /* Chain for buf IO for removable-media targets (PM disabled) */ 1779 sd_mapblockaddr_iostart, /* Index: 9 */ 1780 sd_mapblocksize_iostart, /* Index: 10 */ 1781 sd_core_iostart, /* Index: 11 */ 1782 1783 /* Chain for buf IO for disk drives with checksumming (PM enabled) */ 1784 sd_mapblockaddr_iostart, /* Index: 12 */ 1785 sd_checksum_iostart, /* Index: 13 */ 1786 sd_pm_iostart, /* Index: 14 */ 1787 sd_core_iostart, /* Index: 15 */ 1788 1789 /* Chain for buf IO for disk drives with checksumming (PM disabled) */ 1790 sd_mapblockaddr_iostart, /* Index: 16 */ 1791 sd_checksum_iostart, /* Index: 17 */ 1792 sd_core_iostart, /* Index: 18 */ 1793 1794 /* Chain for USCSI commands (all targets) */ 1795 sd_pm_iostart, /* Index: 19 */ 1796 sd_core_iostart, /* Index: 20 */ 1797 1798 /* Chain for checksumming USCSI commands (all targets) */ 1799 sd_checksum_uscsi_iostart, /* Index: 21 */ 1800 sd_pm_iostart, /* Index: 22 */ 1801 sd_core_iostart, /* Index: 23 */ 1802 1803 /* Chain for "direct" USCSI commands (all targets) */ 1804 sd_core_iostart, /* Index: 24 */ 1805 1806 /* Chain for "direct priority" USCSI commands (all targets) */ 1807 sd_core_iostart, /* Index: 25 */ 1808 }; 1809 1810 /* 1811 * Macros to locate the first function of each iostart chain in the 1812 * sd_iostart_chain[] array. These are located by the index in the array. 1813 */ 1814 #define SD_CHAIN_DISK_IOSTART 0 1815 #define SD_CHAIN_DISK_IOSTART_NO_PM 3 1816 #define SD_CHAIN_RMMEDIA_IOSTART 5 1817 #define SD_CHAIN_RMMEDIA_IOSTART_NO_PM 9 1818 #define SD_CHAIN_CHKSUM_IOSTART 12 1819 #define SD_CHAIN_CHKSUM_IOSTART_NO_PM 16 1820 #define SD_CHAIN_USCSI_CMD_IOSTART 19 1821 #define SD_CHAIN_USCSI_CHKSUM_IOSTART 21 1822 #define SD_CHAIN_DIRECT_CMD_IOSTART 24 1823 #define SD_CHAIN_PRIORITY_CMD_IOSTART 25 1824 1825 1826 /* 1827 * Table of function pointers for the iodone-side routines for the driver- 1828 * internal layering mechanism. The calling sequence for iodone routines 1829 * uses a decrementing table index, so the last routine called in a chain 1830 * must be at the lowest array index location for that chain. The last 1831 * routine for each chain must be either sd_buf_iodone() (for buf(9S) IOs) 1832 * or sd_uscsi_iodone() (for uscsi IOs). Other than this, the ordering 1833 * of the functions in an iodone side chain must correspond to the ordering 1834 * of the iostart routines for that chain. Note that there is no iodone 1835 * side routine that corresponds to sd_core_iostart(), so there is no 1836 * entry in the table for this. 
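 *
 * Illustrative walk-through (editorial note): completion processing starts
 * at the chain's highest index and decrements. For the PM-enabled disk
 * chain below, SD_BEGIN_IODONE(SD_CHAIN_DISK_IODONE, un, bp) first calls
 * sd_pm_iodone() at index 2; each layer then invokes
 * SD_NEXT_IODONE(index, un, bp), running sd_mapblockaddr_iodone() at
 * index 1 and finally sd_buf_iodone() at index 0, which completes the
 * original buf(9S) request.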
1837 */
1838
1839 static sd_chain_t sd_iodone_chain[] = {
1840
1841 /* Chain for buf IO for disk drive targets (PM enabled) */
1842 sd_buf_iodone, /* Index: 0 */
1843 sd_mapblockaddr_iodone, /* Index: 1 */
1844 sd_pm_iodone, /* Index: 2 */
1845
1846 /* Chain for buf IO for disk drive targets (PM disabled) */
1847 sd_buf_iodone, /* Index: 3 */
1848 sd_mapblockaddr_iodone, /* Index: 4 */
1849
1850 /* Chain for buf IO for removable-media targets (PM enabled) */
1851 sd_buf_iodone, /* Index: 5 */
1852 sd_mapblockaddr_iodone, /* Index: 6 */
1853 sd_mapblocksize_iodone, /* Index: 7 */
1854 sd_pm_iodone, /* Index: 8 */
1855
1856 /* Chain for buf IO for removable-media targets (PM disabled) */
1857 sd_buf_iodone, /* Index: 9 */
1858 sd_mapblockaddr_iodone, /* Index: 10 */
1859 sd_mapblocksize_iodone, /* Index: 11 */
1860
1861 /* Chain for buf IO for disk drives with checksumming (PM enabled) */
1862 sd_buf_iodone, /* Index: 12 */
1863 sd_mapblockaddr_iodone, /* Index: 13 */
1864 sd_checksum_iodone, /* Index: 14 */
1865 sd_pm_iodone, /* Index: 15 */
1866
1867 /* Chain for buf IO for disk drives with checksumming (PM disabled) */
1868 sd_buf_iodone, /* Index: 16 */
1869 sd_mapblockaddr_iodone, /* Index: 17 */
1870 sd_checksum_iodone, /* Index: 18 */
1871
1872 /* Chain for USCSI commands (non-checksum targets) */
1873 sd_uscsi_iodone, /* Index: 19 */
1874 sd_pm_iodone, /* Index: 20 */
1875
1876 /* Chain for USCSI commands (checksum targets) */
1877 sd_uscsi_iodone, /* Index: 21 */
1878 sd_checksum_uscsi_iodone, /* Index: 22 */
1879 sd_pm_iodone, /* Index: 23 */
1880
1881 /* Chain for "direct" USCSI commands (all targets) */
1882 sd_uscsi_iodone, /* Index: 24 */
1883
1884 /* Chain for "direct priority" USCSI commands (all targets) */
1885 sd_uscsi_iodone, /* Index: 25 */
1886 };
1887
1888
1889 /*
1890 * Macros to locate the "first" function in the sd_iodone_chain[] array for
1891 * each iodone-side chain. These are located by the array index, but as the
1892 * iodone side functions are called in a decrementing-index order, the
1893 * highest index number in each chain must be specified (as these correspond
1894 * to the first function in the iodone chain that will be called by the core
1895 * at IO completion time).
1896 */
1897
1898 #define SD_CHAIN_DISK_IODONE 2
1899 #define SD_CHAIN_DISK_IODONE_NO_PM 4
1900 #define SD_CHAIN_RMMEDIA_IODONE 8
1901 #define SD_CHAIN_RMMEDIA_IODONE_NO_PM 11
1902 #define SD_CHAIN_CHKSUM_IODONE 15
1903 #define SD_CHAIN_CHKSUM_IODONE_NO_PM 18
1904 #define SD_CHAIN_USCSI_CMD_IODONE 20
1905 #define SD_CHAIN_USCSI_CHKSUM_IODONE 23
1906 #define SD_CHAIN_DIRECT_CMD_IODONE 24
1907 #define SD_CHAIN_PRIORITY_CMD_IODONE 25
1908
1909
1910
1911
1912 /*
1913 * Array to map a layering chain index to the appropriate initpkt routine.
1914 * The redundant entries are present so that the index used for accessing
1915 * the above sd_iostart_chain and sd_iodone_chain tables can be used directly
1916 * with this table as well.
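 *
 * For example (editorial sketch): because the same index is valid across
 * these parallel tables, the core can pick the proper packet constructor
 * for an xbuf directly from its iostart index, conceptually:
 *
 *	rval = (*(sd_initpkt_map[xp->xb_chain_iostart]))(bp, &pktp);
 *
 * so any buf IO index (0-18) yields sd_initpkt_for_buf() and any USCSI
 * index (19-25) yields sd_initpkt_for_uscsi().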
1917 */
1918 typedef int (*sd_initpkt_t)(struct buf *, struct scsi_pkt **);
1919
1920 static sd_initpkt_t sd_initpkt_map[] = {
1921
1922 /* Chain for buf IO for disk drive targets (PM enabled) */
1923 sd_initpkt_for_buf, /* Index: 0 */
1924 sd_initpkt_for_buf, /* Index: 1 */
1925 sd_initpkt_for_buf, /* Index: 2 */
1926
1927 /* Chain for buf IO for disk drive targets (PM disabled) */
1928 sd_initpkt_for_buf, /* Index: 3 */
1929 sd_initpkt_for_buf, /* Index: 4 */
1930
1931 /* Chain for buf IO for removable-media targets (PM enabled) */
1932 sd_initpkt_for_buf, /* Index: 5 */
1933 sd_initpkt_for_buf, /* Index: 6 */
1934 sd_initpkt_for_buf, /* Index: 7 */
1935 sd_initpkt_for_buf, /* Index: 8 */
1936
1937 /* Chain for buf IO for removable-media targets (PM disabled) */
1938 sd_initpkt_for_buf, /* Index: 9 */
1939 sd_initpkt_for_buf, /* Index: 10 */
1940 sd_initpkt_for_buf, /* Index: 11 */
1941
1942 /* Chain for buf IO for disk drives with checksumming (PM enabled) */
1943 sd_initpkt_for_buf, /* Index: 12 */
1944 sd_initpkt_for_buf, /* Index: 13 */
1945 sd_initpkt_for_buf, /* Index: 14 */
1946 sd_initpkt_for_buf, /* Index: 15 */
1947
1948 /* Chain for buf IO for disk drives with checksumming (PM disabled) */
1949 sd_initpkt_for_buf, /* Index: 16 */
1950 sd_initpkt_for_buf, /* Index: 17 */
1951 sd_initpkt_for_buf, /* Index: 18 */
1952
1953 /* Chain for USCSI commands (non-checksum targets) */
1954 sd_initpkt_for_uscsi, /* Index: 19 */
1955 sd_initpkt_for_uscsi, /* Index: 20 */
1956
1957 /* Chain for USCSI commands (checksum targets) */
1958 sd_initpkt_for_uscsi, /* Index: 21 */
1959 sd_initpkt_for_uscsi, /* Index: 22 */
1960 sd_initpkt_for_uscsi, /* Index: 23 */
1961
1962 /* Chain for "direct" USCSI commands (all targets) */
1963 sd_initpkt_for_uscsi, /* Index: 24 */
1964
1965 /* Chain for "direct priority" USCSI commands (all targets) */
1966 sd_initpkt_for_uscsi, /* Index: 25 */
1967
1968 };
1969
1970
1971 /*
1972 * Array to map a layering chain index to the appropriate destroypkt routine.
1973 * The redundant entries are present so that the index used for accessing
1974 * the above sd_iostart_chain and sd_iodone_chain tables can be used directly
1975 * with this table as well.
1976 */
1977 typedef void (*sd_destroypkt_t)(struct buf *);
1978
1979 static sd_destroypkt_t sd_destroypkt_map[] = {
1980
1981 /* Chain for buf IO for disk drive targets (PM enabled) */
1982 sd_destroypkt_for_buf, /* Index: 0 */
1983 sd_destroypkt_for_buf, /* Index: 1 */
1984 sd_destroypkt_for_buf, /* Index: 2 */
1985
1986 /* Chain for buf IO for disk drive targets (PM disabled) */
1987 sd_destroypkt_for_buf, /* Index: 3 */
1988 sd_destroypkt_for_buf, /* Index: 4 */
1989
1990 /* Chain for buf IO for removable-media targets (PM enabled) */
1991 sd_destroypkt_for_buf, /* Index: 5 */
1992 sd_destroypkt_for_buf, /* Index: 6 */
1993 sd_destroypkt_for_buf, /* Index: 7 */
1994 sd_destroypkt_for_buf, /* Index: 8 */
1995
1996 /* Chain for buf IO for removable-media targets (PM disabled) */
1997 sd_destroypkt_for_buf, /* Index: 9 */
1998 sd_destroypkt_for_buf, /* Index: 10 */
1999 sd_destroypkt_for_buf, /* Index: 11 */
2000
2001 /* Chain for buf IO for disk drives with checksumming (PM enabled) */
2002 sd_destroypkt_for_buf, /* Index: 12 */
2003 sd_destroypkt_for_buf, /* Index: 13 */
2004 sd_destroypkt_for_buf, /* Index: 14 */
2005 sd_destroypkt_for_buf, /* Index: 15 */
2006
2007 /* Chain for buf IO for disk drives with checksumming (PM disabled) */
2008 sd_destroypkt_for_buf, /* Index: 16 */
2009 sd_destroypkt_for_buf, /* Index: 17 */
2010 sd_destroypkt_for_buf, /* Index: 18 */
2011
2012 /* Chain for USCSI commands (non-checksum targets) */
2013 sd_destroypkt_for_uscsi, /* Index: 19 */
2014 sd_destroypkt_for_uscsi, /* Index: 20 */
2015
2016 /* Chain for USCSI commands (checksum targets) */
2017 sd_destroypkt_for_uscsi, /* Index: 21 */
2018 sd_destroypkt_for_uscsi, /* Index: 22 */
2019 sd_destroypkt_for_uscsi, /* Index: 23 */
2020
2021 /* Chain for "direct" USCSI commands (all targets) */
2022 sd_destroypkt_for_uscsi, /* Index: 24 */
2023
2024 /* Chain for "direct priority" USCSI commands (all targets) */
2025 sd_destroypkt_for_uscsi, /* Index: 25 */
2026
2027 };
2028
2029
2030
2031 /*
2032 * Array to map a layering chain index to the appropriate chain "type".
2033 * The chain type indicates a specific property/usage of the chain.
2034 * The redundant entries are present so that the index used for accessing
2035 * the above sd_iostart_chain and sd_iodone_chain tables can be used directly
2036 * with this table as well.
2037 */
2038
2039 #define SD_CHAIN_NULL 0 /* for the special RQS cmd */
2040 #define SD_CHAIN_BUFIO 1 /* regular buf IO */
2041 #define SD_CHAIN_USCSI 2 /* regular USCSI commands */
2042 #define SD_CHAIN_DIRECT 3 /* uscsi, w/ bypass power mgt */
2043 #define SD_CHAIN_DIRECT_PRIORITY 4 /* uscsi, w/ bypass power mgt */
2044 /* (for error recovery) */
2045
2046 static int sd_chain_type_map[] = {
2047
2048 /* Chain for buf IO for disk drive targets (PM enabled) */
2049 SD_CHAIN_BUFIO, /* Index: 0 */
2050 SD_CHAIN_BUFIO, /* Index: 1 */
2051 SD_CHAIN_BUFIO, /* Index: 2 */
2052
2053 /* Chain for buf IO for disk drive targets (PM disabled) */
2054 SD_CHAIN_BUFIO, /* Index: 3 */
2055 SD_CHAIN_BUFIO, /* Index: 4 */
2056
2057 /* Chain for buf IO for removable-media targets (PM enabled) */
2058 SD_CHAIN_BUFIO, /* Index: 5 */
2059 SD_CHAIN_BUFIO, /* Index: 6 */
2060 SD_CHAIN_BUFIO, /* Index: 7 */
2061 SD_CHAIN_BUFIO, /* Index: 8 */
2062
2063 /* Chain for buf IO for removable-media targets (PM disabled) */
2064 SD_CHAIN_BUFIO, /* Index: 9 */
2065 SD_CHAIN_BUFIO, /* Index: 10 */
2066 SD_CHAIN_BUFIO, /* Index: 11 */
2067
2068 /* Chain for buf IO for disk drives with checksumming (PM enabled) */
2069 SD_CHAIN_BUFIO, /* Index: 12 */
2070 SD_CHAIN_BUFIO, /* Index: 13 */
2071 SD_CHAIN_BUFIO, /* Index: 14 */
2072 SD_CHAIN_BUFIO, /* Index: 15 */
2073
2074 /* Chain for buf IO for disk drives with checksumming (PM disabled) */
2075 SD_CHAIN_BUFIO, /* Index: 16 */
2076 SD_CHAIN_BUFIO, /* Index: 17 */
2077 SD_CHAIN_BUFIO, /* Index: 18 */
2078
2079 /* Chain for USCSI commands (non-checksum targets) */
2080 SD_CHAIN_USCSI, /* Index: 19 */
2081 SD_CHAIN_USCSI, /* Index: 20 */
2082
2083 /* Chain for USCSI commands (checksum targets) */
2084 SD_CHAIN_USCSI, /* Index: 21 */
2085 SD_CHAIN_USCSI, /* Index: 22 */
2086 SD_CHAIN_USCSI, /* Index: 23 */
2087
2088 /* Chain for "direct" USCSI commands (all targets) */
2089 SD_CHAIN_DIRECT, /* Index: 24 */
2090
2091 /* Chain for "direct priority" USCSI commands (all targets) */
2092 SD_CHAIN_DIRECT_PRIORITY, /* Index: 25 */
2093 };
2094
2095
2096 /* Macro to return TRUE if the IO has come from the sd_buf_iostart() chain. */
2097 #define SD_IS_BUFIO(xp) \
2098 (sd_chain_type_map[(xp)->xb_chain_iostart] == SD_CHAIN_BUFIO)
2099
2100 /* Macro to return TRUE if the IO has come from the "direct priority" chain. */
2101 #define SD_IS_DIRECT_PRIORITY(xp) \
2102 (sd_chain_type_map[(xp)->xb_chain_iostart] == SD_CHAIN_DIRECT_PRIORITY)
2103
2104
2105
2106 /*
2107 * Struct, array, and macros to map a specific chain to the appropriate
2108 * layering indexes in the sd_iostart_chain[] and sd_iodone_chain[] arrays.
2109 *
2110 * The sd_chain_index_map[] array is used at attach time to set the various
2111 * un_xxx_chain type members of the sd_lun softstate to the specific layering
2112 * chain to be used with the instance. This allows different instances to use
2113 * different chains for buf IO, uscsi IO, etc. Also, since the xb_chain_iostart
2114 * and xb_chain_iodone index values in the sd_xbuf are initialized to these
2115 * values at sd_xbuf init time, this allows (1) layering chains to be changed
2116 * dynamically and without the use of locking; and (2) a layer to update the
2117 * xb_chain_io[start|done] member in a given xbuf with its current index value,
2118 * to allow for deferred processing of an IO within the same chain from a
2119 * different execution context.
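 *
 * Editorial sketch of the intended flow: at attach time the driver would,
 * for example, select SD_CHAIN_INFO_DISK for a fixed disk and record it
 * in un->un_buf_chain_type; when an xbuf is later initialized for a buf
 * IO, the two layering indexes are taken from the map, conceptually:
 *
 *	xp->xb_chain_iostart =
 *	    sd_chain_index_map[un->un_buf_chain_type].sci_iostart_index;
 *	xp->xb_chain_iodone =
 *	    sd_chain_index_map[un->un_buf_chain_type].sci_iodone_index;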
2120 */
2121
2122 struct sd_chain_index {
2123 int sci_iostart_index;
2124 int sci_iodone_index;
2125 };
2126
2127 static struct sd_chain_index sd_chain_index_map[] = {
2128 { SD_CHAIN_DISK_IOSTART, SD_CHAIN_DISK_IODONE },
2129 { SD_CHAIN_DISK_IOSTART_NO_PM, SD_CHAIN_DISK_IODONE_NO_PM },
2130 { SD_CHAIN_RMMEDIA_IOSTART, SD_CHAIN_RMMEDIA_IODONE },
2131 { SD_CHAIN_RMMEDIA_IOSTART_NO_PM, SD_CHAIN_RMMEDIA_IODONE_NO_PM },
2132 { SD_CHAIN_CHKSUM_IOSTART, SD_CHAIN_CHKSUM_IODONE },
2133 { SD_CHAIN_CHKSUM_IOSTART_NO_PM, SD_CHAIN_CHKSUM_IODONE_NO_PM },
2134 { SD_CHAIN_USCSI_CMD_IOSTART, SD_CHAIN_USCSI_CMD_IODONE },
2135 { SD_CHAIN_USCSI_CHKSUM_IOSTART, SD_CHAIN_USCSI_CHKSUM_IODONE },
2136 { SD_CHAIN_DIRECT_CMD_IOSTART, SD_CHAIN_DIRECT_CMD_IODONE },
2137 { SD_CHAIN_PRIORITY_CMD_IOSTART, SD_CHAIN_PRIORITY_CMD_IODONE },
2138 };
2139
2140
2141 /*
2142 * The following are indexes into the sd_chain_index_map[] array.
2143 */
2144
2145 /* un->un_buf_chain_type must be set to one of these */
2146 #define SD_CHAIN_INFO_DISK 0
2147 #define SD_CHAIN_INFO_DISK_NO_PM 1
2148 #define SD_CHAIN_INFO_RMMEDIA 2
2149 #define SD_CHAIN_INFO_RMMEDIA_NO_PM 3
2150 #define SD_CHAIN_INFO_CHKSUM 4
2151 #define SD_CHAIN_INFO_CHKSUM_NO_PM 5
2152
2153 /* un->un_uscsi_chain_type must be set to one of these */
2154 #define SD_CHAIN_INFO_USCSI_CMD 6
2155 /* USCSI with PM disabled is the same as DIRECT */
2156 #define SD_CHAIN_INFO_USCSI_CMD_NO_PM 8
2157 #define SD_CHAIN_INFO_USCSI_CHKSUM 7
2158
2159 /* un->un_direct_chain_type must be set to one of these */
2160 #define SD_CHAIN_INFO_DIRECT_CMD 8
2161
2162 /* un->un_priority_chain_type must be set to one of these */
2163 #define SD_CHAIN_INFO_PRIORITY_CMD 9
2164
2165 /* size for devid inquiries */
2166 #define MAX_INQUIRY_SIZE 0xF0
2167
2168 /*
2169 * Macros used by functions to pass a given buf(9S) struct along to the
2170 * next function in the layering chain for further processing.
2171 *
2172 * In the following macros, passing more than three arguments to the called
2173 * routines causes the optimizer for the SPARC compiler to stop doing tail
2174 * call elimination, which results in significant performance degradation.
2175 */
2176 #define SD_BEGIN_IOSTART(index, un, bp) \
2177 ((*(sd_iostart_chain[index]))(index, un, bp))
2178
2179 #define SD_BEGIN_IODONE(index, un, bp) \
2180 ((*(sd_iodone_chain[index]))(index, un, bp))
2181
2182 #define SD_NEXT_IOSTART(index, un, bp) \
2183 ((*(sd_iostart_chain[(index) + 1]))((index) + 1, un, bp))
2184
2185 #define SD_NEXT_IODONE(index, un, bp) \
2186 ((*(sd_iodone_chain[(index) - 1]))((index) - 1, un, bp))
2187
2188 /*
2189 * Function: _init
2190 *
2191 * Description: This is the driver _init(9E) entry point.
2192 *
2193 * Return Code: Returns the value from mod_install(9F) or
2194 * ddi_soft_state_init(9F) as appropriate.
2195 *
2196 * Context: Called when the driver module is loaded.
2197 */
2198
2199 int
2200 _init(void)
2201 {
2202 int err;
2203
2204 /* establish driver name from module name */
2205 sd_label = (char *)mod_modname(&modlinkage);
2206
2207 err = ddi_soft_state_init(&sd_state, sizeof (struct sd_lun),
2208 SD_MAXUNIT);
2209
2210 if (err != 0) {
2211 return (err);
2212 }
2213
2214 mutex_init(&sd_detach_mutex, NULL, MUTEX_DRIVER, NULL);
2215 mutex_init(&sd_log_mutex, NULL, MUTEX_DRIVER, NULL);
2216 mutex_init(&sd_label_mutex, NULL, MUTEX_DRIVER, NULL);
2217
2218 mutex_init(&sd_tr.srq_resv_reclaim_mutex, NULL, MUTEX_DRIVER, NULL);
2219 cv_init(&sd_tr.srq_resv_reclaim_cv, NULL, CV_DRIVER, NULL);
2220 cv_init(&sd_tr.srq_inprocess_cv, NULL, CV_DRIVER, NULL);
2221
2222 /*
2223 * it's ok to init here even for fibre devices
2224 */
2225 sd_scsi_probe_cache_init();
2226
2227 sd_scsi_target_lun_init();
2228
2229 /*
2230 * Creating taskq before mod_install ensures that all callers (threads)
2231 * that enter the module after a successful mod_install encounter
2232 * a valid taskq.
2233 */
2234 sd_taskq_create();
2235
2236 err = mod_install(&modlinkage);
2237 if (err != 0) {
2238 /* delete taskq if install fails */
2239 sd_taskq_delete();
2240
2241 mutex_destroy(&sd_detach_mutex);
2242 mutex_destroy(&sd_log_mutex);
2243 mutex_destroy(&sd_label_mutex);
2244
2245 mutex_destroy(&sd_tr.srq_resv_reclaim_mutex);
2246 cv_destroy(&sd_tr.srq_resv_reclaim_cv);
2247 cv_destroy(&sd_tr.srq_inprocess_cv);
2248
2249 sd_scsi_probe_cache_fini();
2250
2251 sd_scsi_target_lun_fini();
2252
2253 ddi_soft_state_fini(&sd_state);
2254 return (err);
2255 }
2256
2257 return (err);
2258 }
2259
2260
2261 /*
2262 * Function: _fini
2263 *
2264 * Description: This is the driver _fini(9E) entry point.
2265 *
2266 * Return Code: Returns the value from mod_remove(9F).
2267 *
2268 * Context: Called when driver module is unloaded.
2269 */
2270
2271 int
2272 _fini(void)
2273 {
2274 int err;
2275
2276 if ((err = mod_remove(&modlinkage)) != 0) {
2277 return (err);
2278 }
2279
2280 sd_taskq_delete();
2281
2282 mutex_destroy(&sd_detach_mutex);
2283 mutex_destroy(&sd_log_mutex);
2284 mutex_destroy(&sd_label_mutex);
2285 mutex_destroy(&sd_tr.srq_resv_reclaim_mutex);
2286
2287 sd_scsi_probe_cache_fini();
2288
2289 sd_scsi_target_lun_fini();
2290
2291 cv_destroy(&sd_tr.srq_resv_reclaim_cv);
2292 cv_destroy(&sd_tr.srq_inprocess_cv);
2293
2294 ddi_soft_state_fini(&sd_state);
2295
2296 return (err);
2297 }
2298
2299
2300 /*
2301 * Function: _info
2302 *
2303 * Description: This is the driver _info(9E) entry point.
2304 *
2305 * Arguments: modinfop - pointer to the driver modinfo structure
2306 *
2307 * Return Code: Returns the value from mod_info(9F).
2308 *
2309 * Context: Kernel thread context
2310 */
2311
2312 int
2313 _info(struct modinfo *modinfop)
2314 {
2315 return (mod_info(&modlinkage, modinfop));
2316 }
2317
2318
2319 /*
2320 * The following routines implement the driver message logging facility.
2321 * They provide component- and level-based debug output filtering.
2322 * Output may also be restricted to messages for a single instance by
2323 * specifying a soft state pointer in sd_debug_un. If sd_debug_un is set
2324 * to NULL, then messages for all instances are printed.
2325 *
2326 * These routines have been cloned from each other due to the language
2327 * constraints of macros and variable argument list processing.
2328 */
2329
2330
2331 /*
2332 * Function: sd_log_err
2333 *
2334 * Description: This routine is called by the SD_ERROR macro for debug
2335 * logging of error conditions.
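 *
 * For example, a typical call site elsewhere in this file looks like:
 *
 *	SD_ERROR(SD_LOG_COMMON, un,
 *	    "sd_enable_descr_sense: mode sense ctrl page failed\n");
 *
 * which the macro routes here, supplying the component bit and the
 * unit's softstate pointer used for the filtering described below.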
2336 *
2337 * Arguments: comp - driver component being logged
2338 * un - pointer to driver soft state (unit) structure
2339 * fmt - error string and format to be logged
2340 */
2341
2342 static void
2343 sd_log_err(uint_t comp, struct sd_lun *un, const char *fmt, ...)
2344 {
2345 va_list ap;
2346 dev_info_t *dev;
2347
2348 ASSERT(un != NULL);
2349 dev = SD_DEVINFO(un);
2350 ASSERT(dev != NULL);
2351
2352 /*
2353 * Filter messages based on the global component and level masks.
2354 * Also print if un matches the value of sd_debug_un, or if
2355 * sd_debug_un is set to NULL.
2356 */
2357 if ((sd_component_mask & comp) && (sd_level_mask & SD_LOGMASK_ERROR) &&
2358 ((sd_debug_un == NULL) || (sd_debug_un == un))) {
2359 mutex_enter(&sd_log_mutex);
2360 va_start(ap, fmt);
2361 (void) vsprintf(sd_log_buf, fmt, ap);
2362 va_end(ap);
2363 scsi_log(dev, sd_label, CE_CONT, "%s", sd_log_buf);
2364 mutex_exit(&sd_log_mutex);
2365 }
2366 #ifdef SD_FAULT_INJECTION
2367 _NOTE(DATA_READABLE_WITHOUT_LOCK(sd_lun::sd_injection_mask));
2368 if (un->sd_injection_mask & comp) {
2369 mutex_enter(&sd_log_mutex);
2370 va_start(ap, fmt);
2371 (void) vsprintf(sd_log_buf, fmt, ap);
2372 va_end(ap);
2373 sd_injection_log(sd_log_buf, un);
2374 mutex_exit(&sd_log_mutex);
2375 }
2376 #endif
2377 }
2378
2379
2380 /*
2381 * Function: sd_log_info
2382 *
2383 * Description: This routine is called by the SD_INFO macro for debug
2384 * logging of general purpose informational conditions.
2385 *
2386 * Arguments: component - driver component being logged
2387 * un - pointer to driver soft state (unit) structure
2388 * fmt - info string and format to be logged
2389 */
2390
2391 static void
2392 sd_log_info(uint_t component, struct sd_lun *un, const char *fmt, ...)
2393 {
2394 va_list ap;
2395 dev_info_t *dev;
2396
2397 ASSERT(un != NULL);
2398 dev = SD_DEVINFO(un);
2399 ASSERT(dev != NULL);
2400
2401 /*
2402 * Filter messages based on the global component and level masks.
2403 * Also print if un matches the value of sd_debug_un, or if
2404 * sd_debug_un is set to NULL.
2405 */
2406 if ((sd_component_mask & component) &&
2407 (sd_level_mask & SD_LOGMASK_INFO) &&
2408 ((sd_debug_un == NULL) || (sd_debug_un == un))) {
2409 mutex_enter(&sd_log_mutex);
2410 va_start(ap, fmt);
2411 (void) vsprintf(sd_log_buf, fmt, ap);
2412 va_end(ap);
2413 scsi_log(dev, sd_label, CE_CONT, "%s", sd_log_buf);
2414 mutex_exit(&sd_log_mutex);
2415 }
2416 #ifdef SD_FAULT_INJECTION
2417 _NOTE(DATA_READABLE_WITHOUT_LOCK(sd_lun::sd_injection_mask));
2418 if (un->sd_injection_mask & component) {
2419 mutex_enter(&sd_log_mutex);
2420 va_start(ap, fmt);
2421 (void) vsprintf(sd_log_buf, fmt, ap);
2422 va_end(ap);
2423 sd_injection_log(sd_log_buf, un);
2424 mutex_exit(&sd_log_mutex);
2425 }
2426 #endif
2427 }
2428
2429
2430 /*
2431 * Function: sd_log_trace
2432 *
2433 * Description: This routine is called by the SD_TRACE macro for debug
2434 * logging of trace conditions (i.e. function entry/exit).
2435 *
2436 * Arguments: component - driver component being logged
2437 * un - pointer to driver soft state (unit) structure
2438 * fmt - trace string and format to be logged
2439 */
2440
2441 static void
2442 sd_log_trace(uint_t component, struct sd_lun *un, const char *fmt, ...)
2443 {
2444 va_list ap;
2445 dev_info_t *dev;
2446
2447 ASSERT(un != NULL);
2448 dev = SD_DEVINFO(un);
2449 ASSERT(dev != NULL);
2450
2451 /*
2452 * Filter messages based on the global component and level masks.
2453 * Also print if un matches the value of sd_debug_un, or if
2454 * sd_debug_un is set to NULL.
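 *
 * (Editorial note, as an illustrative sketch: these globals are the usual
 * tuning points. For the parallel SCSI build, one would typically enable
 * output for all instances with /etc/system settings such as
 * "set sd:sd_component_mask=0xffffffff" and
 * "set sd:sd_level_mask=0xffffffff", or scope output to a single instance
 * by writing that instance's softstate address into sd_debug_un with a
 * kernel debugger. The SD_LOGMASK_* values are defined in sddef.h.)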
2455 */
2456 if ((sd_component_mask & component) &&
2457 (sd_level_mask & SD_LOGMASK_TRACE) &&
2458 ((sd_debug_un == NULL) || (sd_debug_un == un))) {
2459 mutex_enter(&sd_log_mutex);
2460 va_start(ap, fmt);
2461 (void) vsprintf(sd_log_buf, fmt, ap);
2462 va_end(ap);
2463 scsi_log(dev, sd_label, CE_CONT, "%s", sd_log_buf);
2464 mutex_exit(&sd_log_mutex);
2465 }
2466 #ifdef SD_FAULT_INJECTION
2467 _NOTE(DATA_READABLE_WITHOUT_LOCK(sd_lun::sd_injection_mask));
2468 if (un->sd_injection_mask & component) {
2469 mutex_enter(&sd_log_mutex);
2470 va_start(ap, fmt);
2471 (void) vsprintf(sd_log_buf, fmt, ap);
2472 va_end(ap);
2473 sd_injection_log(sd_log_buf, un);
2474 mutex_exit(&sd_log_mutex);
2475 }
2476 #endif
2477 }
2478
2479
2480 /*
2481 * Function: sdprobe
2482 *
2483 * Description: This is the driver probe(9e) entry point function.
2484 *
2485 * Arguments: devi - opaque device info handle
2486 *
2487 * Return Code: DDI_PROBE_SUCCESS: If the probe was successful.
2488 * DDI_PROBE_FAILURE: If the probe failed.
2489 * DDI_PROBE_PARTIAL: If the instance is not present now,
2490 * but may be present in the future.
2491 */
2492
2493 static int
2494 sdprobe(dev_info_t *devi)
2495 {
2496 struct scsi_device *devp;
2497 int rval;
2498 int instance;
2499
2500 /*
2501 * if it wasn't for pln, sdprobe could actually be nulldev
2502 * in the "__fibre" case.
2503 */
2504 if (ddi_dev_is_sid(devi) == DDI_SUCCESS) {
2505 return (DDI_PROBE_DONTCARE);
2506 }
2507
2508 devp = ddi_get_driver_private(devi);
2509
2510 if (devp == NULL) {
2511 /* Ooops... nexus driver is mis-configured... */
2512 return (DDI_PROBE_FAILURE);
2513 }
2514
2515 instance = ddi_get_instance(devi);
2516
2517 if (ddi_get_soft_state(sd_state, instance) != NULL) {
2518 return (DDI_PROBE_PARTIAL);
2519 }
2520
2521 /*
2522 * Call the SCSA utility probe routine to see if we actually
2523 * have a target at this SCSI nexus.
2524 */
2525 switch (sd_scsi_probe_with_cache(devp, NULL_FUNC)) {
2526 case SCSIPROBE_EXISTS:
2527 switch (devp->sd_inq->inq_dtype) {
2528 case DTYPE_DIRECT:
2529 rval = DDI_PROBE_SUCCESS;
2530 break;
2531 case DTYPE_RODIRECT:
2532 /* CDs etc. Can be removable media */
2533 rval = DDI_PROBE_SUCCESS;
2534 break;
2535 case DTYPE_OPTICAL:
2536 /*
2537 * Rewritable optical drive HP115AA
2538 * Can also be removable media
2539 */
2540
2541 /*
2542 * Do not attempt to bind to DTYPE_OPTICAL if
2543 * pre-Solaris 9 SPARC sd behavior is required
2544 *
2545 * If first time through and sd_dtype_optical_bind
2546 * has not been set in /etc/system, check properties
2547 */
2548
2549 if (sd_dtype_optical_bind < 0) {
2550 sd_dtype_optical_bind = ddi_prop_get_int
2551 (DDI_DEV_T_ANY, devi, 0,
2552 "optical-device-bind", 1);
2553 }
2554
2555 if (sd_dtype_optical_bind == 0) {
2556 rval = DDI_PROBE_FAILURE;
2557 } else {
2558 rval = DDI_PROBE_SUCCESS;
2559 }
2560 break;
2561
2562 case DTYPE_NOTPRESENT:
2563 default:
2564 rval = DDI_PROBE_FAILURE;
2565 break;
2566 }
2567 break;
2568 default:
2569 rval = DDI_PROBE_PARTIAL;
2570 break;
2571 }
2572
2573 /*
2574 * This routine checks for resource allocation prior to freeing,
2575 * so it will take care of the "smart probing" case where a
2576 * scsi_probe() may or may not have been issued and will *not*
2577 * free previously-freed resources.
2578 */
2579 scsi_unprobe(devp);
2580 return (rval);
2581 }
2582
2583
2584 /*
2585 * Function: sdinfo
2586 *
2587 * Description: This is the driver getinfo(9e) entry point function.
2588 * Given the device number, return the devinfo pointer from
2589 * the scsi_device structure or the instance number
2590 * associated with the dev_t.
2591 *
2592 * Arguments: dip - pointer to device info structure
2593 * infocmd - command argument (DDI_INFO_DEVT2DEVINFO,
2594 * DDI_INFO_DEVT2INSTANCE)
2595 * arg - driver dev_t
2596 * result - user buffer for request response
2597 *
2598 * Return Code: DDI_SUCCESS
2599 * DDI_FAILURE
2600 */
2601 /* ARGSUSED */
2602 static int
2603 sdinfo(dev_info_t *dip, ddi_info_cmd_t infocmd, void *arg, void **result)
2604 {
2605 struct sd_lun *un;
2606 dev_t dev;
2607 int instance;
2608 int error;
2609
2610 switch (infocmd) {
2611 case DDI_INFO_DEVT2DEVINFO:
2612 dev = (dev_t)arg;
2613 instance = SDUNIT(dev);
2614 if ((un = ddi_get_soft_state(sd_state, instance)) == NULL) {
2615 return (DDI_FAILURE);
2616 }
2617 *result = (void *) SD_DEVINFO(un);
2618 error = DDI_SUCCESS;
2619 break;
2620 case DDI_INFO_DEVT2INSTANCE:
2621 dev = (dev_t)arg;
2622 instance = SDUNIT(dev);
2623 *result = (void *)(uintptr_t)instance;
2624 error = DDI_SUCCESS;
2625 break;
2626 default:
2627 error = DDI_FAILURE;
2628 }
2629 return (error);
2630 }
2631
2632 /*
2633 * Function: sd_prop_op
2634 *
2635 * Description: This is the driver prop_op(9e) entry point function.
2636 * Return the number of blocks for the partition in question
2637 * or forward the request to the property facilities.
2638 *
2639 * Arguments: dev - device number
2640 * dip - pointer to device info structure
2641 * prop_op - property operator
2642 * mod_flags - DDI_PROP_DONTPASS, don't pass to parent
2643 * name - pointer to property name
2644 * valuep - pointer or address of the user buffer
2645 * lengthp - property length
2646 *
2647 * Return Code: DDI_PROP_SUCCESS
2648 * DDI_PROP_NOT_FOUND
2649 * DDI_PROP_UNDEFINED
2650 * DDI_PROP_NO_MEMORY
2651 * DDI_PROP_BUF_TOO_SMALL
2652 */
2653
2654 static int
2655 sd_prop_op(dev_t dev, dev_info_t *dip, ddi_prop_op_t prop_op, int mod_flags,
2656 char *name, caddr_t valuep, int *lengthp)
2657 {
2658 struct sd_lun *un;
2659
2660 if ((un = ddi_get_soft_state(sd_state, ddi_get_instance(dip))) == NULL)
2661 return (ddi_prop_op(dev, dip, prop_op, mod_flags,
2662 name, valuep, lengthp));
2663
2664 return (cmlb_prop_op(un->un_cmlbhandle,
2665 dev, dip, prop_op, mod_flags, name, valuep, lengthp,
2666 SDPART(dev), (void *)SD_PATH_DIRECT));
2667 }
2668
2669 /*
2670 * The following functions are for smart probing:
2671 * sd_scsi_probe_cache_init()
2672 * sd_scsi_probe_cache_fini()
2673 * sd_scsi_clear_probe_cache()
2674 * sd_scsi_probe_with_cache()
2675 */
2676
2677 /*
2678 * Function: sd_scsi_probe_cache_init
2679 *
2680 * Description: Initializes the probe response cache mutex and head pointer.
2681 *
2682 * Context: Kernel thread context
2683 */
2684
2685 static void
2686 sd_scsi_probe_cache_init(void)
2687 {
2688 mutex_init(&sd_scsi_probe_cache_mutex, NULL, MUTEX_DRIVER, NULL);
2689 sd_scsi_probe_cache_head = NULL;
2690 }
2691
2692
2693 /*
2694 * Function: sd_scsi_probe_cache_fini
2695 *
2696 * Description: Frees all resources associated with the probe response cache.
2697 *
2698 * Context: Kernel thread context
2699 */
2700
2701 static void
2702 sd_scsi_probe_cache_fini(void)
2703 {
2704 struct sd_scsi_probe_cache *cp;
2705 struct sd_scsi_probe_cache *ncp;
2706
2707 /* Clean up our smart probing linked list */
2708 for (cp = sd_scsi_probe_cache_head; cp != NULL; cp = ncp) {
2709 ncp = cp->next;
2710 kmem_free(cp, sizeof (struct sd_scsi_probe_cache));
2711 }
2712 sd_scsi_probe_cache_head = NULL;
2713 mutex_destroy(&sd_scsi_probe_cache_mutex);
2714 }
2715
2716
2717 /*
2718 * Function: sd_scsi_clear_probe_cache
2719 *
2720 * Description: This routine clears the probe response cache. This is
2721 * done when open() returns ENXIO so that when deferred
2722 * attach is attempted (possibly after a device has been
2723 * turned on) we will retry the probe. Since we don't know
2724 * which target we failed to open, we just clear the
2725 * entire cache.
2726 *
2727 * Context: Kernel thread context
2728 */
2729
2730 static void
2731 sd_scsi_clear_probe_cache(void)
2732 {
2733 struct sd_scsi_probe_cache *cp;
2734 int i;
2735
2736 mutex_enter(&sd_scsi_probe_cache_mutex);
2737 for (cp = sd_scsi_probe_cache_head; cp != NULL; cp = cp->next) {
2738 /*
2739 * Reset all entries to SCSIPROBE_EXISTS. This will
2740 * force probing to be performed the next time
2741 * sd_scsi_probe_with_cache is called.
2742 */
2743 for (i = 0; i < NTARGETS_WIDE; i++) {
2744 cp->cache[i] = SCSIPROBE_EXISTS;
2745 }
2746 }
2747 mutex_exit(&sd_scsi_probe_cache_mutex);
2748 }
2749
2750
2751 /*
2752 * Function: sd_scsi_probe_with_cache
2753 *
2754 * Description: This routine implements support for a scsi device probe
2755 * with cache. The driver maintains a cache of the target
2756 * responses to scsi probes. If we get no response from a
2757 * target during a probe inquiry, we remember that, and we
2758 * avoid additional calls to scsi_probe on non-zero LUNs
2759 * on the same target until the cache is cleared. By doing
2760 * so we avoid the 1/4 sec selection timeout for nonzero
2761 * LUNs. lun0 of a target is always probed.
2762 *
2763 * Arguments: devp - Pointer to a scsi_device(9S) structure
2764 * waitfn - indicates what the allocator routines should
2765 * do when resources are not available. This value
2766 * is passed on to scsi_probe() when that routine
2767 * is called.
2768 *
2769 * Return Code: SCSIPROBE_NORESP if a NORESP is in the probe response cache;
2770 * otherwise the value returned by scsi_probe(9F).
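 *
 * Illustrative example (editorial): probing lun 0 of target 3 records the
 * scsi_probe() result in cache[3]; if that result was SCSIPROBE_NORESP,
 * later probes of higher-numbered luns on target 3 return SCSIPROBE_NORESP
 * straight from the cache, each avoiding a 1/4 second selection timeout,
 * until sd_scsi_clear_probe_cache() resets the entries to
 * SCSIPROBE_EXISTS.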
2771 *
2772 * Context: Kernel thread context
2773 */
2774
2775 static int
2776 sd_scsi_probe_with_cache(struct scsi_device *devp, int (*waitfn)())
2777 {
2778 struct sd_scsi_probe_cache *cp;
2779 dev_info_t *pdip = ddi_get_parent(devp->sd_dev);
2780 int lun, tgt;
2781
2782 lun = ddi_prop_get_int(DDI_DEV_T_ANY, devp->sd_dev, DDI_PROP_DONTPASS,
2783 SCSI_ADDR_PROP_LUN, 0);
2784 tgt = ddi_prop_get_int(DDI_DEV_T_ANY, devp->sd_dev, DDI_PROP_DONTPASS,
2785 SCSI_ADDR_PROP_TARGET, -1);
2786
2787 /* Make sure caching is enabled and the target is in range */
2788 if ((tgt < 0) || (tgt >= NTARGETS_WIDE)) {
2789 /* do it the old way (no cache) */
2790 return (scsi_probe(devp, waitfn));
2791 }
2792
2793 mutex_enter(&sd_scsi_probe_cache_mutex);
2794
2795 /* Find the cache for this scsi bus instance */
2796 for (cp = sd_scsi_probe_cache_head; cp != NULL; cp = cp->next) {
2797 if (cp->pdip == pdip) {
2798 break;
2799 }
2800 }
2801
2802 /* If we can't find a cache for this pdip, create one */
2803 if (cp == NULL) {
2804 int i;
2805
2806 cp = kmem_zalloc(sizeof (struct sd_scsi_probe_cache),
2807 KM_SLEEP);
2808 cp->pdip = pdip;
2809 cp->next = sd_scsi_probe_cache_head;
2810 sd_scsi_probe_cache_head = cp;
2811 for (i = 0; i < NTARGETS_WIDE; i++) {
2812 cp->cache[i] = SCSIPROBE_EXISTS;
2813 }
2814 }
2815
2816 mutex_exit(&sd_scsi_probe_cache_mutex);
2817
2818 /* Recompute the cache for this target if LUN zero */
2819 if (lun == 0) {
2820 cp->cache[tgt] = SCSIPROBE_EXISTS;
2821 }
2822
2823 /* Don't probe if cache remembers a NORESP from a previous LUN. */
2824 if (cp->cache[tgt] != SCSIPROBE_EXISTS) {
2825 return (SCSIPROBE_NORESP);
2826 }
2827
2828 /* Do the actual probe; save & return the result */
2829 return (cp->cache[tgt] = scsi_probe(devp, waitfn));
2830 }
2831
2832
2833 /*
2834 * Function: sd_scsi_target_lun_init
2835 *
2836 * Description: Initializes the attached lun chain mutex and head pointer.
2837 *
2838 * Context: Kernel thread context
2839 */
2840
2841 static void
2842 sd_scsi_target_lun_init(void)
2843 {
2844 mutex_init(&sd_scsi_target_lun_mutex, NULL, MUTEX_DRIVER, NULL);
2845 sd_scsi_target_lun_head = NULL;
2846 }
2847
2848
2849 /*
2850 * Function: sd_scsi_target_lun_fini
2851 *
2852 * Description: Frees all resources associated with the attached lun
2853 * chain
2854 *
2855 * Context: Kernel thread context
2856 */
2857
2858 static void
2859 sd_scsi_target_lun_fini(void)
2860 {
2861 struct sd_scsi_hba_tgt_lun *cp;
2862 struct sd_scsi_hba_tgt_lun *ncp;
2863
2864 for (cp = sd_scsi_target_lun_head; cp != NULL; cp = ncp) {
2865 ncp = cp->next;
2866 kmem_free(cp, sizeof (struct sd_scsi_hba_tgt_lun));
2867 }
2868 sd_scsi_target_lun_head = NULL;
2869 mutex_destroy(&sd_scsi_target_lun_mutex);
2870 }
2871
2872
2873 /*
2874 * Function: sd_scsi_get_target_lun_count
2875 *
2876 * Description: This routine will check in the attached lun chain to see
2877 * how many luns are attached on the required SCSI controller
2878 * and target. Currently, some capabilities like tagged queueing
2879 * are supported per target by the HBA. So all luns in a
2880 * target have the same capabilities. Based on this assumption,
2881 * sd should only set these capabilities once per target. This
2882 * function is called when sd needs to decide how many luns
2883 * are already attached on a target.
2884 *
2885 * Arguments: dip - Pointer to the system's dev_info_t for the SCSI
2886 * controller device.
2887 * target - The target ID on the controller's SCSI bus.
2888 *
2889 * Return Code: The number of luns attached on the required target and
2890 * controller.
2891 * -1 if target ID is not in parallel SCSI scope or the given
2892 * dip is not in the chain.
2893 *
2894 * Context: Kernel thread context
2895 */
2896
2897 static int
2898 sd_scsi_get_target_lun_count(dev_info_t *dip, int target)
2899 {
2900 struct sd_scsi_hba_tgt_lun *cp;
2901
2902 if ((target < 0) || (target >= NTARGETS_WIDE)) {
2903 return (-1);
2904 }
2905
2906 mutex_enter(&sd_scsi_target_lun_mutex);
2907
2908 for (cp = sd_scsi_target_lun_head; cp != NULL; cp = cp->next) {
2909 if (cp->pdip == dip) {
2910 break;
2911 }
2912 }
2913
2914 mutex_exit(&sd_scsi_target_lun_mutex);
2915
2916 if (cp == NULL) {
2917 return (-1);
2918 }
2919
2920 return (cp->nlun[target]);
2921 }
2922
2923
2924 /*
2925 * Function: sd_scsi_update_lun_on_target
2926 *
2927 * Description: This routine is used to update the attached lun chain when a
2928 * lun is attached or detached on a target.
2929 *
2930 * Arguments: dip - Pointer to the system's dev_info_t for the SCSI
2931 * controller device.
2932 * target - The target ID on the controller's SCSI bus.
2933 * flag - Indicates whether the lun is attached or detached.
2934 *
2935 * Context: Kernel thread context
2936 */
2937
2938 static void
2939 sd_scsi_update_lun_on_target(dev_info_t *dip, int target, int flag)
2940 {
2941 struct sd_scsi_hba_tgt_lun *cp;
2942
2943 mutex_enter(&sd_scsi_target_lun_mutex);
2944
2945 for (cp = sd_scsi_target_lun_head; cp != NULL; cp = cp->next) {
2946 if (cp->pdip == dip) {
2947 break;
2948 }
2949 }
2950
2951 if ((cp == NULL) && (flag == SD_SCSI_LUN_ATTACH)) {
2952 cp = kmem_zalloc(sizeof (struct sd_scsi_hba_tgt_lun),
2953 KM_SLEEP);
2954 cp->pdip = dip;
2955 cp->next = sd_scsi_target_lun_head;
2956 sd_scsi_target_lun_head = cp;
2957 }
2958
2959 mutex_exit(&sd_scsi_target_lun_mutex);
2960
2961 if (cp != NULL) {
2962 if (flag == SD_SCSI_LUN_ATTACH) {
2963 cp->nlun[target]++;
2964 } else {
2965 cp->nlun[target]--;
2966 }
2967 }
2968 }
2969
2970
2971 /*
2972 * Function: sd_spin_up_unit
2973 *
2974 * Description: Issues the following commands to spin up the device:
2975 * START STOP UNIT and INQUIRY.
2976 *
2977 * Arguments: ssc - ssc contains pointer to driver soft state (unit)
2978 * structure for this target.
2979 *
2980 * Return Code: 0 - success
2981 * EIO - failure
2982 * EACCES - reservation conflict
2983 *
2984 * Context: Kernel thread context
2985 */
2986
2987 static int
2988 sd_spin_up_unit(sd_ssc_t *ssc)
2989 {
2990 size_t resid = 0;
2991 int has_conflict = FALSE;
2992 uchar_t *bufaddr;
2993 int status;
2994 struct sd_lun *un;
2995
2996 ASSERT(ssc != NULL);
2997 un = ssc->ssc_un;
2998 ASSERT(un != NULL);
2999
3000 /*
3001 * Send a throwaway START UNIT command.
3002 *
3003 * If we fail on this, we don't care presently what precisely
3004 * is wrong. EMC's arrays will also fail this with a check
3005 * condition (0x2/0x4/0x3) if the device is "inactive," but
3006 * we don't want to fail the attach because it may become
3007 * "active" later.
3008 */
3009 status = sd_send_scsi_START_STOP_UNIT(ssc, SD_TARGET_START,
3010 SD_PATH_DIRECT);
3011
3012 if (status != 0) {
3013 if (status == EACCES)
3014 has_conflict = TRUE;
3015 sd_ssc_assessment(ssc, SD_FMT_IGNORE);
3016 }
3017
3018 /*
3019 * Send another INQUIRY command to the target. This is necessary for
3020 * non-removable media direct access devices because their INQUIRY data
3021 * may not be fully qualified until they are spun up (perhaps via the
3022 * START command above).
Note: This seems to be needed for some
3023 * legacy devices only. The INQUIRY command should succeed even if a
3024 * Reservation Conflict is present.
3025 */
3026 bufaddr = kmem_zalloc(SUN_INQSIZE, KM_SLEEP);
3027
3028 if (sd_send_scsi_INQUIRY(ssc, bufaddr, SUN_INQSIZE, 0, 0, &resid)
3029 != 0) {
3030 kmem_free(bufaddr, SUN_INQSIZE);
3031 sd_ssc_assessment(ssc, SD_FMT_STATUS_CHECK);
3032 return (EIO);
3033 }
3034
3035 /*
3036 * If we got enough INQUIRY data, copy it over the old INQUIRY data.
3037 * Note that this routine does not return a failure here even if the
3038 * INQUIRY command did not return any data. This is a legacy behavior.
3039 */
3040 if ((SUN_INQSIZE - resid) >= SUN_MIN_INQLEN) {
3041 bcopy(bufaddr, SD_INQUIRY(un), SUN_INQSIZE);
3042 }
3043
3044 kmem_free(bufaddr, SUN_INQSIZE);
3045
3046 /* If we hit a reservation conflict above, tell the caller. */
3047 if (has_conflict == TRUE) {
3048 return (EACCES);
3049 }
3050
3051 return (0);
3052 }
3053
3054 #ifdef _LP64
3055 /*
3056 * Function: sd_enable_descr_sense
3057 *
3058 * Description: This routine attempts to select descriptor sense format
3059 * using the Control mode page. Devices that support 64 bit
3060 * LBAs (for >2TB luns) should also implement descriptor
3061 * sense data, so we will call this function whenever we see
3062 * a lun larger than 2TB. If for some reason the device
3063 * supports 64 bit LBAs but doesn't support descriptor sense,
3064 * presumably the mode select will fail. Everything will
3065 * continue to work normally except that we will not get
3066 * complete sense data for commands that fail with an LBA
3067 * larger than 32 bits.
3068 *
3069 * Arguments: ssc - ssc contains pointer to driver soft state (unit)
3070 * structure for this target.
3071 *
3072 * Context: Kernel thread context only
3073 */
3074
3075 static void
3076 sd_enable_descr_sense(sd_ssc_t *ssc)
3077 {
3078 uchar_t *header;
3079 struct mode_control_scsi3 *ctrl_bufp;
3080 size_t buflen;
3081 size_t bd_len;
3082 int status;
3083 struct sd_lun *un;
3084
3085 ASSERT(ssc != NULL);
3086 un = ssc->ssc_un;
3087 ASSERT(un != NULL);
3088
3089 /*
3090 * Read MODE SENSE page 0xA, Control Mode Page
3091 */
3092 buflen = MODE_HEADER_LENGTH + MODE_BLK_DESC_LENGTH +
3093 sizeof (struct mode_control_scsi3);
3094 header = kmem_zalloc(buflen, KM_SLEEP);
3095
3096 status = sd_send_scsi_MODE_SENSE(ssc, CDB_GROUP0, header, buflen,
3097 MODEPAGE_CTRL_MODE, SD_PATH_DIRECT);
3098
3099 if (status != 0) {
3100 SD_ERROR(SD_LOG_COMMON, un,
3101 "sd_enable_descr_sense: mode sense ctrl page failed\n");
3102 goto eds_exit;
3103 }
3104
3105 /*
3106 * Determine size of Block Descriptors in order to locate
3107 * the mode page data. ATAPI devices return 0, SCSI devices
3108 * should return MODE_BLK_DESC_LENGTH.
3109 */
3110 bd_len = ((struct mode_header *)header)->bdesc_length;
3111
3112 /* Clear the mode data length field for MODE SELECT */
3113 ((struct mode_header *)header)->length = 0;
3114
3115 ctrl_bufp = (struct mode_control_scsi3 *)
3116 (header + MODE_HEADER_LENGTH + bd_len);
3117
3118 /*
3119 * If the page length is smaller than the expected value,
3120 * the target device doesn't support D_SENSE. Bail out here.
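 *
 * (Editorial note: the page length byte counts only the bytes following
 * it, so for a supported control mode page it is expected to be at least
 * sizeof (struct mode_control_scsi3) minus the 2 bytes of page code and
 * page length; hence the comparison against
 * sizeof (struct mode_control_scsi3) - 2 below.)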
3121 */
3122 if (ctrl_bufp->mode_page.length <
3123 sizeof (struct mode_control_scsi3) - 2) {
3124 SD_ERROR(SD_LOG_COMMON, un,
3125 "sd_enable_descr_sense: enable D_SENSE failed\n");
3126 goto eds_exit;
3127 }
3128
3129 /*
3130 * Clear PS bit for MODE SELECT
3131 */
3132 ctrl_bufp->mode_page.ps = 0;
3133
3134 /*
3135 * Set D_SENSE to enable descriptor sense format.
3136 */
3137 ctrl_bufp->d_sense = 1;
3138
3139 sd_ssc_assessment(ssc, SD_FMT_IGNORE);
3140
3141 /*
3142 * Use MODE SELECT to commit the change to the D_SENSE bit
3143 */
3144 status = sd_send_scsi_MODE_SELECT(ssc, CDB_GROUP0, header,
3145 buflen, SD_DONTSAVE_PAGE, SD_PATH_DIRECT);
3146
3147 if (status != 0) {
3148 SD_INFO(SD_LOG_COMMON, un,
3149 "sd_enable_descr_sense: mode select ctrl page failed\n");
3150 } else {
3151 kmem_free(header, buflen);
3152 return;
3153 }
3154
3155 eds_exit:
3156 sd_ssc_assessment(ssc, SD_FMT_IGNORE);
3157 kmem_free(header, buflen);
3158 }
3159
3160 /*
3161 * Function: sd_reenable_dsense_task
3162 *
3163 * Description: Re-enable descriptor sense after device or bus reset
3164 *
3165 * Context: Executes in a taskq() thread context
3166 */
3167 static void
3168 sd_reenable_dsense_task(void *arg)
3169 {
3170 struct sd_lun *un = arg;
3171 sd_ssc_t *ssc;
3172
3173 ASSERT(un != NULL);
3174
3175 ssc = sd_ssc_init(un);
3176 sd_enable_descr_sense(ssc);
3177 sd_ssc_fini(ssc);
3178 }
3179 #endif /* _LP64 */
3180
3181 /*
3182 * Function: sd_set_mmc_caps
3183 *
3184 * Description: This routine determines if the device is MMC compliant and if
3185 * the device supports CDDA via a mode sense of the CDVD
3186 * capabilities mode page. Also checks if the device is a
3187 * dvdram writable device.
3188 *
3189 * Arguments: ssc - ssc contains pointer to driver soft state (unit)
3190 * structure for this target.
3191 *
3192 * Context: Kernel thread context only
3193 */
3194
3195 static void
3196 sd_set_mmc_caps(sd_ssc_t *ssc)
3197 {
3198 struct mode_header_grp2 *sense_mhp;
3199 uchar_t *sense_page;
3200 caddr_t buf;
3201 int bd_len;
3202 int status;
3203 struct uscsi_cmd com;
3204 int rtn;
3205 uchar_t *out_data_rw, *out_data_hd;
3206 uchar_t *rqbuf_rw, *rqbuf_hd;
3207 struct sd_lun *un;
3208
3209 ASSERT(ssc != NULL);
3210 un = ssc->ssc_un;
3211 ASSERT(un != NULL);
3212
3213 /*
3214 * The flags which will be set in this function are: mmc compliant,
3215 * dvdram writable device, and cdda support. Initialize them to FALSE;
3216 * if a capability is detected, it will be set to TRUE.
3217 */
3218 un->un_f_mmc_cap = FALSE;
3219 un->un_f_dvdram_writable_device = FALSE;
3220 un->un_f_cfg_cdda = FALSE;
3221
3222 buf = kmem_zalloc(BUFLEN_MODE_CDROM_CAP, KM_SLEEP);
3223 status = sd_send_scsi_MODE_SENSE(ssc, CDB_GROUP1, (uchar_t *)buf,
3224 BUFLEN_MODE_CDROM_CAP, MODEPAGE_CDROM_CAP, SD_PATH_DIRECT);
3225
3226 sd_ssc_assessment(ssc, SD_FMT_IGNORE);
3227
3228 if (status != 0) {
3229 /* command failed; just return */
3230 kmem_free(buf, BUFLEN_MODE_CDROM_CAP);
3231 return;
3232 }
3233 /*
3234 * If the mode sense request for the CDROM CAPABILITIES
3235 * page (0x2A) succeeds, the device is assumed to be MMC.
3236 */
3237 un->un_f_mmc_cap = TRUE;
3238
3239 /* Get to the page data */
3240 sense_mhp = (struct mode_header_grp2 *)buf;
3241 bd_len = (sense_mhp->bdesc_length_hi << 8) |
3242 sense_mhp->bdesc_length_lo;
3243 if (bd_len > MODE_BLK_DESC_LENGTH) {
3244 /*
3245 * We did not get back the expected block descriptor
3246 * length, so we cannot determine if the device supports
3247 * CDDA.
However, we still indicate the device is MMC 3248 * according to the successful response to the page 3249 * 0x2A mode sense request. 3250 */ 3251 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 3252 "sd_set_mmc_caps: Mode Sense returned " 3253 "invalid block descriptor length\n"); 3254 kmem_free(buf, BUFLEN_MODE_CDROM_CAP); 3255 return; 3256 } 3257 3258 /* See if read CDDA is supported */ 3259 sense_page = (uchar_t *)(buf + MODE_HEADER_LENGTH_GRP2 + 3260 bd_len); 3261 un->un_f_cfg_cdda = (sense_page[5] & 0x01) ? TRUE : FALSE; 3262 3263 /* See if writing DVD RAM is supported. */ 3264 un->un_f_dvdram_writable_device = (sense_page[3] & 0x20) ? TRUE : FALSE; 3265 if (un->un_f_dvdram_writable_device == TRUE) { 3266 kmem_free(buf, BUFLEN_MODE_CDROM_CAP); 3267 return; 3268 } 3269 3270 /* 3271 * If the device presents DVD or CD capabilities in the mode 3272 * page, we can return here since a RRD will not have 3273 * these capabilities. 3274 */ 3275 if ((sense_page[2] & 0x3f) || (sense_page[3] & 0x3f)) { 3276 kmem_free(buf, BUFLEN_MODE_CDROM_CAP); 3277 return; 3278 } 3279 kmem_free(buf, BUFLEN_MODE_CDROM_CAP); 3280 3281 /* 3282 * If un->un_f_dvdram_writable_device is still FALSE, 3283 * check for a Removable Rigid Disk (RRD). A RRD 3284 * device is identified by the features RANDOM_WRITABLE and 3285 * HARDWARE_DEFECT_MANAGEMENT. 3286 */ 3287 out_data_rw = kmem_zalloc(SD_CURRENT_FEATURE_LEN, KM_SLEEP); 3288 rqbuf_rw = kmem_zalloc(SENSE_LENGTH, KM_SLEEP); 3289 3290 rtn = sd_send_scsi_feature_GET_CONFIGURATION(ssc, &com, rqbuf_rw, 3291 SENSE_LENGTH, out_data_rw, SD_CURRENT_FEATURE_LEN, 3292 RANDOM_WRITABLE, SD_PATH_STANDARD); 3293 3294 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 3295 3296 if (rtn != 0) { 3297 kmem_free(out_data_rw, SD_CURRENT_FEATURE_LEN); 3298 kmem_free(rqbuf_rw, SENSE_LENGTH); 3299 return; 3300 } 3301 3302 out_data_hd = kmem_zalloc(SD_CURRENT_FEATURE_LEN, KM_SLEEP); 3303 rqbuf_hd = kmem_zalloc(SENSE_LENGTH, KM_SLEEP); 3304 3305 rtn = sd_send_scsi_feature_GET_CONFIGURATION(ssc, &com, rqbuf_hd, 3306 SENSE_LENGTH, out_data_hd, SD_CURRENT_FEATURE_LEN, 3307 HARDWARE_DEFECT_MANAGEMENT, SD_PATH_STANDARD); 3308 3309 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 3310 3311 if (rtn == 0) { 3312 /* 3313 * We have good information, check for random writable 3314 * and hardware defect features. 3315 */ 3316 if ((out_data_rw[9] & RANDOM_WRITABLE) && 3317 (out_data_hd[9] & HARDWARE_DEFECT_MANAGEMENT)) { 3318 un->un_f_dvdram_writable_device = TRUE; 3319 } 3320 } 3321 3322 kmem_free(out_data_rw, SD_CURRENT_FEATURE_LEN); 3323 kmem_free(rqbuf_rw, SENSE_LENGTH); 3324 kmem_free(out_data_hd, SD_CURRENT_FEATURE_LEN); 3325 kmem_free(rqbuf_hd, SENSE_LENGTH); 3326 } 3327 3328 /* 3329 * Function: sd_check_for_writable_cd 3330 * 3331 * Description: This routine determines if the media in the device is 3332 * writable or not. It uses the get configuration command (0x46) 3333 * to determine if the media is writable 3334 * 3335 * Arguments: un - driver soft state (unit) structure 3336 * path_flag - SD_PATH_DIRECT to use the USCSI "direct" 3337 * chain and the normal command waitq, or 3338 * SD_PATH_DIRECT_PRIORITY to use the USCSI 3339 * "direct" chain and bypass the normal command 3340 * waitq. 3341 * 3342 * Context: Never called at interrupt context. 
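 *
 * Note: per MMC, bytes 6-7 of the GET CONFIGURATION feature header
 * hold the device's current profile, so the (out_data[6] == 0 &&
 * out_data[7] == 0x12) test below checks for the DVD-RAM profile
 * (0x0012), a writable medium.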
3343 */ 3344 3345 static void 3346 sd_check_for_writable_cd(sd_ssc_t *ssc, int path_flag) 3347 { 3348 struct uscsi_cmd com; 3349 uchar_t *out_data; 3350 uchar_t *rqbuf; 3351 int rtn; 3352 uchar_t *out_data_rw, *out_data_hd; 3353 uchar_t *rqbuf_rw, *rqbuf_hd; 3354 struct mode_header_grp2 *sense_mhp; 3355 uchar_t *sense_page; 3356 caddr_t buf; 3357 int bd_len; 3358 int status; 3359 struct sd_lun *un; 3360 3361 ASSERT(ssc != NULL); 3362 un = ssc->ssc_un; 3363 ASSERT(un != NULL); 3364 ASSERT(mutex_owned(SD_MUTEX(un))); 3365 3366 /* 3367 * Initialize the writable media to false; only if the configuration 3368 * info tells us otherwise will we set it to true. 3369 */ 3370 un->un_f_mmc_writable_media = FALSE; 3371 mutex_exit(SD_MUTEX(un)); 3372 3373 out_data = kmem_zalloc(SD_PROFILE_HEADER_LEN, KM_SLEEP); 3374 rqbuf = kmem_zalloc(SENSE_LENGTH, KM_SLEEP); 3375 3376 rtn = sd_send_scsi_GET_CONFIGURATION(ssc, &com, rqbuf, SENSE_LENGTH, 3377 out_data, SD_PROFILE_HEADER_LEN, path_flag); 3378 3379 if (rtn != 0) 3380 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 3381 3382 mutex_enter(SD_MUTEX(un)); 3383 if (rtn == 0) { 3384 /* 3385 * We have good information, check for writable DVD. 3386 */ 3387 if ((out_data[6] == 0) && (out_data[7] == 0x12)) { 3388 un->un_f_mmc_writable_media = TRUE; 3389 kmem_free(out_data, SD_PROFILE_HEADER_LEN); 3390 kmem_free(rqbuf, SENSE_LENGTH); 3391 return; 3392 } 3393 } 3394 3395 kmem_free(out_data, SD_PROFILE_HEADER_LEN); 3396 kmem_free(rqbuf, SENSE_LENGTH); 3397 3398 /* 3399 * Determine if this is a RRD type device. 3400 */ 3401 mutex_exit(SD_MUTEX(un)); 3402 buf = kmem_zalloc(BUFLEN_MODE_CDROM_CAP, KM_SLEEP); 3403 status = sd_send_scsi_MODE_SENSE(ssc, CDB_GROUP1, (uchar_t *)buf, 3404 BUFLEN_MODE_CDROM_CAP, MODEPAGE_CDROM_CAP, path_flag); 3405 3406 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 3407 3408 mutex_enter(SD_MUTEX(un)); 3409 if (status != 0) { 3410 /* command failed; just return */ 3411 kmem_free(buf, BUFLEN_MODE_CDROM_CAP); 3412 return; 3413 } 3414 3415 /* Get to the page data */ 3416 sense_mhp = (struct mode_header_grp2 *)buf; 3417 bd_len = (sense_mhp->bdesc_length_hi << 8) | sense_mhp->bdesc_length_lo; 3418 if (bd_len > MODE_BLK_DESC_LENGTH) { 3419 /* 3420 * We did not get back the expected block descriptor length so 3421 * we cannot check the mode page. 3422 */ 3423 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 3424 "sd_check_for_writable_cd: Mode Sense returned " 3425 "invalid block descriptor length\n"); 3426 kmem_free(buf, BUFLEN_MODE_CDROM_CAP); 3427 return; 3428 } 3429 3430 /* 3431 * If the device presents DVD or CD capabilities in the mode 3432 * page, we can return here since a RRD device will not have 3433 * these capabilities. 3434 */ 3435 sense_page = (uchar_t *)(buf + MODE_HEADER_LENGTH_GRP2 + bd_len); 3436 if ((sense_page[2] & 0x3f) || (sense_page[3] & 0x3f)) { 3437 kmem_free(buf, BUFLEN_MODE_CDROM_CAP); 3438 return; 3439 } 3440 kmem_free(buf, BUFLEN_MODE_CDROM_CAP); 3441 3442 /* 3443 * If un->un_f_mmc_writable_media is still FALSE, 3444 * check for RRD type media. A RRD device is identified 3445 * by the features RANDOM_WRITABLE and HARDWARE_DEFECT_MANAGEMENT.
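 * Per MMC, in each feature descriptor returned below, byte 9 holds
 * the low byte of the feature code and bit 0 of byte 10 is the
 * "current" flag; the [9]/[10] tests that follow verify that each
 * feature is both present and currently active.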
3446 */ 3447 mutex_exit(SD_MUTEX(un)); 3448 out_data_rw = kmem_zalloc(SD_CURRENT_FEATURE_LEN, KM_SLEEP); 3449 rqbuf_rw = kmem_zalloc(SENSE_LENGTH, KM_SLEEP); 3450 3451 rtn = sd_send_scsi_feature_GET_CONFIGURATION(ssc, &com, rqbuf_rw, 3452 SENSE_LENGTH, out_data_rw, SD_CURRENT_FEATURE_LEN, 3453 RANDOM_WRITABLE, path_flag); 3454 3455 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 3456 if (rtn != 0) { 3457 kmem_free(out_data_rw, SD_CURRENT_FEATURE_LEN); 3458 kmem_free(rqbuf_rw, SENSE_LENGTH); 3459 mutex_enter(SD_MUTEX(un)); 3460 return; 3461 } 3462 3463 out_data_hd = kmem_zalloc(SD_CURRENT_FEATURE_LEN, KM_SLEEP); 3464 rqbuf_hd = kmem_zalloc(SENSE_LENGTH, KM_SLEEP); 3465 3466 rtn = sd_send_scsi_feature_GET_CONFIGURATION(ssc, &com, rqbuf_hd, 3467 SENSE_LENGTH, out_data_hd, SD_CURRENT_FEATURE_LEN, 3468 HARDWARE_DEFECT_MANAGEMENT, path_flag); 3469 3470 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 3471 mutex_enter(SD_MUTEX(un)); 3472 if (rtn == 0) { 3473 /* 3474 * We have good information, check for random writable 3475 * and hardware defect features as current. 3476 */ 3477 if ((out_data_rw[9] & RANDOM_WRITABLE) && 3478 (out_data_rw[10] & 0x1) && 3479 (out_data_hd[9] & HARDWARE_DEFECT_MANAGEMENT) && 3480 (out_data_hd[10] & 0x1)) { 3481 un->un_f_mmc_writable_media = TRUE; 3482 } 3483 } 3484 3485 kmem_free(out_data_rw, SD_CURRENT_FEATURE_LEN); 3486 kmem_free(rqbuf_rw, SENSE_LENGTH); 3487 kmem_free(out_data_hd, SD_CURRENT_FEATURE_LEN); 3488 kmem_free(rqbuf_hd, SENSE_LENGTH); 3489 } 3490 3491 /* 3492 * Function: sd_read_unit_properties 3493 * 3494 * Description: The following implements a property lookup mechanism. 3495 * Properties for particular disks (keyed on vendor, model 3496 * and rev numbers) are sought in the sd.conf file via 3497 * sd_process_sdconf_file(), and if not found there, are 3498 * looked for in a list hardcoded in this driver via 3499 * sd_process_sdconf_table(). Once located, the properties 3500 * are used to update the driver unit structure. 3501 * 3502 * Arguments: un - driver soft state (unit) structure 3503 */ 3504 3505 static void 3506 sd_read_unit_properties(struct sd_lun *un) 3507 { 3508 /* 3509 * sd_process_sdconf_file returns SD_FAILURE if it cannot find 3510 * the "sd-config-list" property (from the sd.conf file) or if 3511 * there was not a match for the inquiry vid/pid. If this 3512 * occurs, the static driver configuration table is searched for 3513 * a match. 3514 */ 3515 ASSERT(un != NULL); 3516 if (sd_process_sdconf_file(un) == SD_FAILURE) { 3517 sd_process_sdconf_table(un); 3518 } 3519 3520 /* check for LSI device */ 3521 sd_is_lsi(un); 3522 3523 3524 } 3525 3526 3527 /* 3528 * Function: sd_process_sdconf_file 3529 * 3530 * Description: Use ddi_prop_lookup(9F) to obtain the properties from the 3531 * driver's config file (i.e., sd.conf) and update the driver 3532 * soft state structure accordingly. 3533 * 3534 * Arguments: un - driver soft state (unit) structure 3535 * 3536 * Return Code: SD_SUCCESS - The properties were successfully set according 3537 * to the driver configuration file. 3538 * SD_FAILURE - The driver config list was not obtained or 3539 * there was no vid/pid match. This indicates that 3540 * the static config table should be used. 3541 * 3542 * The config file has a property, "sd-config-list". Currently we support 3543 * two kinds of formats.
For both formats, the value of this property 3544 * is a list of duplets: 3545 * 3546 * sd-config-list= 3547 * <duplet>, 3548 * [,<duplet>]*; 3549 * 3550 * For the improved format, where 3551 * 3552 * <duplet>:= "<vid+pid>","<tunable-list>" 3553 * 3554 * and 3555 * 3556 * <tunable-list>:= <tunable> [, <tunable> ]*; 3557 * <tunable> = <name> : <value> 3558 * 3559 * The <vid+pid> is the string that is returned by the target device on a 3560 * SCSI inquiry command, the <tunable-list> contains one or more tunables 3561 * to apply to all target devices with the specified <vid+pid>. 3562 * 3563 * Each <tunable> is a "<name> : <value>" pair. 3564 * 3565 * For the old format, the structure of each duplet is as follows: 3566 * 3567 * <duplet>:= "<vid+pid>","<data-property-name_list>" 3568 * 3569 * The first entry of the duplet is the device ID string (the concatenated 3570 * vid & pid; not to be confused with a device_id). This is defined in 3571 * the same way as in the sd_disk_table. 3572 * 3573 * The second part of the duplet is a string that identifies a 3574 * data-property-name-list. The data-property-name-list is defined as 3575 * follows: 3576 * 3577 * <data-property-name-list>:=<data-property-name> [<data-property-name>] 3578 * 3579 * The syntax of <data-property-name> depends on the <version> field. 3580 * 3581 * If version = SD_CONF_VERSION_1 we have the following syntax: 3582 * 3583 * <data-property-name>:=<version>,<flags>,<prop0>,<prop1>,.....<propN> 3584 * 3585 * where the prop0 value will be used to set prop0 if bit0 set in the 3586 * flags, prop1 if bit1 set, etc. and N = SD_CONF_MAX_ITEMS -1 3587 * 3588 */ 3589 3590 static int 3591 sd_process_sdconf_file(struct sd_lun *un) 3592 { 3593 char **config_list = NULL; 3594 uint_t nelements; 3595 char *vidptr; 3596 int vidlen; 3597 char *dnlist_ptr; 3598 char *dataname_ptr; 3599 char *dataname_lasts; 3600 int *data_list = NULL; 3601 uint_t data_list_len; 3602 int rval = SD_FAILURE; 3603 int i; 3604 3605 ASSERT(un != NULL); 3606 3607 /* Obtain the configuration list associated with the .conf file */ 3608 if (ddi_prop_lookup_string_array(DDI_DEV_T_ANY, SD_DEVINFO(un), 3609 DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, sd_config_list, 3610 &config_list, &nelements) != DDI_PROP_SUCCESS) { 3611 return (SD_FAILURE); 3612 } 3613 3614 /* 3615 * Compare vids in each duplet to the inquiry vid - if a match is 3616 * made, get the data value and update the soft state structure 3617 * accordingly. 3618 * 3619 * Each duplet should show as a pair of strings, return SD_FAILURE 3620 * otherwise. 3621 */ 3622 if (nelements & 1) { 3623 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 3624 "sd-config-list should show as pairs of strings.\n"); 3625 if (config_list) 3626 ddi_prop_free(config_list); 3627 return (SD_FAILURE); 3628 } 3629 3630 for (i = 0; i < nelements; i += 2) { 3631 /* 3632 * Note: The assumption here is that each vid entry is on 3633 * a unique line from its associated duplet. 3634 */ 3635 vidptr = config_list[i]; 3636 vidlen = (int)strlen(vidptr); 3637 if ((vidlen == 0) || 3638 (sd_sdconf_id_match(un, vidptr, vidlen) != SD_SUCCESS)) { 3639 continue; 3640 } 3641 3642 /* 3643 * dnlist contains 1 or more blank separated 3644 * data-property-name entries 3645 */ 3646 dnlist_ptr = config_list[i + 1]; 3647 3648 if (strchr(dnlist_ptr, ':') != NULL) { 3649 /* 3650 * Decode the improved format sd-config-list. 
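 * For illustration, a hypothetical improved format entry (the
 * vid/pid and the tunables shown are made up) could look like:
 *
 *	sd-config-list = "SUN     T300", "retries-busy:5, throttle-max:32";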
3651 */ 3652 sd_nvpair_str_decode(un, dnlist_ptr); 3653 } else { 3654 /* 3655 * The old format sd-config-list: loop through all 3656 * data-property-name entries in the 3657 * data-property-name-list, 3658 * setting the properties for each. 3659 */ 3660 for (dataname_ptr = sd_strtok_r(dnlist_ptr, " \t", 3661 &dataname_lasts); dataname_ptr != NULL; 3662 dataname_ptr = sd_strtok_r(NULL, " \t", 3663 &dataname_lasts)) { 3664 int version; 3665 3666 SD_INFO(SD_LOG_ATTACH_DETACH, un, 3667 "sd_process_sdconf_file: disk:%s, " 3668 "data:%s\n", vidptr, dataname_ptr); 3669 3670 /* Get the data list */ 3671 if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, 3672 SD_DEVINFO(un), 0, dataname_ptr, &data_list, 3673 &data_list_len) != DDI_PROP_SUCCESS) { 3674 SD_INFO(SD_LOG_ATTACH_DETACH, un, 3675 "sd_process_sdconf_file: data " 3676 "property (%s) has no value\n", 3677 dataname_ptr); 3678 continue; 3679 } 3680 3681 version = data_list[0]; 3682 3683 if (version == SD_CONF_VERSION_1) { 3684 sd_tunables values; 3685 3686 /* Set the properties */ 3687 if (sd_chk_vers1_data(un, data_list[1], 3688 &data_list[2], data_list_len, 3689 dataname_ptr) == SD_SUCCESS) { 3690 sd_get_tunables_from_conf(un, 3691 data_list[1], &data_list[2], 3692 &values); 3693 sd_set_vers1_properties(un, 3694 data_list[1], &values); 3695 rval = SD_SUCCESS; 3696 } else { 3697 rval = SD_FAILURE; 3698 } 3699 } else { 3700 scsi_log(SD_DEVINFO(un), sd_label, 3701 CE_WARN, "data property %s version " 3702 "0x%x is invalid.", 3703 dataname_ptr, version); 3704 rval = SD_FAILURE; 3705 } 3706 if (data_list) 3707 ddi_prop_free(data_list); 3708 } 3709 } 3710 } 3711 3712 /* free up the memory allocated by ddi_prop_lookup_string_array(). */ 3713 if (config_list) { 3714 ddi_prop_free(config_list); 3715 } 3716 3717 return (rval); 3718 } 3719 3720 /* 3721 * Function: sd_nvpair_str_decode() 3722 * 3723 * Description: Parse the improved format sd-config-list to get 3724 * each tunable entry, which consists of a name-value pair. 3725 * Then call sd_set_properties() to set each property. 3726 * 3727 * Arguments: un - driver soft state (unit) structure 3728 * nvpair_str - the tunable list 3729 */ 3730 static void 3731 sd_nvpair_str_decode(struct sd_lun *un, char *nvpair_str) 3732 { 3733 char *nv, *name, *value, *token; 3734 char *nv_lasts, *v_lasts, *x_lasts; 3735 3736 for (nv = sd_strtok_r(nvpair_str, ",", &nv_lasts); nv != NULL; 3737 nv = sd_strtok_r(NULL, ",", &nv_lasts)) { 3738 token = sd_strtok_r(nv, ":", &v_lasts); 3739 name = sd_strtok_r(token, " \t", &x_lasts); 3740 token = sd_strtok_r(NULL, ":", &v_lasts); 3741 value = sd_strtok_r(token, " \t", &x_lasts); 3742 if (name == NULL || value == NULL) { 3743 SD_INFO(SD_LOG_ATTACH_DETACH, un, 3744 "sd_nvpair_str_decode: " 3745 "name or value is not valid!\n"); 3746 } else { 3747 sd_set_properties(un, name, value); 3748 } 3749 } 3750 } 3751 3752 /* 3753 * Function: sd_strtok_r() 3754 * 3755 * Description: This function uses strpbrk and strspn to break the 3756 * string into tokens on sequential calls. It returns 3757 * NULL when no non-separator characters remain. The first 3758 * argument is NULL for subsequent calls.
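 *
 * For example, tokenizing "retries-busy : 5" with sepset ":" returns
 * "retries-busy " on the first call and " 5" on the second (with a
 * NULL first argument); the surrounding blanks are then trimmed by
 * the " \t" passes in sd_nvpair_str_decode() above.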
3759 */ 3760 static char * 3761 sd_strtok_r(char *string, const char *sepset, char **lasts) 3762 { 3763 char *q, *r; 3764 3765 /* First or subsequent call */ 3766 if (string == NULL) 3767 string = *lasts; 3768 3769 if (string == NULL) 3770 return (NULL); 3771 3772 /* Skip leading separators */ 3773 q = string + strspn(string, sepset); 3774 3775 if (*q == '\0') 3776 return (NULL); 3777 3778 if ((r = strpbrk(q, sepset)) == NULL) 3779 *lasts = NULL; 3780 else { 3781 *r = '\0'; 3782 *lasts = r + 1; 3783 } 3784 return (q); 3785 } 3786 3787 /* 3788 * Function: sd_set_properties() 3789 * 3790 * Description: Set device properties based on the improved 3791 * format sd-config-list. 3792 * 3793 * Arguments: un - driver soft state (unit) structure 3794 * name - supported tunable name 3795 * value - tunable value 3796 */ 3797 static void 3798 sd_set_properties(struct sd_lun *un, char *name, char *value) 3799 { 3800 char *endptr = NULL; 3801 long val = 0; 3802 3803 if (strcasecmp(name, "cache-nonvolatile") == 0) { 3804 if (strcasecmp(value, "true") == 0) { 3805 un->un_f_suppress_cache_flush = TRUE; 3806 } else if (strcasecmp(value, "false") == 0) { 3807 un->un_f_suppress_cache_flush = FALSE; 3808 } else { 3809 goto value_invalid; 3810 } 3811 SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_set_properties: " 3812 "suppress_cache_flush flag set to %d\n", 3813 un->un_f_suppress_cache_flush); 3814 return; 3815 } 3816 3817 if (strcasecmp(name, "controller-type") == 0) { 3818 if (ddi_strtol(value, &endptr, 0, &val) == 0) { 3819 un->un_ctype = val; 3820 } else { 3821 goto value_invalid; 3822 } 3823 SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_set_properties: " 3824 "ctype set to %d\n", un->un_ctype); 3825 return; 3826 } 3827 3828 if (strcasecmp(name, "delay-busy") == 0) { 3829 if (ddi_strtol(value, &endptr, 0, &val) == 0) { 3830 un->un_busy_timeout = drv_usectohz(val / 1000); 3831 } else { 3832 goto value_invalid; 3833 } 3834 SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_set_properties: " 3835 "busy_timeout set to %d\n", un->un_busy_timeout); 3836 return; 3837 } 3838 3839 if (strcasecmp(name, "disksort") == 0) { 3840 if (strcasecmp(value, "true") == 0) { 3841 un->un_f_disksort_disabled = FALSE; 3842 } else if (strcasecmp(value, "false") == 0) { 3843 un->un_f_disksort_disabled = TRUE; 3844 } else { 3845 goto value_invalid; 3846 } 3847 SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_set_properties: " 3848 "disksort disabled flag set to %d\n", 3849 un->un_f_disksort_disabled); 3850 return; 3851 } 3852 3853 if (strcasecmp(name, "timeout-releasereservation") == 0) { 3854 if (ddi_strtol(value, &endptr, 0, &val) == 0) { 3855 un->un_reserve_release_time = val; 3856 } else { 3857 goto value_invalid; 3858 } 3859 SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_set_properties: " 3860 "reservation release timeout set to %d\n", 3861 un->un_reserve_release_time); 3862 return; 3863 } 3864 3865 if (strcasecmp(name, "reset-lun") == 0) { 3866 if (strcasecmp(value, "true") == 0) { 3867 un->un_f_lun_reset_enabled = TRUE; 3868 } else if (strcasecmp(value, "false") == 0) { 3869 un->un_f_lun_reset_enabled = FALSE; 3870 } else { 3871 goto value_invalid; 3872 } 3873 SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_set_properties: " 3874 "lun reset enabled flag set to %d\n", 3875 un->un_f_lun_reset_enabled); 3876 return; 3877 } 3878 3879 if (strcasecmp(name, "retries-busy") == 0) { 3880 if (ddi_strtol(value, &endptr, 0, &val) == 0) { 3881 un->un_busy_retry_count = val; 3882 } else { 3883 goto value_invalid; 3884 } 3885 SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_set_properties: " 3886 "busy retry 
count set to %d\n", un->un_busy_retry_count); 3887 return; 3888 } 3889 3890 if (strcasecmp(name, "retries-timeout") == 0) { 3891 if (ddi_strtol(value, &endptr, 0, &val) == 0) { 3892 un->un_retry_count = val; 3893 } else { 3894 goto value_invalid; 3895 } 3896 SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_set_properties: " 3897 "timeout retry count set to %d\n", un->un_retry_count); 3898 return; 3899 } 3900 3901 if (strcasecmp(name, "retries-notready") == 0) { 3902 if (ddi_strtol(value, &endptr, 0, &val) == 0) { 3903 un->un_notready_retry_count = val; 3904 } else { 3905 goto value_invalid; 3906 } 3907 SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_set_properties: " 3908 "notready retry count set to %d\n", 3909 un->un_notready_retry_count); 3910 return; 3911 } 3912 3913 if (strcasecmp(name, "retries-reset") == 0) { 3914 if (ddi_strtol(value, &endptr, 0, &val) == 0) { 3915 un->un_reset_retry_count = val; 3916 } else { 3917 goto value_invalid; 3918 } 3919 SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_set_properties: " 3920 "reset retry count set to %d\n", 3921 un->un_reset_retry_count); 3922 return; 3923 } 3924 3925 if (strcasecmp(name, "throttle-max") == 0) { 3926 if (ddi_strtol(value, &endptr, 0, &val) == 0) { 3927 un->un_saved_throttle = un->un_throttle = val; 3928 } else { 3929 goto value_invalid; 3930 } 3931 SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_set_properties: " 3932 "throttle set to %d\n", un->un_throttle); 3933 } 3934 3935 if (strcasecmp(name, "throttle-min") == 0) { 3936 if (ddi_strtol(value, &endptr, 0, &val) == 0) { 3937 un->un_min_throttle = val; 3938 } else { 3939 goto value_invalid; 3940 } 3941 SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_set_properties: " 3942 "min throttle set to %d\n", un->un_min_throttle); 3943 } 3944 3945 /* 3946 * Validate the throttle values. 3947 * If any of the numbers are invalid, set everything to defaults. 3948 */ 3949 if ((un->un_throttle < SD_LOWEST_VALID_THROTTLE) || 3950 (un->un_min_throttle < SD_LOWEST_VALID_THROTTLE) || 3951 (un->un_min_throttle > un->un_throttle)) { 3952 un->un_saved_throttle = un->un_throttle = sd_max_throttle; 3953 un->un_min_throttle = sd_min_throttle; 3954 } 3955 return; 3956 3957 value_invalid: 3958 SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_set_properties: " 3959 "value of prop %s is invalid\n", name); 3960 } 3961 3962 /* 3963 * Function: sd_get_tunables_from_conf() 3964 * 3965 * Description: This function reads the data list from the sd.conf file 3966 * and pulls out the values that take numeric arguments, placing 3967 * each value in the appropriate sd_tunables member. 3968 * 3969 * Since the order of the data list members varies across platforms, 3970 * this function reads them from the data list in a platform-specific 3971 * order and places them into the correct sd_tunables member that is 3972 * consistent across all platforms.
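 *
 * For illustration, a hypothetical old format sd.conf fragment
 * (assuming SD_CONF_BSET_THROTTLE is flag bit 0, so prop0 carries
 * the throttle value and the remaining slots are unused):
 *
 *	sd-config-list = "SUN     T300", "sun-t300-data";
 *	sun-t300-data = 1,0x1,32,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0;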
3973 */ 3974 static void 3975 sd_get_tunables_from_conf(struct sd_lun *un, int flags, int *data_list, 3976 sd_tunables *values) 3977 { 3978 int i; 3979 int mask; 3980 3981 bzero(values, sizeof (sd_tunables)); 3982 3983 for (i = 0; i < SD_CONF_MAX_ITEMS; i++) { 3984 3985 mask = 1 << i; 3986 if (mask > flags) { 3987 break; 3988 } 3989 3990 switch (mask & flags) { 3991 case 0: /* This mask bit not set in flags */ 3992 continue; 3993 case SD_CONF_BSET_THROTTLE: 3994 values->sdt_throttle = data_list[i]; 3995 SD_INFO(SD_LOG_ATTACH_DETACH, un, 3996 "sd_get_tunables_from_conf: throttle = %d\n", 3997 values->sdt_throttle); 3998 break; 3999 case SD_CONF_BSET_CTYPE: 4000 values->sdt_ctype = data_list[i]; 4001 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4002 "sd_get_tunables_from_conf: ctype = %d\n", 4003 values->sdt_ctype); 4004 break; 4005 case SD_CONF_BSET_NRR_COUNT: 4006 values->sdt_not_rdy_retries = data_list[i]; 4007 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4008 "sd_get_tunables_from_conf: not_rdy_retries = %d\n", 4009 values->sdt_not_rdy_retries); 4010 break; 4011 case SD_CONF_BSET_BSY_RETRY_COUNT: 4012 values->sdt_busy_retries = data_list[i]; 4013 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4014 "sd_get_tunables_from_conf: busy_retries = %d\n", 4015 values->sdt_busy_retries); 4016 break; 4017 case SD_CONF_BSET_RST_RETRIES: 4018 values->sdt_reset_retries = data_list[i]; 4019 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4020 "sd_get_tunables_from_conf: reset_retries = %d\n", 4021 values->sdt_reset_retries); 4022 break; 4023 case SD_CONF_BSET_RSV_REL_TIME: 4024 values->sdt_reserv_rel_time = data_list[i]; 4025 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4026 "sd_get_tunables_from_conf: reserv_rel_time = %d\n", 4027 values->sdt_reserv_rel_time); 4028 break; 4029 case SD_CONF_BSET_MIN_THROTTLE: 4030 values->sdt_min_throttle = data_list[i]; 4031 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4032 "sd_get_tunables_from_conf: min_throttle = %d\n", 4033 values->sdt_min_throttle); 4034 break; 4035 case SD_CONF_BSET_DISKSORT_DISABLED: 4036 values->sdt_disk_sort_dis = data_list[i]; 4037 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4038 "sd_get_tunables_from_conf: disk_sort_dis = %d\n", 4039 values->sdt_disk_sort_dis); 4040 break; 4041 case SD_CONF_BSET_LUN_RESET_ENABLED: 4042 values->sdt_lun_reset_enable = data_list[i]; 4043 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4044 "sd_get_tunables_from_conf: lun_reset_enable = %d" 4045 "\n", values->sdt_lun_reset_enable); 4046 break; 4047 case SD_CONF_BSET_CACHE_IS_NV: 4048 values->sdt_suppress_cache_flush = data_list[i]; 4049 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4050 "sd_get_tunables_from_conf: " 4051 "suppress_cache_flush = %d\n", 4052 values->sdt_suppress_cache_flush); 4053 break; 4054 } 4055 } 4056 } 4057 4058 /* 4059 * Function: sd_process_sdconf_table 4060 * 4061 * Description: Search the static configuration table for a match on the 4062 * inquiry vid/pid and update the driver soft state structure 4063 * according to the table property values for the device.
4064 * 4065 * The form of a configuration table entry is: 4066 * <vid+pid>,<flags>,<property-data> 4067 * "SEAGATE ST42400N",1,0x40000, 4068 * 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1; 4069 * 4070 * Arguments: un - driver soft state (unit) structure 4071 */ 4072 4073 static void 4074 sd_process_sdconf_table(struct sd_lun *un) 4075 { 4076 char *id = NULL; 4077 int table_index; 4078 int idlen; 4079 4080 ASSERT(un != NULL); 4081 for (table_index = 0; table_index < sd_disk_table_size; 4082 table_index++) { 4083 id = sd_disk_table[table_index].device_id; 4084 idlen = strlen(id); 4085 if (idlen == 0) { 4086 continue; 4087 } 4088 4089 /* 4090 * The static configuration table currently does not 4091 * implement version 10 properties. Additionally, 4092 * multiple data-property-name entries are not 4093 * implemented in the static configuration table. 4094 */ 4095 if (sd_sdconf_id_match(un, id, idlen) == SD_SUCCESS) { 4096 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4097 "sd_process_sdconf_table: disk %s\n", id); 4098 sd_set_vers1_properties(un, 4099 sd_disk_table[table_index].flags, 4100 sd_disk_table[table_index].properties); 4101 break; 4102 } 4103 } 4104 } 4105 4106 4107 /* 4108 * Function: sd_sdconf_id_match 4109 * 4110 * Description: This local function implements a case sensitive vid/pid 4111 * comparison as well as the boundary cases of wild card and 4112 * multiple blanks. 4113 * 4114 * Note: An implicit assumption made here is that the scsi 4115 * inquiry structure will always keep the vid, pid and 4116 * revision strings in consecutive sequence, so they can be 4117 * read as a single string. If this assumption is not the 4118 * case, a separate string, to be used for the check, needs 4119 * to be built with these strings concatenated. 4120 * 4121 * Arguments: un - driver soft state (unit) structure 4122 * id - table or config file vid/pid 4123 * idlen - length of the vid/pid (bytes) 4124 * 4125 * Return Code: SD_SUCCESS - Indicates a match with the inquiry vid/pid 4126 * SD_FAILURE - Indicates no match with the inquiry vid/pid 4127 */ 4128 4129 static int 4130 sd_sdconf_id_match(struct sd_lun *un, char *id, int idlen) 4131 { 4132 struct scsi_inquiry *sd_inq; 4133 int rval = SD_SUCCESS; 4134 4135 ASSERT(un != NULL); 4136 sd_inq = un->un_sd->sd_inq; 4137 ASSERT(id != NULL); 4138 4139 /* 4140 * We use the inq_vid as a pointer to a buffer containing the 4141 * vid and pid and use the entire vid/pid length of the table 4142 * entry for the comparison. This works because the inq_pid 4143 * data member follows inq_vid in the scsi_inquiry structure. 4144 */ 4145 if (strncasecmp(sd_inq->inq_vid, id, idlen) != 0) { 4146 /* 4147 * The user id string is compared to the inquiry vid/pid 4148 * using a case insensitive comparison and ignoring 4149 * multiple spaces. 4150 */ 4151 rval = sd_blank_cmp(un, id, idlen); 4152 if (rval != SD_SUCCESS) { 4153 /* 4154 * User id strings that start and end with a "*" 4155 * are a special case. These do not have a 4156 * specific vendor, and the product string can 4157 * appear anywhere in the 16 byte PID portion of 4158 * the inquiry data. This is a simple strstr() 4159 * type search for the user id in the inquiry data. 
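 * For example, a hypothetical entry of "*DVD*" would match any
 * device whose 16-byte inquiry PID contains the substring "DVD",
 * regardless of the vendor string.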
4160 */ 4161 if ((id[0] == '*') && (id[idlen - 1] == '*')) { 4162 char *pidptr = &id[1]; 4163 int i; 4164 int j; 4165 int pidstrlen = idlen - 2; 4166 j = sizeof (SD_INQUIRY(un)->inq_pid) - 4167 pidstrlen; 4168 4169 if (j < 0) { 4170 return (SD_FAILURE); 4171 } 4172 for (i = 0; i < j; i++) { 4173 if (bcmp(&SD_INQUIRY(un)->inq_pid[i], 4174 pidptr, pidstrlen) == 0) { 4175 rval = SD_SUCCESS; 4176 break; 4177 } 4178 } 4179 } 4180 } 4181 } 4182 return (rval); 4183 } 4184 4185 4186 /* 4187 * Function: sd_blank_cmp 4188 * 4189 * Description: If the id string starts and ends with a space, treat 4190 * multiple consecutive spaces as equivalent to a single 4191 * space. For example, this causes a sd_disk_table entry 4192 * of " NEC CDROM " to match a device's id string of 4193 * "NEC CDROM". 4194 * 4195 * Note: The success exit condition for this routine is if 4196 * the pointer to the table entry is '\0' and the cnt of 4197 * the inquiry length is zero. This will happen if the inquiry 4198 * string returned by the device is padded with spaces to be 4199 * exactly 24 bytes in length (8 byte vid + 16 byte pid). The 4200 * SCSI spec states that the inquiry string is to be padded with 4201 * spaces. 4202 * 4203 * Arguments: un - driver soft state (unit) structure 4204 * id - table or config file vid/pid 4205 * idlen - length of the vid/pid (bytes) 4206 * 4207 * Return Code: SD_SUCCESS - Indicates a match with the inquiry vid/pid 4208 * SD_FAILURE - Indicates no match with the inquiry vid/pid 4209 */ 4210 4211 static int 4212 sd_blank_cmp(struct sd_lun *un, char *id, int idlen) 4213 { 4214 char *p1; 4215 char *p2; 4216 int cnt; 4217 cnt = sizeof (SD_INQUIRY(un)->inq_vid) + 4218 sizeof (SD_INQUIRY(un)->inq_pid); 4219 4220 ASSERT(un != NULL); 4221 p2 = un->un_sd->sd_inq->inq_vid; 4222 ASSERT(id != NULL); 4223 p1 = id; 4224 4225 if ((id[0] == ' ') && (id[idlen - 1] == ' ')) { 4226 /* 4227 * Note: string p1 is terminated by a NUL but string p2 4228 * isn't. The end of p2 is determined by cnt. 4229 */ 4230 for (;;) { 4231 /* skip over any extra blanks in both strings */ 4232 while ((*p1 != '\0') && (*p1 == ' ')) { 4233 p1++; 4234 } 4235 while ((cnt != 0) && (*p2 == ' ')) { 4236 p2++; 4237 cnt--; 4238 } 4239 4240 /* compare the two strings */ 4241 if ((cnt == 0) || 4242 (SD_TOUPPER(*p1) != SD_TOUPPER(*p2))) { 4243 break; 4244 } 4245 while ((cnt > 0) && 4246 (SD_TOUPPER(*p1) == SD_TOUPPER(*p2))) { 4247 p1++; 4248 p2++; 4249 cnt--; 4250 } 4251 } 4252 } 4253 4254 /* return SD_SUCCESS if both strings match */ 4255 return (((*p1 == '\0') && (cnt == 0)) ? 
SD_SUCCESS : SD_FAILURE); 4256 } 4257 4258 4259 /* 4260 * Function: sd_chk_vers1_data 4261 * 4262 * Description: Verify the version 1 device properties provided by the 4263 * user via the configuration file 4264 * 4265 * Arguments: un - driver soft state (unit) structure 4266 * flags - integer mask indicating properties to be set 4267 * prop_list - integer list of property values 4268 * list_len - number of elements in the list 4269 * 4270 * Return Code: SD_SUCCESS - Indicates the user provided data is valid 4271 * SD_FAILURE - Indicates the user provided data is invalid 4272 */ 4273 4274 static int 4275 sd_chk_vers1_data(struct sd_lun *un, int flags, int *prop_list, 4276 int list_len, char *dataname_ptr) 4277 { 4278 int i; 4279 int mask = 1; 4280 int index = 0; 4281 4282 ASSERT(un != NULL); 4283 4284 /* Check for a NULL property name and list */ 4285 if (dataname_ptr == NULL) { 4286 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 4287 "sd_chk_vers1_data: NULL data property name."); 4288 return (SD_FAILURE); 4289 } 4290 if (prop_list == NULL) { 4291 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 4292 "sd_chk_vers1_data: %s NULL data property list.", 4293 dataname_ptr); 4294 return (SD_FAILURE); 4295 } 4296 4297 /* Display a warning if undefined bits are set in the flags */ 4298 if (flags & ~SD_CONF_BIT_MASK) { 4299 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 4300 "sd_chk_vers1_data: invalid bits 0x%x in data list %s. " 4301 "Properties not set.", 4302 (flags & ~SD_CONF_BIT_MASK), dataname_ptr); 4303 return (SD_FAILURE); 4304 } 4305 4306 /* 4307 * Verify the length of the list: count the bits set in flags 4308 * and validate that the property list is long enough to supply 4309 * a value for each bit that is set. 4310 */ 4311 for (i = 0; i < SD_CONF_MAX_ITEMS; i++) { 4312 mask = 1 << i; 4313 if (flags & mask) { 4314 index++; 4315 } 4316 } 4317 if (list_len < (index + 2)) { 4318 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 4319 "sd_chk_vers1_data: " 4320 "Data property list %s size is incorrect. " 4321 "Properties not set.", dataname_ptr); 4322 scsi_log(SD_DEVINFO(un), sd_label, CE_CONT, "Size expected: " 4323 "version + 1 flagword + %d properties", SD_CONF_MAX_ITEMS); 4324 return (SD_FAILURE); 4325 } 4326 return (SD_SUCCESS); 4327 } 4328 4329 4330 /* 4331 * Function: sd_set_vers1_properties 4332 * 4333 * Description: Set version 1 device properties based on a property list 4334 * retrieved from the driver configuration file or static 4335 * configuration table. Version 1 properties have the format: 4336 * 4337 * <data-property-name>:=<version>,<flags>,<prop0>,<prop1>,.....<propN> 4338 * 4339 * where the prop0 value will be used to set prop0 if bit0 4340 * is set in the flags 4341 * 4342 * Arguments: un - driver soft state (unit) structure 4343 * flags - integer mask indicating properties to be set 4344 * prop_list - integer list of property values 4345 */ 4346 4347 static void 4348 sd_set_vers1_properties(struct sd_lun *un, int flags, sd_tunables *prop_list) 4349 { 4350 ASSERT(un != NULL); 4351 4352 /* 4353 * Set the flag to indicate cache is to be disabled. An attempt 4354 * to disable the cache via sd_cache_control() will be made 4355 * later during attach once the basic initialization is complete.
4356 */ 4357 if (flags & SD_CONF_BSET_NOCACHE) { 4358 un->un_f_opt_disable_cache = TRUE; 4359 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4360 "sd_set_vers1_properties: caching disabled flag set\n"); 4361 } 4362 4363 /* CD-specific configuration parameters */ 4364 if (flags & SD_CONF_BSET_PLAYMSF_BCD) { 4365 un->un_f_cfg_playmsf_bcd = TRUE; 4366 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4367 "sd_set_vers1_properties: playmsf_bcd set\n"); 4368 } 4369 if (flags & SD_CONF_BSET_READSUB_BCD) { 4370 un->un_f_cfg_readsub_bcd = TRUE; 4371 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4372 "sd_set_vers1_properties: readsub_bcd set\n"); 4373 } 4374 if (flags & SD_CONF_BSET_READ_TOC_TRK_BCD) { 4375 un->un_f_cfg_read_toc_trk_bcd = TRUE; 4376 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4377 "sd_set_vers1_properties: read_toc_trk_bcd set\n"); 4378 } 4379 if (flags & SD_CONF_BSET_READ_TOC_ADDR_BCD) { 4380 un->un_f_cfg_read_toc_addr_bcd = TRUE; 4381 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4382 "sd_set_vers1_properties: read_toc_addr_bcd set\n"); 4383 } 4384 if (flags & SD_CONF_BSET_NO_READ_HEADER) { 4385 un->un_f_cfg_no_read_header = TRUE; 4386 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4387 "sd_set_vers1_properties: no_read_header set\n"); 4388 } 4389 if (flags & SD_CONF_BSET_READ_CD_XD4) { 4390 un->un_f_cfg_read_cd_xd4 = TRUE; 4391 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4392 "sd_set_vers1_properties: read_cd_xd4 set\n"); 4393 } 4394 4395 /* Support for devices which do not have valid/unique serial numbers */ 4396 if (flags & SD_CONF_BSET_FAB_DEVID) { 4397 un->un_f_opt_fab_devid = TRUE; 4398 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4399 "sd_set_vers1_properties: fab_devid bit set\n"); 4400 } 4401 4402 /* Support for user throttle configuration */ 4403 if (flags & SD_CONF_BSET_THROTTLE) { 4404 ASSERT(prop_list != NULL); 4405 un->un_saved_throttle = un->un_throttle = 4406 prop_list->sdt_throttle; 4407 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4408 "sd_set_vers1_properties: throttle set to %d\n", 4409 prop_list->sdt_throttle); 4410 } 4411 4412 /* Set the per disk retry count according to the conf file or table. 
*/ 4413 if (flags & SD_CONF_BSET_NRR_COUNT) { 4414 ASSERT(prop_list != NULL); 4415 if (prop_list->sdt_not_rdy_retries) { 4416 un->un_notready_retry_count = 4417 prop_list->sdt_not_rdy_retries; 4418 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4419 "sd_set_vers1_properties: not ready retry count" 4420 " set to %d\n", un->un_notready_retry_count); 4421 } 4422 } 4423 4424 /* The controller type is reported for generic disk driver ioctls */ 4425 if (flags & SD_CONF_BSET_CTYPE) { 4426 ASSERT(prop_list != NULL); 4427 switch (prop_list->sdt_ctype) { 4428 case CTYPE_CDROM: 4429 un->un_ctype = prop_list->sdt_ctype; 4430 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4431 "sd_set_vers1_properties: ctype set to " 4432 "CTYPE_CDROM\n"); 4433 break; 4434 case CTYPE_CCS: 4435 un->un_ctype = prop_list->sdt_ctype; 4436 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4437 "sd_set_vers1_properties: ctype set to " 4438 "CTYPE_CCS\n"); 4439 break; 4440 case CTYPE_ROD: /* RW optical */ 4441 un->un_ctype = prop_list->sdt_ctype; 4442 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4443 "sd_set_vers1_properties: ctype set to " 4444 "CTYPE_ROD\n"); 4445 break; 4446 default: 4447 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 4448 "sd_set_vers1_properties: Could not set " 4449 "invalid ctype value (%d)", 4450 prop_list->sdt_ctype); 4451 } 4452 } 4453 4454 /* Purple failover timeout */ 4455 if (flags & SD_CONF_BSET_BSY_RETRY_COUNT) { 4456 ASSERT(prop_list != NULL); 4457 un->un_busy_retry_count = 4458 prop_list->sdt_busy_retries; 4459 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4460 "sd_set_vers1_properties: " 4461 "busy retry count set to %d\n", 4462 un->un_busy_retry_count); 4463 } 4464 4465 /* Purple reset retry count */ 4466 if (flags & SD_CONF_BSET_RST_RETRIES) { 4467 ASSERT(prop_list != NULL); 4468 un->un_reset_retry_count = 4469 prop_list->sdt_reset_retries; 4470 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4471 "sd_set_vers1_properties: " 4472 "reset retry count set to %d\n", 4473 un->un_reset_retry_count); 4474 } 4475 4476 /* Purple reservation release timeout */ 4477 if (flags & SD_CONF_BSET_RSV_REL_TIME) { 4478 ASSERT(prop_list != NULL); 4479 un->un_reserve_release_time = 4480 prop_list->sdt_reserv_rel_time; 4481 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4482 "sd_set_vers1_properties: " 4483 "reservation release timeout set to %d\n", 4484 un->un_reserve_release_time); 4485 } 4486 4487 /* 4488 * Driver flag telling the driver to verify that no commands are pending 4489 * for a device before issuing a Test Unit Ready. This is a workaround 4490 * for a firmware bug in some Seagate eliteI drives. 4491 */ 4492 if (flags & SD_CONF_BSET_TUR_CHECK) { 4493 un->un_f_cfg_tur_check = TRUE; 4494 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4495 "sd_set_vers1_properties: tur queue check set\n"); 4496 } 4497 4498 if (flags & SD_CONF_BSET_MIN_THROTTLE) { 4499 un->un_min_throttle = prop_list->sdt_min_throttle; 4500 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4501 "sd_set_vers1_properties: min throttle set to %d\n", 4502 un->un_min_throttle); 4503 } 4504 4505 if (flags & SD_CONF_BSET_DISKSORT_DISABLED) { 4506 un->un_f_disksort_disabled = 4507 (prop_list->sdt_disk_sort_dis != 0) ? 4508 TRUE : FALSE; 4509 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4510 "sd_set_vers1_properties: disksort disabled " 4511 "flag set to %d\n", 4512 prop_list->sdt_disk_sort_dis); 4513 } 4514 4515 if (flags & SD_CONF_BSET_LUN_RESET_ENABLED) { 4516 un->un_f_lun_reset_enabled = 4517 (prop_list->sdt_lun_reset_enable != 0) ? 
4518 TRUE : FALSE; 4519 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4520 "sd_set_vers1_properties: lun reset enabled " 4521 "flag set to %d\n", 4522 prop_list->sdt_lun_reset_enable); 4523 } 4524 4525 if (flags & SD_CONF_BSET_CACHE_IS_NV) { 4526 un->un_f_suppress_cache_flush = 4527 (prop_list->sdt_suppress_cache_flush != 0) ? 4528 TRUE : FALSE; 4529 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4530 "sd_set_vers1_properties: suppress_cache_flush " 4531 "flag set to %d\n", 4532 prop_list->sdt_suppress_cache_flush); 4533 } 4534 4535 /* 4536 * Validate the throttle values. 4537 * If any of the numbers are invalid, set everything to defaults. 4538 */ 4539 if ((un->un_throttle < SD_LOWEST_VALID_THROTTLE) || 4540 (un->un_min_throttle < SD_LOWEST_VALID_THROTTLE) || 4541 (un->un_min_throttle > un->un_throttle)) { 4542 un->un_saved_throttle = un->un_throttle = sd_max_throttle; 4543 un->un_min_throttle = sd_min_throttle; 4544 } 4545 } 4546 4547 /* 4548 * Function: sd_is_lsi() 4549 * 4550 * Description: Check for LSI devices; step through the static device 4551 * table to match vid/pid. 4552 * 4553 * Args: un - ptr to sd_lun 4554 * 4555 * Notes: When creating a new LSI property, it must be added 4556 * to this function. 4557 */ 4558 static void 4559 sd_is_lsi(struct sd_lun *un) 4560 { 4561 char *id = NULL; 4562 int table_index; 4563 int idlen; 4564 void *prop; 4565 4566 ASSERT(un != NULL); 4567 for (table_index = 0; table_index < sd_disk_table_size; 4568 table_index++) { 4569 id = sd_disk_table[table_index].device_id; 4570 idlen = strlen(id); 4571 if (idlen == 0) { 4572 continue; 4573 } 4574 4575 if (sd_sdconf_id_match(un, id, idlen) == SD_SUCCESS) { 4576 prop = sd_disk_table[table_index].properties; 4577 if (prop == &lsi_properties || 4578 prop == &lsi_oem_properties || 4579 prop == &lsi_properties_scsi || 4580 prop == &symbios_properties) { 4581 un->un_f_cfg_is_lsi = TRUE; 4582 } 4583 break; 4584 } 4585 } 4586 } 4587 4588 /* 4589 * Function: sd_get_physical_geometry 4590 * 4591 * Description: Retrieve the MODE SENSE page 3 (Format Device Page) and 4592 * MODE SENSE page 4 (Rigid Disk Drive Geometry Page) from the 4593 * target, and use this information to initialize the physical 4594 * geometry cache specified by pgeom_p. 4595 * 4596 * MODE SENSE is an optional command, so failure in this case 4597 * does not necessarily denote an error. We want to use the 4598 * MODE SENSE commands to derive the physical geometry of the 4599 * device, but if either command fails, the logical geometry is 4600 * used as the fallback for disk label geometry in cmlb. 4601 * 4602 * This requires that un->un_blockcount and un->un_tgt_blocksize 4603 * have already been initialized for the current target and 4604 * that the current values be passed as args so that we don't 4605 * end up ever trying to use -1 as a valid value. This could 4606 * happen if either value is reset while we're not holding 4607 * the mutex. 4608 * 4609 * Arguments: un - driver soft state (unit) structure 4610 * path_flag - SD_PATH_DIRECT to use the USCSI "direct" chain and 4611 * the normal command waitq, or SD_PATH_DIRECT_PRIORITY 4612 * to use the USCSI "direct" chain and bypass the normal 4613 * command waitq. 4614 * 4615 * Context: Kernel thread only (can sleep).
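 *
 * The MODE SENSE data parsed below is laid out as follows:
 *
 *	+---------------------------+  offset 0
 *	| mode header               |  (group 2 header for ATAPI)
 *	+---------------------------+  mode_header_length
 *	| block descriptor(s)       |  bd_len bytes (0 for ATAPI)
 *	+---------------------------+  mode_header_length + bd_len
 *	| mode page 3 or 4 data     |
 *	+---------------------------+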
4616 */ 4617 4618 static int 4619 sd_get_physical_geometry(struct sd_lun *un, cmlb_geom_t *pgeom_p, 4620 diskaddr_t capacity, int lbasize, int path_flag) 4621 { 4622 struct mode_format *page3p; 4623 struct mode_geometry *page4p; 4624 struct mode_header *headerp; 4625 int sector_size; 4626 int nsect; 4627 int nhead; 4628 int ncyl; 4629 int intrlv; 4630 int spc; 4631 diskaddr_t modesense_capacity; 4632 int rpm; 4633 int bd_len; 4634 int mode_header_length; 4635 uchar_t *p3bufp; 4636 uchar_t *p4bufp; 4637 int cdbsize; 4638 int ret = EIO; 4639 sd_ssc_t *ssc; 4640 int status; 4641 4642 ASSERT(un != NULL); 4643 4644 if (lbasize == 0) { 4645 if (ISCD(un)) { 4646 lbasize = 2048; 4647 } else { 4648 lbasize = un->un_sys_blocksize; 4649 } 4650 } 4651 pgeom_p->g_secsize = (unsigned short)lbasize; 4652 4653 /* 4654 * If the unit is a cd/dvd drive MODE SENSE page three 4655 * and MODE SENSE page four are reserved (see SBC spec 4656 * and MMC spec). To prevent soft errors just return 4657 * using the default LBA size. 4658 */ 4659 if (ISCD(un)) 4660 return (ret); 4661 4662 cdbsize = (un->un_f_cfg_is_atapi == TRUE) ? CDB_GROUP2 : CDB_GROUP0; 4663 4664 /* 4665 * Retrieve MODE SENSE page 3 - Format Device Page 4666 */ 4667 p3bufp = kmem_zalloc(SD_MODE_SENSE_PAGE3_LENGTH, KM_SLEEP); 4668 ssc = sd_ssc_init(un); 4669 status = sd_send_scsi_MODE_SENSE(ssc, cdbsize, p3bufp, 4670 SD_MODE_SENSE_PAGE3_LENGTH, SD_MODE_SENSE_PAGE3_CODE, path_flag); 4671 if (status != 0) { 4672 SD_ERROR(SD_LOG_COMMON, un, 4673 "sd_get_physical_geometry: mode sense page 3 failed\n"); 4674 goto page3_exit; 4675 } 4676 4677 /* 4678 * Determine size of Block Descriptors in order to locate the mode 4679 * page data. ATAPI devices return 0, SCSI devices should return 4680 * MODE_BLK_DESC_LENGTH. 4681 */ 4682 headerp = (struct mode_header *)p3bufp; 4683 if (un->un_f_cfg_is_atapi == TRUE) { 4684 struct mode_header_grp2 *mhp = 4685 (struct mode_header_grp2 *)headerp; 4686 mode_header_length = MODE_HEADER_LENGTH_GRP2; 4687 bd_len = (mhp->bdesc_length_hi << 8) | mhp->bdesc_length_lo; 4688 } else { 4689 mode_header_length = MODE_HEADER_LENGTH; 4690 bd_len = ((struct mode_header *)headerp)->bdesc_length; 4691 } 4692 4693 if (bd_len > MODE_BLK_DESC_LENGTH) { 4694 SD_ERROR(SD_LOG_COMMON, un, "sd_get_physical_geometry: " 4695 "received unexpected bd_len of %d, page3\n", bd_len); 4696 sd_ssc_set_info(ssc, SSC_FLAGS_INVALID_DATA, 4697 "sd_get_physical_geometry: received unexpected " 4698 "bd_len of %d, page3", bd_len); 4699 status = EIO; 4700 goto page3_exit; 4701 } 4702 4703 page3p = (struct mode_format *) 4704 ((caddr_t)headerp + mode_header_length + bd_len); 4705 4706 if (page3p->mode_page.code != SD_MODE_SENSE_PAGE3_CODE) { 4707 SD_ERROR(SD_LOG_COMMON, un, "sd_get_physical_geometry: " 4708 "mode sense pg3 code mismatch %d\n", 4709 page3p->mode_page.code); 4710 sd_ssc_set_info(ssc, SSC_FLAGS_INVALID_DATA, 4711 "sd_get_physical_geometry: mode sense pg3 code " 4712 "mismatch %d", page3p->mode_page.code); 4713 status = EIO; 4714 goto page3_exit; 4715 } 4716 4717 /* 4718 * Use this physical geometry data only if BOTH MODE SENSE commands 4719 * complete successfully; otherwise, revert to the logical geometry. 4720 * So, we need to save everything in temporary variables. 
4721 */ 4722 sector_size = BE_16(page3p->data_bytes_sect); 4723 4724 /* 4725 * 1243403: The NEC D38x7 drives do not support MODE SENSE sector size 4726 */ 4727 if (sector_size == 0) { 4728 sector_size = un->un_sys_blocksize; 4729 } else { 4730 sector_size &= ~(un->un_sys_blocksize - 1); 4731 } 4732 4733 nsect = BE_16(page3p->sect_track); 4734 intrlv = BE_16(page3p->interleave); 4735 4736 SD_INFO(SD_LOG_COMMON, un, 4737 "sd_get_physical_geometry: Format Parameters (page 3)\n"); 4738 SD_INFO(SD_LOG_COMMON, un, 4739 " mode page: %d; nsect: %d; sector size: %d;\n", 4740 page3p->mode_page.code, nsect, sector_size); 4741 SD_INFO(SD_LOG_COMMON, un, 4742 " interleave: %d; track skew: %d; cylinder skew: %d;\n", intrlv, 4743 BE_16(page3p->track_skew), 4744 BE_16(page3p->cylinder_skew)); 4745 4746 sd_ssc_assessment(ssc, SD_FMT_STANDARD); 4747 4748 /* 4749 * Retrieve MODE SENSE page 4 - Rigid Disk Drive Geometry Page 4750 */ 4751 p4bufp = kmem_zalloc(SD_MODE_SENSE_PAGE4_LENGTH, KM_SLEEP); 4752 status = sd_send_scsi_MODE_SENSE(ssc, cdbsize, p4bufp, 4753 SD_MODE_SENSE_PAGE4_LENGTH, SD_MODE_SENSE_PAGE4_CODE, path_flag); 4754 if (status != 0) { 4755 SD_ERROR(SD_LOG_COMMON, un, 4756 "sd_get_physical_geometry: mode sense page 4 failed\n"); 4757 goto page4_exit; 4758 } 4759 4760 /* 4761 * Determine size of Block Descriptors in order to locate the mode 4762 * page data. ATAPI devices return 0, SCSI devices should return 4763 * MODE_BLK_DESC_LENGTH. 4764 */ 4765 headerp = (struct mode_header *)p4bufp; 4766 if (un->un_f_cfg_is_atapi == TRUE) { 4767 struct mode_header_grp2 *mhp = 4768 (struct mode_header_grp2 *)headerp; 4769 bd_len = (mhp->bdesc_length_hi << 8) | mhp->bdesc_length_lo; 4770 } else { 4771 bd_len = ((struct mode_header *)headerp)->bdesc_length; 4772 } 4773 4774 if (bd_len > MODE_BLK_DESC_LENGTH) { 4775 SD_ERROR(SD_LOG_COMMON, un, "sd_get_physical_geometry: " 4776 "received unexpected bd_len of %d, page4\n", bd_len); 4777 sd_ssc_set_info(ssc, SSC_FLAGS_INVALID_DATA, 4778 "sd_get_physical_geometry: received unexpected " 4779 "bd_len of %d, page4", bd_len); 4780 status = EIO; 4781 goto page4_exit; 4782 } 4783 4784 page4p = (struct mode_geometry *) 4785 ((caddr_t)headerp + mode_header_length + bd_len); 4786 4787 if (page4p->mode_page.code != SD_MODE_SENSE_PAGE4_CODE) { 4788 SD_ERROR(SD_LOG_COMMON, un, "sd_get_physical_geometry: " 4789 "mode sense pg4 code mismatch %d\n", 4790 page4p->mode_page.code); 4791 sd_ssc_set_info(ssc, SSC_FLAGS_INVALID_DATA, 4792 "sd_get_physical_geometry: mode sense pg4 code " 4793 "mismatch %d", page4p->mode_page.code); 4794 status = EIO; 4795 goto page4_exit; 4796 } 4797 4798 /* 4799 * Stash the data now, after we know that both commands completed. 4800 */ 4801 4802 4803 nhead = (int)page4p->heads; /* uchar, so no conversion needed */ 4804 spc = nhead * nsect; 4805 ncyl = (page4p->cyl_ub << 16) + (page4p->cyl_mb << 8) + page4p->cyl_lb; 4806 rpm = BE_16(page4p->rpm); 4807 4808 modesense_capacity = spc * ncyl; 4809 4810 SD_INFO(SD_LOG_COMMON, un, 4811 "sd_get_physical_geometry: Geometry Parameters (page 4)\n"); 4812 SD_INFO(SD_LOG_COMMON, un, 4813 " cylinders: %d; heads: %d; rpm: %d;\n", ncyl, nhead, rpm); 4814 SD_INFO(SD_LOG_COMMON, un, 4815 " computed capacity(h*s*c): %d;\n", modesense_capacity); 4816 SD_INFO(SD_LOG_COMMON, un, " pgeom_p: %p; read cap: %d\n", 4817 (void *)pgeom_p, capacity); 4818 4819 /* 4820 * Compensate if the drive's geometry is not rectangular, i.e., 4821 * the product of C * H * S returned by MODE SENSE >= that returned 4822 * by read capacity. 
This is an idiosyncrasy of the original x86 4823 * disk subsystem. 4824 */ 4825 if (modesense_capacity >= capacity) { 4826 SD_INFO(SD_LOG_COMMON, un, 4827 "sd_get_physical_geometry: adjusting acyl; " 4828 "old: %d; new: %d\n", pgeom_p->g_acyl, 4829 (modesense_capacity - capacity + spc - 1) / spc); 4830 if (sector_size != 0) { 4831 /* 1243403: NEC D38x7 drives don't support sec size */ 4832 pgeom_p->g_secsize = (unsigned short)sector_size; 4833 } 4834 pgeom_p->g_nsect = (unsigned short)nsect; 4835 pgeom_p->g_nhead = (unsigned short)nhead; 4836 pgeom_p->g_capacity = capacity; 4837 pgeom_p->g_acyl = 4838 (modesense_capacity - pgeom_p->g_capacity + spc - 1) / spc; 4839 pgeom_p->g_ncyl = ncyl - pgeom_p->g_acyl; 4840 } 4841 4842 pgeom_p->g_rpm = (unsigned short)rpm; 4843 pgeom_p->g_intrlv = (unsigned short)intrlv; 4844 ret = 0; 4845 4846 SD_INFO(SD_LOG_COMMON, un, 4847 "sd_get_physical_geometry: mode sense geometry:\n"); 4848 SD_INFO(SD_LOG_COMMON, un, 4849 " nsect: %d; sector size: %d; interlv: %d\n", 4850 nsect, sector_size, intrlv); 4851 SD_INFO(SD_LOG_COMMON, un, 4852 " nhead: %d; ncyl: %d; rpm: %d; capacity(ms): %d\n", 4853 nhead, ncyl, rpm, modesense_capacity); 4854 SD_INFO(SD_LOG_COMMON, un, 4855 "sd_get_physical_geometry: (cached)\n"); 4856 SD_INFO(SD_LOG_COMMON, un, 4857 " ncyl: %ld; acyl: %d; nhead: %d; nsect: %d\n", 4858 pgeom_p->g_ncyl, pgeom_p->g_acyl, 4859 pgeom_p->g_nhead, pgeom_p->g_nsect); 4860 SD_INFO(SD_LOG_COMMON, un, 4861 " lbasize: %d; capacity: %ld; intrlv: %d; rpm: %d\n", 4862 pgeom_p->g_secsize, pgeom_p->g_capacity, 4863 pgeom_p->g_intrlv, pgeom_p->g_rpm); 4864 sd_ssc_assessment(ssc, SD_FMT_STANDARD); 4865 4866 page4_exit: 4867 kmem_free(p4bufp, SD_MODE_SENSE_PAGE4_LENGTH); 4868 4869 page3_exit: 4870 kmem_free(p3bufp, SD_MODE_SENSE_PAGE3_LENGTH); 4871 4872 if (status != 0) { 4873 if (status == EIO) { 4874 /* 4875 * Some disks do not support mode sense(6), we 4876 * should ignore this kind of error(sense key is 4877 * 0x5 - illegal request). 4878 */ 4879 uint8_t *sensep; 4880 int senlen; 4881 4882 sensep = (uint8_t *)ssc->ssc_uscsi_cmd->uscsi_rqbuf; 4883 senlen = (int)(ssc->ssc_uscsi_cmd->uscsi_rqlen - 4884 ssc->ssc_uscsi_cmd->uscsi_rqresid); 4885 4886 if (senlen > 0 && 4887 scsi_sense_key(sensep) == KEY_ILLEGAL_REQUEST) { 4888 sd_ssc_assessment(ssc, 4889 SD_FMT_IGNORE_COMPROMISE); 4890 } else { 4891 sd_ssc_assessment(ssc, SD_FMT_STATUS_CHECK); 4892 } 4893 } else { 4894 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 4895 } 4896 } 4897 sd_ssc_fini(ssc); 4898 return (ret); 4899 } 4900 4901 /* 4902 * Function: sd_get_virtual_geometry 4903 * 4904 * Description: Ask the controller to tell us about the target device. 
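 * The HBA's "geometry" capability, queried below, is a packed
 * 32-bit value: the head count in the upper 16 bits and the
 * sectors-per-track count in the lower 16 bits; -1 indicates
 * the property is undefined.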
4905 * 4906 * Arguments: un - pointer to softstate 4907 * capacity - disk capacity in #blocks 4908 * lbasize - disk block size in bytes 4909 * 4910 * Context: Kernel thread only 4911 */ 4912 4913 static int 4914 sd_get_virtual_geometry(struct sd_lun *un, cmlb_geom_t *lgeom_p, 4915 diskaddr_t capacity, int lbasize) 4916 { 4917 uint_t geombuf; 4918 int spc; 4919 4920 ASSERT(un != NULL); 4921 4922 /* Set sector size, and total number of sectors */ 4923 (void) scsi_ifsetcap(SD_ADDRESS(un), "sector-size", lbasize, 1); 4924 (void) scsi_ifsetcap(SD_ADDRESS(un), "total-sectors", capacity, 1); 4925 4926 /* Let the HBA tell us its geometry */ 4927 geombuf = (uint_t)scsi_ifgetcap(SD_ADDRESS(un), "geometry", 1); 4928 4929 /* A value of -1 indicates an undefined "geometry" property */ 4930 if (geombuf == (-1)) { 4931 return (EINVAL); 4932 } 4933 4934 /* Initialize the logical geometry cache. */ 4935 lgeom_p->g_nhead = (geombuf >> 16) & 0xffff; 4936 lgeom_p->g_nsect = geombuf & 0xffff; 4937 lgeom_p->g_secsize = un->un_sys_blocksize; 4938 4939 spc = lgeom_p->g_nhead * lgeom_p->g_nsect; 4940 4941 /* 4942 * Note: The driver originally converted the capacity value from 4943 * target blocks to system blocks. However, the capacity value passed 4944 * to this routine is already in terms of system blocks (this scaling 4945 * is done when the READ CAPACITY command is issued and processed). 4946 * This 'error' may have gone undetected because the usage of g_ncyl 4947 * (which is based upon g_capacity) is very limited within the driver 4948 */ 4949 lgeom_p->g_capacity = capacity; 4950 4951 /* 4952 * Set ncyl to zero if the hba returned a zero nhead or nsect value. The 4953 * hba may return zero values if the device has been removed. 4954 */ 4955 if (spc == 0) { 4956 lgeom_p->g_ncyl = 0; 4957 } else { 4958 lgeom_p->g_ncyl = lgeom_p->g_capacity / spc; 4959 } 4960 lgeom_p->g_acyl = 0; 4961 4962 SD_INFO(SD_LOG_COMMON, un, "sd_get_virtual_geometry: (cached)\n"); 4963 return (0); 4964 4965 } 4966 /* 4967 * Function: sd_update_block_info 4968 * 4969 * Description: Calculate a byte count to sector count bitshift value 4970 * from sector size. 4971 * 4972 * Arguments: un: unit struct. 4973 * lbasize: new target sector size 4974 * capacity: new target capacity, ie. block count 4975 * 4976 * Context: Kernel thread context 4977 */ 4978 4979 static void 4980 sd_update_block_info(struct sd_lun *un, uint32_t lbasize, uint64_t capacity) 4981 { 4982 if (lbasize != 0) { 4983 un->un_tgt_blocksize = lbasize; 4984 un->un_f_tgt_blocksize_is_valid = TRUE; 4985 } 4986 4987 if (capacity != 0) { 4988 un->un_blockcount = capacity; 4989 un->un_f_blockcount_is_valid = TRUE; 4990 } 4991 } 4992 4993 4994 /* 4995 * Function: sd_register_devid 4996 * 4997 * Description: This routine will obtain the device id information from the 4998 * target, obtain the serial number, and register the device 4999 * id with the ddi framework. 5000 * 5001 * Arguments: devi - the system's dev_info_t for the device. 
5002 * un - driver soft state (unit) structure 5003 * reservation_flag - indicates if a reservation conflict 5004 * occurred during attach 5005 * 5006 * Context: Kernel Thread 5007 */ 5008 static void 5009 sd_register_devid(sd_ssc_t *ssc, dev_info_t *devi, int reservation_flag) 5010 { 5011 int rval = 0; 5012 uchar_t *inq80 = NULL; 5013 size_t inq80_len = MAX_INQUIRY_SIZE; 5014 size_t inq80_resid = 0; 5015 uchar_t *inq83 = NULL; 5016 size_t inq83_len = MAX_INQUIRY_SIZE; 5017 size_t inq83_resid = 0; 5018 int dlen, len; 5019 char *sn; 5020 struct sd_lun *un; 5021 5022 ASSERT(ssc != NULL); 5023 un = ssc->ssc_un; 5024 ASSERT(un != NULL); 5025 ASSERT(mutex_owned(SD_MUTEX(un))); 5026 ASSERT((SD_DEVINFO(un)) == devi); 5027 5028 /* 5029 * If the transport has already registered a devid for this target 5030 * then that takes precedence over the driver's determination 5031 * of the devid. 5032 */ 5033 if (ddi_devid_get(SD_DEVINFO(un), &un->un_devid) == DDI_SUCCESS) { 5034 ASSERT(un->un_devid); 5035 return; /* use devid registered by the transport */ 5036 } 5037 5038 /* 5039 * This is the case of antiquated Sun disk drives that have the 5040 * FAB_DEVID property set in the disk_table. These drives 5041 * manage their devids by storing them in the last 2 available sectors 5042 * on the drive and have them fabricated by the DDI layer by calling 5043 * ddi_devid_init and passing the DEVID_FAB flag. 5044 */ 5045 if (un->un_f_opt_fab_devid == TRUE) { 5046 /* 5047 * Depending on EINVAL isn't reliable, since a reserved disk 5048 * may result in invalid geometry, so check to make sure a 5049 * reservation conflict did not occur during attach. 5050 */ 5051 if ((sd_get_devid(ssc) == EINVAL) && 5052 (reservation_flag != SD_TARGET_IS_RESERVED)) { 5053 /* 5054 * The devid is invalid AND there is no reservation 5055 * conflict. Fabricate a new devid. 5056 */ 5057 (void) sd_create_devid(ssc); 5058 } 5059 5060 /* Register the devid if it exists */ 5061 if (un->un_devid != NULL) { 5062 (void) ddi_devid_register(SD_DEVINFO(un), 5063 un->un_devid); 5064 SD_INFO(SD_LOG_ATTACH_DETACH, un, 5065 "sd_register_devid: Devid Fabricated\n"); 5066 } 5067 return; 5068 } 5069 5070 /* 5071 * We check the availability of the World Wide Name (0x83) and Unit 5072 * Serial Number (0x80) pages in sd_check_vpd_page_support(), and, 5073 * using the un_vpd_page_mask built from them, we decide which way to 5074 * get the WWN. If 0x83 is available, that is the best choice. Our 5075 * next choice is 0x80. If neither is available, we munge the devid 5076 * from the device vid/pid/serial # for Sun qualified disks, or use 5077 * the DDI framework to fabricate a devid for non-Sun qualified disks. 5078 */ 5079 if (sd_check_vpd_page_support(ssc) == 0) { 5080 /* collect page 80 data if available */ 5081 if (un->un_vpd_page_mask & SD_VPD_UNIT_SERIAL_PG) { 5082 5083 mutex_exit(SD_MUTEX(un)); 5084 inq80 = kmem_zalloc(inq80_len, KM_SLEEP); 5085 5086 rval = sd_send_scsi_INQUIRY(ssc, inq80, inq80_len, 5087 0x01, 0x80, &inq80_resid); 5088 5089 if (rval != 0) { 5090 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 5091 kmem_free(inq80, inq80_len); 5092 inq80 = NULL; 5093 inq80_len = 0; 5094 } else if (ddi_prop_exists( 5095 DDI_DEV_T_NONE, SD_DEVINFO(un), 5096 DDI_PROP_NOTPROM | DDI_PROP_DONTPASS, 5097 INQUIRY_SERIAL_NO) == 0) { 5098 /* 5099 * If we don't already have a serial number 5100 * property, do a quick verification of the data 5101 * returned and define the property.
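 * (Per the Unit Serial Number VPD page layout, byte 3 of inq80
 * holds the page length and the serial number begins at byte 4,
 * which is what the dlen/len checks below depend on.)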
				 */
				dlen = inq80_len - inq80_resid;
				len = (size_t)inq80[3];
				if ((dlen >= 4) && ((len + 4) <= dlen)) {
					/*
					 * Ensure sn termination, skip leading
					 * blanks, and create property
					 * 'inquiry-serial-no'.
					 */
					sn = (char *)&inq80[4];
					sn[len] = 0;
					while (*sn && (*sn == ' '))
						sn++;
					if (*sn) {
						(void) ddi_prop_update_string(
						    DDI_DEV_T_NONE,
						    SD_DEVINFO(un),
						    INQUIRY_SERIAL_NO, sn);
					}
				}
			}
			mutex_enter(SD_MUTEX(un));
		}

		/* collect page 83 data if available */
		if (un->un_vpd_page_mask & SD_VPD_DEVID_WWN_PG) {
			mutex_exit(SD_MUTEX(un));
			inq83 = kmem_zalloc(inq83_len, KM_SLEEP);

			rval = sd_send_scsi_INQUIRY(ssc, inq83, inq83_len,
			    0x01, 0x83, &inq83_resid);

			if (rval != 0) {
				sd_ssc_assessment(ssc, SD_FMT_IGNORE);
				kmem_free(inq83, inq83_len);
				inq83 = NULL;
				inq83_len = 0;
			}
			mutex_enter(SD_MUTEX(un));
		}
	}

	/* encode best devid possible based on data available */
	if (ddi_devid_scsi_encode(DEVID_SCSI_ENCODE_VERSION_LATEST,
	    (char *)ddi_driver_name(SD_DEVINFO(un)),
	    (uchar_t *)SD_INQUIRY(un), sizeof (*SD_INQUIRY(un)),
	    inq80, inq80_len - inq80_resid, inq83, inq83_len -
	    inq83_resid, &un->un_devid) == DDI_SUCCESS) {

		/* devid successfully encoded, register devid */
		(void) ddi_devid_register(SD_DEVINFO(un), un->un_devid);

	} else {
		/*
		 * Unable to encode a devid based on data available.
		 * This is not a Sun qualified disk. Older Sun disk
		 * drives that have the SD_FAB_DEVID property
		 * set in the disk_table and non-Sun qualified
		 * disks are treated in the same manner. These
		 * drives manage their devids by storing them in
		 * the last 2 available sectors on the drive and
		 * having them fabricated by the ddi layer by
		 * calling ddi_devid_init and passing the
		 * DEVID_FAB flag.
		 * Only fabricate a devid if no fabricated devid
		 * already exists.
		 */
		if (sd_get_devid(ssc) == EINVAL) {
			(void) sd_create_devid(ssc);
		}
		un->un_f_opt_fab_devid = TRUE;

		/* Register the devid if it exists */
		if (un->un_devid != NULL) {
			(void) ddi_devid_register(SD_DEVINFO(un),
			    un->un_devid);
			SD_INFO(SD_LOG_ATTACH_DETACH, un,
			    "sd_register_devid: devid fabricated using "
			    "ddi framework\n");
		}
	}

	/* clean up resources */
	if (inq80 != NULL) {
		kmem_free(inq80, inq80_len);
	}
	if (inq83 != NULL) {
		kmem_free(inq83, inq83_len);
	}
}


/*
 *    Function: sd_get_devid
 *
 * Description: This routine will return 0 if a valid device id has been
 *		obtained from the target and stored in the soft state. If a
 *		valid device id has not been previously read and stored, a
 *		read attempt will be made.
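 *
 *		For illustration only (a sketch of the validation done
 *		below, not a normative layout): the reserved block holds a
 *		struct dk_devid whose revision bytes and XOR checksum are
 *		verified before the embedded devid is accepted. With
 *		nwords standing for the words in one system block:
 *
 *			chksum = 0;
 *			for (i = 0; i < nwords; i++)
 *				chksum ^= ((uint_t *)dkdevid)[i];
 *			-- accept only if it matches DKD_GETCHKSUM(dkdevid)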
5202 * 5203 * Arguments: un - driver soft state (unit) structure 5204 * 5205 * Return Code: 0 if we successfully get the device id 5206 * 5207 * Context: Kernel Thread 5208 */ 5209 5210 static int 5211 sd_get_devid(sd_ssc_t *ssc) 5212 { 5213 struct dk_devid *dkdevid; 5214 ddi_devid_t tmpid; 5215 uint_t *ip; 5216 size_t sz; 5217 diskaddr_t blk; 5218 int status; 5219 int chksum; 5220 int i; 5221 size_t buffer_size; 5222 struct sd_lun *un; 5223 5224 ASSERT(ssc != NULL); 5225 un = ssc->ssc_un; 5226 ASSERT(un != NULL); 5227 ASSERT(mutex_owned(SD_MUTEX(un))); 5228 5229 SD_TRACE(SD_LOG_ATTACH_DETACH, un, "sd_get_devid: entry: un: 0x%p\n", 5230 un); 5231 5232 if (un->un_devid != NULL) { 5233 return (0); 5234 } 5235 5236 mutex_exit(SD_MUTEX(un)); 5237 if (cmlb_get_devid_block(un->un_cmlbhandle, &blk, 5238 (void *)SD_PATH_DIRECT) != 0) { 5239 mutex_enter(SD_MUTEX(un)); 5240 return (EINVAL); 5241 } 5242 5243 /* 5244 * Read and verify device id, stored in the reserved cylinders at the 5245 * end of the disk. Backup label is on the odd sectors of the last 5246 * track of the last cylinder. Device id will be on track of the next 5247 * to last cylinder. 5248 */ 5249 mutex_enter(SD_MUTEX(un)); 5250 buffer_size = SD_REQBYTES2TGTBYTES(un, sizeof (struct dk_devid)); 5251 mutex_exit(SD_MUTEX(un)); 5252 dkdevid = kmem_alloc(buffer_size, KM_SLEEP); 5253 status = sd_send_scsi_READ(ssc, dkdevid, buffer_size, blk, 5254 SD_PATH_DIRECT); 5255 5256 if (status != 0) { 5257 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 5258 goto error; 5259 } 5260 5261 /* Validate the revision */ 5262 if ((dkdevid->dkd_rev_hi != DK_DEVID_REV_MSB) || 5263 (dkdevid->dkd_rev_lo != DK_DEVID_REV_LSB)) { 5264 status = EINVAL; 5265 goto error; 5266 } 5267 5268 /* Calculate the checksum */ 5269 chksum = 0; 5270 ip = (uint_t *)dkdevid; 5271 for (i = 0; i < ((un->un_sys_blocksize - sizeof (int))/sizeof (int)); 5272 i++) { 5273 chksum ^= ip[i]; 5274 } 5275 5276 /* Compare the checksums */ 5277 if (DKD_GETCHKSUM(dkdevid) != chksum) { 5278 status = EINVAL; 5279 goto error; 5280 } 5281 5282 /* Validate the device id */ 5283 if (ddi_devid_valid((ddi_devid_t)&dkdevid->dkd_devid) != DDI_SUCCESS) { 5284 status = EINVAL; 5285 goto error; 5286 } 5287 5288 /* 5289 * Store the device id in the driver soft state 5290 */ 5291 sz = ddi_devid_sizeof((ddi_devid_t)&dkdevid->dkd_devid); 5292 tmpid = kmem_alloc(sz, KM_SLEEP); 5293 5294 mutex_enter(SD_MUTEX(un)); 5295 5296 un->un_devid = tmpid; 5297 bcopy(&dkdevid->dkd_devid, un->un_devid, sz); 5298 5299 kmem_free(dkdevid, buffer_size); 5300 5301 SD_TRACE(SD_LOG_ATTACH_DETACH, un, "sd_get_devid: exit: un:0x%p\n", un); 5302 5303 return (status); 5304 error: 5305 mutex_enter(SD_MUTEX(un)); 5306 kmem_free(dkdevid, buffer_size); 5307 return (status); 5308 } 5309 5310 5311 /* 5312 * Function: sd_create_devid 5313 * 5314 * Description: This routine will fabricate the device id and write it 5315 * to the disk. 
 *
 *   Arguments: un - driver soft state (unit) structure
 *
 * Return Code: the fabricated device id, or NULL if fabrication or the
 *		write to disk fails
 *
 *     Context: Kernel Thread
 */

static ddi_devid_t
sd_create_devid(sd_ssc_t *ssc)
{
	struct sd_lun	*un;

	ASSERT(ssc != NULL);
	un = ssc->ssc_un;
	ASSERT(un != NULL);

	/* Fabricate the devid */
	if (ddi_devid_init(SD_DEVINFO(un), DEVID_FAB, 0, NULL, &un->un_devid)
	    == DDI_FAILURE) {
		return (NULL);
	}

	/* Write the devid to disk */
	if (sd_write_deviceid(ssc) != 0) {
		ddi_devid_free(un->un_devid);
		un->un_devid = NULL;
	}

	return (un->un_devid);
}


/*
 *    Function: sd_write_deviceid
 *
 * Description: This routine will write the device id to the disk
 *		reserved sector.
 *
 *   Arguments: un - driver soft state (unit) structure
 *
 * Return Code: -1 if the devid block cannot be located, otherwise the
 *		value returned by sd_send_scsi_WRITE
 *
 *     Context: Kernel Thread
 */

static int
sd_write_deviceid(sd_ssc_t *ssc)
{
	struct dk_devid		*dkdevid;
	diskaddr_t		blk;
	uint_t			*ip, chksum;
	int			status;
	int			i;
	struct sd_lun		*un;

	ASSERT(ssc != NULL);
	un = ssc->ssc_un;
	ASSERT(un != NULL);
	ASSERT(mutex_owned(SD_MUTEX(un)));

	mutex_exit(SD_MUTEX(un));
	if (cmlb_get_devid_block(un->un_cmlbhandle, &blk,
	    (void *)SD_PATH_DIRECT) != 0) {
		mutex_enter(SD_MUTEX(un));
		return (-1);
	}

	/* Allocate the buffer */
	dkdevid = kmem_zalloc(un->un_sys_blocksize, KM_SLEEP);

	/* Fill in the revision */
	dkdevid->dkd_rev_hi = DK_DEVID_REV_MSB;
	dkdevid->dkd_rev_lo = DK_DEVID_REV_LSB;

	/* Copy in the device id */
	mutex_enter(SD_MUTEX(un));
	bcopy(un->un_devid, &dkdevid->dkd_devid,
	    ddi_devid_sizeof(un->un_devid));
	mutex_exit(SD_MUTEX(un));

	/* Calculate the checksum */
	chksum = 0;
	ip = (uint_t *)dkdevid;
	for (i = 0; i < ((un->un_sys_blocksize - sizeof (int)) / sizeof (int));
	    i++) {
		chksum ^= ip[i];
	}

	/* Fill in the checksum */
	DKD_FORMCHKSUM(chksum, dkdevid);

	/* Write the reserved sector */
	status = sd_send_scsi_WRITE(ssc, dkdevid, un->un_sys_blocksize, blk,
	    SD_PATH_DIRECT);
	if (status != 0)
		sd_ssc_assessment(ssc, SD_FMT_IGNORE);

	kmem_free(dkdevid, un->un_sys_blocksize);

	mutex_enter(SD_MUTEX(un));
	return (status);
}


/*
 *    Function: sd_check_vpd_page_support
 *
 * Description: This routine sends an inquiry command with the EVPD bit set
 *		and a page code of 0x00 to the device. It is used to
 *		determine which vital product pages are available to find
 *		the devid. We are looking for pages 0x83 or 0x80. If we
 *		return -1, the device does not support VPD pages.
 *
 *   Arguments: un - driver soft state (unit) structure
 *
 * Return Code: 0 - success
 *		-1 - the device does not support VPD pages (or the inquiry
 *		failed)
 *
 *     Context: This routine can sleep.
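 *
 *		For illustration only (a sketch, not extra driver logic):
 *		page 0x00 returns its payload length in byte 3 and the
 *		supported page codes in ascending order from byte 4, so a
 *		minimal walk of the returned list looks like:
 *
 *			npages = page_list[3];
 *			for (i = 0; i < npages; i++)
 *				record_page(page_list[4 + i]);
 *
 *		(record_page is a hypothetical stand-in for the switch on
 *		page codes in the function body.)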
5438 */ 5439 5440 static int 5441 sd_check_vpd_page_support(sd_ssc_t *ssc) 5442 { 5443 uchar_t *page_list = NULL; 5444 uchar_t page_length = 0xff; /* Use max possible length */ 5445 uchar_t evpd = 0x01; /* Set the EVPD bit */ 5446 uchar_t page_code = 0x00; /* Supported VPD Pages */ 5447 int rval = 0; 5448 int counter; 5449 struct sd_lun *un; 5450 5451 ASSERT(ssc != NULL); 5452 un = ssc->ssc_un; 5453 ASSERT(un != NULL); 5454 ASSERT(mutex_owned(SD_MUTEX(un))); 5455 5456 mutex_exit(SD_MUTEX(un)); 5457 5458 /* 5459 * We'll set the page length to the maximum to save figuring it out 5460 * with an additional call. 5461 */ 5462 page_list = kmem_zalloc(page_length, KM_SLEEP); 5463 5464 rval = sd_send_scsi_INQUIRY(ssc, page_list, page_length, evpd, 5465 page_code, NULL); 5466 5467 if (rval != 0) 5468 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 5469 5470 mutex_enter(SD_MUTEX(un)); 5471 5472 /* 5473 * Now we must validate that the device accepted the command, as some 5474 * drives do not support it. If the drive does support it, we will 5475 * return 0, and the supported pages will be in un_vpd_page_mask. If 5476 * not, we return -1. 5477 */ 5478 if ((rval == 0) && (page_list[VPD_MODE_PAGE] == 0x00)) { 5479 /* Loop to find one of the 2 pages we need */ 5480 counter = 4; /* Supported pages start at byte 4, with 0x00 */ 5481 5482 /* 5483 * Pages are returned in ascending order, and 0x83 is what we 5484 * are hoping for. 5485 */ 5486 while ((page_list[counter] <= 0x86) && 5487 (counter <= (page_list[VPD_PAGE_LENGTH] + 5488 VPD_HEAD_OFFSET))) { 5489 /* 5490 * Add 3 because page_list[3] is the number of 5491 * pages minus 3 5492 */ 5493 5494 switch (page_list[counter]) { 5495 case 0x00: 5496 un->un_vpd_page_mask |= SD_VPD_SUPPORTED_PG; 5497 break; 5498 case 0x80: 5499 un->un_vpd_page_mask |= SD_VPD_UNIT_SERIAL_PG; 5500 break; 5501 case 0x81: 5502 un->un_vpd_page_mask |= SD_VPD_OPERATING_PG; 5503 break; 5504 case 0x82: 5505 un->un_vpd_page_mask |= SD_VPD_ASCII_OP_PG; 5506 break; 5507 case 0x83: 5508 un->un_vpd_page_mask |= SD_VPD_DEVID_WWN_PG; 5509 break; 5510 case 0x86: 5511 un->un_vpd_page_mask |= SD_VPD_EXTENDED_DATA_PG; 5512 break; 5513 } 5514 counter++; 5515 } 5516 5517 } else { 5518 rval = -1; 5519 5520 SD_INFO(SD_LOG_ATTACH_DETACH, un, 5521 "sd_check_vpd_page_support: This drive does not implement " 5522 "VPD pages.\n"); 5523 } 5524 5525 kmem_free(page_list, page_length); 5526 5527 return (rval); 5528 } 5529 5530 5531 /* 5532 * Function: sd_setup_pm 5533 * 5534 * Description: Initialize Power Management on the device 5535 * 5536 * Context: Kernel Thread 5537 */ 5538 5539 static void 5540 sd_setup_pm(sd_ssc_t *ssc, dev_info_t *devi) 5541 { 5542 uint_t log_page_size; 5543 uchar_t *log_page_data; 5544 int rval = 0; 5545 struct sd_lun *un; 5546 5547 ASSERT(ssc != NULL); 5548 un = ssc->ssc_un; 5549 ASSERT(un != NULL); 5550 5551 /* 5552 * Since we are called from attach, holding a mutex for 5553 * un is unnecessary. Because some of the routines called 5554 * from here require SD_MUTEX to not be held, assert this 5555 * right up front. 5556 */ 5557 ASSERT(!mutex_owned(SD_MUTEX(un))); 5558 /* 5559 * Since the sd device does not have the 'reg' property, 5560 * cpr will not call its DDI_SUSPEND/DDI_RESUME entries. 5561 * The following code is to tell cpr that this device 5562 * DOES need to be suspended and resumed. 
	 */
	(void) ddi_prop_update_string(DDI_DEV_T_NONE, devi,
	    "pm-hardware-state", "needs-suspend-resume");

	/*
	 * This complies with the new power management framework
	 * for certain desktop machines. Create the pm_components
	 * property as a string array property.
	 */
	if (un->un_f_pm_supported) {
		/*
		 * Not all devices have a motor; try it first. Some devices
		 * may return ILLEGAL REQUEST, and some will hang.
		 * The following START_STOP_UNIT is used to check whether the
		 * target device has a motor.
		 */
		un->un_f_start_stop_supported = TRUE;
		rval = sd_send_scsi_START_STOP_UNIT(ssc, SD_TARGET_START,
		    SD_PATH_DIRECT);

		if (rval != 0) {
			sd_ssc_assessment(ssc, SD_FMT_IGNORE);
			un->un_f_start_stop_supported = FALSE;
		}

		/*
		 * Create the pm properties anyway; otherwise the parent
		 * can't go to sleep.
		 */
		(void) sd_create_pm_components(devi, un);
		un->un_f_pm_is_enabled = TRUE;
		return;
	}

	if (!un->un_f_log_sense_supported) {
		un->un_power_level = SD_SPINDLE_ON;
		un->un_f_pm_is_enabled = FALSE;
		return;
	}

	rval = sd_log_page_supported(ssc, START_STOP_CYCLE_PAGE);

#ifdef	SDDEBUG
	if (sd_force_pm_supported) {
		/* Force a successful result */
		rval = 1;
	}
#endif

	/*
	 * If the start-stop cycle counter log page is not supported
	 * or if the pm-capable property is SD_PM_CAPABLE_FALSE (0)
	 * then we should not create the pm_components property.
	 */
	if (rval == -1) {
		/*
		 * Error.
		 * Reading log sense failed, most likely this is
		 * an older drive that does not support log sense.
		 * If this fails, auto-pm is not supported.
		 */
		un->un_power_level = SD_SPINDLE_ON;
		un->un_f_pm_is_enabled = FALSE;

	} else if (rval == 0) {
		/*
		 * Page not found.
		 * The start/stop cycle counter is implemented as page
		 * START_STOP_CYCLE_VU_PAGE (0x31) in older disks. For
		 * newer disks it is implemented as START_STOP_CYCLE_PAGE
		 * (0xE).
		 */
		if (sd_log_page_supported(ssc, START_STOP_CYCLE_VU_PAGE) == 1) {
			/*
			 * Page found, use this one.
			 */
			un->un_start_stop_cycle_page = START_STOP_CYCLE_VU_PAGE;
			un->un_f_pm_is_enabled = TRUE;
		} else {
			/*
			 * Error or page not found.
			 * auto-pm is not supported for this device.
			 */
			un->un_power_level = SD_SPINDLE_ON;
			un->un_f_pm_is_enabled = FALSE;
		}
	} else {
		/*
		 * Page found, use it.
		 */
		un->un_start_stop_cycle_page = START_STOP_CYCLE_PAGE;
		un->un_f_pm_is_enabled = TRUE;
	}

	if (un->un_f_pm_is_enabled == TRUE) {
		log_page_size = START_STOP_CYCLE_COUNTER_PAGE_SIZE;
		log_page_data = kmem_zalloc(log_page_size, KM_SLEEP);

		rval = sd_send_scsi_LOG_SENSE(ssc, log_page_data,
		    log_page_size, un->un_start_stop_cycle_page,
		    0x01, 0, SD_PATH_DIRECT);

		if (rval != 0) {
			sd_ssc_assessment(ssc, SD_FMT_IGNORE);
		}

#ifdef	SDDEBUG
		if (sd_force_pm_supported) {
			/* Force a successful result */
			rval = 0;
		}
#endif

		/*
		 * If the LOG SENSE for the start/stop cycle counter page
		 * succeeds, then power management is supported and we can
		 * enable auto-pm.
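		 *
		 * For illustration only (a sketch mirroring the decode done
		 * later in sdpower, not additional logic here): the cycle
		 * counts are big-endian 32-bit values at fixed offsets in
		 * the returned page, e.g.
		 *
		 *	maxcycles = (p[0x1c] << 24) | (p[0x1d] << 16) |
		 *	    (p[0x1e] << 8) | p[0x1f];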
5681 */ 5682 if (rval == 0) { 5683 (void) sd_create_pm_components(devi, un); 5684 } else { 5685 un->un_power_level = SD_SPINDLE_ON; 5686 un->un_f_pm_is_enabled = FALSE; 5687 } 5688 5689 kmem_free(log_page_data, log_page_size); 5690 } 5691 } 5692 5693 5694 /* 5695 * Function: sd_create_pm_components 5696 * 5697 * Description: Initialize PM property. 5698 * 5699 * Context: Kernel thread context 5700 */ 5701 5702 static void 5703 sd_create_pm_components(dev_info_t *devi, struct sd_lun *un) 5704 { 5705 char *pm_comp[] = { "NAME=spindle-motor", "0=off", "1=on", NULL }; 5706 5707 ASSERT(!mutex_owned(SD_MUTEX(un))); 5708 5709 if (ddi_prop_update_string_array(DDI_DEV_T_NONE, devi, 5710 "pm-components", pm_comp, 3) == DDI_PROP_SUCCESS) { 5711 /* 5712 * When components are initially created they are idle, 5713 * power up any non-removables. 5714 * Note: the return value of pm_raise_power can't be used 5715 * for determining if PM should be enabled for this device. 5716 * Even if you check the return values and remove this 5717 * property created above, the PM framework will not honor the 5718 * change after the first call to pm_raise_power. Hence, 5719 * removal of that property does not help if pm_raise_power 5720 * fails. In the case of removable media, the start/stop 5721 * will fail if the media is not present. 5722 */ 5723 if (un->un_f_attach_spinup && (pm_raise_power(SD_DEVINFO(un), 0, 5724 SD_SPINDLE_ON) == DDI_SUCCESS)) { 5725 mutex_enter(SD_MUTEX(un)); 5726 un->un_power_level = SD_SPINDLE_ON; 5727 mutex_enter(&un->un_pm_mutex); 5728 /* Set to on and not busy. */ 5729 un->un_pm_count = 0; 5730 } else { 5731 mutex_enter(SD_MUTEX(un)); 5732 un->un_power_level = SD_SPINDLE_OFF; 5733 mutex_enter(&un->un_pm_mutex); 5734 /* Set to off. */ 5735 un->un_pm_count = -1; 5736 } 5737 mutex_exit(&un->un_pm_mutex); 5738 mutex_exit(SD_MUTEX(un)); 5739 } else { 5740 un->un_power_level = SD_SPINDLE_ON; 5741 un->un_f_pm_is_enabled = FALSE; 5742 } 5743 } 5744 5745 5746 /* 5747 * Function: sd_ddi_suspend 5748 * 5749 * Description: Performs system power-down operations. This includes 5750 * setting the drive state to indicate its suspended so 5751 * that no new commands will be accepted. Also, wait for 5752 * all commands that are in transport or queued to a timer 5753 * for retry to complete. All timeout threads are cancelled. 5754 * 5755 * Return Code: DDI_FAILURE or DDI_SUCCESS 5756 * 5757 * Context: Kernel thread context 5758 */ 5759 5760 static int 5761 sd_ddi_suspend(dev_info_t *devi) 5762 { 5763 struct sd_lun *un; 5764 clock_t wait_cmds_complete; 5765 5766 un = ddi_get_soft_state(sd_state, ddi_get_instance(devi)); 5767 if (un == NULL) { 5768 return (DDI_FAILURE); 5769 } 5770 5771 SD_TRACE(SD_LOG_IO_PM, un, "sd_ddi_suspend: entry\n"); 5772 5773 mutex_enter(SD_MUTEX(un)); 5774 5775 /* Return success if the device is already suspended. */ 5776 if (un->un_state == SD_STATE_SUSPENDED) { 5777 mutex_exit(SD_MUTEX(un)); 5778 SD_TRACE(SD_LOG_IO_PM, un, "sd_ddi_suspend: " 5779 "device already suspended, exiting\n"); 5780 return (DDI_SUCCESS); 5781 } 5782 5783 /* Return failure if the device is being used by HA */ 5784 if (un->un_resvd_status & 5785 (SD_RESERVE | SD_WANT_RESERVE | SD_LOST_RESERVE)) { 5786 mutex_exit(SD_MUTEX(un)); 5787 SD_TRACE(SD_LOG_IO_PM, un, "sd_ddi_suspend: " 5788 "device in use by HA, exiting\n"); 5789 return (DDI_FAILURE); 5790 } 5791 5792 /* 5793 * Return failure if the device is in a resource wait 5794 * or power changing state. 
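	 *
	 * For illustration only (a sketch of the bounded drain performed
	 * further below, not additional logic): once the state is set to
	 * SD_STATE_SUSPENDED, in-flight commands are drained with a
	 * deadline of sd_wait_cmds_complete seconds:
	 *
	 *	deadline = ddi_get_lbolt() +
	 *	    sd_wait_cmds_complete * drv_usectohz(1000000);
	 *	while (un->un_ncmds_in_transport != 0)
	 *		if (cv_timedwait(..., deadline) == -1)
	 *			-- timed out: undo state, fail the suspend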
5795 */ 5796 if ((un->un_state == SD_STATE_RWAIT) || 5797 (un->un_state == SD_STATE_PM_CHANGING)) { 5798 mutex_exit(SD_MUTEX(un)); 5799 SD_TRACE(SD_LOG_IO_PM, un, "sd_ddi_suspend: " 5800 "device in resource wait state, exiting\n"); 5801 return (DDI_FAILURE); 5802 } 5803 5804 5805 un->un_save_state = un->un_last_state; 5806 New_state(un, SD_STATE_SUSPENDED); 5807 5808 /* 5809 * Wait for all commands that are in transport or queued to a timer 5810 * for retry to complete. 5811 * 5812 * While waiting, no new commands will be accepted or sent because of 5813 * the new state we set above. 5814 * 5815 * Wait till current operation has completed. If we are in the resource 5816 * wait state (with an intr outstanding) then we need to wait till the 5817 * intr completes and starts the next cmd. We want to wait for 5818 * SD_WAIT_CMDS_COMPLETE seconds before failing the DDI_SUSPEND. 5819 */ 5820 wait_cmds_complete = ddi_get_lbolt() + 5821 (sd_wait_cmds_complete * drv_usectohz(1000000)); 5822 5823 while (un->un_ncmds_in_transport != 0) { 5824 /* 5825 * Fail if commands do not finish in the specified time. 5826 */ 5827 if (cv_timedwait(&un->un_disk_busy_cv, SD_MUTEX(un), 5828 wait_cmds_complete) == -1) { 5829 /* 5830 * Undo the state changes made above. Everything 5831 * must go back to it's original value. 5832 */ 5833 Restore_state(un); 5834 un->un_last_state = un->un_save_state; 5835 /* Wake up any threads that might be waiting. */ 5836 cv_broadcast(&un->un_suspend_cv); 5837 mutex_exit(SD_MUTEX(un)); 5838 SD_ERROR(SD_LOG_IO_PM, un, 5839 "sd_ddi_suspend: failed due to outstanding cmds\n"); 5840 SD_TRACE(SD_LOG_IO_PM, un, "sd_ddi_suspend: exiting\n"); 5841 return (DDI_FAILURE); 5842 } 5843 } 5844 5845 /* 5846 * Cancel SCSI watch thread and timeouts, if any are active 5847 */ 5848 5849 if (SD_OK_TO_SUSPEND_SCSI_WATCHER(un)) { 5850 opaque_t temp_token = un->un_swr_token; 5851 mutex_exit(SD_MUTEX(un)); 5852 scsi_watch_suspend(temp_token); 5853 mutex_enter(SD_MUTEX(un)); 5854 } 5855 5856 if (un->un_reset_throttle_timeid != NULL) { 5857 timeout_id_t temp_id = un->un_reset_throttle_timeid; 5858 un->un_reset_throttle_timeid = NULL; 5859 mutex_exit(SD_MUTEX(un)); 5860 (void) untimeout(temp_id); 5861 mutex_enter(SD_MUTEX(un)); 5862 } 5863 5864 if (un->un_dcvb_timeid != NULL) { 5865 timeout_id_t temp_id = un->un_dcvb_timeid; 5866 un->un_dcvb_timeid = NULL; 5867 mutex_exit(SD_MUTEX(un)); 5868 (void) untimeout(temp_id); 5869 mutex_enter(SD_MUTEX(un)); 5870 } 5871 5872 mutex_enter(&un->un_pm_mutex); 5873 if (un->un_pm_timeid != NULL) { 5874 timeout_id_t temp_id = un->un_pm_timeid; 5875 un->un_pm_timeid = NULL; 5876 mutex_exit(&un->un_pm_mutex); 5877 mutex_exit(SD_MUTEX(un)); 5878 (void) untimeout(temp_id); 5879 mutex_enter(SD_MUTEX(un)); 5880 } else { 5881 mutex_exit(&un->un_pm_mutex); 5882 } 5883 5884 if (un->un_retry_timeid != NULL) { 5885 timeout_id_t temp_id = un->un_retry_timeid; 5886 un->un_retry_timeid = NULL; 5887 mutex_exit(SD_MUTEX(un)); 5888 (void) untimeout(temp_id); 5889 mutex_enter(SD_MUTEX(un)); 5890 5891 if (un->un_retry_bp != NULL) { 5892 un->un_retry_bp->av_forw = un->un_waitq_headp; 5893 un->un_waitq_headp = un->un_retry_bp; 5894 if (un->un_waitq_tailp == NULL) { 5895 un->un_waitq_tailp = un->un_retry_bp; 5896 } 5897 un->un_retry_bp = NULL; 5898 un->un_retry_statp = NULL; 5899 } 5900 } 5901 5902 if (un->un_direct_priority_timeid != NULL) { 5903 timeout_id_t temp_id = un->un_direct_priority_timeid; 5904 un->un_direct_priority_timeid = NULL; 5905 mutex_exit(SD_MUTEX(un)); 5906 (void) 
untimeout(temp_id); 5907 mutex_enter(SD_MUTEX(un)); 5908 } 5909 5910 if (un->un_f_is_fibre == TRUE) { 5911 /* 5912 * Remove callbacks for insert and remove events 5913 */ 5914 if (un->un_insert_event != NULL) { 5915 mutex_exit(SD_MUTEX(un)); 5916 (void) ddi_remove_event_handler(un->un_insert_cb_id); 5917 mutex_enter(SD_MUTEX(un)); 5918 un->un_insert_event = NULL; 5919 } 5920 5921 if (un->un_remove_event != NULL) { 5922 mutex_exit(SD_MUTEX(un)); 5923 (void) ddi_remove_event_handler(un->un_remove_cb_id); 5924 mutex_enter(SD_MUTEX(un)); 5925 un->un_remove_event = NULL; 5926 } 5927 } 5928 5929 mutex_exit(SD_MUTEX(un)); 5930 5931 SD_TRACE(SD_LOG_IO_PM, un, "sd_ddi_suspend: exit\n"); 5932 5933 return (DDI_SUCCESS); 5934 } 5935 5936 5937 /* 5938 * Function: sd_ddi_pm_suspend 5939 * 5940 * Description: Set the drive state to low power. 5941 * Someone else is required to actually change the drive 5942 * power level. 5943 * 5944 * Arguments: un - driver soft state (unit) structure 5945 * 5946 * Return Code: DDI_FAILURE or DDI_SUCCESS 5947 * 5948 * Context: Kernel thread context 5949 */ 5950 5951 static int 5952 sd_ddi_pm_suspend(struct sd_lun *un) 5953 { 5954 ASSERT(un != NULL); 5955 SD_TRACE(SD_LOG_POWER, un, "sd_ddi_pm_suspend: entry\n"); 5956 5957 ASSERT(!mutex_owned(SD_MUTEX(un))); 5958 mutex_enter(SD_MUTEX(un)); 5959 5960 /* 5961 * Exit if power management is not enabled for this device, or if 5962 * the device is being used by HA. 5963 */ 5964 if ((un->un_f_pm_is_enabled == FALSE) || (un->un_resvd_status & 5965 (SD_RESERVE | SD_WANT_RESERVE | SD_LOST_RESERVE))) { 5966 mutex_exit(SD_MUTEX(un)); 5967 SD_TRACE(SD_LOG_POWER, un, "sd_ddi_pm_suspend: exiting\n"); 5968 return (DDI_SUCCESS); 5969 } 5970 5971 SD_INFO(SD_LOG_POWER, un, "sd_ddi_pm_suspend: un_ncmds_in_driver=%ld\n", 5972 un->un_ncmds_in_driver); 5973 5974 /* 5975 * See if the device is not busy, ie.: 5976 * - we have no commands in the driver for this device 5977 * - not waiting for resources 5978 */ 5979 if ((un->un_ncmds_in_driver == 0) && 5980 (un->un_state != SD_STATE_RWAIT)) { 5981 /* 5982 * The device is not busy, so it is OK to go to low power state. 5983 * Indicate low power, but rely on someone else to actually 5984 * change it. 5985 */ 5986 mutex_enter(&un->un_pm_mutex); 5987 un->un_pm_count = -1; 5988 mutex_exit(&un->un_pm_mutex); 5989 un->un_power_level = SD_SPINDLE_OFF; 5990 } 5991 5992 mutex_exit(SD_MUTEX(un)); 5993 5994 SD_TRACE(SD_LOG_POWER, un, "sd_ddi_pm_suspend: exit\n"); 5995 5996 return (DDI_SUCCESS); 5997 } 5998 5999 6000 /* 6001 * Function: sd_ddi_resume 6002 * 6003 * Description: Performs system power-up operations.. 6004 * 6005 * Return Code: DDI_SUCCESS 6006 * DDI_FAILURE 6007 * 6008 * Context: Kernel thread context 6009 */ 6010 6011 static int 6012 sd_ddi_resume(dev_info_t *devi) 6013 { 6014 struct sd_lun *un; 6015 6016 un = ddi_get_soft_state(sd_state, ddi_get_instance(devi)); 6017 if (un == NULL) { 6018 return (DDI_FAILURE); 6019 } 6020 6021 SD_TRACE(SD_LOG_IO_PM, un, "sd_ddi_resume: entry\n"); 6022 6023 mutex_enter(SD_MUTEX(un)); 6024 Restore_state(un); 6025 6026 /* 6027 * Restore the state which was saved to give the 6028 * the right state in un_last_state 6029 */ 6030 un->un_last_state = un->un_save_state; 6031 /* 6032 * Note: throttle comes back at full. 6033 * Also note: this MUST be done before calling pm_raise_power 6034 * otherwise the system can get hung in biowait. The scenario where 6035 * this'll happen is under cpr suspend. 
Writing of the system 6036 * state goes through sddump, which writes 0 to un_throttle. If 6037 * writing the system state then fails, example if the partition is 6038 * too small, then cpr attempts a resume. If throttle isn't restored 6039 * from the saved value until after calling pm_raise_power then 6040 * cmds sent in sdpower are not transported and sd_send_scsi_cmd hangs 6041 * in biowait. 6042 */ 6043 un->un_throttle = un->un_saved_throttle; 6044 6045 /* 6046 * The chance of failure is very rare as the only command done in power 6047 * entry point is START command when you transition from 0->1 or 6048 * unknown->1. Put it to SPINDLE ON state irrespective of the state at 6049 * which suspend was done. Ignore the return value as the resume should 6050 * not be failed. In the case of removable media the media need not be 6051 * inserted and hence there is a chance that raise power will fail with 6052 * media not present. 6053 */ 6054 if (un->un_f_attach_spinup) { 6055 mutex_exit(SD_MUTEX(un)); 6056 (void) pm_raise_power(SD_DEVINFO(un), 0, SD_SPINDLE_ON); 6057 mutex_enter(SD_MUTEX(un)); 6058 } 6059 6060 /* 6061 * Don't broadcast to the suspend cv and therefore possibly 6062 * start I/O until after power has been restored. 6063 */ 6064 cv_broadcast(&un->un_suspend_cv); 6065 cv_broadcast(&un->un_state_cv); 6066 6067 /* restart thread */ 6068 if (SD_OK_TO_RESUME_SCSI_WATCHER(un)) { 6069 scsi_watch_resume(un->un_swr_token); 6070 } 6071 6072 #if (defined(__fibre)) 6073 if (un->un_f_is_fibre == TRUE) { 6074 /* 6075 * Add callbacks for insert and remove events 6076 */ 6077 if (strcmp(un->un_node_type, DDI_NT_BLOCK_CHAN)) { 6078 sd_init_event_callbacks(un); 6079 } 6080 } 6081 #endif 6082 6083 /* 6084 * Transport any pending commands to the target. 6085 * 6086 * If this is a low-activity device commands in queue will have to wait 6087 * until new commands come in, which may take awhile. Also, we 6088 * specifically don't check un_ncmds_in_transport because we know that 6089 * there really are no commands in progress after the unit was 6090 * suspended and we could have reached the throttle level, been 6091 * suspended, and have no new commands coming in for awhile. Highly 6092 * unlikely, but so is the low-activity disk scenario. 6093 */ 6094 ddi_xbuf_dispatch(un->un_xbuf_attr); 6095 6096 sd_start_cmds(un, NULL); 6097 mutex_exit(SD_MUTEX(un)); 6098 6099 SD_TRACE(SD_LOG_IO_PM, un, "sd_ddi_resume: exit\n"); 6100 6101 return (DDI_SUCCESS); 6102 } 6103 6104 6105 /* 6106 * Function: sd_ddi_pm_resume 6107 * 6108 * Description: Set the drive state to powered on. 6109 * Someone else is required to actually change the drive 6110 * power level. 6111 * 6112 * Arguments: un - driver soft state (unit) structure 6113 * 6114 * Return Code: DDI_SUCCESS 6115 * 6116 * Context: Kernel thread context 6117 */ 6118 6119 static int 6120 sd_ddi_pm_resume(struct sd_lun *un) 6121 { 6122 ASSERT(un != NULL); 6123 6124 ASSERT(!mutex_owned(SD_MUTEX(un))); 6125 mutex_enter(SD_MUTEX(un)); 6126 un->un_power_level = SD_SPINDLE_ON; 6127 6128 ASSERT(!mutex_owned(&un->un_pm_mutex)); 6129 mutex_enter(&un->un_pm_mutex); 6130 if (SD_DEVICE_IS_IN_LOW_POWER(un)) { 6131 un->un_pm_count++; 6132 ASSERT(un->un_pm_count == 0); 6133 /* 6134 * Note: no longer do the cv_broadcast on un_suspend_cv. The 6135 * un_suspend_cv is for a system resume, not a power management 6136 * device resume. 
(4297749)
		 * cv_broadcast(&un->un_suspend_cv);
		 */
	}
	mutex_exit(&un->un_pm_mutex);
	mutex_exit(SD_MUTEX(un));

	return (DDI_SUCCESS);
}


/*
 *    Function: sd_pm_idletimeout_handler
 *
 * Description: A timer routine that's active only while a device is busy.
 *		The purpose is to extend slightly the pm framework's busy
 *		view of the device to prevent busy/idle thrashing for
 *		back-to-back commands. Do this by comparing the current time
 *		to the time at which the last command completed and when the
 *		difference is greater than sd_pm_idletime, call
 *		pm_idle_component. In addition to indicating idle to the pm
 *		framework, update the chain type to again use the internal pm
 *		layers of the driver.
 *
 *   Arguments: arg - driver soft state (unit) structure
 *
 *     Context: Executes in a timeout(9F) thread context
 */

static void
sd_pm_idletimeout_handler(void *arg)
{
	struct sd_lun *un = arg;

	time_t	now;

	mutex_enter(&sd_detach_mutex);
	if (un->un_detach_count != 0) {
		/* Abort if the instance is detaching */
		mutex_exit(&sd_detach_mutex);
		return;
	}
	mutex_exit(&sd_detach_mutex);

	now = ddi_get_time();
	/*
	 * Grab both mutexes, in the proper order, since we're accessing
	 * both PM and softstate variables.
	 */
	mutex_enter(SD_MUTEX(un));
	mutex_enter(&un->un_pm_mutex);
	if (((now - un->un_pm_idle_time) > sd_pm_idletime) &&
	    (un->un_ncmds_in_driver == 0) && (un->un_pm_count == 0)) {
		/*
		 * Update the chain types.
		 * This takes effect on the next new command received.
		 */
		if (un->un_f_non_devbsize_supported) {
			un->un_buf_chain_type = SD_CHAIN_INFO_RMMEDIA;
		} else {
			un->un_buf_chain_type = SD_CHAIN_INFO_DISK;
		}
		un->un_uscsi_chain_type = SD_CHAIN_INFO_USCSI_CMD;

		SD_TRACE(SD_LOG_IO_PM, un,
		    "sd_pm_idletimeout_handler: idling device\n");
		(void) pm_idle_component(SD_DEVINFO(un), 0);
		un->un_pm_idle_timeid = NULL;
	} else {
		un->un_pm_idle_timeid =
		    timeout(sd_pm_idletimeout_handler, un,
		    (drv_usectohz((clock_t)300000))); /* 300 ms. */
	}
	mutex_exit(&un->un_pm_mutex);
	mutex_exit(SD_MUTEX(un));
}


/*
 *    Function: sd_pm_timeout_handler
 *
 * Description: Callback to tell the framework we are idle.
 *
 *     Context: timeout(9F) thread context.
 */

static void
sd_pm_timeout_handler(void *arg)
{
	struct sd_lun *un = arg;

	(void) pm_idle_component(SD_DEVINFO(un), 0);
	mutex_enter(&un->un_pm_mutex);
	un->un_pm_timeid = NULL;
	mutex_exit(&un->un_pm_mutex);
}


/*
 *    Function: sdpower
 *
 * Description: PM entry point.
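 *
 *		For illustration only (a sketch of the component/level model
 *		this entry point serves, not driver code): sd exposes a
 *		single component 0 whose levels are the spindle states, so
 *		framework-initiated transitions reduce to calls such as
 *
 *			(void) pm_raise_power(dip, 0, SD_SPINDLE_ON);
 *			(void) pm_lower_power(dip, 0, SD_SPINDLE_OFF);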
6238 * 6239 * Return Code: DDI_SUCCESS 6240 * DDI_FAILURE 6241 * 6242 * Context: Kernel thread context 6243 */ 6244 6245 static int 6246 sdpower(dev_info_t *devi, int component, int level) 6247 { 6248 struct sd_lun *un; 6249 int instance; 6250 int rval = DDI_SUCCESS; 6251 uint_t i, log_page_size, maxcycles, ncycles; 6252 uchar_t *log_page_data; 6253 int log_sense_page; 6254 int medium_present; 6255 time_t intvlp; 6256 dev_t dev; 6257 struct pm_trans_data sd_pm_tran_data; 6258 uchar_t save_state; 6259 int sval; 6260 uchar_t state_before_pm; 6261 int got_semaphore_here; 6262 sd_ssc_t *ssc; 6263 6264 instance = ddi_get_instance(devi); 6265 6266 if (((un = ddi_get_soft_state(sd_state, instance)) == NULL) || 6267 (SD_SPINDLE_OFF > level) || (level > SD_SPINDLE_ON) || 6268 component != 0) { 6269 return (DDI_FAILURE); 6270 } 6271 6272 dev = sd_make_device(SD_DEVINFO(un)); 6273 ssc = sd_ssc_init(un); 6274 6275 SD_TRACE(SD_LOG_IO_PM, un, "sdpower: entry, level = %d\n", level); 6276 6277 /* 6278 * Must synchronize power down with close. 6279 * Attempt to decrement/acquire the open/close semaphore, 6280 * but do NOT wait on it. If it's not greater than zero, 6281 * ie. it can't be decremented without waiting, then 6282 * someone else, either open or close, already has it 6283 * and the try returns 0. Use that knowledge here to determine 6284 * if it's OK to change the device power level. 6285 * Also, only increment it on exit if it was decremented, ie. gotten, 6286 * here. 6287 */ 6288 got_semaphore_here = sema_tryp(&un->un_semoclose); 6289 6290 mutex_enter(SD_MUTEX(un)); 6291 6292 SD_INFO(SD_LOG_POWER, un, "sdpower: un_ncmds_in_driver = %ld\n", 6293 un->un_ncmds_in_driver); 6294 6295 /* 6296 * If un_ncmds_in_driver is non-zero it indicates commands are 6297 * already being processed in the driver, or if the semaphore was 6298 * not gotten here it indicates an open or close is being processed. 6299 * At the same time somebody is requesting to go low power which 6300 * can't happen, therefore we need to return failure. 6301 */ 6302 if ((level == SD_SPINDLE_OFF) && 6303 ((un->un_ncmds_in_driver != 0) || (got_semaphore_here == 0))) { 6304 mutex_exit(SD_MUTEX(un)); 6305 6306 if (got_semaphore_here != 0) { 6307 sema_v(&un->un_semoclose); 6308 } 6309 SD_TRACE(SD_LOG_IO_PM, un, 6310 "sdpower: exit, device has queued cmds.\n"); 6311 6312 goto sdpower_failed; 6313 } 6314 6315 /* 6316 * if it is OFFLINE that means the disk is completely dead 6317 * in our case we have to put the disk in on or off by sending commands 6318 * Of course that will fail anyway so return back here. 6319 * 6320 * Power changes to a device that's OFFLINE or SUSPENDED 6321 * are not allowed. 6322 */ 6323 if ((un->un_state == SD_STATE_OFFLINE) || 6324 (un->un_state == SD_STATE_SUSPENDED)) { 6325 mutex_exit(SD_MUTEX(un)); 6326 6327 if (got_semaphore_here != 0) { 6328 sema_v(&un->un_semoclose); 6329 } 6330 SD_TRACE(SD_LOG_IO_PM, un, 6331 "sdpower: exit, device is off-line.\n"); 6332 6333 goto sdpower_failed; 6334 } 6335 6336 /* 6337 * Change the device's state to indicate it's power level 6338 * is being changed. Do this to prevent a power off in the 6339 * middle of commands, which is especially bad on devices 6340 * that are really powered off instead of just spun down. 
	 */
	state_before_pm = un->un_state;
	un->un_state = SD_STATE_PM_CHANGING;

	mutex_exit(SD_MUTEX(un));

	/*
	 * If the "pm-capable" property is set to TRUE by HBA drivers,
	 * bypass the following checking; otherwise, check the log
	 * sense information for this device.
	 */
	if ((level == SD_SPINDLE_OFF) && un->un_f_log_sense_supported) {
		/*
		 * Get the log sense information to determine whether the
		 * power cycle counts have gone beyond the threshold.
		 */
		log_page_size = START_STOP_CYCLE_COUNTER_PAGE_SIZE;
		log_page_data = kmem_zalloc(log_page_size, KM_SLEEP);

		mutex_enter(SD_MUTEX(un));
		log_sense_page = un->un_start_stop_cycle_page;
		mutex_exit(SD_MUTEX(un));

		rval = sd_send_scsi_LOG_SENSE(ssc, log_page_data,
		    log_page_size, log_sense_page, 0x01, 0, SD_PATH_DIRECT);

		if (rval != 0) {
			if (rval == EIO)
				sd_ssc_assessment(ssc, SD_FMT_STATUS_CHECK);
			else
				sd_ssc_assessment(ssc, SD_FMT_IGNORE);
		}

#ifdef	SDDEBUG
		if (sd_force_pm_supported) {
			/* Force a successful result */
			rval = 0;
		}
#endif
		if (rval != 0) {
			scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
			    "Log Sense Failed\n");

			kmem_free(log_page_data, log_page_size);
			/* Cannot support power management on those drives */

			if (got_semaphore_here != 0) {
				sema_v(&un->un_semoclose);
			}
			/*
			 * On exit put the state back to its original value
			 * and broadcast to anyone waiting for the power
			 * change completion.
			 */
			mutex_enter(SD_MUTEX(un));
			un->un_state = state_before_pm;
			cv_broadcast(&un->un_suspend_cv);
			mutex_exit(SD_MUTEX(un));
			SD_TRACE(SD_LOG_IO_PM, un,
			    "sdpower: exit, Log Sense Failed.\n");

			goto sdpower_failed;
		}

		/*
		 * From the page data, convert the essential information to
		 * pm_trans_data.
		 */
		maxcycles =
		    (log_page_data[0x1c] << 24) | (log_page_data[0x1d] << 16) |
		    (log_page_data[0x1e] << 8) | log_page_data[0x1f];

		sd_pm_tran_data.un.scsi_cycles.lifemax = maxcycles;

		ncycles =
		    (log_page_data[0x24] << 24) | (log_page_data[0x25] << 16) |
		    (log_page_data[0x26] << 8) | log_page_data[0x27];

		sd_pm_tran_data.un.scsi_cycles.ncycles = ncycles;

		for (i = 0; i < DC_SCSI_MFR_LEN; i++) {
			sd_pm_tran_data.un.scsi_cycles.svc_date[i] =
			    log_page_data[8 + i];
		}

		kmem_free(log_page_data, log_page_size);

		/*
		 * Call the pm_trans_check routine to get the Ok from
		 * the global policy.
		 */
		sd_pm_tran_data.format = DC_SCSI_FORMAT;
		sd_pm_tran_data.un.scsi_cycles.flag = 0;

		rval = pm_trans_check(&sd_pm_tran_data, &intvlp);
#ifdef	SDDEBUG
		if (sd_force_pm_supported) {
			/* Force a successful result */
			rval = 1;
		}
#endif
		switch (rval) {
		case 0:
			/*
			 * Not OK to power cycle, or an error occurred in the
			 * parameters passed. pm_trans_check has returned the
			 * advised time (intvlp) before the next power cycle
			 * should be considered. Based on that intvlp
			 * parameter we are supposed to pretend we are busy
			 * so that the pm framework will never call our power
			 * entry point. Because of that, install a timeout
			 * handler and wait for the recommended time to
			 * elapse so that power management can be effective
			 * again.
			 *
			 * To effect this behavior, call pm_busy_component to
			 * indicate to the framework this device is busy.
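			 *
			 * For illustration only (restating the busy/idle
			 * pairing used here, not additional logic):
			 *
			 *	(void) pm_busy_component(SD_DEVINFO(un), 0);
			 *	un->un_pm_timeid = timeout(
			 *	    sd_pm_timeout_handler, un,
			 *	    intvlp * drv_usectohz(1000000));
			 *	-- sd_pm_timeout_handler() later issues the
			 *	-- matching pm_idle_component().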
			 * By not adjusting un_pm_count, the rest of PM in
			 * the driver functions normally, independent of
			 * this. But because the framework is told the device
			 * is busy, it won't attempt powering down until it
			 * gets a matching idle, which the timeout handler
			 * sends.
			 * Note: sd_pm_entry can't be called here to do this
			 * because sdpower may have been called as a result
			 * of a call to pm_raise_power from within
			 * sd_pm_entry.
			 *
			 * If a timeout handler is already active then
			 * don't install another.
			 */
			mutex_enter(&un->un_pm_mutex);
			if (un->un_pm_timeid == NULL) {
				un->un_pm_timeid =
				    timeout(sd_pm_timeout_handler,
				    un, intvlp * drv_usectohz(1000000));
				mutex_exit(&un->un_pm_mutex);
				(void) pm_busy_component(SD_DEVINFO(un), 0);
			} else {
				mutex_exit(&un->un_pm_mutex);
			}
			if (got_semaphore_here != 0) {
				sema_v(&un->un_semoclose);
			}
			/*
			 * On exit put the state back to its original value
			 * and broadcast to anyone waiting for the power
			 * change completion.
			 */
			mutex_enter(SD_MUTEX(un));
			un->un_state = state_before_pm;
			cv_broadcast(&un->un_suspend_cv);
			mutex_exit(SD_MUTEX(un));

			SD_TRACE(SD_LOG_IO_PM, un, "sdpower: exit, "
			    "trans check Failed, not ok to power cycle.\n");

			goto sdpower_failed;
		case -1:
			if (got_semaphore_here != 0) {
				sema_v(&un->un_semoclose);
			}
			/*
			 * On exit put the state back to its original value
			 * and broadcast to anyone waiting for the power
			 * change completion.
			 */
			mutex_enter(SD_MUTEX(un));
			un->un_state = state_before_pm;
			cv_broadcast(&un->un_suspend_cv);
			mutex_exit(SD_MUTEX(un));
			SD_TRACE(SD_LOG_IO_PM, un,
			    "sdpower: exit, trans check command Failed.\n");

			goto sdpower_failed;
		}
	}

	if (level == SD_SPINDLE_OFF) {
		/*
		 * Save the last state... if the STOP FAILS we need it
		 * for restoring.
		 */
		mutex_enter(SD_MUTEX(un));
		save_state = un->un_last_state;
		/*
		 * There must not be any commands getting processed
		 * in the driver when we get here. Power to the
		 * device is potentially going off.
		 */
		ASSERT(un->un_ncmds_in_driver == 0);
		mutex_exit(SD_MUTEX(un));

		/*
		 * For now, suspend the device completely before the spindle
		 * is turned off.
		 */
		if ((rval = sd_ddi_pm_suspend(un)) == DDI_FAILURE) {
			if (got_semaphore_here != 0) {
				sema_v(&un->un_semoclose);
			}
			/*
			 * On exit put the state back to its original value
			 * and broadcast to anyone waiting for the power
			 * change completion.
			 */
			mutex_enter(SD_MUTEX(un));
			un->un_state = state_before_pm;
			cv_broadcast(&un->un_suspend_cv);
			mutex_exit(SD_MUTEX(un));
			SD_TRACE(SD_LOG_IO_PM, un,
			    "sdpower: exit, PM suspend Failed.\n");

			goto sdpower_failed;
		}
	}

	/*
	 * The transition from SPINDLE_OFF to SPINDLE_ON can happen in open,
	 * close, or strategy. Dump no longer uses this routine; it uses its
	 * own code so it can be done in polled mode.
	 */

	medium_present = TRUE;

	/*
	 * When powering up, issue a TUR in case the device is at unit
	 * attention. Don't do retries. Bypass the PM layer, otherwise
	 * a deadlock on un_pm_busy_cv will occur.
6567 */ 6568 if (level == SD_SPINDLE_ON) { 6569 sval = sd_send_scsi_TEST_UNIT_READY(ssc, 6570 SD_DONT_RETRY_TUR | SD_BYPASS_PM); 6571 if (sval != 0) 6572 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 6573 } 6574 6575 SD_TRACE(SD_LOG_IO_PM, un, "sdpower: sending \'%s\' unit\n", 6576 ((level == SD_SPINDLE_ON) ? "START" : "STOP")); 6577 6578 sval = sd_send_scsi_START_STOP_UNIT(ssc, 6579 ((level == SD_SPINDLE_ON) ? SD_TARGET_START : SD_TARGET_STOP), 6580 SD_PATH_DIRECT); 6581 if (sval != 0) { 6582 if (sval == EIO) 6583 sd_ssc_assessment(ssc, SD_FMT_STATUS_CHECK); 6584 else 6585 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 6586 } 6587 6588 /* Command failed, check for media present. */ 6589 if ((sval == ENXIO) && un->un_f_has_removable_media) { 6590 medium_present = FALSE; 6591 } 6592 6593 /* 6594 * The conditions of interest here are: 6595 * if a spindle off with media present fails, 6596 * then restore the state and return an error. 6597 * else if a spindle on fails, 6598 * then return an error (there's no state to restore). 6599 * In all other cases we setup for the new state 6600 * and return success. 6601 */ 6602 switch (level) { 6603 case SD_SPINDLE_OFF: 6604 if ((medium_present == TRUE) && (sval != 0)) { 6605 /* The stop command from above failed */ 6606 rval = DDI_FAILURE; 6607 /* 6608 * The stop command failed, and we have media 6609 * present. Put the level back by calling the 6610 * sd_pm_resume() and set the state back to 6611 * it's previous value. 6612 */ 6613 (void) sd_ddi_pm_resume(un); 6614 mutex_enter(SD_MUTEX(un)); 6615 un->un_last_state = save_state; 6616 mutex_exit(SD_MUTEX(un)); 6617 break; 6618 } 6619 /* 6620 * The stop command from above succeeded. 6621 */ 6622 if (un->un_f_monitor_media_state) { 6623 /* 6624 * Terminate watch thread in case of removable media 6625 * devices going into low power state. This is as per 6626 * the requirements of pm framework, otherwise commands 6627 * will be generated for the device (through watch 6628 * thread), even when the device is in low power state. 6629 */ 6630 mutex_enter(SD_MUTEX(un)); 6631 un->un_f_watcht_stopped = FALSE; 6632 if (un->un_swr_token != NULL) { 6633 opaque_t temp_token = un->un_swr_token; 6634 un->un_f_watcht_stopped = TRUE; 6635 un->un_swr_token = NULL; 6636 mutex_exit(SD_MUTEX(un)); 6637 (void) scsi_watch_request_terminate(temp_token, 6638 SCSI_WATCH_TERMINATE_ALL_WAIT); 6639 } else { 6640 mutex_exit(SD_MUTEX(un)); 6641 } 6642 } 6643 break; 6644 6645 default: /* The level requested is spindle on... */ 6646 /* 6647 * Legacy behavior: return success on a failed spinup 6648 * if there is no media in the drive. 6649 * Do this by looking at medium_present here. 6650 */ 6651 if ((sval != 0) && medium_present) { 6652 /* The start command from above failed */ 6653 rval = DDI_FAILURE; 6654 break; 6655 } 6656 /* 6657 * The start command from above succeeded 6658 * Resume the devices now that we have 6659 * started the disks 6660 */ 6661 (void) sd_ddi_pm_resume(un); 6662 6663 /* 6664 * Resume the watch thread since it was suspended 6665 * when the device went into low power mode. 
6666 */ 6667 if (un->un_f_monitor_media_state) { 6668 mutex_enter(SD_MUTEX(un)); 6669 if (un->un_f_watcht_stopped == TRUE) { 6670 opaque_t temp_token; 6671 6672 un->un_f_watcht_stopped = FALSE; 6673 mutex_exit(SD_MUTEX(un)); 6674 temp_token = scsi_watch_request_submit( 6675 SD_SCSI_DEVP(un), 6676 sd_check_media_time, 6677 SENSE_LENGTH, sd_media_watch_cb, 6678 (caddr_t)dev); 6679 mutex_enter(SD_MUTEX(un)); 6680 un->un_swr_token = temp_token; 6681 } 6682 mutex_exit(SD_MUTEX(un)); 6683 } 6684 } 6685 if (got_semaphore_here != 0) { 6686 sema_v(&un->un_semoclose); 6687 } 6688 /* 6689 * On exit put the state back to it's original value 6690 * and broadcast to anyone waiting for the power 6691 * change completion. 6692 */ 6693 mutex_enter(SD_MUTEX(un)); 6694 un->un_state = state_before_pm; 6695 cv_broadcast(&un->un_suspend_cv); 6696 mutex_exit(SD_MUTEX(un)); 6697 6698 SD_TRACE(SD_LOG_IO_PM, un, "sdpower: exit, status = 0x%x\n", rval); 6699 6700 sd_ssc_fini(ssc); 6701 return (rval); 6702 6703 sdpower_failed: 6704 6705 sd_ssc_fini(ssc); 6706 return (DDI_FAILURE); 6707 } 6708 6709 6710 6711 /* 6712 * Function: sdattach 6713 * 6714 * Description: Driver's attach(9e) entry point function. 6715 * 6716 * Arguments: devi - opaque device info handle 6717 * cmd - attach type 6718 * 6719 * Return Code: DDI_SUCCESS 6720 * DDI_FAILURE 6721 * 6722 * Context: Kernel thread context 6723 */ 6724 6725 static int 6726 sdattach(dev_info_t *devi, ddi_attach_cmd_t cmd) 6727 { 6728 switch (cmd) { 6729 case DDI_ATTACH: 6730 return (sd_unit_attach(devi)); 6731 case DDI_RESUME: 6732 return (sd_ddi_resume(devi)); 6733 default: 6734 break; 6735 } 6736 return (DDI_FAILURE); 6737 } 6738 6739 6740 /* 6741 * Function: sddetach 6742 * 6743 * Description: Driver's detach(9E) entry point function. 6744 * 6745 * Arguments: devi - opaque device info handle 6746 * cmd - detach type 6747 * 6748 * Return Code: DDI_SUCCESS 6749 * DDI_FAILURE 6750 * 6751 * Context: Kernel thread context 6752 */ 6753 6754 static int 6755 sddetach(dev_info_t *devi, ddi_detach_cmd_t cmd) 6756 { 6757 switch (cmd) { 6758 case DDI_DETACH: 6759 return (sd_unit_detach(devi)); 6760 case DDI_SUSPEND: 6761 return (sd_ddi_suspend(devi)); 6762 default: 6763 break; 6764 } 6765 return (DDI_FAILURE); 6766 } 6767 6768 6769 /* 6770 * Function: sd_sync_with_callback 6771 * 6772 * Description: Prevents sd_unit_attach or sd_unit_detach from freeing the soft 6773 * state while the callback routine is active. 6774 * 6775 * Arguments: un: softstate structure for the instance 6776 * 6777 * Context: Kernel thread context 6778 */ 6779 6780 static void 6781 sd_sync_with_callback(struct sd_lun *un) 6782 { 6783 ASSERT(un != NULL); 6784 6785 mutex_enter(SD_MUTEX(un)); 6786 6787 ASSERT(un->un_in_callback >= 0); 6788 6789 while (un->un_in_callback > 0) { 6790 mutex_exit(SD_MUTEX(un)); 6791 delay(2); 6792 mutex_enter(SD_MUTEX(un)); 6793 } 6794 6795 mutex_exit(SD_MUTEX(un)); 6796 } 6797 6798 /* 6799 * Function: sd_unit_attach 6800 * 6801 * Description: Performs DDI_ATTACH processing for sdattach(). Allocates 6802 * the soft state structure for the device and performs 6803 * all necessary structure and device initializations. 6804 * 6805 * Arguments: devi: the system's dev_info_t for the device. 6806 * 6807 * Return Code: DDI_SUCCESS if attach is successful. 6808 * DDI_FAILURE if any part of the attach fails. 6809 * 6810 * Context: Called at attach(9e) time for the DDI_ATTACH flag. 6811 * Kernel thread context only. Can sleep. 
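 *
 *		For illustration only (an outline of the major steps that
 *		follow, not additional logic): the attach path essentially
 *		performs, in order,
 *
 *			scsi_probe(devp, SLEEP_FUNC);	  -- INQUIRY data
 *			ddi_soft_state_zalloc(sd_state, instance);
 *			sd_read_unit_properties(un);	  -- sd.conf et al.
 *			sd_setup_pm(ssc, devi);		  -- pm-components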
6812 */ 6813 6814 static int 6815 sd_unit_attach(dev_info_t *devi) 6816 { 6817 struct scsi_device *devp; 6818 struct sd_lun *un; 6819 char *variantp; 6820 int reservation_flag = SD_TARGET_IS_UNRESERVED; 6821 int instance; 6822 int rval; 6823 int wc_enabled; 6824 int tgt; 6825 uint64_t capacity; 6826 uint_t lbasize = 0; 6827 dev_info_t *pdip = ddi_get_parent(devi); 6828 int offbyone = 0; 6829 int geom_label_valid = 0; 6830 sd_ssc_t *ssc; 6831 int status; 6832 struct sd_fm_internal *sfip = NULL; 6833 #if defined(__sparc) 6834 int max_xfer_size; 6835 #endif 6836 6837 /* 6838 * Retrieve the target driver's private data area. This was set 6839 * up by the HBA. 6840 */ 6841 devp = ddi_get_driver_private(devi); 6842 6843 /* 6844 * Retrieve the target ID of the device. 6845 */ 6846 tgt = ddi_prop_get_int(DDI_DEV_T_ANY, devi, DDI_PROP_DONTPASS, 6847 SCSI_ADDR_PROP_TARGET, -1); 6848 6849 /* 6850 * Since we have no idea what state things were left in by the last 6851 * user of the device, set up some 'default' settings, ie. turn 'em 6852 * off. The scsi_ifsetcap calls force re-negotiations with the drive. 6853 * Do this before the scsi_probe, which sends an inquiry. 6854 * This is a fix for bug (4430280). 6855 * Of special importance is wide-xfer. The drive could have been left 6856 * in wide transfer mode by the last driver to communicate with it, 6857 * this includes us. If that's the case, and if the following is not 6858 * setup properly or we don't re-negotiate with the drive prior to 6859 * transferring data to/from the drive, it causes bus parity errors, 6860 * data overruns, and unexpected interrupts. This first occurred when 6861 * the fix for bug (4378686) was made. 6862 */ 6863 (void) scsi_ifsetcap(&devp->sd_address, "lun-reset", 0, 1); 6864 (void) scsi_ifsetcap(&devp->sd_address, "wide-xfer", 0, 1); 6865 (void) scsi_ifsetcap(&devp->sd_address, "auto-rqsense", 0, 1); 6866 6867 /* 6868 * Currently, scsi_ifsetcap sets tagged-qing capability for all LUNs 6869 * on a target. Setting it per lun instance actually sets the 6870 * capability of this target, which affects those luns already 6871 * attached on the same target. So during attach, we can only disable 6872 * this capability only when no other lun has been attached on this 6873 * target. By doing this, we assume a target has the same tagged-qing 6874 * capability for every lun. The condition can be removed when HBA 6875 * is changed to support per lun based tagged-qing capability. 6876 */ 6877 if (sd_scsi_get_target_lun_count(pdip, tgt) < 1) { 6878 (void) scsi_ifsetcap(&devp->sd_address, "tagged-qing", 0, 1); 6879 } 6880 6881 /* 6882 * Use scsi_probe() to issue an INQUIRY command to the device. 6883 * This call will allocate and fill in the scsi_inquiry structure 6884 * and point the sd_inq member of the scsi_device structure to it. 6885 * If the attach succeeds, then this memory will not be de-allocated 6886 * (via scsi_unprobe()) until the instance is detached. 6887 */ 6888 if (scsi_probe(devp, SLEEP_FUNC) != SCSIPROBE_EXISTS) { 6889 goto probe_failed; 6890 } 6891 6892 /* 6893 * Check the device type as specified in the inquiry data and 6894 * claim it if it is of a type that we support. 6895 */ 6896 switch (devp->sd_inq->inq_dtype) { 6897 case DTYPE_DIRECT: 6898 break; 6899 case DTYPE_RODIRECT: 6900 break; 6901 case DTYPE_OPTICAL: 6902 break; 6903 case DTYPE_NOTPRESENT: 6904 default: 6905 /* Unsupported device type; fail the attach. */ 6906 goto probe_failed; 6907 } 6908 6909 /* 6910 * Allocate the soft state structure for this unit. 
6911 * 6912 * We rely upon this memory being set to all zeroes by 6913 * ddi_soft_state_zalloc(). We assume that any member of the 6914 * soft state structure that is not explicitly initialized by 6915 * this routine will have a value of zero. 6916 */ 6917 instance = ddi_get_instance(devp->sd_dev); 6918 if (ddi_soft_state_zalloc(sd_state, instance) != DDI_SUCCESS) { 6919 goto probe_failed; 6920 } 6921 6922 /* 6923 * Retrieve a pointer to the newly-allocated soft state. 6924 * 6925 * This should NEVER fail if the ddi_soft_state_zalloc() call above 6926 * was successful, unless something has gone horribly wrong and the 6927 * ddi's soft state internals are corrupt (in which case it is 6928 * probably better to halt here than just fail the attach....) 6929 */ 6930 if ((un = ddi_get_soft_state(sd_state, instance)) == NULL) { 6931 panic("sd_unit_attach: NULL soft state on instance:0x%x", 6932 instance); 6933 /*NOTREACHED*/ 6934 } 6935 6936 /* 6937 * Link the back ptr of the driver soft state to the scsi_device 6938 * struct for this lun. 6939 * Save a pointer to the softstate in the driver-private area of 6940 * the scsi_device struct. 6941 * Note: We cannot call SD_INFO, SD_TRACE, SD_ERROR, or SD_DIAG until 6942 * we first set un->un_sd below. 6943 */ 6944 un->un_sd = devp; 6945 devp->sd_private = (opaque_t)un; 6946 6947 /* 6948 * The following must be after devp is stored in the soft state struct. 6949 */ 6950 #ifdef SDDEBUG 6951 SD_TRACE(SD_LOG_ATTACH_DETACH, un, 6952 "%s_unit_attach: un:0x%p instance:%d\n", 6953 ddi_driver_name(devi), un, instance); 6954 #endif 6955 6956 /* 6957 * Set up the device type and node type (for the minor nodes). 6958 * By default we assume that the device can at least support the 6959 * Common Command Set. Call it a CD-ROM if it reports itself 6960 * as a RODIRECT device. 6961 */ 6962 switch (devp->sd_inq->inq_dtype) { 6963 case DTYPE_RODIRECT: 6964 un->un_node_type = DDI_NT_CD_CHAN; 6965 un->un_ctype = CTYPE_CDROM; 6966 break; 6967 case DTYPE_OPTICAL: 6968 un->un_node_type = DDI_NT_BLOCK_CHAN; 6969 un->un_ctype = CTYPE_ROD; 6970 break; 6971 default: 6972 un->un_node_type = DDI_NT_BLOCK_CHAN; 6973 un->un_ctype = CTYPE_CCS; 6974 break; 6975 } 6976 6977 /* 6978 * Try to read the interconnect type from the HBA. 6979 * 6980 * Note: This driver is currently compiled as two binaries, a parallel 6981 * scsi version (sd) and a fibre channel version (ssd). All functional 6982 * differences are determined at compile time. In the future a single 6983 * binary will be provided and the interconnect type will be used to 6984 * differentiate between fibre and parallel scsi behaviors. At that time 6985 * it will be necessary for all fibre channel HBAs to support this 6986 * property. 
	 *
	 * Set un_f_is_fibre to TRUE (default fibre).
	 */
	un->un_f_is_fibre = TRUE;
	switch (scsi_ifgetcap(SD_ADDRESS(un), "interconnect-type", -1)) {
	case INTERCONNECT_SSA:
		un->un_interconnect_type = SD_INTERCONNECT_SSA;
		SD_INFO(SD_LOG_ATTACH_DETACH, un,
		    "sd_unit_attach: un:0x%p SD_INTERCONNECT_SSA\n", un);
		break;
	case INTERCONNECT_PARALLEL:
		un->un_f_is_fibre = FALSE;
		un->un_interconnect_type = SD_INTERCONNECT_PARALLEL;
		SD_INFO(SD_LOG_ATTACH_DETACH, un,
		    "sd_unit_attach: un:0x%p SD_INTERCONNECT_PARALLEL\n", un);
		break;
	case INTERCONNECT_SATA:
		un->un_f_is_fibre = FALSE;
		un->un_interconnect_type = SD_INTERCONNECT_SATA;
		SD_INFO(SD_LOG_ATTACH_DETACH, un,
		    "sd_unit_attach: un:0x%p SD_INTERCONNECT_SATA\n", un);
		break;
	case INTERCONNECT_FIBRE:
		un->un_interconnect_type = SD_INTERCONNECT_FIBRE;
		SD_INFO(SD_LOG_ATTACH_DETACH, un,
		    "sd_unit_attach: un:0x%p SD_INTERCONNECT_FIBRE\n", un);
		break;
	case INTERCONNECT_FABRIC:
		un->un_interconnect_type = SD_INTERCONNECT_FABRIC;
		un->un_node_type = DDI_NT_BLOCK_FABRIC;
		SD_INFO(SD_LOG_ATTACH_DETACH, un,
		    "sd_unit_attach: un:0x%p SD_INTERCONNECT_FABRIC\n", un);
		break;
	default:
#ifdef SD_DEFAULT_INTERCONNECT_TYPE
		/*
		 * The HBA does not support the "interconnect-type" property
		 * (or did not provide a recognized type).
		 *
		 * Note: This will be obsoleted when a single fibre channel
		 * and parallel scsi driver is delivered. In the meantime the
		 * interconnect type will be set to the platform default. If
		 * that type is not parallel SCSI, it means that we should be
		 * assuming "ssd" semantics. However, here this also means
		 * that the FC HBA is not supporting the "interconnect-type"
		 * property like we expect it to, so log this occurrence.
		 */
		un->un_interconnect_type = SD_DEFAULT_INTERCONNECT_TYPE;
		if (!SD_IS_PARALLEL_SCSI(un)) {
			SD_INFO(SD_LOG_ATTACH_DETACH, un,
			    "sd_unit_attach: un:0x%p Assuming "
			    "INTERCONNECT_FIBRE\n", un);
		} else {
			SD_INFO(SD_LOG_ATTACH_DETACH, un,
			    "sd_unit_attach: un:0x%p Assuming "
			    "INTERCONNECT_PARALLEL\n", un);
			un->un_f_is_fibre = FALSE;
		}
#else
		/*
		 * Note: This source will be implemented when a single fibre
		 * channel and parallel scsi driver is delivered. The default
		 * will be to assume that if a device does not support the
		 * "interconnect-type" property it is a parallel SCSI HBA and
		 * we will set the interconnect type for parallel scsi.
		 */
		un->un_interconnect_type = SD_INTERCONNECT_PARALLEL;
		un->un_f_is_fibre = FALSE;
#endif
		break;
	}

	if (un->un_f_is_fibre == TRUE) {
		if (scsi_ifgetcap(SD_ADDRESS(un), "scsi-version", 1) ==
		    SCSI_VERSION_3) {
			switch (un->un_interconnect_type) {
			case SD_INTERCONNECT_FIBRE:
			case SD_INTERCONNECT_SSA:
				un->un_node_type = DDI_NT_BLOCK_WWN;
				break;
			default:
				break;
			}
		}
	}

	/*
	 * Initialize the Request Sense command for the target.
	 */
	if (sd_alloc_rqs(devp, un) != DDI_SUCCESS) {
		goto alloc_rqs_failed;
	}

	/*
	 * Set un_retry_count with SD_RETRY_COUNT; this is ok for Sparc
	 * with separate binaries for sd and ssd.
	 *
	 * x86 has one binary, and there un_retry_count is set based on the
	 * connection type. The hardcoded values will go away when Sparc
	 * uses one binary for sd and ssd.
These hardcoded values need to match 7087 * SD_RETRY_COUNT in sddef.h. 7088 * The value used is based on the interconnect type: 7089 * fibre = 3, parallel = 5. 7090 */ 7091 #if defined(__i386) || defined(__amd64) 7092 un->un_retry_count = un->un_f_is_fibre ? 3 : 5; 7093 #else 7094 un->un_retry_count = SD_RETRY_COUNT; 7095 #endif 7096 7097 /* 7098 * Set the per disk retry count to the default number of retries 7099 * for disks and CDROMs. This value can be overridden by the 7100 * disk property list or an entry in sd.conf. 7101 */ 7102 un->un_notready_retry_count = 7103 ISCD(un) ? CD_NOT_READY_RETRY_COUNT(un) 7104 : DISK_NOT_READY_RETRY_COUNT(un); 7105 7106 /* 7107 * Set the busy retry count to the default value of un_retry_count. 7108 * This can be overridden by entries in sd.conf or the device 7109 * config table. 7110 */ 7111 un->un_busy_retry_count = un->un_retry_count; 7112 7113 /* 7114 * Init the reset threshold for retries. This number determines 7115 * how many retries must be performed before a reset can be issued 7116 * (for certain error conditions). This can be overridden by entries 7117 * in sd.conf or the device config table. 7118 */ 7119 un->un_reset_retry_count = (un->un_retry_count / 2); 7120 7121 /* 7122 * Set the victim_retry_count to twice the default un_retry_count. 7123 */ 7124 un->un_victim_retry_count = (2 * un->un_retry_count); 7125 7126 /* 7127 * Set the reservation release timeout to the default value of 7128 * 5 seconds. This can be overridden by entries in ssd.conf or the 7129 * device config table. 7130 */ 7131 un->un_reserve_release_time = 5; 7132 7133 /* 7134 * Set up the default maximum transfer size. Note that this may 7135 * get updated later in the attach, when setting up default wide 7136 * operations for disks. 7137 */ 7138 #if defined(__i386) || defined(__amd64) 7139 un->un_max_xfer_size = (uint_t)SD_DEFAULT_MAX_XFER_SIZE; 7140 un->un_partial_dma_supported = 1; 7141 #else 7142 un->un_max_xfer_size = (uint_t)maxphys; 7143 #endif 7144 7145 /* 7146 * Get "allow bus device reset" property (defaults to "enabled" if 7147 * the property was not defined). This is to disable bus resets for 7148 * certain kinds of error recovery. Note: In the future when a run-time 7149 * fibre check is available the soft state flag should default to 7150 * enabled. 7151 */ 7152 if (un->un_f_is_fibre == TRUE) { 7153 un->un_f_allow_bus_device_reset = TRUE; 7154 } else { 7155 if (ddi_getprop(DDI_DEV_T_ANY, devi, DDI_PROP_DONTPASS, 7156 "allow-bus-device-reset", 1) != 0) { 7157 un->un_f_allow_bus_device_reset = TRUE; 7158 SD_INFO(SD_LOG_ATTACH_DETACH, un, 7159 "sd_unit_attach: un:0x%p Bus device reset " 7160 "enabled\n", un); 7161 } else { 7162 un->un_f_allow_bus_device_reset = FALSE; 7163 SD_INFO(SD_LOG_ATTACH_DETACH, un, 7164 "sd_unit_attach: un:0x%p Bus device reset " 7165 "disabled\n", un); 7166 } 7167 } 7168 7169 /* 7170 * Check if this is an ATAPI device. ATAPI devices use Group 1 7171 * Read/Write commands and Group 2 Mode Sense/Select commands. 7172 * 7173 * Note: The "obsolete" way of doing this is to check for the "atapi" 7174 * property. The new "variant" property with a value of "atapi" has been 7175 * introduced so that future 'variants' of standard SCSI behavior (like 7176 * atapi) could be specified by the underlying HBA drivers by supplying 7177 * a new value for the "variant" property, instead of having to define a 7178 * new property.
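 * * As an illustrative sketch (not code from this driver), an HBA nexus could publish such a property on the target's child node from its own attach or tran_tgt_init path with the stock DDI interface: (void) ddi_prop_update_string(DDI_DEV_T_NONE, child_dip, "variant", "atapi"); where child_dip is a hypothetical devinfo pointer for the target node. sd only consumes the property, via the ddi_prop_lookup_string() call below.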
7179 */ 7180 if (ddi_prop_get_int(DDI_DEV_T_ANY, devi, 0, "atapi", -1) != -1) { 7181 un->un_f_cfg_is_atapi = TRUE; 7182 SD_INFO(SD_LOG_ATTACH_DETACH, un, 7183 "sd_unit_attach: un:0x%p Atapi device\n", un); 7184 } 7185 if (ddi_prop_lookup_string(DDI_DEV_T_ANY, devi, 0, "variant", 7186 &variantp) == DDI_PROP_SUCCESS) { 7187 if (strcmp(variantp, "atapi") == 0) { 7188 un->un_f_cfg_is_atapi = TRUE; 7189 SD_INFO(SD_LOG_ATTACH_DETACH, un, 7190 "sd_unit_attach: un:0x%p Atapi device\n", un); 7191 } 7192 ddi_prop_free(variantp); 7193 } 7194 7195 un->un_cmd_timeout = SD_IO_TIME; 7196 7197 un->un_busy_timeout = SD_BSY_TIMEOUT; 7198 7199 /* Info on current states, statuses, etc. (Updated frequently) */ 7200 un->un_state = SD_STATE_NORMAL; 7201 un->un_last_state = SD_STATE_NORMAL; 7202 7203 /* Control & status info for command throttling */ 7204 un->un_throttle = sd_max_throttle; 7205 un->un_saved_throttle = sd_max_throttle; 7206 un->un_min_throttle = sd_min_throttle; 7207 7208 if (un->un_f_is_fibre == TRUE) { 7209 un->un_f_use_adaptive_throttle = TRUE; 7210 } else { 7211 un->un_f_use_adaptive_throttle = FALSE; 7212 } 7213 7214 /* Removable media support. */ 7215 cv_init(&un->un_state_cv, NULL, CV_DRIVER, NULL); 7216 un->un_mediastate = DKIO_NONE; 7217 un->un_specified_mediastate = DKIO_NONE; 7218 7219 /* CVs for suspend/resume (PM or DR) */ 7220 cv_init(&un->un_suspend_cv, NULL, CV_DRIVER, NULL); 7221 cv_init(&un->un_disk_busy_cv, NULL, CV_DRIVER, NULL); 7222 7223 /* Power management support. */ 7224 un->un_power_level = SD_SPINDLE_UNINIT; 7225 7226 cv_init(&un->un_wcc_cv, NULL, CV_DRIVER, NULL); 7227 un->un_f_wcc_inprog = 0; 7228 7229 /* 7230 * The open/close semaphore is used to serialize threads executing 7231 * in the driver's open & close entry point routines for a given 7232 * instance. 7233 */ 7234 (void) sema_init(&un->un_semoclose, 1, NULL, SEMA_DRIVER, NULL); 7235 7236 /* 7237 * Both the conf file entry and the softstate variable act as a forceful 7238 * override, meaning a non-zero value must be entered to change the default. 7239 */ 7240 un->un_f_disksort_disabled = FALSE; 7241 7242 /* 7243 * Retrieve the properties from the static driver table or the driver 7244 * configuration file (.conf) for this unit and update the soft state 7245 * for the device as needed for the indicated properties. 7246 * Note: the property configuration needs to occur here as some of the 7247 * following routines may have dependencies on soft state flags set 7248 * as part of the driver property configuration. 7249 */ 7250 sd_read_unit_properties(un); 7251 SD_TRACE(SD_LOG_ATTACH_DETACH, un, 7252 "sd_unit_attach: un:0x%p property configuration complete.\n", un); 7253 7254 /* 7255 * A device is treated as hotpluggable only if it has the 7256 * "hotpluggable" property. Otherwise, it is 7257 * regarded as non-hotpluggable. 7258 */ 7259 if (ddi_prop_get_int(DDI_DEV_T_ANY, devi, 0, "hotpluggable", 7260 -1) != -1) { 7261 un->un_f_is_hotpluggable = TRUE; 7262 } 7263 7264 /* 7265 * Set the unit's attributes (flags) according to the "hotpluggable" 7266 * property and the RMB bit in the INQUIRY data. 7267 */ 7268 sd_set_unit_attributes(un, devi); 7269 7270 /* 7271 * By default, we mark the capacity, lbasize, and geometry 7272 * as invalid. Only if we successfully read a valid capacity 7273 * will we update the un_blockcount and un_tgt_blocksize with the 7274 * valid values (the geometry will be validated later).
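 * * As a worked illustration (not driver code): a CD-ROM with 2048-byte sectors is still treated below as having DEV_BSIZE (512-byte) blocks and a zero block count; only after a successful READ CAPACITY does sd_update_block_info() install un_tgt_blocksize = 2048 and the real un_blockcount.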
7275 */ 7276 un->un_f_blockcount_is_valid = FALSE; 7277 un->un_f_tgt_blocksize_is_valid = FALSE; 7278 7279 /* 7280 * Use DEV_BSIZE and DEV_BSHIFT as defaults, until we can determine 7281 * otherwise. 7282 */ 7283 un->un_tgt_blocksize = un->un_sys_blocksize = DEV_BSIZE; 7284 un->un_blockcount = 0; 7285 7286 /* 7287 * Set up the per-instance info needed to determine the correct 7288 * CDBs and other info for issuing commands to the target. 7289 */ 7290 sd_init_cdb_limits(un); 7291 7292 /* 7293 * Set up the IO chains to use, based upon the target type. 7294 */ 7295 if (un->un_f_non_devbsize_supported) { 7296 un->un_buf_chain_type = SD_CHAIN_INFO_RMMEDIA; 7297 } else { 7298 un->un_buf_chain_type = SD_CHAIN_INFO_DISK; 7299 } 7300 un->un_uscsi_chain_type = SD_CHAIN_INFO_USCSI_CMD; 7301 un->un_direct_chain_type = SD_CHAIN_INFO_DIRECT_CMD; 7302 un->un_priority_chain_type = SD_CHAIN_INFO_PRIORITY_CMD; 7303 7304 un->un_xbuf_attr = ddi_xbuf_attr_create(sizeof (struct sd_xbuf), 7305 sd_xbuf_strategy, un, sd_xbuf_active_limit, sd_xbuf_reserve_limit, 7306 ddi_driver_major(devi), DDI_XBUF_QTHREAD_DRIVER); 7307 ddi_xbuf_attr_register_devinfo(un->un_xbuf_attr, devi); 7308 7309 7310 if (ISCD(un)) { 7311 un->un_additional_codes = sd_additional_codes; 7312 } else { 7313 un->un_additional_codes = NULL; 7314 } 7315 7316 /* 7317 * Create the kstats here so they can be available for attach-time 7318 * routines that send commands to the unit (either polled or via 7319 * sd_send_scsi_cmd). 7320 * 7321 * Note: This is a critical sequence that needs to be maintained: 7322 * 1) Instantiate the kstats here, before any routines using the 7323 * iopath (i.e. sd_send_scsi_cmd). 7324 * 2) Instantiate and initialize the partition stats 7325 * (sd_set_pstats). 7326 * 3) Initialize the error stats (sd_set_errstats), following 7327 * sd_validate_geometry(),sd_register_devid(), 7328 * and sd_cache_control(). 7329 */ 7330 7331 un->un_stats = kstat_create(sd_label, instance, 7332 NULL, "disk", KSTAT_TYPE_IO, 1, KSTAT_FLAG_PERSISTENT); 7333 if (un->un_stats != NULL) { 7334 un->un_stats->ks_lock = SD_MUTEX(un); 7335 kstat_install(un->un_stats); 7336 } 7337 SD_TRACE(SD_LOG_ATTACH_DETACH, un, 7338 "sd_unit_attach: un:0x%p un_stats created\n", un); 7339 7340 sd_create_errstats(un, instance); 7341 if (un->un_errstats == NULL) { 7342 goto create_errstats_failed; 7343 } 7344 SD_TRACE(SD_LOG_ATTACH_DETACH, un, 7345 "sd_unit_attach: un:0x%p errstats created\n", un); 7346 7347 /* 7348 * The following if/else code was relocated here from below as part 7349 * of the fix for bug (4430280). However with the default setup added 7350 * on entry to this routine, it's no longer absolutely necessary for 7351 * this to be before the call to sd_spin_up_unit. 
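 * * In outline, the tagged queueing block below resolves to one of three settings (a summary of the code that follows, not additional logic): tagged queueing accepted -> un_tagflags = FLAG_STAG, throttle left at sd_max_throttle; untagged queueing only -> un_f_opt_queueing = TRUE, throttle capped at min(un_throttle, 3); neither -> un_f_opt_queueing = FALSE, throttle forced to 1.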
7352 */ 7353 if (SD_IS_PARALLEL_SCSI(un) || SD_IS_SERIAL(un)) { 7354 int tq_trigger_flag = (((devp->sd_inq->inq_ansi == 4) || 7355 (devp->sd_inq->inq_ansi == 5)) && 7356 devp->sd_inq->inq_bque) || devp->sd_inq->inq_cmdque; 7357 7358 /* 7359 * If tagged queueing is supported by the target 7360 * and by the host adapter then we will enable it 7361 */ 7362 un->un_tagflags = 0; 7363 if ((devp->sd_inq->inq_rdf == RDF_SCSI2) && tq_trigger_flag && 7364 (un->un_f_arq_enabled == TRUE)) { 7365 if (scsi_ifsetcap(SD_ADDRESS(un), "tagged-qing", 7366 1, 1) == 1) { 7367 un->un_tagflags = FLAG_STAG; 7368 SD_INFO(SD_LOG_ATTACH_DETACH, un, 7369 "sd_unit_attach: un:0x%p tag queueing " 7370 "enabled\n", un); 7371 } else if (scsi_ifgetcap(SD_ADDRESS(un), 7372 "untagged-qing", 0) == 1) { 7373 un->un_f_opt_queueing = TRUE; 7374 un->un_saved_throttle = un->un_throttle = 7375 min(un->un_throttle, 3); 7376 } else { 7377 un->un_f_opt_queueing = FALSE; 7378 un->un_saved_throttle = un->un_throttle = 1; 7379 } 7380 } else if ((scsi_ifgetcap(SD_ADDRESS(un), "untagged-qing", 0) 7381 == 1) && (un->un_f_arq_enabled == TRUE)) { 7382 /* The Host Adapter supports internal queueing. */ 7383 un->un_f_opt_queueing = TRUE; 7384 un->un_saved_throttle = un->un_throttle = 7385 min(un->un_throttle, 3); 7386 } else { 7387 un->un_f_opt_queueing = FALSE; 7388 un->un_saved_throttle = un->un_throttle = 1; 7389 SD_INFO(SD_LOG_ATTACH_DETACH, un, 7390 "sd_unit_attach: un:0x%p no tag queueing\n", un); 7391 } 7392 7393 /* 7394 * Enable large transfers for SATA/SAS drives 7395 */ 7396 if (SD_IS_SERIAL(un)) { 7397 un->un_max_xfer_size = 7398 ddi_getprop(DDI_DEV_T_ANY, devi, 0, 7399 sd_max_xfer_size, SD_MAX_XFER_SIZE); 7400 SD_INFO(SD_LOG_ATTACH_DETACH, un, 7401 "sd_unit_attach: un:0x%p max transfer " 7402 "size=0x%x\n", un, un->un_max_xfer_size); 7403 7404 } 7405 7406 /* Setup or tear down default wide operations for disks */ 7407 7408 /* 7409 * Note: Legacy: it may be possible for both "sd_max_xfer_size" 7410 * and "ssd_max_xfer_size" to exist simultaneously on the same 7411 * system and be set to different values. In the future this 7412 * code may need to be updated when the ssd module is 7413 * obsoleted and removed from the system. (4299588) 7414 */ 7415 if (SD_IS_PARALLEL_SCSI(un) && 7416 (devp->sd_inq->inq_rdf == RDF_SCSI2) && 7417 (devp->sd_inq->inq_wbus16 || devp->sd_inq->inq_wbus32)) { 7418 if (scsi_ifsetcap(SD_ADDRESS(un), "wide-xfer", 7419 1, 1) == 1) { 7420 SD_INFO(SD_LOG_ATTACH_DETACH, un, 7421 "sd_unit_attach: un:0x%p Wide Transfer " 7422 "enabled\n", un); 7423 } 7424 7425 /* 7426 * If tagged queuing has also been enabled, then 7427 * enable large xfers 7428 */ 7429 if (un->un_saved_throttle == sd_max_throttle) { 7430 un->un_max_xfer_size = 7431 ddi_getprop(DDI_DEV_T_ANY, devi, 0, 7432 sd_max_xfer_size, SD_MAX_XFER_SIZE); 7433 SD_INFO(SD_LOG_ATTACH_DETACH, un, 7434 "sd_unit_attach: un:0x%p max transfer " 7435 "size=0x%x\n", un, un->un_max_xfer_size); 7436 } 7437 } else { 7438 if (scsi_ifsetcap(SD_ADDRESS(un), "wide-xfer", 7439 0, 1) == 1) { 7440 SD_INFO(SD_LOG_ATTACH_DETACH, un, 7441 "sd_unit_attach: un:0x%p " 7442 "Wide Transfer disabled\n", un); 7443 } 7444 } 7445 } else { 7446 un->un_tagflags = FLAG_STAG; 7447 un->un_max_xfer_size = ddi_getprop(DDI_DEV_T_ANY, 7448 devi, 0, sd_max_xfer_size, SD_MAX_XFER_SIZE); 7449 } 7450 7451 /* 7452 * If this target supports LUN reset, try to enable it. 
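 * The call below follows the scsi_ifsetcap(9F) convention: a value of 1 asks for the capability to be enabled, the final argument of 1 scopes the request to this target only, and a return value of 1 means the HBA accepted the change.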
7453 */ 7454 if (un->un_f_lun_reset_enabled) { 7455 if (scsi_ifsetcap(SD_ADDRESS(un), "lun-reset", 1, 1) == 1) { 7456 SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_unit_attach: " 7457 "un:0x%p lun_reset capability set\n", un); 7458 } else { 7459 SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_unit_attach: " 7460 "un:0x%p lun-reset capability not set\n", un); 7461 } 7462 } 7463 7464 /* 7465 * Adjust the maximum transfer size. This fixes 7466 * the problem of partial DMA support on SPARC. Some 7467 * HBA drivers, such as aac, have a very small dma_attr_maxxfer 7468 * size, which requires partial DMA support on SPARC. 7469 * In the future the SPARC pci nexus driver may solve 7470 * the problem instead of this fix. 7471 */ 7472 #if defined(__sparc) 7473 max_xfer_size = scsi_ifgetcap(SD_ADDRESS(un), "dma-max", 1); 7474 if ((max_xfer_size > 0) && (max_xfer_size < un->un_max_xfer_size)) { 7475 un->un_max_xfer_size = max_xfer_size; 7476 un->un_partial_dma_supported = 1; 7477 } 7478 #endif 7479 7480 /* 7481 * Set PKT_DMA_PARTIAL flag. 7482 */ 7483 if (un->un_partial_dma_supported == 1) { 7484 un->un_pkt_flags = PKT_DMA_PARTIAL; 7485 } else { 7486 un->un_pkt_flags = 0; 7487 } 7488 7489 /* Initialize sd_ssc_t for internal uscsi commands */ 7490 ssc = sd_ssc_init(un); 7491 scsi_fm_init(devp); 7492 7493 /* 7494 * Allocate memory for the SCSI FMA structures. 7495 */ 7496 un->un_fm_private = 7497 kmem_zalloc(sizeof (struct sd_fm_internal), KM_SLEEP); 7498 sfip = (struct sd_fm_internal *)un->un_fm_private; 7499 sfip->fm_ssc.ssc_uscsi_cmd = &sfip->fm_ucmd; 7500 sfip->fm_ssc.ssc_uscsi_info = &sfip->fm_uinfo; 7501 sfip->fm_ssc.ssc_un = un; 7502 7503 /* 7504 * At this point in the attach, we have enough info in the 7505 * soft state to be able to issue commands to the target. 7506 * 7507 * All command paths used below MUST issue their commands as 7508 * SD_PATH_DIRECT. This is important as intermediate layers 7509 * are not all initialized yet (such as PM). 7510 */ 7511 7512 /* 7513 * Send a TEST UNIT READY command to the device. This should clear 7514 * any outstanding UNIT ATTENTION that may be present. 7515 * 7516 * Note: Don't check for success; just track whether there is a 7517 * reservation. This is a throw-away command issued to clear any unit attentions. 7518 * 7519 * Note: This MUST be the first command issued to the target during 7520 * attach to ensure power-on UNIT ATTENTION conditions are cleared. 7521 * Pass in flag SD_DONT_RETRY_TUR to prevent the long delays associated 7522 * with attempts at spinning up a device with no media. 7523 */ 7524 status = sd_send_scsi_TEST_UNIT_READY(ssc, SD_DONT_RETRY_TUR); 7525 if (status != 0) { 7526 if (status == EACCES) 7527 reservation_flag = SD_TARGET_IS_RESERVED; 7528 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 7529 } 7530 7531 /* 7532 * If the device is NOT a removable media device, attempt to spin 7533 * it up (using the START_STOP_UNIT command) and read its capacity 7534 * (using the READ CAPACITY command). Note, however, that either 7535 * of these could fail and in some cases we would continue with 7536 * the attach despite the failure (see below). 7537 */ 7538 if (un->un_f_descr_format_supported) { 7539 7540 switch (sd_spin_up_unit(ssc)) { 7541 case 0: 7542 /* 7543 * Spin-up was successful; now try to read the 7544 * capacity. If successful then save the results 7545 * and mark the capacity & lbasize as valid.
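 * * A worked example for the size check that follows, assuming 512-byte blocks: a full 1TB disk reports a capacity of 2^31 blocks, one more than DK_MAX_BLOCKS (2^31 - 1), so it trips the large-capacity handling below.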
7546 */ 7547 SD_TRACE(SD_LOG_ATTACH_DETACH, un, 7548 "sd_unit_attach: un:0x%p spin-up successful\n", un); 7549 7550 status = sd_send_scsi_READ_CAPACITY(ssc, &capacity, 7551 &lbasize, SD_PATH_DIRECT); 7552 7553 switch (status) { 7554 case 0: { 7555 if (capacity > DK_MAX_BLOCKS) { 7556 #ifdef _LP64 7557 if ((capacity + 1) > 7558 SD_GROUP1_MAX_ADDRESS) { 7559 /* 7560 * Enable descriptor format 7561 * sense data so that we can 7562 * get 64 bit sense data 7563 * fields. 7564 */ 7565 sd_enable_descr_sense(ssc); 7566 } 7567 #else 7568 /* 32-bit kernels can't handle this */ 7569 scsi_log(SD_DEVINFO(un), 7570 sd_label, CE_WARN, 7571 "disk has %llu blocks, which " 7572 "is too large for a 32-bit " 7573 "kernel", capacity); 7574 7575 #if defined(__i386) || defined(__amd64) 7576 /* 7577 * A 1TB disk was treated as (1TB - 512B) 7578 * in the past, so it might have a 7579 * valid VTOC and Solaris partitions; 7580 * we have to allow it to continue to 7581 * work. 7582 */ 7583 if (capacity - 1 > DK_MAX_BLOCKS) 7584 #endif 7585 goto spinup_failed; 7586 #endif 7587 } 7588 7589 /* 7590 * Here it is not necessary to check whether 7591 * the capacity of the device is bigger than 7592 * what the max hba cdb can support, because 7593 * sd_send_scsi_READ_CAPACITY retrieves 7594 * the capacity by sending a USCSI command, 7595 * which is itself constrained by the max hba 7596 * cdb. In fact, sd_send_scsi_READ_CAPACITY 7597 * returns EINVAL when a bigger cdb than the 7598 * supported cdb length is required; that 7599 * case is handled in "case EINVAL" below. 7600 */ 7601 7602 /* 7603 * The following relies on 7604 * sd_send_scsi_READ_CAPACITY never 7605 * returning 0 for capacity and/or lbasize. 7606 */ 7607 sd_update_block_info(un, lbasize, capacity); 7608 7609 SD_INFO(SD_LOG_ATTACH_DETACH, un, 7610 "sd_unit_attach: un:0x%p capacity = %ld " 7611 "blocks; lbasize= %ld.\n", un, 7612 un->un_blockcount, un->un_tgt_blocksize); 7613 7614 break; 7615 } 7616 case EINVAL: 7617 /* 7618 * In the case where the max-cdb-length property 7619 * is smaller than the required CDB length for 7620 * a SCSI device, a target driver can fail to 7621 * attach to that device. 7622 */ 7623 scsi_log(SD_DEVINFO(un), 7624 sd_label, CE_WARN, 7625 "disk capacity is too large " 7626 "for current cdb length"); 7627 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 7628 7629 goto spinup_failed; 7630 case EACCES: 7631 /* 7632 * Should never get here if the spin-up 7633 * succeeded, but handle it anyway. 7634 * From here, just continue with the attach... 7635 */ 7636 SD_INFO(SD_LOG_ATTACH_DETACH, un, 7637 "sd_unit_attach: un:0x%p " 7638 "sd_send_scsi_READ_CAPACITY " 7639 "returned reservation conflict\n", un); 7640 reservation_flag = SD_TARGET_IS_RESERVED; 7641 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 7642 break; 7643 default: 7644 /* 7645 * Likewise, should never get here if the 7646 * spin-up succeeded. Just continue with 7647 * the attach... 7648 */ 7649 if (status == EIO) 7650 sd_ssc_assessment(ssc, 7651 SD_FMT_STATUS_CHECK); 7652 else 7653 sd_ssc_assessment(ssc, 7654 SD_FMT_IGNORE); 7655 break; 7656 } 7657 break; 7658 case EACCES: 7659 /* 7660 * Device is reserved by another host. In this case 7661 * we could not spin it up or read the capacity, but 7662 * we continue with the attach anyway. 7663 */ 7664 SD_INFO(SD_LOG_ATTACH_DETACH, un, 7665 "sd_unit_attach: un:0x%p spin-up reservation " 7666 "conflict.\n", un); 7667 reservation_flag = SD_TARGET_IS_RESERVED; 7668 break; 7669 default: 7670 /* Fail the attach if the spin-up failed.
*/ 7671 SD_INFO(SD_LOG_ATTACH_DETACH, un, 7672 "sd_unit_attach: un:0x%p spin-up failed.", un); 7673 goto spinup_failed; 7674 } 7675 7676 } 7677 7678 /* 7679 * Check to see if this is an MMC drive 7680 */ 7681 if (ISCD(un)) { 7682 sd_set_mmc_caps(ssc); 7683 } 7684 7685 7686 /* 7687 * Add a zero-length attribute to tell the world we support 7688 * kernel ioctls (for layered drivers) 7689 */ 7690 (void) ddi_prop_create(DDI_DEV_T_NONE, devi, DDI_PROP_CANSLEEP, 7691 DDI_KERNEL_IOCTL, NULL, 0); 7692 7693 /* 7694 * Add a boolean property to tell the world we support 7695 * the B_FAILFAST flag (for layered drivers) 7696 */ 7697 (void) ddi_prop_create(DDI_DEV_T_NONE, devi, DDI_PROP_CANSLEEP, 7698 "ddi-failfast-supported", NULL, 0); 7699 7700 /* 7701 * Initialize power management 7702 */ 7703 mutex_init(&un->un_pm_mutex, NULL, MUTEX_DRIVER, NULL); 7704 cv_init(&un->un_pm_busy_cv, NULL, CV_DRIVER, NULL); 7705 sd_setup_pm(ssc, devi); 7706 if (un->un_f_pm_is_enabled == FALSE) { 7707 /* 7708 * For performance, point to a jump table that does 7709 * not include pm. 7710 * The direct and priority chains don't change with PM. 7711 * 7712 * Note: this is currently done based on individual device 7713 * capabilities. When an interface for determining system 7714 * power enabled state becomes available, or when additional 7715 * layers are added to the command chain, these values will 7716 * have to be re-evaluated for correctness. 7717 */ 7718 if (un->un_f_non_devbsize_supported) { 7719 un->un_buf_chain_type = SD_CHAIN_INFO_RMMEDIA_NO_PM; 7720 } else { 7721 un->un_buf_chain_type = SD_CHAIN_INFO_DISK_NO_PM; 7722 } 7723 un->un_uscsi_chain_type = SD_CHAIN_INFO_USCSI_CMD_NO_PM; 7724 } 7725 7726 /* 7727 * This property is set to 0 by HA software to avoid retries 7728 * on a reserved disk. (The preferred property name is 7729 * "retry-on-reservation-conflict") (1189689) 7730 * 7731 * Note: The use of a global here can have unintended consequences. A 7732 * per-instance variable is preferable to match the capabilities of 7733 * different underlying HBAs. (4402600) 7734 */ 7735 sd_retry_on_reservation_conflict = ddi_getprop(DDI_DEV_T_ANY, devi, 7736 DDI_PROP_DONTPASS, "retry-on-reservation-conflict", 7737 sd_retry_on_reservation_conflict); 7738 if (sd_retry_on_reservation_conflict != 0) { 7739 sd_retry_on_reservation_conflict = ddi_getprop(DDI_DEV_T_ANY, 7740 devi, DDI_PROP_DONTPASS, sd_resv_conflict_name, 7741 sd_retry_on_reservation_conflict); 7742 } 7743 7744 /* Set up options for QFULL handling. */ 7745 if ((rval = ddi_getprop(DDI_DEV_T_ANY, devi, 0, 7746 "qfull-retries", -1)) != -1) { 7747 (void) scsi_ifsetcap(SD_ADDRESS(un), "qfull-retries", 7748 rval, 1); 7749 } 7750 if ((rval = ddi_getprop(DDI_DEV_T_ANY, devi, 0, 7751 "qfull-retry-interval", -1)) != -1) { 7752 (void) scsi_ifsetcap(SD_ADDRESS(un), "qfull-retry-interval", 7753 rval, 1); 7754 } 7755 7756 /* 7757 * This just prints a message that announces the existence of the 7758 * device. The message is always printed in the system logfile, but 7759 * only appears on the console if the system is booted with the 7760 * -v (verbose) argument.
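 * A typical announcement looks like "sd0 at mpt0: target 0 lun 0" (illustrative only; the exact text comes from the DDI framework and the parent nexus driver).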
7761 */ 7762 ddi_report_dev(devi); 7763 7764 un->un_mediastate = DKIO_NONE; 7765 7766 cmlb_alloc_handle(&un->un_cmlbhandle); 7767 7768 #if defined(__i386) || defined(__amd64) 7769 /* 7770 * On x86, compensate for off-by-1 legacy error 7771 */ 7772 if (!un->un_f_has_removable_media && !un->un_f_is_hotpluggable && 7773 (lbasize == un->un_sys_blocksize)) 7774 offbyone = CMLB_OFF_BY_ONE; 7775 #endif 7776 7777 if (cmlb_attach(devi, &sd_tgops, (int)devp->sd_inq->inq_dtype, 7778 un->un_f_has_removable_media, un->un_f_is_hotpluggable, 7779 un->un_node_type, offbyone, un->un_cmlbhandle, 7780 (void *)SD_PATH_DIRECT) != 0) { 7781 goto cmlb_attach_failed; 7782 } 7783 7784 7785 /* 7786 * Read and validate the device's geometry (ie, disk label) 7787 * A new unformatted drive will not have a valid geometry, but 7788 * the driver needs to successfully attach to this device so 7789 * the drive can be formatted via ioctls. 7790 */ 7791 geom_label_valid = (cmlb_validate(un->un_cmlbhandle, 0, 7792 (void *)SD_PATH_DIRECT) == 0) ? 1: 0; 7793 7794 mutex_enter(SD_MUTEX(un)); 7795 7796 /* 7797 * Read and initialize the devid for the unit. 7798 */ 7799 if (un->un_f_devid_supported) { 7800 sd_register_devid(ssc, devi, reservation_flag); 7801 } 7802 mutex_exit(SD_MUTEX(un)); 7803 7804 #if (defined(__fibre)) 7805 /* 7806 * Register callbacks for fibre only. You can't do this solely 7807 * on the basis of the devid_type because this is hba specific. 7808 * We need to query our hba capabilities to find out whether to 7809 * register or not. 7810 */ 7811 if (un->un_f_is_fibre) { 7812 if (strcmp(un->un_node_type, DDI_NT_BLOCK_CHAN)) { 7813 sd_init_event_callbacks(un); 7814 SD_TRACE(SD_LOG_ATTACH_DETACH, un, 7815 "sd_unit_attach: un:0x%p event callbacks inserted", 7816 un); 7817 } 7818 } 7819 #endif 7820 7821 if (un->un_f_opt_disable_cache == TRUE) { 7822 /* 7823 * Disable both read cache and write cache. This is 7824 * the historic behavior of the keywords in the config file. 7825 */ 7826 if (sd_cache_control(ssc, SD_CACHE_DISABLE, SD_CACHE_DISABLE) != 7827 0) { 7828 SD_ERROR(SD_LOG_ATTACH_DETACH, un, 7829 "sd_unit_attach: un:0x%p Could not disable " 7830 "caching", un); 7831 goto devid_failed; 7832 } 7833 } 7834 7835 /* 7836 * Check the value of the WCE bit now and 7837 * set un_f_write_cache_enabled accordingly. 7838 */ 7839 (void) sd_get_write_cache_enabled(ssc, &wc_enabled); 7840 mutex_enter(SD_MUTEX(un)); 7841 un->un_f_write_cache_enabled = (wc_enabled != 0); 7842 mutex_exit(SD_MUTEX(un)); 7843 7844 /* 7845 * Check the value of the NV_SUP bit and set 7846 * un_f_suppress_cache_flush accordingly. 7847 */ 7848 sd_get_nv_sup(ssc); 7849 7850 /* 7851 * Find out what type of reservation this disk supports. 7852 */ 7853 status = sd_send_scsi_PERSISTENT_RESERVE_IN(ssc, SD_READ_KEYS, 0, NULL); 7854 7855 switch (status) { 7856 case 0: 7857 /* 7858 * SCSI-3 reservations are supported. 7859 */ 7860 un->un_reservation_type = SD_SCSI3_RESERVATION; 7861 SD_INFO(SD_LOG_ATTACH_DETACH, un, 7862 "sd_unit_attach: un:0x%p SCSI-3 reservations\n", un); 7863 break; 7864 case ENOTSUP: 7865 /* 7866 * The PERSISTENT RESERVE IN command would not be recognized by 7867 * a SCSI-2 device, so assume the reservation type is SCSI-2. 
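 * (A SCSI-2 target would typically reject the unknown opcode with CHECK CONDITION/ILLEGAL REQUEST, which is surfaced here as ENOTSUP; this is an inference from this error-handling path, not a statement from the SCSI-2 specification.)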
7868 */ 7869 SD_INFO(SD_LOG_ATTACH_DETACH, un, 7870 "sd_unit_attach: un:0x%p SCSI-2 reservations\n", un); 7871 un->un_reservation_type = SD_SCSI2_RESERVATION; 7872 7873 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 7874 break; 7875 default: 7876 /* 7877 * default to SCSI-3 reservations 7878 */ 7879 SD_INFO(SD_LOG_ATTACH_DETACH, un, 7880 "sd_unit_attach: un:0x%p default SCSI3 reservations\n", un); 7881 un->un_reservation_type = SD_SCSI3_RESERVATION; 7882 7883 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 7884 break; 7885 } 7886 7887 /* 7888 * Set the pstat and error stat values here, so data obtained during the 7889 * previous attach-time routines is available. 7890 * 7891 * Note: This is a critical sequence that needs to be maintained: 7892 * 1) Instantiate the kstats before any routines using the iopath 7893 * (i.e. sd_send_scsi_cmd). 7894 * 2) Initialize the error stats (sd_set_errstats) and partition 7895 * stats (sd_set_pstats)here, following 7896 * cmlb_validate_geometry(), sd_register_devid(), and 7897 * sd_cache_control(). 7898 */ 7899 7900 if (un->un_f_pkstats_enabled && geom_label_valid) { 7901 sd_set_pstats(un); 7902 SD_TRACE(SD_LOG_IO_PARTITION, un, 7903 "sd_unit_attach: un:0x%p pstats created and set\n", un); 7904 } 7905 7906 sd_set_errstats(un); 7907 SD_TRACE(SD_LOG_ATTACH_DETACH, un, 7908 "sd_unit_attach: un:0x%p errstats set\n", un); 7909 7910 7911 /* 7912 * After successfully attaching an instance, we record the information 7913 * of how many luns have been attached on the relative target and 7914 * controller for parallel SCSI. This information is used when sd tries 7915 * to set the tagged queuing capability in HBA. 7916 */ 7917 if (SD_IS_PARALLEL_SCSI(un) && (tgt >= 0) && (tgt < NTARGETS_WIDE)) { 7918 sd_scsi_update_lun_on_target(pdip, tgt, SD_SCSI_LUN_ATTACH); 7919 } 7920 7921 SD_TRACE(SD_LOG_ATTACH_DETACH, un, 7922 "sd_unit_attach: un:0x%p exit success\n", un); 7923 7924 /* Uninitialize sd_ssc_t pointer */ 7925 sd_ssc_fini(ssc); 7926 7927 return (DDI_SUCCESS); 7928 7929 /* 7930 * An error occurred during the attach; clean up & return failure. 7931 */ 7932 7933 devid_failed: 7934 7935 setup_pm_failed: 7936 ddi_remove_minor_node(devi, NULL); 7937 7938 cmlb_attach_failed: 7939 /* 7940 * Cleanup from the scsi_ifsetcap() calls (437868) 7941 */ 7942 (void) scsi_ifsetcap(SD_ADDRESS(un), "lun-reset", 0, 1); 7943 (void) scsi_ifsetcap(SD_ADDRESS(un), "wide-xfer", 0, 1); 7944 7945 /* 7946 * Refer to the comments of setting tagged-qing in the beginning of 7947 * sd_unit_attach. We can only disable tagged queuing when there is 7948 * no lun attached on the target. 7949 */ 7950 if (sd_scsi_get_target_lun_count(pdip, tgt) < 1) { 7951 (void) scsi_ifsetcap(SD_ADDRESS(un), "tagged-qing", 0, 1); 7952 } 7953 7954 if (un->un_f_is_fibre == FALSE) { 7955 (void) scsi_ifsetcap(SD_ADDRESS(un), "auto-rqsense", 0, 1); 7956 } 7957 7958 spinup_failed: 7959 7960 /* Uninitialize sd_ssc_t pointer */ 7961 sd_ssc_fini(ssc); 7962 7963 mutex_enter(SD_MUTEX(un)); 7964 7965 /* Deallocate SCSI FMA memory spaces */ 7966 kmem_free(un->un_fm_private, sizeof (struct sd_fm_internal)); 7967 7968 /* Cancel callback for SD_PATH_DIRECT_PRIORITY cmd. 
restart */ 7969 if (un->un_direct_priority_timeid != NULL) { 7970 timeout_id_t temp_id = un->un_direct_priority_timeid; 7971 un->un_direct_priority_timeid = NULL; 7972 mutex_exit(SD_MUTEX(un)); 7973 (void) untimeout(temp_id); 7974 mutex_enter(SD_MUTEX(un)); 7975 } 7976 7977 /* Cancel any pending start/stop timeouts */ 7978 if (un->un_startstop_timeid != NULL) { 7979 timeout_id_t temp_id = un->un_startstop_timeid; 7980 un->un_startstop_timeid = NULL; 7981 mutex_exit(SD_MUTEX(un)); 7982 (void) untimeout(temp_id); 7983 mutex_enter(SD_MUTEX(un)); 7984 } 7985 7986 /* Cancel any pending reset-throttle timeouts */ 7987 if (un->un_reset_throttle_timeid != NULL) { 7988 timeout_id_t temp_id = un->un_reset_throttle_timeid; 7989 un->un_reset_throttle_timeid = NULL; 7990 mutex_exit(SD_MUTEX(un)); 7991 (void) untimeout(temp_id); 7992 mutex_enter(SD_MUTEX(un)); 7993 } 7994 7995 /* Cancel any pending retry timeouts */ 7996 if (un->un_retry_timeid != NULL) { 7997 timeout_id_t temp_id = un->un_retry_timeid; 7998 un->un_retry_timeid = NULL; 7999 mutex_exit(SD_MUTEX(un)); 8000 (void) untimeout(temp_id); 8001 mutex_enter(SD_MUTEX(un)); 8002 } 8003 8004 /* Cancel any pending delayed cv broadcast timeouts */ 8005 if (un->un_dcvb_timeid != NULL) { 8006 timeout_id_t temp_id = un->un_dcvb_timeid; 8007 un->un_dcvb_timeid = NULL; 8008 mutex_exit(SD_MUTEX(un)); 8009 (void) untimeout(temp_id); 8010 mutex_enter(SD_MUTEX(un)); 8011 } 8012 8013 mutex_exit(SD_MUTEX(un)); 8014 8015 /* There should not be any in-progress I/O so ASSERT this check */ 8016 ASSERT(un->un_ncmds_in_transport == 0); 8017 ASSERT(un->un_ncmds_in_driver == 0); 8018 8019 /* Do not free the softstate if the callback routine is active */ 8020 sd_sync_with_callback(un); 8021 8022 /* 8023 * Partition stats apparently are not used with removables. These would 8024 * not have been created during attach, so no need to clean them up... 8025 */ 8026 if (un->un_errstats != NULL) { 8027 kstat_delete(un->un_errstats); 8028 un->un_errstats = NULL; 8029 } 8030 8031 create_errstats_failed: 8032 8033 if (un->un_stats != NULL) { 8034 kstat_delete(un->un_stats); 8035 un->un_stats = NULL; 8036 } 8037 8038 ddi_xbuf_attr_unregister_devinfo(un->un_xbuf_attr, devi); 8039 ddi_xbuf_attr_destroy(un->un_xbuf_attr); 8040 8041 ddi_prop_remove_all(devi); 8042 sema_destroy(&un->un_semoclose); 8043 cv_destroy(&un->un_state_cv); 8044 8045 getrbuf_failed: 8046 8047 sd_free_rqs(un); 8048 8049 alloc_rqs_failed: 8050 8051 devp->sd_private = NULL; 8052 bzero(un, sizeof (struct sd_lun)); /* Clear any stale data! */ 8053 8054 get_softstate_failed: 8055 /* 8056 * Note: the man pages are unclear as to whether or not doing a 8057 * ddi_soft_state_free(sd_state, instance) is the right way to 8058 * clean up after the ddi_soft_state_zalloc() if the subsequent 8059 * ddi_get_soft_state() fails. The implication seems to be 8060 * that the get_soft_state cannot fail if the zalloc succeeds. 8061 */ 8062 ddi_soft_state_free(sd_state, instance); 8063 8064 probe_failed: 8065 scsi_unprobe(devp); 8066 8067 return (DDI_FAILURE); 8068 } 8069 8070 8071 /* 8072 * Function: sd_unit_detach 8073 * 8074 * Description: Performs DDI_DETACH processing for sddetach(). 
8075 * 8076 * Return Code: DDI_SUCCESS 8077 * DDI_FAILURE 8078 * 8079 * Context: Kernel thread context 8080 */ 8081 8082 static int 8083 sd_unit_detach(dev_info_t *devi) 8084 { 8085 struct scsi_device *devp; 8086 struct sd_lun *un; 8087 int i; 8088 int tgt; 8089 dev_t dev; 8090 dev_info_t *pdip = ddi_get_parent(devi); 8091 int instance = ddi_get_instance(devi); 8092 8093 mutex_enter(&sd_detach_mutex); 8094 8095 /* 8096 * Fail the detach for any of the following: 8097 * - Unable to get the sd_lun struct for the instance 8098 * - A layered driver has an outstanding open on the instance 8099 * - Another thread is already detaching this instance 8100 * - Another thread is currently performing an open 8101 */ 8102 devp = ddi_get_driver_private(devi); 8103 if ((devp == NULL) || 8104 ((un = (struct sd_lun *)devp->sd_private) == NULL) || 8105 (un->un_ncmds_in_driver != 0) || (un->un_layer_count != 0) || 8106 (un->un_detach_count != 0) || (un->un_opens_in_progress != 0)) { 8107 mutex_exit(&sd_detach_mutex); 8108 return (DDI_FAILURE); 8109 } 8110 8111 SD_TRACE(SD_LOG_ATTACH_DETACH, un, "sd_unit_detach: entry 0x%p\n", un); 8112 8113 /* 8114 * Mark this instance as currently in a detach, to inhibit any 8115 * opens from a layered driver. 8116 */ 8117 un->un_detach_count++; 8118 mutex_exit(&sd_detach_mutex); 8119 8120 tgt = ddi_prop_get_int(DDI_DEV_T_ANY, devi, DDI_PROP_DONTPASS, 8121 SCSI_ADDR_PROP_TARGET, -1); 8122 8123 dev = sd_make_device(SD_DEVINFO(un)); 8124 8125 #ifndef lint 8126 _NOTE(COMPETING_THREADS_NOW); 8127 #endif 8128 8129 mutex_enter(SD_MUTEX(un)); 8130 8131 /* 8132 * Fail the detach if there are any outstanding layered 8133 * opens on this device. 8134 */ 8135 for (i = 0; i < NDKMAP; i++) { 8136 if (un->un_ocmap.lyropen[i] != 0) { 8137 goto err_notclosed; 8138 } 8139 } 8140 8141 /* 8142 * Verify there are NO outstanding commands issued to this device. 8143 * ie, un_ncmds_in_transport == 0. 8144 * It's possible to have outstanding commands through the physio 8145 * code path, even though everything's closed. 8146 */ 8147 if ((un->un_ncmds_in_transport != 0) || (un->un_retry_timeid != NULL) || 8148 (un->un_direct_priority_timeid != NULL) || 8149 (un->un_state == SD_STATE_RWAIT)) { 8150 mutex_exit(SD_MUTEX(un)); 8151 SD_ERROR(SD_LOG_ATTACH_DETACH, un, 8152 "sd_dr_detach: Detach failure due to outstanding cmds\n"); 8153 goto err_stillbusy; 8154 } 8155 8156 /* 8157 * If we have the device reserved, release the reservation. 8158 */ 8159 if ((un->un_resvd_status & SD_RESERVE) && 8160 !(un->un_resvd_status & SD_LOST_RESERVE)) { 8161 mutex_exit(SD_MUTEX(un)); 8162 /* 8163 * Note: sd_reserve_release sends a command to the device 8164 * via the sd_ioctlcmd() path, and can sleep. 8165 */ 8166 if (sd_reserve_release(dev, SD_RELEASE) != 0) { 8167 SD_ERROR(SD_LOG_ATTACH_DETACH, un, 8168 "sd_dr_detach: Cannot release reservation \n"); 8169 } 8170 } else { 8171 mutex_exit(SD_MUTEX(un)); 8172 } 8173 8174 /* 8175 * Untimeout any reserve recover, throttle reset, restart unit 8176 * and delayed broadcast timeout threads. Protect the timeout pointer 8177 * from getting nulled by their callback functions. 
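 * The cancellation pattern used below is the standard untimeout(9F) idiom: copy the id into a local, clear the soft state field, drop SD_MUTEX so that a callback blocked on the mutex can run to completion, call untimeout(), then reacquire the mutex. Calling untimeout() with SD_MUTEX held could deadlock against a callback that acquires it.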
8178 */ 8179 mutex_enter(SD_MUTEX(un)); 8180 if (un->un_resvd_timeid != NULL) { 8181 timeout_id_t temp_id = un->un_resvd_timeid; 8182 un->un_resvd_timeid = NULL; 8183 mutex_exit(SD_MUTEX(un)); 8184 (void) untimeout(temp_id); 8185 mutex_enter(SD_MUTEX(un)); 8186 } 8187 8188 if (un->un_reset_throttle_timeid != NULL) { 8189 timeout_id_t temp_id = un->un_reset_throttle_timeid; 8190 un->un_reset_throttle_timeid = NULL; 8191 mutex_exit(SD_MUTEX(un)); 8192 (void) untimeout(temp_id); 8193 mutex_enter(SD_MUTEX(un)); 8194 } 8195 8196 if (un->un_startstop_timeid != NULL) { 8197 timeout_id_t temp_id = un->un_startstop_timeid; 8198 un->un_startstop_timeid = NULL; 8199 mutex_exit(SD_MUTEX(un)); 8200 (void) untimeout(temp_id); 8201 mutex_enter(SD_MUTEX(un)); 8202 } 8203 8204 if (un->un_dcvb_timeid != NULL) { 8205 timeout_id_t temp_id = un->un_dcvb_timeid; 8206 un->un_dcvb_timeid = NULL; 8207 mutex_exit(SD_MUTEX(un)); 8208 (void) untimeout(temp_id); 8209 } else { 8210 mutex_exit(SD_MUTEX(un)); 8211 } 8212 8213 /* Remove any pending reservation reclaim requests for this device */ 8214 sd_rmv_resv_reclaim_req(dev); 8215 8216 mutex_enter(SD_MUTEX(un)); 8217 8218 /* Cancel any pending callbacks for SD_PATH_DIRECT_PRIORITY cmd. */ 8219 if (un->un_direct_priority_timeid != NULL) { 8220 timeout_id_t temp_id = un->un_direct_priority_timeid; 8221 un->un_direct_priority_timeid = NULL; 8222 mutex_exit(SD_MUTEX(un)); 8223 (void) untimeout(temp_id); 8224 mutex_enter(SD_MUTEX(un)); 8225 } 8226 8227 /* Cancel any active multi-host disk watch thread requests */ 8228 if (un->un_mhd_token != NULL) { 8229 mutex_exit(SD_MUTEX(un)); 8230 _NOTE(DATA_READABLE_WITHOUT_LOCK(sd_lun::un_mhd_token)); 8231 if (scsi_watch_request_terminate(un->un_mhd_token, 8232 SCSI_WATCH_TERMINATE_NOWAIT)) { 8233 SD_ERROR(SD_LOG_ATTACH_DETACH, un, 8234 "sd_dr_detach: Cannot cancel mhd watch request\n"); 8235 /* 8236 * Note: We are returning here after having removed 8237 * some driver timeouts above. This is consistent with 8238 * the legacy implementation but perhaps the watch 8239 * terminate call should be made with the wait flag set. 8240 */ 8241 goto err_stillbusy; 8242 } 8243 mutex_enter(SD_MUTEX(un)); 8244 un->un_mhd_token = NULL; 8245 } 8246 8247 if (un->un_swr_token != NULL) { 8248 mutex_exit(SD_MUTEX(un)); 8249 _NOTE(DATA_READABLE_WITHOUT_LOCK(sd_lun::un_swr_token)); 8250 if (scsi_watch_request_terminate(un->un_swr_token, 8251 SCSI_WATCH_TERMINATE_NOWAIT)) { 8252 SD_ERROR(SD_LOG_ATTACH_DETACH, un, 8253 "sd_dr_detach: Cannot cancel swr watch request\n"); 8254 /* 8255 * Note: We are returning here after having removed 8256 * some driver timeouts above. This is consistent with 8257 * the legacy implementation but perhaps the watch 8258 * terminate call should be made with the wait flag set. 8259 */ 8260 goto err_stillbusy; 8261 } 8262 mutex_enter(SD_MUTEX(un)); 8263 un->un_swr_token = NULL; 8264 } 8265 8266 mutex_exit(SD_MUTEX(un)); 8267 8268 /* 8269 * Clear any scsi_reset_notifies. We clear the reset notifies 8270 * if we have not registered one. 8271 * Note: The sd_mhd_reset_notify_cb() fn tries to acquire SD_MUTEX! 8272 */ 8273 (void) scsi_reset_notify(SD_ADDRESS(un), SCSI_RESET_CANCEL, 8274 sd_mhd_reset_notify_cb, (caddr_t)un); 8275 8276 /* 8277 * protect the timeout pointers from getting nulled by 8278 * their callback functions during the cancellation process. 8279 * In such a scenario untimeout can be invoked with a null value. 
8280 */ 8281 _NOTE(NO_COMPETING_THREADS_NOW); 8282 8283 mutex_enter(&un->un_pm_mutex); 8284 if (un->un_pm_idle_timeid != NULL) { 8285 timeout_id_t temp_id = un->un_pm_idle_timeid; 8286 un->un_pm_idle_timeid = NULL; 8287 mutex_exit(&un->un_pm_mutex); 8288 8289 /* 8290 * Timeout is active; cancel it. 8291 * Note that it'll never be active on a device 8292 * that does not support PM; therefore we don't 8293 * have to check before calling pm_idle_component. 8294 */ 8295 (void) untimeout(temp_id); 8296 (void) pm_idle_component(SD_DEVINFO(un), 0); 8297 mutex_enter(&un->un_pm_mutex); 8298 } 8299 8300 /* 8301 * Check whether there is already a timeout scheduled for power 8302 * management. If so, don't lower the power here; that's 8303 * the timeout handler's job. 8304 */ 8305 if (un->un_pm_timeid != NULL) { 8306 timeout_id_t temp_id = un->un_pm_timeid; 8307 un->un_pm_timeid = NULL; 8308 mutex_exit(&un->un_pm_mutex); 8309 /* 8310 * Timeout is active; cancel it. 8311 * Note that it'll never be active on a device 8312 * that does not support PM; therefore we don't 8313 * have to check before calling pm_idle_component. 8314 */ 8315 (void) untimeout(temp_id); 8316 (void) pm_idle_component(SD_DEVINFO(un), 0); 8317 8318 } else { 8319 mutex_exit(&un->un_pm_mutex); 8320 if ((un->un_f_pm_is_enabled == TRUE) && 8321 (pm_lower_power(SD_DEVINFO(un), 0, SD_SPINDLE_OFF) != 8322 DDI_SUCCESS)) { 8323 SD_ERROR(SD_LOG_ATTACH_DETACH, un, 8324 "sd_dr_detach: Lower power request failed, ignoring.\n"); 8325 /* 8326 * Fix for bug: 4297749, item # 13 8327 * The above test now includes a check to see if PM is 8328 * supported by this device before calling 8329 * pm_lower_power(). 8330 * Note, the following is not dead code. The call to 8331 * pm_lower_power above will generate a call back into 8332 * our sdpower routine which might result in a timeout 8333 * handler getting activated. Therefore the following 8334 * code is valid and necessary. 8335 */ 8336 mutex_enter(&un->un_pm_mutex); 8337 if (un->un_pm_timeid != NULL) { 8338 timeout_id_t temp_id = un->un_pm_timeid; 8339 un->un_pm_timeid = NULL; 8340 mutex_exit(&un->un_pm_mutex); 8341 (void) untimeout(temp_id); 8342 (void) pm_idle_component(SD_DEVINFO(un), 0); 8343 } else { 8344 mutex_exit(&un->un_pm_mutex); 8345 } 8346 } 8347 } 8348 8349 /* 8350 * Cleanup from the scsi_ifsetcap() calls (437868) 8351 * Relocated here from above to be after the call to 8352 * pm_lower_power, which was getting errors. 8353 */ 8354 (void) scsi_ifsetcap(SD_ADDRESS(un), "lun-reset", 0, 1); 8355 (void) scsi_ifsetcap(SD_ADDRESS(un), "wide-xfer", 0, 1); 8356 8357 /* 8358 * Currently, tagged queuing is supported per target by the HBA. 8359 * Setting it for one lun instance actually sets the capability of the 8360 * whole target in the HBA, which affects those luns already attached on 8361 * the same target. So during detach, we can only disable this capability 8362 * when this is the only lun left on the target. By doing 8363 * this, we assume a target has the same tagged queuing capability 8364 * for every lun. The condition can be removed when HBAs are changed to 8365 * support per-lun tagged queuing capability.
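 * For example, with luns 0 and 1 both attached on target 3 (an illustrative configuration), clearing "tagged-qing" while detaching lun 1 would also strip the capability from the still-attached lun 0; hence the lun-count guard below.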
8366 */ 8367 if (sd_scsi_get_target_lun_count(pdip, tgt) <= 1) { 8368 (void) scsi_ifsetcap(SD_ADDRESS(un), "tagged-qing", 0, 1); 8369 } 8370 8371 if (un->un_f_is_fibre == FALSE) { 8372 (void) scsi_ifsetcap(SD_ADDRESS(un), "auto-rqsense", 0, 1); 8373 } 8374 8375 /* 8376 * Remove any event callbacks, fibre only 8377 */ 8378 if (un->un_f_is_fibre == TRUE) { 8379 if ((un->un_insert_event != NULL) && 8380 (ddi_remove_event_handler(un->un_insert_cb_id) != 8381 DDI_SUCCESS)) { 8382 /* 8383 * Note: We are returning here after having done 8384 * substantial cleanup above. This is consistent 8385 * with the legacy implementation but this may not 8386 * be the right thing to do. 8387 */ 8388 SD_ERROR(SD_LOG_ATTACH_DETACH, un, 8389 "sd_dr_detach: Cannot cancel insert event\n"); 8390 goto err_remove_event; 8391 } 8392 un->un_insert_event = NULL; 8393 8394 if ((un->un_remove_event != NULL) && 8395 (ddi_remove_event_handler(un->un_remove_cb_id) != 8396 DDI_SUCCESS)) { 8397 /* 8398 * Note: We are returning here after having done 8399 * substantial cleanup above. This is consistent 8400 * with the legacy implementation but this may not 8401 * be the right thing to do. 8402 */ 8403 SD_ERROR(SD_LOG_ATTACH_DETACH, un, 8404 "sd_dr_detach: Cannot cancel remove event\n"); 8405 goto err_remove_event; 8406 } 8407 un->un_remove_event = NULL; 8408 } 8409 8410 /* Do not free the softstate if the callback routine is active */ 8411 sd_sync_with_callback(un); 8412 8413 cmlb_detach(un->un_cmlbhandle, (void *)SD_PATH_DIRECT); 8414 cmlb_free_handle(&un->un_cmlbhandle); 8415 8416 /* 8417 * Hold the detach mutex here, to make sure that no other thread can 8418 * ever access a (partially) freed soft state structure. 8419 */ 8420 mutex_enter(&sd_detach_mutex); 8421 8422 /* 8423 * Clean up the soft state struct. 8424 * Cleanup is done in reverse order of allocs/inits. 8425 * At this point there should be no competing threads anymore. 8426 */ 8427 8428 scsi_fm_fini(devp); 8429 8430 /* 8431 * Deallocate memory for SCSI FMA. 8432 */ 8433 kmem_free(un->un_fm_private, sizeof (struct sd_fm_internal)); 8434 8435 /* Unregister and free device id. */ 8436 ddi_devid_unregister(devi); 8437 if (un->un_devid) { 8438 ddi_devid_free(un->un_devid); 8439 un->un_devid = NULL; 8440 } 8441 8442 /* 8443 * Destroy wmap cache if it exists. 8444 */ 8445 if (un->un_wm_cache != NULL) { 8446 kmem_cache_destroy(un->un_wm_cache); 8447 un->un_wm_cache = NULL; 8448 } 8449 8450 /* 8451 * kstat cleanup is done in detach for all device types (4363169). 8452 * We do not want to fail detach if the device kstats are not deleted 8453 * since there is confusion about the devo_refcnt for the device. 8454 * We just delete the kstats and let detach complete successfully.
8455 */ 8456 if (un->un_stats != NULL) { 8457 kstat_delete(un->un_stats); 8458 un->un_stats = NULL; 8459 } 8460 if (un->un_errstats != NULL) { 8461 kstat_delete(un->un_errstats); 8462 un->un_errstats = NULL; 8463 } 8464 8465 /* Remove partition stats */ 8466 if (un->un_f_pkstats_enabled) { 8467 for (i = 0; i < NSDMAP; i++) { 8468 if (un->un_pstats[i] != NULL) { 8469 kstat_delete(un->un_pstats[i]); 8470 un->un_pstats[i] = NULL; 8471 } 8472 } 8473 } 8474 8475 /* Remove xbuf registration */ 8476 ddi_xbuf_attr_unregister_devinfo(un->un_xbuf_attr, devi); 8477 ddi_xbuf_attr_destroy(un->un_xbuf_attr); 8478 8479 /* Remove driver properties */ 8480 ddi_prop_remove_all(devi); 8481 8482 mutex_destroy(&un->un_pm_mutex); 8483 cv_destroy(&un->un_pm_busy_cv); 8484 8485 cv_destroy(&un->un_wcc_cv); 8486 8487 /* Open/close semaphore */ 8488 sema_destroy(&un->un_semoclose); 8489 8490 /* Removable media condvar. */ 8491 cv_destroy(&un->un_state_cv); 8492 8493 /* Suspend/resume condvar. */ 8494 cv_destroy(&un->un_suspend_cv); 8495 cv_destroy(&un->un_disk_busy_cv); 8496 8497 sd_free_rqs(un); 8498 8499 /* Free up soft state */ 8500 devp->sd_private = NULL; 8501 8502 bzero(un, sizeof (struct sd_lun)); 8503 ddi_soft_state_free(sd_state, instance); 8504 8505 mutex_exit(&sd_detach_mutex); 8506 8507 /* This frees up the INQUIRY data associated with the device. */ 8508 scsi_unprobe(devp); 8509 8510 /* 8511 * After successfully detaching an instance, we update the information 8512 * of how many luns have been attached in the relative target and 8513 * controller for parallel SCSI. This information is used when sd tries 8514 * to set the tagged queuing capability in HBA. 8515 * Since un has been released, we can't use SD_IS_PARALLEL_SCSI(un) to 8516 * check if the device is parallel SCSI. However, we don't need to 8517 * check here because we've already checked during attach. No device 8518 * that is not parallel SCSI is in the chain. 8519 */ 8520 if ((tgt >= 0) && (tgt < NTARGETS_WIDE)) { 8521 sd_scsi_update_lun_on_target(pdip, tgt, SD_SCSI_LUN_DETACH); 8522 } 8523 8524 return (DDI_SUCCESS); 8525 8526 err_notclosed: 8527 mutex_exit(SD_MUTEX(un)); 8528 8529 err_stillbusy: 8530 _NOTE(NO_COMPETING_THREADS_NOW); 8531 8532 err_remove_event: 8533 mutex_enter(&sd_detach_mutex); 8534 un->un_detach_count--; 8535 mutex_exit(&sd_detach_mutex); 8536 8537 SD_TRACE(SD_LOG_ATTACH_DETACH, un, "sd_unit_detach: exit failure\n"); 8538 return (DDI_FAILURE); 8539 } 8540 8541 8542 /* 8543 * Function: sd_create_errstats 8544 * 8545 * Description: This routine instantiates the device error stats. 8546 * 8547 * Note: During attach the stats are instantiated first so they are 8548 * available for attach-time routines that utilize the driver 8549 * iopath to send commands to the device. The stats are initialized 8550 * separately so data obtained during some attach-time routines is 8551 * available. 
(4362483) 8552 * 8553 * Arguments: un - driver soft state (unit) structure 8554 * instance - driver instance 8555 * 8556 * Context: Kernel thread context 8557 */ 8558 8559 static void 8560 sd_create_errstats(struct sd_lun *un, int instance) 8561 { 8562 struct sd_errstats *stp; 8563 char kstatmodule_err[KSTAT_STRLEN]; 8564 char kstatname[KSTAT_STRLEN]; 8565 int ndata = (sizeof (struct sd_errstats) / sizeof (kstat_named_t)); 8566 8567 ASSERT(un != NULL); 8568 8569 if (un->un_errstats != NULL) { 8570 return; 8571 } 8572 8573 (void) snprintf(kstatmodule_err, sizeof (kstatmodule_err), 8574 "%serr", sd_label); 8575 (void) snprintf(kstatname, sizeof (kstatname), 8576 "%s%d,err", sd_label, instance); 8577 8578 un->un_errstats = kstat_create(kstatmodule_err, instance, kstatname, 8579 "device_error", KSTAT_TYPE_NAMED, ndata, KSTAT_FLAG_PERSISTENT); 8580 8581 if (un->un_errstats == NULL) { 8582 SD_ERROR(SD_LOG_ATTACH_DETACH, un, 8583 "sd_create_errstats: Failed kstat_create\n"); 8584 return; 8585 } 8586 8587 stp = (struct sd_errstats *)un->un_errstats->ks_data; 8588 kstat_named_init(&stp->sd_softerrs, "Soft Errors", 8589 KSTAT_DATA_UINT32); 8590 kstat_named_init(&stp->sd_harderrs, "Hard Errors", 8591 KSTAT_DATA_UINT32); 8592 kstat_named_init(&stp->sd_transerrs, "Transport Errors", 8593 KSTAT_DATA_UINT32); 8594 kstat_named_init(&stp->sd_vid, "Vendor", 8595 KSTAT_DATA_CHAR); 8596 kstat_named_init(&stp->sd_pid, "Product", 8597 KSTAT_DATA_CHAR); 8598 kstat_named_init(&stp->sd_revision, "Revision", 8599 KSTAT_DATA_CHAR); 8600 kstat_named_init(&stp->sd_serial, "Serial No", 8601 KSTAT_DATA_CHAR); 8602 kstat_named_init(&stp->sd_capacity, "Size", 8603 KSTAT_DATA_ULONGLONG); 8604 kstat_named_init(&stp->sd_rq_media_err, "Media Error", 8605 KSTAT_DATA_UINT32); 8606 kstat_named_init(&stp->sd_rq_ntrdy_err, "Device Not Ready", 8607 KSTAT_DATA_UINT32); 8608 kstat_named_init(&stp->sd_rq_nodev_err, "No Device", 8609 KSTAT_DATA_UINT32); 8610 kstat_named_init(&stp->sd_rq_recov_err, "Recoverable", 8611 KSTAT_DATA_UINT32); 8612 kstat_named_init(&stp->sd_rq_illrq_err, "Illegal Request", 8613 KSTAT_DATA_UINT32); 8614 kstat_named_init(&stp->sd_rq_pfa_err, "Predictive Failure Analysis", 8615 KSTAT_DATA_UINT32); 8616 8617 un->un_errstats->ks_private = un; 8618 un->un_errstats->ks_update = nulldev; 8619 8620 kstat_install(un->un_errstats); 8621 } 8622 8623 8624 /* 8625 * Function: sd_set_errstats 8626 * 8627 * Description: This routine sets the value of the vendor id, product id, 8628 * revision, serial number, and capacity device error stats. 8629 * 8630 * Note: During attach the stats are instantiated first so they are 8631 * available for attach-time routines that utilize the driver 8632 * iopath to send commands to the device. The stats are initialized 8633 * separately so data obtained during some attach-time routines is 8634 * available. 
(4362483) 8635 * 8636 * Arguments: un - driver soft state (unit) structure 8637 * 8638 * Context: Kernel thread context 8639 */ 8640 8641 static void 8642 sd_set_errstats(struct sd_lun *un) 8643 { 8644 struct sd_errstats *stp; 8645 8646 ASSERT(un != NULL); 8647 ASSERT(un->un_errstats != NULL); 8648 stp = (struct sd_errstats *)un->un_errstats->ks_data; 8649 ASSERT(stp != NULL); 8650 (void) strncpy(stp->sd_vid.value.c, un->un_sd->sd_inq->inq_vid, 8); 8651 (void) strncpy(stp->sd_pid.value.c, un->un_sd->sd_inq->inq_pid, 16); 8652 (void) strncpy(stp->sd_revision.value.c, 8653 un->un_sd->sd_inq->inq_revision, 4); 8654 8655 /* 8656 * All the errstats are persistent across detach/attach, 8657 * so reset them all here to handle hot replacement of 8658 * disk drives, except for Sun-qualified drives whose 8659 * serial number has not changed. 8660 */ 8661 if ((bcmp(&SD_INQUIRY(un)->inq_pid[9], "SUN", 3) != 0) || 8662 (bcmp(&SD_INQUIRY(un)->inq_serial, stp->sd_serial.value.c, 8663 sizeof (SD_INQUIRY(un)->inq_serial)) != 0)) { 8664 stp->sd_softerrs.value.ui32 = 0; 8665 stp->sd_harderrs.value.ui32 = 0; 8666 stp->sd_transerrs.value.ui32 = 0; 8667 stp->sd_rq_media_err.value.ui32 = 0; 8668 stp->sd_rq_ntrdy_err.value.ui32 = 0; 8669 stp->sd_rq_nodev_err.value.ui32 = 0; 8670 stp->sd_rq_recov_err.value.ui32 = 0; 8671 stp->sd_rq_illrq_err.value.ui32 = 0; 8672 stp->sd_rq_pfa_err.value.ui32 = 0; 8673 } 8674 8675 /* 8676 * Set the "Serial No" kstat for Sun qualified drives (indicated by 8677 * "SUN" in bytes 25-27 of the inquiry data (bytes 9-11 of the pid) 8678 * (4376302)) 8679 */ 8680 if (bcmp(&SD_INQUIRY(un)->inq_pid[9], "SUN", 3) == 0) { 8681 bcopy(&SD_INQUIRY(un)->inq_serial, stp->sd_serial.value.c, 8682 sizeof (SD_INQUIRY(un)->inq_serial)); 8683 } 8684 8685 if (un->un_f_blockcount_is_valid != TRUE) { 8686 /* 8687 * Set capacity error stat to 0 for no media. This ensures 8688 * a valid capacity is displayed in response to 'iostat -E' 8689 * when no media is present in the device. 8690 */ 8691 stp->sd_capacity.value.ui64 = 0; 8692 } else { 8693 /* 8694 * Multiply un_blockcount by un->un_sys_blocksize to get 8695 * capacity. 8696 * 8697 * Note: for non-512 blocksize devices "un_blockcount" has been 8698 * "scaled" in sd_send_scsi_READ_CAPACITY by multiplying by 8699 * (un_tgt_blocksize / un->un_sys_blocksize). 8700 */ 8701 stp->sd_capacity.value.ui64 = (uint64_t) 8702 ((uint64_t)un->un_blockcount * un->un_sys_blocksize); 8703 } 8704 } 8705 8706 8707 /* 8708 * Function: sd_set_pstats 8709 * 8710 * Description: This routine instantiates and initializes the partition 8711 * stats for each partition with more than zero blocks. 8712 * (4363169) 8713 * 8714 * Arguments: un - driver soft state (unit) structure 8715 * 8716 * Context: Kernel thread context 8717 */ 8718 8719 static void 8720 sd_set_pstats(struct sd_lun *un) 8721 { 8722 char kstatname[KSTAT_STRLEN]; 8723 int instance; 8724 int i; 8725 diskaddr_t nblks = 0; 8726 char *partname = NULL; 8727 8728 ASSERT(un != NULL); 8729 8730 instance = ddi_get_instance(SD_DEVINFO(un)); 8731 8732 /* Note: x86: is this a VTOC8/VTOC16 difference?
*/ 8733 for (i = 0; i < NSDMAP; i++) { 8734 8735 if (cmlb_partinfo(un->un_cmlbhandle, i, 8736 &nblks, NULL, &partname, NULL, (void *)SD_PATH_DIRECT) != 0) 8737 continue; 8738 mutex_enter(SD_MUTEX(un)); 8739 8740 if ((un->un_pstats[i] == NULL) && 8741 (nblks != 0)) { 8742 8743 (void) snprintf(kstatname, sizeof (kstatname), 8744 "%s%d,%s", sd_label, instance, 8745 partname); 8746 8747 un->un_pstats[i] = kstat_create(sd_label, 8748 instance, kstatname, "partition", KSTAT_TYPE_IO, 8749 1, KSTAT_FLAG_PERSISTENT); 8750 if (un->un_pstats[i] != NULL) { 8751 un->un_pstats[i]->ks_lock = SD_MUTEX(un); 8752 kstat_install(un->un_pstats[i]); 8753 } 8754 } 8755 mutex_exit(SD_MUTEX(un)); 8756 } 8757 } 8758 8759 8760 #if (defined(__fibre)) 8761 /* 8762 * Function: sd_init_event_callbacks 8763 * 8764 * Description: This routine initializes the insertion and removal event 8765 * callbacks. (fibre only) 8766 * 8767 * Arguments: un - driver soft state (unit) structure 8768 * 8769 * Context: Kernel thread context 8770 */ 8771 8772 static void 8773 sd_init_event_callbacks(struct sd_lun *un) 8774 { 8775 ASSERT(un != NULL); 8776 8777 if ((un->un_insert_event == NULL) && 8778 (ddi_get_eventcookie(SD_DEVINFO(un), FCAL_INSERT_EVENT, 8779 &un->un_insert_event) == DDI_SUCCESS)) { 8780 /* 8781 * Add the callback for an insertion event 8782 */ 8783 (void) ddi_add_event_handler(SD_DEVINFO(un), 8784 un->un_insert_event, sd_event_callback, (void *)un, 8785 &(un->un_insert_cb_id)); 8786 } 8787 8788 if ((un->un_remove_event == NULL) && 8789 (ddi_get_eventcookie(SD_DEVINFO(un), FCAL_REMOVE_EVENT, 8790 &un->un_remove_event) == DDI_SUCCESS)) { 8791 /* 8792 * Add the callback for a removal event 8793 */ 8794 (void) ddi_add_event_handler(SD_DEVINFO(un), 8795 un->un_remove_event, sd_event_callback, (void *)un, 8796 &(un->un_remove_cb_id)); 8797 } 8798 } 8799 8800 8801 /* 8802 * Function: sd_event_callback 8803 * 8804 * Description: This routine handles insert/remove events (photon). The 8805 * state is changed to OFFLINE, which can be used to suppress 8806 * error messages. (fibre only) 8807 * 8808 * Arguments: un - driver soft state (unit) structure 8809 * 8810 * Context: Callout thread context 8811 */ 8812 /* ARGSUSED */ 8813 static void 8814 sd_event_callback(dev_info_t *dip, ddi_eventcookie_t event, void *arg, 8815 void *bus_impldata) 8816 { 8817 struct sd_lun *un = (struct sd_lun *)arg; 8818 8819 _NOTE(DATA_READABLE_WITHOUT_LOCK(sd_lun::un_insert_event)); 8820 if (event == un->un_insert_event) { 8821 SD_TRACE(SD_LOG_COMMON, un, "sd_event_callback: insert event"); 8822 mutex_enter(SD_MUTEX(un)); 8823 if (un->un_state == SD_STATE_OFFLINE) { 8824 if (un->un_last_state != SD_STATE_SUSPENDED) { 8825 un->un_state = un->un_last_state; 8826 } else { 8827 /* 8828 * We have gone through SUSPEND/RESUME while 8829 * we were offline. Restore the last state. 8830 */ 8831 un->un_state = un->un_save_state; 8832 } 8833 } 8834 mutex_exit(SD_MUTEX(un)); 8835 8836 _NOTE(DATA_READABLE_WITHOUT_LOCK(sd_lun::un_remove_event)); 8837 } else if (event == un->un_remove_event) { 8838 SD_TRACE(SD_LOG_COMMON, un, "sd_event_callback: remove event"); 8839 mutex_enter(SD_MUTEX(un)); 8840 /* 8841 * We need to handle an event callback that occurs during 8842 * the suspend operation, since we don't prevent it.
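 * Concretely: if a removal arrives while the unit is SUSPENDED, only un_last_state is set to OFFLINE below, so the resume path restores the unit to OFFLINE rather than NORMAL.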
8843 */ 8844 if (un->un_state != SD_STATE_OFFLINE) { 8845 if (un->un_state != SD_STATE_SUSPENDED) { 8846 New_state(un, SD_STATE_OFFLINE); 8847 } else { 8848 un->un_last_state = SD_STATE_OFFLINE; 8849 } 8850 } 8851 mutex_exit(SD_MUTEX(un)); 8852 } else { 8853 scsi_log(SD_DEVINFO(un), sd_label, CE_NOTE, 8854 "!Unknown event\n"); 8855 } 8856 8857 } 8858 #endif 8859 8860 /* 8861 * Function: sd_cache_control() 8862 * 8863 * Description: This routine is the driver entry point for setting 8864 * read and write caching by modifying the WCE (write cache 8865 * enable) and RCD (read cache disable) bits of mode 8866 * page 8 (MODEPAGE_CACHING). 8867 * 8868 * Arguments: ssc - ssc contains pointer to driver soft state (unit) 8869 * structure for this target. 8870 * rcd_flag - flag for controlling the read cache 8871 * wce_flag - flag for controlling the write cache 8872 * 8873 * Return Code: EIO 8874 * code returned by sd_send_scsi_MODE_SENSE and 8875 * sd_send_scsi_MODE_SELECT 8876 * 8877 * Context: Kernel Thread 8878 */ 8879 8880 static int 8881 sd_cache_control(sd_ssc_t *ssc, int rcd_flag, int wce_flag) 8882 { 8883 struct mode_caching *mode_caching_page; 8884 uchar_t *header; 8885 size_t buflen; 8886 int hdrlen; 8887 int bd_len; 8888 int rval = 0; 8889 struct mode_header_grp2 *mhp; 8890 struct sd_lun *un; 8891 int status; 8892 8893 ASSERT(ssc != NULL); 8894 un = ssc->ssc_un; 8895 ASSERT(un != NULL); 8896 8897 /* 8898 * Do a test unit ready; otherwise, a mode sense may not work if this 8899 * is the first command sent to the device after boot. 8900 */ 8901 status = sd_send_scsi_TEST_UNIT_READY(ssc, 0); 8902 if (status != 0) 8903 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 8904 8905 if (un->un_f_cfg_is_atapi == TRUE) { 8906 hdrlen = MODE_HEADER_LENGTH_GRP2; 8907 } else { 8908 hdrlen = MODE_HEADER_LENGTH; 8909 } 8910 8911 /* 8912 * Allocate memory for the retrieved mode page and its headers. Set 8913 * a pointer to the page itself. Use mode_cache_scsi3 to ensure 8914 * that we get all of the mode sense data; otherwise, the mode 8915 * select will fail. mode_cache_scsi3 is a superset of mode_caching. 8916 */ 8917 buflen = hdrlen + MODE_BLK_DESC_LENGTH + 8918 sizeof (struct mode_cache_scsi3); 8919 8920 header = kmem_zalloc(buflen, KM_SLEEP); 8921 8922 /* Get the information from the device. */ 8923 if (un->un_f_cfg_is_atapi == TRUE) { 8924 rval = sd_send_scsi_MODE_SENSE(ssc, CDB_GROUP1, header, buflen, 8925 MODEPAGE_CACHING, SD_PATH_DIRECT); 8926 } else { 8927 rval = sd_send_scsi_MODE_SENSE(ssc, CDB_GROUP0, header, buflen, 8928 MODEPAGE_CACHING, SD_PATH_DIRECT); 8929 } 8930 8931 if (rval != 0) { 8932 SD_ERROR(SD_LOG_IOCTL_RMMEDIA, un, 8933 "sd_cache_control: Mode Sense Failed\n"); 8934 goto mode_sense_failed; 8935 } 8936 8937 /* 8938 * Determine size of Block Descriptors in order to locate 8939 * the mode page data. ATAPI devices return 0, SCSI devices 8940 * should return MODE_BLK_DESC_LENGTH.
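 *
 * (Editor's illustration of the buffer layout being parsed here,
 * assuming a non-ATAPI device that returns a full block descriptor:
 *
 *	mode header     block descriptor    mode page 8
 *	|<- hdrlen ->|<----- bd_len ----->|<- mode_caching ... ->|
 *
 * so header + hdrlen + bd_len, computed below, is the address of
 * the caching page itself.)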
8941 */ 8942 if (un->un_f_cfg_is_atapi == TRUE) { 8943 mhp = (struct mode_header_grp2 *)header; 8944 bd_len = (mhp->bdesc_length_hi << 8) | mhp->bdesc_length_lo; 8945 } else { 8946 bd_len = ((struct mode_header *)header)->bdesc_length; 8947 } 8948 8949 if (bd_len > MODE_BLK_DESC_LENGTH) { 8950 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 8951 "sd_cache_control: Mode Sense returned invalid " 8952 "block descriptor length\n"); 8953 sd_ssc_set_info(ssc, SSC_FLAGS_INVALID_DATA, 8954 "sd_cache_control: Mode Sense returned invalid " 8955 "block descriptor length"); 8956 rval = EIO; 8957 goto mode_sense_failed; 8958 } 8959 8960 mode_caching_page = (struct mode_caching *)(header + hdrlen + bd_len); 8961 if (mode_caching_page->mode_page.code != MODEPAGE_CACHING) { 8962 SD_ERROR(SD_LOG_COMMON, un, "sd_cache_control: Mode Sense" 8963 " caching page code mismatch %d\n", 8964 mode_caching_page->mode_page.code); 8965 sd_ssc_set_info(ssc, SSC_FLAGS_INVALID_DATA, 8966 "sd_cache_control: Mode Sense caching page code " 8967 "mismatch %d", mode_caching_page->mode_page.code); 8968 rval = EIO; 8969 goto mode_sense_failed; 8970 } 8971 8972 /* Check the relevant bits on successful mode sense. */ 8973 if ((mode_caching_page->rcd && rcd_flag == SD_CACHE_ENABLE) || 8974 (!mode_caching_page->rcd && rcd_flag == SD_CACHE_DISABLE) || 8975 (mode_caching_page->wce && wce_flag == SD_CACHE_DISABLE) || 8976 (!mode_caching_page->wce && wce_flag == SD_CACHE_ENABLE)) { 8977 8978 size_t sbuflen; 8979 uchar_t save_pg; 8980 8981 /* 8982 * Construct select buffer length based on the 8983 * length of the sense data returned. 8984 */ 8985 sbuflen = hdrlen + MODE_BLK_DESC_LENGTH + 8986 sizeof (struct mode_page) + 8987 (int)mode_caching_page->mode_page.length; 8988 8989 /* 8990 * Set the caching bits as requested. 8991 */ 8992 if (rcd_flag == SD_CACHE_ENABLE) 8993 mode_caching_page->rcd = 0; 8994 else if (rcd_flag == SD_CACHE_DISABLE) 8995 mode_caching_page->rcd = 1; 8996 8997 if (wce_flag == SD_CACHE_ENABLE) 8998 mode_caching_page->wce = 1; 8999 else if (wce_flag == SD_CACHE_DISABLE) 9000 mode_caching_page->wce = 0; 9001 9002 /* 9003 * Save the page if the mode sense says the 9004 * drive supports it. 9005 */ 9006 save_pg = mode_caching_page->mode_page.ps ? 9007 SD_SAVE_PAGE : SD_DONTSAVE_PAGE; 9008 9009 /* Clear reserved bits before mode select. */ 9010 mode_caching_page->mode_page.ps = 0; 9011 9012 /* 9013 * Clear out mode header for mode select. 9014 * The rest of the retrieved page will be reused. 
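 *
 * (Editor's note: the bzero() just below also clears the block
 * descriptor length fields in the mode header, which is why they
 * are rewritten from the saved bd_len immediately afterwards; a
 * device that returned bd_len == 0 is handled naturally, since the
 * fields are then simply rewritten as zero.)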
9015 */ 9016 bzero(header, hdrlen); 9017 9018 if (un->un_f_cfg_is_atapi == TRUE) { 9019 mhp = (struct mode_header_grp2 *)header; 9020 mhp->bdesc_length_hi = bd_len >> 8; 9021 mhp->bdesc_length_lo = (uchar_t)bd_len & 0xff; 9022 } else { 9023 ((struct mode_header *)header)->bdesc_length = bd_len; 9024 } 9025 9026 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 9027 9028 /* Issue mode select to change the cache settings */ 9029 if (un->un_f_cfg_is_atapi == TRUE) { 9030 rval = sd_send_scsi_MODE_SELECT(ssc, CDB_GROUP1, header, 9031 sbuflen, save_pg, SD_PATH_DIRECT); 9032 } else { 9033 rval = sd_send_scsi_MODE_SELECT(ssc, CDB_GROUP0, header, 9034 sbuflen, save_pg, SD_PATH_DIRECT); 9035 } 9036 9037 } 9038 9039 9040 mode_sense_failed: 9041 9042 kmem_free(header, buflen); 9043 9044 if (rval != 0) { 9045 if (rval == EIO) 9046 sd_ssc_assessment(ssc, SD_FMT_STATUS_CHECK); 9047 else 9048 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 9049 } 9050 return (rval); 9051 } 9052 9053 9054 /* 9055 * Function: sd_get_write_cache_enabled() 9056 * 9057 * Description: This routine is the driver entry point for determining if 9058 * write caching is enabled. It examines the WCE (write cache 9059 * enable) bits of mode page 8 (MODEPAGE_CACHING). 9060 * 9061 * Arguments: ssc - ssc contains pointer to driver soft state (unit) 9062 * structure for this target. 9063 * is_enabled - pointer to int where write cache enabled state 9064 * is returned (non-zero -> write cache enabled) 9065 * 9066 * 9067 * Return Code: EIO 9068 * code returned by sd_send_scsi_MODE_SENSE 9069 * 9070 * Context: Kernel Thread 9071 * 9072 * NOTE: If ioctl is added to disable write cache, this sequence should 9073 * be followed so that no locking is required for accesses to 9074 * un->un_f_write_cache_enabled: 9075 * do mode select to clear wce 9076 * do synchronize cache to flush cache 9077 * set un->un_f_write_cache_enabled = FALSE 9078 * 9079 * Conversely, an ioctl to enable the write cache should be done 9080 * in this order: 9081 * set un->un_f_write_cache_enabled = TRUE 9082 * do mode select to set wce 9083 */ 9084 9085 static int 9086 sd_get_write_cache_enabled(sd_ssc_t *ssc, int *is_enabled) 9087 { 9088 struct mode_caching *mode_caching_page; 9089 uchar_t *header; 9090 size_t buflen; 9091 int hdrlen; 9092 int bd_len; 9093 int rval = 0; 9094 struct sd_lun *un; 9095 int status; 9096 9097 ASSERT(ssc != NULL); 9098 un = ssc->ssc_un; 9099 ASSERT(un != NULL); 9100 ASSERT(is_enabled != NULL); 9101 9102 /* in case of error, flag as enabled */ 9103 *is_enabled = TRUE; 9104 9105 /* 9106 * Do a test unit ready, otherwise a mode sense may not work if this 9107 * is the first command sent to the device after boot. 9108 */ 9109 status = sd_send_scsi_TEST_UNIT_READY(ssc, 0); 9110 9111 if (status != 0) 9112 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 9113 9114 if (un->un_f_cfg_is_atapi == TRUE) { 9115 hdrlen = MODE_HEADER_LENGTH_GRP2; 9116 } else { 9117 hdrlen = MODE_HEADER_LENGTH; 9118 } 9119 9120 /* 9121 * Allocate memory for the retrieved mode page and its headers. Set 9122 * a pointer to the page itself. 9123 */ 9124 buflen = hdrlen + MODE_BLK_DESC_LENGTH + sizeof (struct mode_caching); 9125 header = kmem_zalloc(buflen, KM_SLEEP); 9126 9127 /* Get the information from the device. 
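 *
 * (Editor's note, an addition for clarity: CDB_GROUP1 below selects
 * the 10-byte MODE SENSE form, since ATAPI devices generally do not
 * implement the 6-byte CDB_GROUP0 form used for plain SCSI targets;
 * the header length chosen above must agree with the CDB group used
 * here, which is why both are keyed off un_f_cfg_is_atapi.)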
*/ 9128 if (un->un_f_cfg_is_atapi == TRUE) { 9129 rval = sd_send_scsi_MODE_SENSE(ssc, CDB_GROUP1, header, buflen, 9130 MODEPAGE_CACHING, SD_PATH_DIRECT); 9131 } else { 9132 rval = sd_send_scsi_MODE_SENSE(ssc, CDB_GROUP0, header, buflen, 9133 MODEPAGE_CACHING, SD_PATH_DIRECT); 9134 } 9135 9136 if (rval != 0) { 9137 SD_ERROR(SD_LOG_IOCTL_RMMEDIA, un, 9138 "sd_get_write_cache_enabled: Mode Sense Failed\n"); 9139 goto mode_sense_failed; 9140 } 9141 9142 /* 9143 * Determine size of Block Descriptors in order to locate 9144 * the mode page data. ATAPI devices return 0, SCSI devices 9145 * should return MODE_BLK_DESC_LENGTH. 9146 */ 9147 if (un->un_f_cfg_is_atapi == TRUE) { 9148 struct mode_header_grp2 *mhp; 9149 mhp = (struct mode_header_grp2 *)header; 9150 bd_len = (mhp->bdesc_length_hi << 8) | mhp->bdesc_length_lo; 9151 } else { 9152 bd_len = ((struct mode_header *)header)->bdesc_length; 9153 } 9154 9155 if (bd_len > MODE_BLK_DESC_LENGTH) { 9156 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 9157 "sd_get_write_cache_enabled: Mode Sense returned invalid " 9158 "block descriptor length\n"); 9159 /* FMA should make upset complain here */ 9160 sd_ssc_set_info(ssc, SSC_FLAGS_INVALID_DATA, 9161 "sd_get_write_cache_enabled: Mode Sense returned invalid " 9162 "block descriptor length %d", bd_len); 9163 rval = EIO; 9164 goto mode_sense_failed; 9165 } 9166 9167 mode_caching_page = (struct mode_caching *)(header + hdrlen + bd_len); 9168 if (mode_caching_page->mode_page.code != MODEPAGE_CACHING) { 9169 SD_ERROR(SD_LOG_COMMON, un, "sd_get_write_cache_enabled: Mode Sense" 9170 " caching page code mismatch %d\n", 9171 mode_caching_page->mode_page.code); 9172 /* FMA could make upset complain here */ 9173 sd_ssc_set_info(ssc, SSC_FLAGS_INVALID_DATA, 9174 "sd_get_write_cache_enabled: Mode Sense caching page code " 9175 "mismatch %d", mode_caching_page->mode_page.code); 9176 rval = EIO; 9177 goto mode_sense_failed; 9178 } 9179 *is_enabled = mode_caching_page->wce; 9180 9181 mode_sense_failed: 9182 if (rval == 0) { 9183 sd_ssc_assessment(ssc, SD_FMT_STANDARD); 9184 } else if (rval == EIO) { 9185 /* 9186 * Some disks do not support MODE SENSE(6); we 9187 * should ignore this kind of error (sense key is 9188 * 0x5 - ILLEGAL REQUEST). 9189 */ 9190 uint8_t *sensep; 9191 int senlen; 9192 9193 sensep = (uint8_t *)ssc->ssc_uscsi_cmd->uscsi_rqbuf; 9194 senlen = (int)(ssc->ssc_uscsi_cmd->uscsi_rqlen - 9195 ssc->ssc_uscsi_cmd->uscsi_rqresid); 9196 9197 if (senlen > 0 && 9198 scsi_sense_key(sensep) == KEY_ILLEGAL_REQUEST) { 9199 sd_ssc_assessment(ssc, SD_FMT_IGNORE_COMPROMISE); 9200 } else { 9201 sd_ssc_assessment(ssc, SD_FMT_STATUS_CHECK); 9202 } 9203 } else { 9204 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 9205 } 9206 kmem_free(header, buflen); 9207 return (rval); 9208 } 9209 9210 /* 9211 * Function: sd_get_nv_sup() 9212 * 9213 * Description: This routine is the driver entry point for 9214 * determining whether non-volatile cache is supported. This 9215 * determination process works as follows: 9216 * 9217 * 1. sd first checks sd.conf to see whether the 9218 * suppress_cache_flush bit is set for this device. 9219 * 9220 * 2. if it is not there, sd then queries the internal disk table. 9221 * 9222 * 3. if either sd.conf or the internal disk table specifies 9223 * that the cache flush be suppressed, we don't bother checking 9224 * the NV_SUP bit. 9225 * 9226 * If SUPPRESS_CACHE_FLUSH bit is not set to 1, sd queries 9227 * the optional INQUIRY VPD page 0x86.
If the device 9228 * supports VPD page 0x86, sd examines the NV_SUP 9229 * (non-volatile cache support) bit in the INQUIRY VPD page 9230 * 0x86: 9231 * o If the NV_SUP bit is set, sd assumes the device has a 9232 * non-volatile cache and sets 9233 * un_f_sync_nv_supported to TRUE. 9234 * o Otherwise the cache is not non-volatile, and 9235 * un_f_sync_nv_supported is set to FALSE. 9236 * 9237 * Arguments: ssc - ssc contains a pointer to the driver soft state 9238 * (unit) structure for this target 9239 * 9240 * Return Code: none 9241 * 9242 * Context: Kernel Thread 9243 */ 9244 static void 9245 sd_get_nv_sup(sd_ssc_t *ssc) 9246 { 9247 int rval = 0; 9248 uchar_t *inq86 = NULL; 9249 size_t inq86_len = MAX_INQUIRY_SIZE; 9250 size_t inq86_resid = 0; 9251 struct dk_callback *dkc; 9252 struct sd_lun *un; 9253 9254 ASSERT(ssc != NULL); 9255 un = ssc->ssc_un; 9256 ASSERT(un != NULL); 9257 9258 mutex_enter(SD_MUTEX(un)); 9259 9260 /* 9261 * Be conservative about the device's support of the 9262 * SYNC_NV bit: un_f_sync_nv_supported is 9263 * initialized to FALSE. 9264 */ 9265 un->un_f_sync_nv_supported = FALSE; 9266 9267 /* 9268 * If either sd.conf or the internal disk table 9269 * specifies that the cache flush be suppressed, then 9270 * we don't bother checking the NV_SUP bit. 9271 */ 9272 if (un->un_f_suppress_cache_flush == TRUE) { 9273 mutex_exit(SD_MUTEX(un)); 9274 return; 9275 } 9276 9277 if (sd_check_vpd_page_support(ssc) == 0 && 9278 un->un_vpd_page_mask & SD_VPD_EXTENDED_DATA_PG) { 9279 mutex_exit(SD_MUTEX(un)); 9280 /* collect page 86 data if available */ 9281 inq86 = kmem_zalloc(inq86_len, KM_SLEEP); 9282 9283 rval = sd_send_scsi_INQUIRY(ssc, inq86, inq86_len, 9284 0x01, 0x86, &inq86_resid); 9285 9286 if (rval == 0 && (inq86_len - inq86_resid > 6)) { 9287 SD_TRACE(SD_LOG_COMMON, un, 9288 "sd_get_nv_sup: \ 9289 successfully get VPD page: %x \ 9290 PAGE LENGTH: %x BYTE 6: %x\n", 9291 inq86[1], inq86[3], inq86[6]); 9292 9293 mutex_enter(SD_MUTEX(un)); 9294 /* 9295 * Check the value of the NV_SUP bit: 9296 * un_f_sync_nv_supported is set to TRUE only if the 9297 * device reports the NV_SUP bit as 1. 9298 */ 9299 if (inq86[6] & SD_VPD_NV_SUP) { 9300 un->un_f_sync_nv_supported = TRUE; 9301 } 9302 mutex_exit(SD_MUTEX(un)); 9303 } else if (rval != 0) { 9304 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 9305 } 9306 9307 kmem_free(inq86, inq86_len); 9308 } else { 9309 mutex_exit(SD_MUTEX(un)); 9310 } 9311 9312 /* 9313 * Send a SYNC CACHE command to check whether the 9314 * SYNC_NV bit is supported; by this point 9315 * un_f_sync_nv_supported holds its correct value. 9316 */ 9317 mutex_enter(SD_MUTEX(un)); 9318 if (un->un_f_sync_nv_supported) { 9319 mutex_exit(SD_MUTEX(un)); 9320 dkc = kmem_zalloc(sizeof (struct dk_callback), KM_SLEEP); 9321 dkc->dkc_flag = FLUSH_VOLATILE; 9322 (void) sd_send_scsi_SYNCHRONIZE_CACHE(un, dkc); 9323 9324 /* 9325 * Send a TEST UNIT READY command to the device. This should 9326 * clear any outstanding UNIT ATTENTION that may be present. 9327 */ 9328 rval = sd_send_scsi_TEST_UNIT_READY(ssc, SD_DONT_RETRY_TUR); 9329 if (rval != 0) 9330 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 9331 9332 kmem_free(dkc, sizeof (struct dk_callback)); 9333 } else { 9334 mutex_exit(SD_MUTEX(un)); 9335 } 9336 9337 SD_TRACE(SD_LOG_COMMON, un, "sd_get_nv_sup: \ 9338 un_f_suppress_cache_flush is set to %d\n", 9339 un->un_f_suppress_cache_flush); 9340 } 9341 9342 /* 9343 * Function: sd_make_device 9344 * 9345 * Description: Utility routine to return the Solaris device number from 9346 * the data in the device's dev_info structure.
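 *
 * (Editor's example, using a hypothetical major number: if "sd" has
 * major 32 and this dev_info is instance 3, the routine returns
 * makedevice(32, 3 << SDUNIT_SHIFT), i.e. the dev_t of minor 0
 * (slice 0) of instance 3; the partition bits below SDUNIT_SHIFT
 * are left zero.)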
9347 * 9348 * Return Code: The Solaris device number 9349 * 9350 * Context: Any 9351 */ 9352 9353 static dev_t 9354 sd_make_device(dev_info_t *devi) 9355 { 9356 return (makedevice(ddi_name_to_major(ddi_get_name(devi)), 9357 ddi_get_instance(devi) << SDUNIT_SHIFT)); 9358 } 9359 9360 9361 /* 9362 * Function: sd_pm_entry 9363 * 9364 * Description: Called at the start of a new command to manage power 9365 * and busy status of a device. This includes determining whether 9366 * the current power state of the device is sufficient for 9367 * performing the command or whether it must be changed. 9368 * The PM framework is notified appropriately. 9369 * Only with a return status of DDI_SUCCESS will the 9370 * component be marked busy to the framework. 9371 * 9372 * All callers of sd_pm_entry must check the return status 9373 * and only call sd_pm_exit if it was DDI_SUCCESS. A status 9374 * of DDI_FAILURE indicates the device failed to power up. 9375 * In this case un_pm_count has been adjusted so the result 9376 * on exit is still powered down, i.e. count is less than 0. 9377 * Calling sd_pm_exit with this count value hits an ASSERT. 9378 * 9379 * Return Code: DDI_SUCCESS or DDI_FAILURE 9380 * 9381 * Context: Kernel thread context. 9382 */ 9383 9384 static int 9385 sd_pm_entry(struct sd_lun *un) 9386 { 9387 int return_status = DDI_SUCCESS; 9388 9389 ASSERT(!mutex_owned(SD_MUTEX(un))); 9390 ASSERT(!mutex_owned(&un->un_pm_mutex)); 9391 9392 SD_TRACE(SD_LOG_IO_PM, un, "sd_pm_entry: entry\n"); 9393 9394 if (un->un_f_pm_is_enabled == FALSE) { 9395 SD_TRACE(SD_LOG_IO_PM, un, 9396 "sd_pm_entry: exiting, PM not enabled\n"); 9397 return (return_status); 9398 } 9399 9400 /* 9401 * Just increment a counter if PM is enabled. On the transition from 9402 * 0 ==> 1, mark the device as busy. The iodone side will decrement 9403 * the count with each IO and mark the device as idle when the count 9404 * hits 0. 9405 * 9406 * If the count is less than 0 the device is powered down. If a powered 9407 * down device is successfully powered up then the count must be 9408 * incremented to reflect the power up. Note that it'll get incremented 9409 * a second time to become busy. 9410 * 9411 * Because the following has the potential to change the device state 9412 * and must release the un_pm_mutex to do so, only one thread can be 9413 * allowed through at a time. 9414 */ 9415 9416 mutex_enter(&un->un_pm_mutex); 9417 while (un->un_pm_busy == TRUE) { 9418 cv_wait(&un->un_pm_busy_cv, &un->un_pm_mutex); 9419 } 9420 un->un_pm_busy = TRUE; 9421 9422 if (un->un_pm_count < 1) { 9423 9424 SD_TRACE(SD_LOG_IO_PM, un, "sd_pm_entry: busy component\n"); 9425 9426 /* 9427 * Indicate we are now busy so the framework won't attempt to 9428 * power down the device. This call will only fail if either 9429 * we passed a bad component number or the device has no 9430 * components. Neither of these should ever happen. 9431 */ 9432 mutex_exit(&un->un_pm_mutex); 9433 return_status = pm_busy_component(SD_DEVINFO(un), 0); 9434 ASSERT(return_status == DDI_SUCCESS); 9435 9436 mutex_enter(&un->un_pm_mutex); 9437 9438 if (un->un_pm_count < 0) { 9439 mutex_exit(&un->un_pm_mutex); 9440 9441 SD_TRACE(SD_LOG_IO_PM, un, 9442 "sd_pm_entry: power up component\n"); 9443 9444 /* 9445 * pm_raise_power will cause sdpower to be called 9446 * which brings the device power level to the 9447 * desired state, ON in this case. If successful, 9448 * un_pm_count and un_power_level will be updated 9449 * appropriately.
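 *
 * (Editor's summary of the un_pm_count protocol as used below:
 *
 *	count < 0	powered down; a successful pm_raise_power()
 *			leaves the count at 0, while a failure leaves
 *			it below 0 and the entry fails
 *	count == 0	powered up but idle; pm_busy_component() has
 *			been called above, before the final increment
 *	count > 0	already busy; the count is simply incremented)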
9450 */ 9451 return_status = pm_raise_power(SD_DEVINFO(un), 0, 9452 SD_SPINDLE_ON); 9453 9454 mutex_enter(&un->un_pm_mutex); 9455 9456 if (return_status != DDI_SUCCESS) { 9457 /* 9458 * Power up failed. 9459 * Idle the device and adjust the count 9460 * so the result on exit is that we're 9461 * still powered down, ie. count is less than 0. 9462 */ 9463 SD_TRACE(SD_LOG_IO_PM, un, 9464 "sd_pm_entry: power up failed," 9465 " idle the component\n"); 9466 9467 (void) pm_idle_component(SD_DEVINFO(un), 0); 9468 un->un_pm_count--; 9469 } else { 9470 /* 9471 * Device is powered up, verify the 9472 * count is non-negative. 9473 * This is debug only. 9474 */ 9475 ASSERT(un->un_pm_count == 0); 9476 } 9477 } 9478 9479 if (return_status == DDI_SUCCESS) { 9480 /* 9481 * For performance, now that the device has been tagged 9482 * as busy, and it's known to be powered up, update the 9483 * chain types to use jump tables that do not include 9484 * pm. This significantly lowers the overhead and 9485 * therefore improves performance. 9486 */ 9487 9488 mutex_exit(&un->un_pm_mutex); 9489 mutex_enter(SD_MUTEX(un)); 9490 SD_TRACE(SD_LOG_IO_PM, un, 9491 "sd_pm_entry: changing uscsi_chain_type from %d\n", 9492 un->un_uscsi_chain_type); 9493 9494 if (un->un_f_non_devbsize_supported) { 9495 un->un_buf_chain_type = 9496 SD_CHAIN_INFO_RMMEDIA_NO_PM; 9497 } else { 9498 un->un_buf_chain_type = 9499 SD_CHAIN_INFO_DISK_NO_PM; 9500 } 9501 un->un_uscsi_chain_type = SD_CHAIN_INFO_USCSI_CMD_NO_PM; 9502 9503 SD_TRACE(SD_LOG_IO_PM, un, 9504 " changed uscsi_chain_type to %d\n", 9505 un->un_uscsi_chain_type); 9506 mutex_exit(SD_MUTEX(un)); 9507 mutex_enter(&un->un_pm_mutex); 9508 9509 if (un->un_pm_idle_timeid == NULL) { 9510 /* 300 ms. */ 9511 un->un_pm_idle_timeid = 9512 timeout(sd_pm_idletimeout_handler, un, 9513 (drv_usectohz((clock_t)300000))); 9514 /* 9515 * Include an extra call to busy which keeps the 9516 * device busy with-respect-to the PM layer 9517 * until the timer fires, at which time it'll 9518 * get the extra idle call. 9519 */ 9520 (void) pm_busy_component(SD_DEVINFO(un), 0); 9521 } 9522 } 9523 } 9524 un->un_pm_busy = FALSE; 9525 /* Next... */ 9526 cv_signal(&un->un_pm_busy_cv); 9527 9528 un->un_pm_count++; 9529 9530 SD_TRACE(SD_LOG_IO_PM, un, 9531 "sd_pm_entry: exiting, un_pm_count = %d\n", un->un_pm_count); 9532 9533 mutex_exit(&un->un_pm_mutex); 9534 9535 return (return_status); 9536 } 9537 9538 9539 /* 9540 * Function: sd_pm_exit 9541 * 9542 * Description: Called at the completion of a command to manage busy 9543 * status for the device. If the device becomes idle the 9544 * PM framework is notified. 9545 * 9546 * Context: Kernel thread context 9547 */ 9548 9549 static void 9550 sd_pm_exit(struct sd_lun *un) 9551 { 9552 ASSERT(!mutex_owned(SD_MUTEX(un))); 9553 ASSERT(!mutex_owned(&un->un_pm_mutex)); 9554 9555 SD_TRACE(SD_LOG_IO_PM, un, "sd_pm_exit: entry\n"); 9556 9557 /* 9558 * After attach the following flag is only read, so don't 9559 * take the penalty of acquiring a mutex for it. 
9560 */ 9561 if (un->un_f_pm_is_enabled == TRUE) { 9562 9563 mutex_enter(&un->un_pm_mutex); 9564 un->un_pm_count--; 9565 9566 SD_TRACE(SD_LOG_IO_PM, un, 9567 "sd_pm_exit: un_pm_count = %d\n", un->un_pm_count); 9568 9569 ASSERT(un->un_pm_count >= 0); 9570 if (un->un_pm_count == 0) { 9571 mutex_exit(&un->un_pm_mutex); 9572 9573 SD_TRACE(SD_LOG_IO_PM, un, 9574 "sd_pm_exit: idle component\n"); 9575 9576 (void) pm_idle_component(SD_DEVINFO(un), 0); 9577 9578 } else { 9579 mutex_exit(&un->un_pm_mutex); 9580 } 9581 } 9582 9583 SD_TRACE(SD_LOG_IO_PM, un, "sd_pm_exit: exiting\n"); 9584 } 9585 9586 9587 /* 9588 * Function: sdopen 9589 * 9590 * Description: Driver's open(9e) entry point function. 9591 * 9592 * Arguments: dev_i - pointer to device number 9593 * flag - how to open file (FEXCL, FNDELAY, FREAD, FWRITE) 9594 * otyp - open type (OTYP_BLK, OTYP_CHR, OTYP_LYR) 9595 * cred_p - user credential pointer 9596 * 9597 * Return Code: EINVAL 9598 * ENXIO 9599 * EIO 9600 * EROFS 9601 * EBUSY 9602 * 9603 * Context: Kernel thread context 9604 */ 9605 /* ARGSUSED */ 9606 static int 9607 sdopen(dev_t *dev_p, int flag, int otyp, cred_t *cred_p) 9608 { 9609 struct sd_lun *un; 9610 int nodelay; 9611 int part; 9612 uint64_t partmask; 9613 int instance; 9614 dev_t dev; 9615 int rval = EIO; 9616 diskaddr_t nblks = 0; 9617 diskaddr_t label_cap; 9618 9619 /* Validate the open type */ 9620 if (otyp >= OTYPCNT) { 9621 return (EINVAL); 9622 } 9623 9624 dev = *dev_p; 9625 instance = SDUNIT(dev); 9626 mutex_enter(&sd_detach_mutex); 9627 9628 /* 9629 * Fail the open if there is no softstate for the instance, or 9630 * if another thread somewhere is trying to detach the instance. 9631 */ 9632 if (((un = ddi_get_soft_state(sd_state, instance)) == NULL) || 9633 (un->un_detach_count != 0)) { 9634 mutex_exit(&sd_detach_mutex); 9635 /* 9636 * The probe cache only needs to be cleared when open (9e) fails 9637 * with ENXIO (4238046). 9638 */ 9639 /* 9640 * un-conditionally clearing probe cache is ok with 9641 * separate sd/ssd binaries 9642 * x86 platform can be an issue with both parallel 9643 * and fibre in 1 binary 9644 */ 9645 sd_scsi_clear_probe_cache(); 9646 return (ENXIO); 9647 } 9648 9649 /* 9650 * The un_layer_count is to prevent another thread in specfs from 9651 * trying to detach the instance, which can happen when we are 9652 * called from a higher-layer driver instead of thru specfs. 9653 * This will not be needed when DDI provides a layered driver 9654 * interface that allows specfs to know that an instance is in 9655 * use by a layered driver & should not be detached. 9656 * 9657 * Note: the semantics for layered driver opens are exactly one 9658 * close for every open. 9659 */ 9660 if (otyp == OTYP_LYR) { 9661 un->un_layer_count++; 9662 } 9663 9664 /* 9665 * Keep a count of the current # of opens in progress. This is because 9666 * some layered drivers try to call us as a regular open. This can 9667 * cause problems that we cannot prevent, however by keeping this count 9668 * we can at least keep our open and detach routines from racing against 9669 * each other under such conditions. 9670 */ 9671 un->un_opens_in_progress++; 9672 mutex_exit(&sd_detach_mutex); 9673 9674 nodelay = (flag & (FNDELAY | FNONBLOCK)); 9675 part = SDPART(dev); 9676 partmask = 1 << part; 9677 9678 /* 9679 * We use a semaphore here in order to serialize 9680 * open and close requests on the device. 
9681 */ 9682 sema_p(&un->un_semoclose); 9683 9684 mutex_enter(SD_MUTEX(un)); 9685 9686 /* 9687 * All device accesses go thru sdstrategy(), where we check 9688 * the suspend status; but a scsi_poll command 9689 * bypasses sdstrategy(), so we also need to check the pm 9690 * status here. 9691 */ 9692 9693 if (!nodelay) { 9694 while ((un->un_state == SD_STATE_SUSPENDED) || 9695 (un->un_state == SD_STATE_PM_CHANGING)) { 9696 cv_wait(&un->un_suspend_cv, SD_MUTEX(un)); 9697 } 9698 9699 mutex_exit(SD_MUTEX(un)); 9700 if (sd_pm_entry(un) != DDI_SUCCESS) { 9701 rval = EIO; 9702 SD_ERROR(SD_LOG_OPEN_CLOSE, un, 9703 "sdopen: sd_pm_entry failed\n"); 9704 goto open_failed_with_pm; 9705 } 9706 mutex_enter(SD_MUTEX(un)); 9707 } 9708 9709 /* check for previous exclusive open */ 9710 SD_TRACE(SD_LOG_OPEN_CLOSE, un, "sdopen: un=%p\n", (void *)un); 9711 SD_TRACE(SD_LOG_OPEN_CLOSE, un, 9712 "sdopen: exclopen=%x, flag=%x, regopen=%x\n", 9713 un->un_exclopen, flag, un->un_ocmap.regopen[otyp]); 9714 9715 if (un->un_exclopen & (partmask)) { 9716 goto excl_open_fail; 9717 } 9718 9719 if (flag & FEXCL) { 9720 int i; 9721 if (un->un_ocmap.lyropen[part]) { 9722 goto excl_open_fail; 9723 } 9724 for (i = 0; i < (OTYPCNT - 1); i++) { 9725 if (un->un_ocmap.regopen[i] & (partmask)) { 9726 goto excl_open_fail; 9727 } 9728 } 9729 } 9730 9731 /* 9732 * Check the write permission if this is a removable media device, 9733 * NDELAY has not been set, and writable permission is requested. 9734 * 9735 * Note: If NDELAY was set and this is write-protected media the WRITE 9736 * attempt will fail with EIO as part of the I/O processing. This is a 9737 * more permissive implementation that allows the open to succeed and 9738 * WRITE attempts to fail when appropriate. 9739 */ 9740 if (un->un_f_chk_wp_open) { 9741 if ((flag & FWRITE) && (!nodelay)) { 9742 mutex_exit(SD_MUTEX(un)); 9743 /* 9744 * Defer the write-permission check for a writable 9745 * DVD drive until sdstrategy, and do not fail the open 9746 * even if FWRITE is set, as the device can be writable 9747 * depending upon the media, and the media can change 9748 * after the call to open(). 9749 */ 9750 if (un->un_f_dvdram_writable_device == FALSE) { 9751 if (ISCD(un) || sr_check_wp(dev)) { 9752 rval = EROFS; 9753 mutex_enter(SD_MUTEX(un)); 9754 SD_ERROR(SD_LOG_OPEN_CLOSE, un, "sdopen: " 9755 "write to cd or write protected media\n"); 9756 goto open_fail; 9757 } 9758 } 9759 mutex_enter(SD_MUTEX(un)); 9760 } 9761 } 9762 9763 /* 9764 * If opening in NDELAY/NONBLOCK mode, just return. 9765 * Check if disk is ready and has a valid geometry later. 9766 */ 9767 if (!nodelay) { 9768 sd_ssc_t *ssc; 9769 9770 mutex_exit(SD_MUTEX(un)); 9771 ssc = sd_ssc_init(un); 9772 rval = sd_ready_and_valid(ssc, part); 9773 sd_ssc_fini(ssc); 9774 mutex_enter(SD_MUTEX(un)); 9775 /* 9776 * Fail if device is not ready or if the number of disk 9777 * blocks is zero or negative for non-CD devices. 9778 */ 9779 9780 nblks = 0; 9781 9782 if (rval == SD_READY_VALID && (!ISCD(un))) { 9783 /* if cmlb_partinfo fails, nblks remains 0 */ 9784 mutex_exit(SD_MUTEX(un)); 9785 (void) cmlb_partinfo(un->un_cmlbhandle, part, &nblks, 9786 NULL, NULL, NULL, (void *)SD_PATH_DIRECT); 9787 mutex_enter(SD_MUTEX(un)); 9788 } 9789 9790 if ((rval != SD_READY_VALID) || 9791 (!ISCD(un) && nblks <= 0)) { 9792 rval = un->un_f_has_removable_media ?
ENXIO : EIO; 9793 SD_ERROR(SD_LOG_OPEN_CLOSE, un, "sdopen: " 9794 "device not ready or invalid disk block value\n"); 9795 goto open_fail; 9796 } 9797 #if defined(__i386) || defined(__amd64) 9798 } else { 9799 uchar_t *cp; 9800 /* 9801 * x86 requires special nodelay handling, so that p0 is 9802 * always defined and accessible. 9803 * Invalidate geometry only if device is not already open. 9804 */ 9805 cp = &un->un_ocmap.chkd[0]; 9806 while (cp < &un->un_ocmap.chkd[OCSIZE]) { 9807 if (*cp != (uchar_t)0) { 9808 break; 9809 } 9810 cp++; 9811 } 9812 if (cp == &un->un_ocmap.chkd[OCSIZE]) { 9813 mutex_exit(SD_MUTEX(un)); 9814 cmlb_invalidate(un->un_cmlbhandle, 9815 (void *)SD_PATH_DIRECT); 9816 mutex_enter(SD_MUTEX(un)); 9817 } 9818 9819 #endif 9820 } 9821 9822 if (otyp == OTYP_LYR) { 9823 un->un_ocmap.lyropen[part]++; 9824 } else { 9825 un->un_ocmap.regopen[otyp] |= partmask; 9826 } 9827 9828 /* Set up open and exclusive open flags */ 9829 if (flag & FEXCL) { 9830 un->un_exclopen |= (partmask); 9831 } 9832 9833 /* 9834 * If the lun is EFI labeled and lun capacity is greater than the 9835 * capacity contained in the label, log a sys-event to notify the 9836 * interested module. 9837 * To avoid an infinite loop of logging sys-event, we only log the 9838 * event when the lun is not opened in NDELAY mode. The event handler 9839 * should open the lun in NDELAY mode. 9840 */ 9841 if (!(flag & FNDELAY)) { 9842 mutex_exit(SD_MUTEX(un)); 9843 if (cmlb_efi_label_capacity(un->un_cmlbhandle, &label_cap, 9844 (void*)SD_PATH_DIRECT) == 0) { 9845 mutex_enter(SD_MUTEX(un)); 9846 if (un->un_f_blockcount_is_valid && 9847 un->un_blockcount > label_cap) { 9848 mutex_exit(SD_MUTEX(un)); 9849 sd_log_lun_expansion_event(un, 9850 (nodelay ? KM_NOSLEEP : KM_SLEEP)); 9851 mutex_enter(SD_MUTEX(un)); 9852 } 9853 } else { 9854 mutex_enter(SD_MUTEX(un)); 9855 } 9856 } 9857 9858 SD_TRACE(SD_LOG_OPEN_CLOSE, un, "sdopen: " 9859 "open of part %d type %d\n", part, otyp); 9860 9861 mutex_exit(SD_MUTEX(un)); 9862 if (!nodelay) { 9863 sd_pm_exit(un); 9864 } 9865 9866 sema_v(&un->un_semoclose); 9867 9868 mutex_enter(&sd_detach_mutex); 9869 un->un_opens_in_progress--; 9870 mutex_exit(&sd_detach_mutex); 9871 9872 SD_TRACE(SD_LOG_OPEN_CLOSE, un, "sdopen: exit success\n"); 9873 return (DDI_SUCCESS); 9874 9875 excl_open_fail: 9876 SD_ERROR(SD_LOG_OPEN_CLOSE, un, "sdopen: fail exclusive open\n"); 9877 rval = EBUSY; 9878 9879 open_fail: 9880 mutex_exit(SD_MUTEX(un)); 9881 9882 /* 9883 * On a failed open we must exit the pm management. 9884 */ 9885 if (!nodelay) { 9886 sd_pm_exit(un); 9887 } 9888 open_failed_with_pm: 9889 sema_v(&un->un_semoclose); 9890 9891 mutex_enter(&sd_detach_mutex); 9892 un->un_opens_in_progress--; 9893 if (otyp == OTYP_LYR) { 9894 un->un_layer_count--; 9895 } 9896 mutex_exit(&sd_detach_mutex); 9897 9898 return (rval); 9899 } 9900 9901 9902 /* 9903 * Function: sdclose 9904 * 9905 * Description: Driver's close(9e) entry point function. 
9906 * 9907 * Arguments: dev - device number 9908 * flag - file status flag, informational only 9909 * otyp - close type (OTYP_BLK, OTYP_CHR, OTYP_LYR) 9910 * cred_p - user credential pointer 9911 * 9912 * Return Code: ENXIO 9913 * 9914 * Context: Kernel thread context 9915 */ 9916 /* ARGSUSED */ 9917 static int 9918 sdclose(dev_t dev, int flag, int otyp, cred_t *cred_p) 9919 { 9920 struct sd_lun *un; 9921 uchar_t *cp; 9922 int part; 9923 int nodelay; 9924 int rval = 0; 9925 9926 /* Validate the open type */ 9927 if (otyp >= OTYPCNT) { 9928 return (ENXIO); 9929 } 9930 9931 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 9932 return (ENXIO); 9933 } 9934 9935 part = SDPART(dev); 9936 nodelay = flag & (FNDELAY | FNONBLOCK); 9937 9938 SD_TRACE(SD_LOG_OPEN_CLOSE, un, 9939 "sdclose: close of part %d type %d\n", part, otyp); 9940 9941 /* 9942 * We use a semaphore here in order to serialize 9943 * open and close requests on the device. 9944 */ 9945 sema_p(&un->un_semoclose); 9946 9947 mutex_enter(SD_MUTEX(un)); 9948 9949 /* Don't proceed if power is being changed. */ 9950 while (un->un_state == SD_STATE_PM_CHANGING) { 9951 cv_wait(&un->un_suspend_cv, SD_MUTEX(un)); 9952 } 9953 9954 if (un->un_exclopen & (1 << part)) { 9955 un->un_exclopen &= ~(1 << part); 9956 } 9957 9958 /* Update the open partition map */ 9959 if (otyp == OTYP_LYR) { 9960 un->un_ocmap.lyropen[part] -= 1; 9961 } else { 9962 un->un_ocmap.regopen[otyp] &= ~(1 << part); 9963 } 9964 9965 cp = &un->un_ocmap.chkd[0]; 9966 while (cp < &un->un_ocmap.chkd[OCSIZE]) { 9967 if (*cp != NULL) { 9968 break; 9969 } 9970 cp++; 9971 } 9972 9973 if (cp == &un->un_ocmap.chkd[OCSIZE]) { 9974 SD_TRACE(SD_LOG_OPEN_CLOSE, un, "sdclose: last close\n"); 9975 9976 /* 9977 * We avoid persistence upon the last close, and set 9978 * the throttle back to the maximum. 9979 */ 9980 un->un_throttle = un->un_saved_throttle; 9981 9982 if (un->un_state == SD_STATE_OFFLINE) { 9983 if (un->un_f_is_fibre == FALSE) { 9984 scsi_log(SD_DEVINFO(un), sd_label, 9985 CE_WARN, "offline\n"); 9986 } 9987 mutex_exit(SD_MUTEX(un)); 9988 cmlb_invalidate(un->un_cmlbhandle, 9989 (void *)SD_PATH_DIRECT); 9990 mutex_enter(SD_MUTEX(un)); 9991 9992 } else { 9993 /* 9994 * Flush any outstanding writes in NVRAM cache. 9995 * Note: SYNCHRONIZE CACHE is an optional SCSI-2 9996 * cmd; it may not work for non-Pluto devices. 9997 * SYNCHRONIZE CACHE is not required for removables, 9998 * except DVD-RAM drives. 9999 * 10000 * Also note: because SYNCHRONIZE CACHE is currently 10001 * the only command issued here that requires that the 10002 * drive be powered up, only do the power up before 10003 * sending the Sync Cache command. If additional 10004 * commands are added which require a powered up 10005 * drive, the following sequence may have to change. 10006 * 10007 * And finally, note that parallel SCSI on SPARC 10008 * only issues a Sync Cache to DVD-RAM, a newly 10009 * supported device.
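 *
 * (Editor's summary of the preprocessor logic just below: on x86,
 * the SYNCHRONIZE CACHE is issued when the drive both supports a
 * cache flush and currently requires one, or when it is a writable
 * DVD-RAM device; on other platforms only the DVD-RAM test
 * applies.)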
10010 */ 10011 #if defined(__i386) || defined(__amd64) 10012 if ((un->un_f_sync_cache_supported && 10013 un->un_f_sync_cache_required) || 10014 un->un_f_dvdram_writable_device == TRUE) { 10015 #else 10016 if (un->un_f_dvdram_writable_device == TRUE) { 10017 #endif 10018 mutex_exit(SD_MUTEX(un)); 10019 if (sd_pm_entry(un) == DDI_SUCCESS) { 10020 rval = 10021 sd_send_scsi_SYNCHRONIZE_CACHE(un, 10022 NULL); 10023 /* ignore error if not supported */ 10024 if (rval == ENOTSUP) { 10025 rval = 0; 10026 } else if (rval != 0) { 10027 rval = EIO; 10028 } 10029 sd_pm_exit(un); 10030 } else { 10031 rval = EIO; 10032 } 10033 mutex_enter(SD_MUTEX(un)); 10034 } 10035 10036 /* 10037 * For devices that support DOOR_LOCK, send an ALLOW 10038 * MEDIA REMOVAL command, but don't get upset if it 10039 * fails. We need to raise the power of the drive before 10040 * we can call sd_send_scsi_DOORLOCK(). 10041 */ 10042 if (un->un_f_doorlock_supported) { 10043 mutex_exit(SD_MUTEX(un)); 10044 if (sd_pm_entry(un) == DDI_SUCCESS) { 10045 sd_ssc_t *ssc; 10046 10047 ssc = sd_ssc_init(un); 10048 rval = sd_send_scsi_DOORLOCK(ssc, 10049 SD_REMOVAL_ALLOW, SD_PATH_DIRECT); 10050 if (rval != 0) 10051 sd_ssc_assessment(ssc, 10052 SD_FMT_IGNORE); 10053 sd_ssc_fini(ssc); 10054 10055 sd_pm_exit(un); 10056 if (ISCD(un) && (rval != 0) && 10057 (nodelay != 0)) { 10058 rval = ENXIO; 10059 } 10060 } else { 10061 rval = EIO; 10062 } 10063 mutex_enter(SD_MUTEX(un)); 10064 } 10065 10066 /* 10067 * If a device has removable media, invalidate all 10068 * parameters related to media, such as geometry, 10069 * blocksize, and blockcount. 10070 */ 10071 if (un->un_f_has_removable_media) { 10072 sr_ejected(un); 10073 } 10074 10075 /* 10076 * Destroy the cache (if it exists) which was 10077 * allocated for the write maps, since this is 10078 * the last close for this media. 10079 */ 10080 if (un->un_wm_cache) { 10081 /* 10082 * Check if there are pending commands; 10083 * if there are, give a warning and 10084 * do not destroy the cache. 10085 */ 10086 if (un->un_ncmds_in_driver > 0) { 10087 scsi_log(SD_DEVINFO(un), 10088 sd_label, CE_WARN, 10089 "Unable to clean up memory " 10090 "because of pending I/O\n"); 10091 } else { 10092 kmem_cache_destroy( 10093 un->un_wm_cache); 10094 un->un_wm_cache = NULL; 10095 } 10096 } 10097 } 10098 } 10099 10100 mutex_exit(SD_MUTEX(un)); 10101 sema_v(&un->un_semoclose); 10102 10103 if (otyp == OTYP_LYR) { 10104 mutex_enter(&sd_detach_mutex); 10105 /* 10106 * The detach routine may run when the layer count 10107 * drops to zero. 10108 */ 10109 un->un_layer_count--; 10110 mutex_exit(&sd_detach_mutex); 10111 } 10112 10113 return (rval); 10114 } 10115 10116 10117 /* 10118 * Function: sd_ready_and_valid 10119 * 10120 * Description: Test if device is ready and has a valid geometry. 10121 * 10122 * Arguments: ssc - sd_ssc_t which contains un, the driver soft state 10123 * (unit) structure; part - the partition number to check 10124 * 10125 * Return Code: SD_READY_VALID ready and valid label 10126 * SD_NOT_READY_VALID not ready, no label 10127 * SD_RESERVED_BY_OTHERS reservation conflict 10128 * 10129 * Context: Never called at interrupt context.
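 *
 * (Editor's usage sketch, mirroring the call sites in this file:
 *
 *	ssc = sd_ssc_init(un);
 *	rval = sd_ready_and_valid(ssc, part);
 *	sd_ssc_fini(ssc);
 *
 * with SD_MUTEX(un) not held across the call, per the ASSERT at
 * the top of the routine.)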
10130 */ 10131 10132 static int 10133 sd_ready_and_valid(sd_ssc_t *ssc, int part) 10134 { 10135 struct sd_errstats *stp; 10136 uint64_t capacity; 10137 uint_t lbasize; 10138 int rval = SD_READY_VALID; 10139 char name_str[48]; 10140 int is_valid; 10141 struct sd_lun *un; 10142 int status; 10143 10144 ASSERT(ssc != NULL); 10145 un = ssc->ssc_un; 10146 ASSERT(un != NULL); 10147 ASSERT(!mutex_owned(SD_MUTEX(un))); 10148 10149 mutex_enter(SD_MUTEX(un)); 10150 /* 10151 * If a device has removable media, we must also check whether the 10152 * media is ready when checking whether this device is ready and valid. 10153 */ 10154 if (un->un_f_has_removable_media) { 10155 mutex_exit(SD_MUTEX(un)); 10156 status = sd_send_scsi_TEST_UNIT_READY(ssc, 0); 10157 10158 if (status != 0) { 10159 rval = SD_NOT_READY_VALID; 10160 mutex_enter(SD_MUTEX(un)); 10161 10162 /* Ignore all failed status for removable media */ 10163 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 10164 10165 goto done; 10166 } 10167 10168 is_valid = SD_IS_VALID_LABEL(un); 10169 mutex_enter(SD_MUTEX(un)); 10170 if (!is_valid || 10171 (un->un_f_blockcount_is_valid == FALSE) || 10172 (un->un_f_tgt_blocksize_is_valid == FALSE)) { 10173 10174 /* capacity has to be read on every open. */ 10175 mutex_exit(SD_MUTEX(un)); 10176 status = sd_send_scsi_READ_CAPACITY(ssc, &capacity, 10177 &lbasize, SD_PATH_DIRECT); 10178 10179 if (status != 0) { 10180 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 10181 10182 cmlb_invalidate(un->un_cmlbhandle, 10183 (void *)SD_PATH_DIRECT); 10184 mutex_enter(SD_MUTEX(un)); 10185 rval = SD_NOT_READY_VALID; 10186 10187 goto done; 10188 } else { 10189 mutex_enter(SD_MUTEX(un)); 10190 sd_update_block_info(un, lbasize, capacity); 10191 } 10192 } 10193 10194 /* 10195 * Check if the media in the device is writable or not. 10196 */ 10197 if (!is_valid && ISCD(un)) { 10198 sd_check_for_writable_cd(ssc, SD_PATH_DIRECT); 10199 } 10200 10201 } else { 10202 /* 10203 * Do a test unit ready to clear any unit attention from non-CD 10204 * devices. 10205 */ 10206 mutex_exit(SD_MUTEX(un)); 10207 10208 status = sd_send_scsi_TEST_UNIT_READY(ssc, 0); 10209 if (status != 0) { 10210 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 10211 } 10212 10213 mutex_enter(SD_MUTEX(un)); 10214 } 10215 10216 10217 /* 10218 * If this is a non-512-block device, allocate space for 10219 * the wmap cache. This is being done here since every time 10220 * the media is changed this routine will be called and the 10221 * block size is a function of media rather than device. 10222 */ 10223 if (un->un_f_non_devbsize_supported && NOT_DEVBSIZE(un)) { 10224 if (!(un->un_wm_cache)) { 10225 (void) snprintf(name_str, sizeof (name_str), 10226 "%s%d_cache", 10227 ddi_driver_name(SD_DEVINFO(un)), 10228 ddi_get_instance(SD_DEVINFO(un))); 10229 un->un_wm_cache = kmem_cache_create( 10230 name_str, sizeof (struct sd_w_map), 10231 8, sd_wm_cache_constructor, 10232 sd_wm_cache_destructor, NULL, 10233 (void *)un, NULL, 0); 10234 if (!(un->un_wm_cache)) { 10235 rval = ENOMEM; 10236 goto done; 10237 } 10238 } 10239 } 10240 10241 if (un->un_state == SD_STATE_NORMAL) { 10242 /* 10243 * If the target is not yet ready here (defined by a TUR 10244 * failure), invalidate the geometry and print an 'offline' 10245 * message. This is a legacy message, as the state of the 10246 * target is not actually changed to SD_STATE_OFFLINE. 10247 * 10248 * If the TUR fails for EACCES (Reservation Conflict), 10249 * SD_RESERVED_BY_OTHERS will be returned to indicate 10250 * reservation conflict.
If the TUR fails for other 10251 * reasons, SD_NOT_READY_VALID will be returned. 10252 */ 10253 int err; 10254 10255 mutex_exit(SD_MUTEX(un)); 10256 err = sd_send_scsi_TEST_UNIT_READY(ssc, 0); 10257 mutex_enter(SD_MUTEX(un)); 10258 10259 if (err != 0) { 10260 mutex_exit(SD_MUTEX(un)); 10261 cmlb_invalidate(un->un_cmlbhandle, 10262 (void *)SD_PATH_DIRECT); 10263 mutex_enter(SD_MUTEX(un)); 10264 if (err == EACCES) { 10265 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 10266 "reservation conflict\n"); 10267 rval = SD_RESERVED_BY_OTHERS; 10268 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 10269 } else { 10270 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 10271 "drive offline\n"); 10272 rval = SD_NOT_READY_VALID; 10273 sd_ssc_assessment(ssc, SD_FMT_STATUS_CHECK); 10274 } 10275 goto done; 10276 } 10277 } 10278 10279 if (un->un_f_format_in_progress == FALSE) { 10280 mutex_exit(SD_MUTEX(un)); 10281 10282 if (cmlb_partinfo(un->un_cmlbhandle, part, NULL, NULL, NULL, 10283 NULL, (void *) SD_PATH_DIRECT) != 0) { 10284 rval = SD_NOT_READY_VALID; 10285 mutex_enter(SD_MUTEX(un)); 10286 10287 goto done; 10288 } 10289 if (un->un_f_pkstats_enabled) { 10290 sd_set_pstats(un); 10291 SD_TRACE(SD_LOG_IO_PARTITION, un, 10292 "sd_ready_and_valid: un:0x%p pstats created and " 10293 "set\n", un); 10294 } 10295 mutex_enter(SD_MUTEX(un)); 10296 } 10297 10298 /* 10299 * If this device supports DOOR_LOCK command, try and send 10300 * this command to PREVENT MEDIA REMOVAL, but don't get upset 10301 * if it fails. For a CD, however, it is an error 10302 */ 10303 if (un->un_f_doorlock_supported) { 10304 mutex_exit(SD_MUTEX(un)); 10305 status = sd_send_scsi_DOORLOCK(ssc, SD_REMOVAL_PREVENT, 10306 SD_PATH_DIRECT); 10307 10308 if ((status != 0) && ISCD(un)) { 10309 rval = SD_NOT_READY_VALID; 10310 mutex_enter(SD_MUTEX(un)); 10311 10312 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 10313 10314 goto done; 10315 } else if (status != 0) 10316 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 10317 mutex_enter(SD_MUTEX(un)); 10318 } 10319 10320 /* The state has changed, inform the media watch routines */ 10321 un->un_mediastate = DKIO_INSERTED; 10322 cv_broadcast(&un->un_state_cv); 10323 rval = SD_READY_VALID; 10324 10325 done: 10326 10327 /* 10328 * Initialize the capacity kstat value, if no media previously 10329 * (capacity kstat is 0) and a media has been inserted 10330 * (un_blockcount > 0). 10331 */ 10332 if (un->un_errstats != NULL) { 10333 stp = (struct sd_errstats *)un->un_errstats->ks_data; 10334 if ((stp->sd_capacity.value.ui64 == 0) && 10335 (un->un_f_blockcount_is_valid == TRUE)) { 10336 stp->sd_capacity.value.ui64 = 10337 (uint64_t)((uint64_t)un->un_blockcount * 10338 un->un_sys_blocksize); 10339 } 10340 } 10341 10342 mutex_exit(SD_MUTEX(un)); 10343 return (rval); 10344 } 10345 10346 10347 /* 10348 * Function: sdmin 10349 * 10350 * Description: Routine to limit the size of a data transfer. Used in 10351 * conjunction with physio(9F). 10352 * 10353 * Arguments: bp - pointer to the indicated buf(9S) struct. 10354 * 10355 * Context: Kernel thread context. 10356 */ 10357 10358 static void 10359 sdmin(struct buf *bp) 10360 { 10361 struct sd_lun *un; 10362 int instance; 10363 10364 instance = SDUNIT(bp->b_edev); 10365 10366 un = ddi_get_soft_state(sd_state, instance); 10367 ASSERT(un != NULL); 10368 10369 if (bp->b_bcount > un->un_max_xfer_size) { 10370 bp->b_bcount = un->un_max_xfer_size; 10371 } 10372 } 10373 10374 10375 /* 10376 * Function: sdread 10377 * 10378 * Description: Driver's read(9e) entry point function. 
10379 * 10380 * Arguments: dev - device number 10381 * uio - structure pointer describing where data is to be stored 10382 * in user's space 10383 * cred_p - user credential pointer 10384 * 10385 * Return Code: ENXIO 10386 * EIO 10387 * EINVAL 10388 * value returned by physio 10389 * 10390 * Context: Kernel thread context. 10391 */ 10392 /* ARGSUSED */ 10393 static int 10394 sdread(dev_t dev, struct uio *uio, cred_t *cred_p) 10395 { 10396 struct sd_lun *un = NULL; 10397 int secmask; 10398 int err = 0; 10399 sd_ssc_t *ssc; 10400 10401 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 10402 return (ENXIO); 10403 } 10404 10405 ASSERT(!mutex_owned(SD_MUTEX(un))); 10406 10407 10408 if (!SD_IS_VALID_LABEL(un) && !ISCD(un)) { 10409 mutex_enter(SD_MUTEX(un)); 10410 /* 10411 * Because the call to sd_ready_and_valid will issue I/O, we 10412 * must wait here if either the device is suspended or 10413 * its power level is changing. 10414 */ 10415 while ((un->un_state == SD_STATE_SUSPENDED) || 10416 (un->un_state == SD_STATE_PM_CHANGING)) { 10417 cv_wait(&un->un_suspend_cv, SD_MUTEX(un)); 10418 } 10419 un->un_ncmds_in_driver++; 10420 mutex_exit(SD_MUTEX(un)); 10421 10422 /* Initialize sd_ssc_t for internal uscsi commands */ 10423 ssc = sd_ssc_init(un); 10424 if ((sd_ready_and_valid(ssc, SDPART(dev))) != SD_READY_VALID) { 10425 err = EIO; 10426 } else { 10427 err = 0; 10428 } 10429 sd_ssc_fini(ssc); 10430 10431 mutex_enter(SD_MUTEX(un)); 10432 un->un_ncmds_in_driver--; 10433 ASSERT(un->un_ncmds_in_driver >= 0); 10434 mutex_exit(SD_MUTEX(un)); 10435 if (err != 0) 10436 return (err); 10437 } 10438 10439 /* 10440 * Read requests are restricted to multiples of the system block size. 10441 */ 10442 secmask = un->un_sys_blocksize - 1; 10443 10444 if (uio->uio_loffset & ((offset_t)(secmask))) { 10445 SD_ERROR(SD_LOG_READ_WRITE, un, 10446 "sdread: file offset not modulo %d\n", 10447 un->un_sys_blocksize); 10448 err = EINVAL; 10449 } else if (uio->uio_iov->iov_len & (secmask)) { 10450 SD_ERROR(SD_LOG_READ_WRITE, un, 10451 "sdread: transfer length not modulo %d\n", 10452 un->un_sys_blocksize); 10453 err = EINVAL; 10454 } else { 10455 err = physio(sdstrategy, NULL, dev, B_READ, sdmin, uio); 10456 } 10457 10458 return (err); 10459 } 10460 10461 10462 /* 10463 * Function: sdwrite 10464 * 10465 * Description: Driver's write(9e) entry point function. 10466 * 10467 * Arguments: dev - device number 10468 * uio - structure pointer describing where data is stored in 10469 * user's space 10470 * cred_p - user credential pointer 10471 * 10472 * Return Code: ENXIO 10473 * EIO 10474 * EINVAL 10475 * value returned by physio 10476 * 10477 * Context: Kernel thread context. 10478 */ 10479 /* ARGSUSED */ 10480 static int 10481 sdwrite(dev_t dev, struct uio *uio, cred_t *cred_p) 10482 { 10483 struct sd_lun *un = NULL; 10484 int secmask; 10485 int err = 0; 10486 sd_ssc_t *ssc; 10487 10488 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 10489 return (ENXIO); 10490 } 10491 10492 ASSERT(!mutex_owned(SD_MUTEX(un))); 10493 10494 if (!SD_IS_VALID_LABEL(un) && !ISCD(un)) { 10495 mutex_enter(SD_MUTEX(un)); 10496 /* 10497 * Because the call to sd_ready_and_valid will issue I/O, we 10498 * must wait here if either the device is suspended or 10499 * its power level is changing.
10500 */ 10501 while ((un->un_state == SD_STATE_SUSPENDED) || 10502 (un->un_state == SD_STATE_PM_CHANGING)) { 10503 cv_wait(&un->un_suspend_cv, SD_MUTEX(un)); 10504 } 10505 un->un_ncmds_in_driver++; 10506 mutex_exit(SD_MUTEX(un)); 10507 10508 /* Initialize sd_ssc_t for internal uscsi commands */ 10509 ssc = sd_ssc_init(un); 10510 if ((sd_ready_and_valid(ssc, SDPART(dev))) != SD_READY_VALID) { 10511 err = EIO; 10512 } else { 10513 err = 0; 10514 } 10515 sd_ssc_fini(ssc); 10516 10517 mutex_enter(SD_MUTEX(un)); 10518 un->un_ncmds_in_driver--; 10519 ASSERT(un->un_ncmds_in_driver >= 0); 10520 mutex_exit(SD_MUTEX(un)); 10521 if (err != 0) 10522 return (err); 10523 } 10524 10525 /* 10526 * Write requests are restricted to multiples of the system block size. 10527 */ 10528 secmask = un->un_sys_blocksize - 1; 10529 10530 if (uio->uio_loffset & ((offset_t)(secmask))) { 10531 SD_ERROR(SD_LOG_READ_WRITE, un, 10532 "sdwrite: file offset not modulo %d\n", 10533 un->un_sys_blocksize); 10534 err = EINVAL; 10535 } else if (uio->uio_iov->iov_len & (secmask)) { 10536 SD_ERROR(SD_LOG_READ_WRITE, un, 10537 "sdwrite: transfer length not modulo %d\n", 10538 un->un_sys_blocksize); 10539 err = EINVAL; 10540 } else { 10541 err = physio(sdstrategy, NULL, dev, B_WRITE, sdmin, uio); 10542 } 10543 10544 return (err); 10545 } 10546 10547 10548 /* 10549 * Function: sdaread 10550 * 10551 * Description: Driver's aread(9e) entry point function. 10552 * 10553 * Arguments: dev - device number 10554 * aio - structure pointer describing where data is to be stored 10555 * cred_p - user credential pointer 10556 * 10557 * Return Code: ENXIO 10558 * EIO 10559 * EINVAL 10560 * value returned by aphysio 10561 * 10562 * Context: Kernel thread context. 10563 */ 10564 /* ARGSUSED */ 10565 static int 10566 sdaread(dev_t dev, struct aio_req *aio, cred_t *cred_p) 10567 { 10568 struct sd_lun *un = NULL; 10569 struct uio *uio = aio->aio_uio; 10570 int secmask; 10571 int err = 0; 10572 sd_ssc_t *ssc; 10573 10574 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 10575 return (ENXIO); 10576 } 10577 10578 ASSERT(!mutex_owned(SD_MUTEX(un))); 10579 10580 if (!SD_IS_VALID_LABEL(un) && !ISCD(un)) { 10581 mutex_enter(SD_MUTEX(un)); 10582 /* 10583 * Because the call to sd_ready_and_valid will issue I/O we 10584 * must wait here if either the device is suspended or 10585 * if it's power level is changing. 10586 */ 10587 while ((un->un_state == SD_STATE_SUSPENDED) || 10588 (un->un_state == SD_STATE_PM_CHANGING)) { 10589 cv_wait(&un->un_suspend_cv, SD_MUTEX(un)); 10590 } 10591 un->un_ncmds_in_driver++; 10592 mutex_exit(SD_MUTEX(un)); 10593 10594 /* Initialize sd_ssc_t for internal uscsi commands */ 10595 ssc = sd_ssc_init(un); 10596 if ((sd_ready_and_valid(ssc, SDPART(dev))) != SD_READY_VALID) { 10597 err = EIO; 10598 } else { 10599 err = 0; 10600 } 10601 sd_ssc_fini(ssc); 10602 10603 mutex_enter(SD_MUTEX(un)); 10604 un->un_ncmds_in_driver--; 10605 ASSERT(un->un_ncmds_in_driver >= 0); 10606 mutex_exit(SD_MUTEX(un)); 10607 if (err != 0) 10608 return (err); 10609 } 10610 10611 /* 10612 * Read requests are restricted to multiples of the system block size. 
10613 */ 10614 secmask = un->un_sys_blocksize - 1; 10615 10616 if (uio->uio_loffset & ((offset_t)(secmask))) { 10617 SD_ERROR(SD_LOG_READ_WRITE, un, 10618 "sdaread: file offset not modulo %d\n", 10619 un->un_sys_blocksize); 10620 err = EINVAL; 10621 } else if (uio->uio_iov->iov_len & (secmask)) { 10622 SD_ERROR(SD_LOG_READ_WRITE, un, 10623 "sdaread: transfer length not modulo %d\n", 10624 un->un_sys_blocksize); 10625 err = EINVAL; 10626 } else { 10627 err = aphysio(sdstrategy, anocancel, dev, B_READ, sdmin, aio); 10628 } 10629 10630 return (err); 10631 } 10632 10633 10634 /* 10635 * Function: sdawrite 10636 * 10637 * Description: Driver's awrite(9e) entry point function. 10638 * 10639 * Arguments: dev - device number 10640 * aio - structure pointer describing where data is stored 10641 * cred_p - user credential pointer 10642 * 10643 * Return Code: ENXIO 10644 * EIO 10645 * EINVAL 10646 * value returned by aphysio 10647 * 10648 * Context: Kernel thread context. 10649 */ 10650 /* ARGSUSED */ 10651 static int 10652 sdawrite(dev_t dev, struct aio_req *aio, cred_t *cred_p) 10653 { 10654 struct sd_lun *un = NULL; 10655 struct uio *uio = aio->aio_uio; 10656 int secmask; 10657 int err = 0; 10658 sd_ssc_t *ssc; 10659 10660 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 10661 return (ENXIO); 10662 } 10663 10664 ASSERT(!mutex_owned(SD_MUTEX(un))); 10665 10666 if (!SD_IS_VALID_LABEL(un) && !ISCD(un)) { 10667 mutex_enter(SD_MUTEX(un)); 10668 /* 10669 * Because the call to sd_ready_and_valid will issue I/O, we 10670 * must wait here if either the device is suspended or 10671 * its power level is changing. 10672 */ 10673 while ((un->un_state == SD_STATE_SUSPENDED) || 10674 (un->un_state == SD_STATE_PM_CHANGING)) { 10675 cv_wait(&un->un_suspend_cv, SD_MUTEX(un)); 10676 } 10677 un->un_ncmds_in_driver++; 10678 mutex_exit(SD_MUTEX(un)); 10679 10680 /* Initialize sd_ssc_t for internal uscsi commands */ 10681 ssc = sd_ssc_init(un); 10682 if ((sd_ready_and_valid(ssc, SDPART(dev))) != SD_READY_VALID) { 10683 err = EIO; 10684 } else { 10685 err = 0; 10686 } 10687 sd_ssc_fini(ssc); 10688 10689 mutex_enter(SD_MUTEX(un)); 10690 un->un_ncmds_in_driver--; 10691 ASSERT(un->un_ncmds_in_driver >= 0); 10692 mutex_exit(SD_MUTEX(un)); 10693 if (err != 0) 10694 return (err); 10695 } 10696 10697 /* 10698 * Write requests are restricted to multiples of the system block size.
10699 */ 10700 secmask = un->un_sys_blocksize - 1; 10701 10702 if (uio->uio_loffset & ((offset_t)(secmask))) { 10703 SD_ERROR(SD_LOG_READ_WRITE, un, 10704 "sdawrite: file offset not modulo %d\n", 10705 un->un_sys_blocksize); 10706 err = EINVAL; 10707 } else if (uio->uio_iov->iov_len & (secmask)) { 10708 SD_ERROR(SD_LOG_READ_WRITE, un, 10709 "sdawrite: transfer length not modulo %d\n", 10710 un->un_sys_blocksize); 10711 err = EINVAL; 10712 } else { 10713 err = aphysio(sdstrategy, anocancel, dev, B_WRITE, sdmin, aio); 10714 } 10715 10716 return (err); 10717 } 10718 10719 10720 10721 10722 10723 /* 10724 * Driver IO processing follows the following sequence: 10725 * 10726 * sdioctl(9E) sdstrategy(9E) biodone(9F) 10727 * | | ^ 10728 * v v | 10729 * sd_send_scsi_cmd() ddi_xbuf_qstrategy() +-------------------+ 10730 * | | | | 10731 * v | | | 10732 * sd_uscsi_strategy() sd_xbuf_strategy() sd_buf_iodone() sd_uscsi_iodone() 10733 * | | ^ ^ 10734 * v v | | 10735 * SD_BEGIN_IOSTART() SD_BEGIN_IOSTART() | | 10736 * | | | | 10737 * +---+ | +------------+ +-------+ 10738 * | | | | 10739 * | SD_NEXT_IOSTART()| SD_NEXT_IODONE()| | 10740 * | v | | 10741 * | sd_mapblockaddr_iostart() sd_mapblockaddr_iodone() | 10742 * | | ^ | 10743 * | SD_NEXT_IOSTART()| SD_NEXT_IODONE()| | 10744 * | v | | 10745 * | sd_mapblocksize_iostart() sd_mapblocksize_iodone() | 10746 * | | ^ | 10747 * | SD_NEXT_IOSTART()| SD_NEXT_IODONE()| | 10748 * | v | | 10749 * | sd_checksum_iostart() sd_checksum_iodone() | 10750 * | | ^ | 10751 * +-> SD_NEXT_IOSTART()| SD_NEXT_IODONE()+------------->+ 10752 * | v | | 10753 * | sd_pm_iostart() sd_pm_iodone() | 10754 * | | ^ | 10755 * | | | | 10756 * +-> SD_NEXT_IOSTART()| SD_BEGIN_IODONE()--+--------------+ 10757 * | ^ 10758 * v | 10759 * sd_core_iostart() | 10760 * | | 10761 * | +------>(*destroypkt)() 10762 * +-> sd_start_cmds() <-+ | | 10763 * | | | v 10764 * | | | scsi_destroy_pkt(9F) 10765 * | | | 10766 * +->(*initpkt)() +- sdintr() 10767 * | | | | 10768 * | +-> scsi_init_pkt(9F) | +-> sd_handle_xxx() 10769 * | +-> scsi_setup_cdb(9F) | 10770 * | | 10771 * +--> scsi_transport(9F) | 10772 * | | 10773 * +----> SCSA ---->+ 10774 * 10775 * 10776 * This code is based upon the following presumptions: 10777 * 10778 * - iostart and iodone functions operate on buf(9S) structures. These 10779 * functions perform the necessary operations on the buf(9S) and pass 10780 * them along to the next function in the chain by using the macros 10781 * SD_NEXT_IOSTART() (for iostart side functions) and SD_NEXT_IODONE() 10782 * (for iodone side functions). 10783 * 10784 * - The iostart side functions may sleep. The iodone side functions 10785 * are called under interrupt context and may NOT sleep. Therefore 10786 * iodone side functions also may not call iostart side functions. 10787 * (NOTE: iostart side functions should NOT sleep for memory, as 10788 * this could result in deadlock.) 10789 * 10790 * - An iostart side function may call its corresponding iodone side 10791 * function directly (if necessary). 10792 * 10793 * - In the event of an error, an iostart side function can return a buf(9S) 10794 * to its caller by calling SD_BEGIN_IODONE() (after setting B_ERROR and 10795 * b_error in the usual way of course). 10796 * 10797 * - The taskq mechanism may be used by the iodone side functions to dispatch 10798 * requests to the iostart side functions. The iostart side functions in 10799 * this case would be called under the context of a taskq thread, so it's 10800 * OK for them to block/sleep/spin in this case. 
 *
 * - iostart side functions may allocate "shadow" buf(9S) structs and
 *   pass them along to the next function in the chain. The corresponding
 *   iodone side functions must coalesce the "shadow" bufs and return
 *   the "original" buf to the next higher layer.
 *
 * - The b_private field of the buf(9S) struct holds a pointer to
 *   an sd_xbuf struct, which contains information needed to
 *   construct the scsi_pkt for the command.
 *
 * - The SD_MUTEX(un) is NOT held across calls to the next layer. Each
 *   layer must acquire & release the SD_MUTEX(un) as needed.
 */


/*
 * Create a taskq for all targets in the system. This is created at
 * _init(9E) and destroyed at _fini(9E).
 *
 * Note: here we set the minalloc to a reasonably high number to ensure that
 * we will have an adequate supply of task entries available at interrupt time.
 * This is used in conjunction with the TASKQ_PREPOPULATE flag in
 * sd_taskq_create(). Since we do not want to sleep for allocations at
 * interrupt time, set maxalloc equal to minalloc. That way we will just fail
 * the command if we ever try to dispatch more than SD_TASKQ_MAXALLOC taskq
 * requests at any one instant in time.
 */
#define	SD_TASKQ_NUMTHREADS	8
#define	SD_TASKQ_MINALLOC	256
#define	SD_TASKQ_MAXALLOC	256

static taskq_t	*sd_tq = NULL;
_NOTE(SCHEME_PROTECTS_DATA("stable data", sd_tq))

static int	sd_taskq_minalloc = SD_TASKQ_MINALLOC;
static int	sd_taskq_maxalloc = SD_TASKQ_MAXALLOC;

/*
 * The following task queue is created for the write part of
 * read-modify-write on devices with a non-512-byte block size.
 * Limit the number of threads to 1 for now; this currently applies
 * only to DVD-RAM and MO drives, for which performance is not the
 * main criterion at this stage.
 * Note: it remains to be explored whether a single taskq could be
 * used for both in the future.
 */
#define	SD_WMR_TASKQ_NUMTHREADS	1
static taskq_t	*sd_wmr_tq = NULL;
_NOTE(SCHEME_PROTECTS_DATA("stable data", sd_wmr_tq))

/*
 * Function: sd_taskq_create
 *
 * Description: Create taskq thread(s) and preallocate task entries
 *
 * Return Code: None; the allocated taskqs are stored in the globals
 *		sd_tq and sd_wmr_tq.
 *
 * Context: Can sleep. Requires blockable context.
 *
 * Notes: - The taskq() facility currently is NOT part of the DDI.
 *	    (definitely NOT recommended for 3rd-party drivers!) :-)
 *	  - taskq_create() will block for memory; it will also panic
 *	    if it cannot create the requested number of threads.
 *	  - Currently taskq_create() creates threads that cannot be
 *	    swapped.
10865 * - We use TASKQ_PREPOPULATE to ensure we have an adequate 10866 * supply of taskq entries at interrupt time (ie, so that we 10867 * do not have to sleep for memory) 10868 */ 10869 10870 static void 10871 sd_taskq_create(void) 10872 { 10873 char taskq_name[TASKQ_NAMELEN]; 10874 10875 ASSERT(sd_tq == NULL); 10876 ASSERT(sd_wmr_tq == NULL); 10877 10878 (void) snprintf(taskq_name, sizeof (taskq_name), 10879 "%s_drv_taskq", sd_label); 10880 sd_tq = (taskq_create(taskq_name, SD_TASKQ_NUMTHREADS, 10881 (v.v_maxsyspri - 2), sd_taskq_minalloc, sd_taskq_maxalloc, 10882 TASKQ_PREPOPULATE)); 10883 10884 (void) snprintf(taskq_name, sizeof (taskq_name), 10885 "%s_rmw_taskq", sd_label); 10886 sd_wmr_tq = (taskq_create(taskq_name, SD_WMR_TASKQ_NUMTHREADS, 10887 (v.v_maxsyspri - 2), sd_taskq_minalloc, sd_taskq_maxalloc, 10888 TASKQ_PREPOPULATE)); 10889 } 10890 10891 10892 /* 10893 * Function: sd_taskq_delete 10894 * 10895 * Description: Complementary cleanup routine for sd_taskq_create(). 10896 * 10897 * Context: Kernel thread context. 10898 */ 10899 10900 static void 10901 sd_taskq_delete(void) 10902 { 10903 ASSERT(sd_tq != NULL); 10904 ASSERT(sd_wmr_tq != NULL); 10905 taskq_destroy(sd_tq); 10906 taskq_destroy(sd_wmr_tq); 10907 sd_tq = NULL; 10908 sd_wmr_tq = NULL; 10909 } 10910 10911 10912 /* 10913 * Function: sdstrategy 10914 * 10915 * Description: Driver's strategy (9E) entry point function. 10916 * 10917 * Arguments: bp - pointer to buf(9S) 10918 * 10919 * Return Code: Always returns zero 10920 * 10921 * Context: Kernel thread context. 10922 */ 10923 10924 static int 10925 sdstrategy(struct buf *bp) 10926 { 10927 struct sd_lun *un; 10928 10929 un = ddi_get_soft_state(sd_state, SD_GET_INSTANCE_FROM_BUF(bp)); 10930 if (un == NULL) { 10931 bioerror(bp, EIO); 10932 bp->b_resid = bp->b_bcount; 10933 biodone(bp); 10934 return (0); 10935 } 10936 /* As was done in the past, fail new cmds. if state is dumping. */ 10937 if (un->un_state == SD_STATE_DUMPING) { 10938 bioerror(bp, ENXIO); 10939 bp->b_resid = bp->b_bcount; 10940 biodone(bp); 10941 return (0); 10942 } 10943 10944 ASSERT(!mutex_owned(SD_MUTEX(un))); 10945 10946 /* 10947 * Commands may sneak in while we released the mutex in 10948 * DDI_SUSPEND, we should block new commands. However, old 10949 * commands that are still in the driver at this point should 10950 * still be allowed to drain. 10951 */ 10952 mutex_enter(SD_MUTEX(un)); 10953 /* 10954 * Must wait here if either the device is suspended or 10955 * if it's power level is changing. 10956 */ 10957 while ((un->un_state == SD_STATE_SUSPENDED) || 10958 (un->un_state == SD_STATE_PM_CHANGING)) { 10959 cv_wait(&un->un_suspend_cv, SD_MUTEX(un)); 10960 } 10961 10962 un->un_ncmds_in_driver++; 10963 10964 /* 10965 * atapi: Since we are running the CD for now in PIO mode we need to 10966 * call bp_mapin here to avoid bp_mapin called interrupt context under 10967 * the HBA's init_pkt routine. 10968 */ 10969 if (un->un_f_cfg_is_atapi == TRUE) { 10970 mutex_exit(SD_MUTEX(un)); 10971 bp_mapin(bp); 10972 mutex_enter(SD_MUTEX(un)); 10973 } 10974 SD_INFO(SD_LOG_IO, un, "sdstrategy: un_ncmds_in_driver = %ld\n", 10975 un->un_ncmds_in_driver); 10976 10977 if (bp->b_flags & B_WRITE) 10978 un->un_f_sync_cache_required = TRUE; 10979 10980 mutex_exit(SD_MUTEX(un)); 10981 10982 /* 10983 * This will (eventually) allocate the sd_xbuf area and 10984 * call sd_xbuf_strategy(). We just want to return the 10985 * result of ddi_xbuf_qstrategy so that we have an opt- 10986 * imized tail call which saves us a stack frame. 
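	 *
	 * (A hedged note on the mechanism: ddi_xbuf_qstrategy() attaches
	 * an sd_xbuf from un->un_xbuf_attr to the buf, queueing the
	 * request when the xbuf allocation limits have been reached, and
	 * ultimately invokes our sd_xbuf_strategy() callback below.)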
10987 */ 10988 return (ddi_xbuf_qstrategy(bp, un->un_xbuf_attr)); 10989 } 10990 10991 10992 /* 10993 * Function: sd_xbuf_strategy 10994 * 10995 * Description: Function for initiating IO operations via the 10996 * ddi_xbuf_qstrategy() mechanism. 10997 * 10998 * Context: Kernel thread context. 10999 */ 11000 11001 static void 11002 sd_xbuf_strategy(struct buf *bp, ddi_xbuf_t xp, void *arg) 11003 { 11004 struct sd_lun *un = arg; 11005 11006 ASSERT(bp != NULL); 11007 ASSERT(xp != NULL); 11008 ASSERT(un != NULL); 11009 ASSERT(!mutex_owned(SD_MUTEX(un))); 11010 11011 /* 11012 * Initialize the fields in the xbuf and save a pointer to the 11013 * xbuf in bp->b_private. 11014 */ 11015 sd_xbuf_init(un, bp, xp, SD_CHAIN_BUFIO, NULL); 11016 11017 /* Send the buf down the iostart chain */ 11018 SD_BEGIN_IOSTART(((struct sd_xbuf *)xp)->xb_chain_iostart, un, bp); 11019 } 11020 11021 11022 /* 11023 * Function: sd_xbuf_init 11024 * 11025 * Description: Prepare the given sd_xbuf struct for use. 11026 * 11027 * Arguments: un - ptr to softstate 11028 * bp - ptr to associated buf(9S) 11029 * xp - ptr to associated sd_xbuf 11030 * chain_type - IO chain type to use: 11031 * SD_CHAIN_NULL 11032 * SD_CHAIN_BUFIO 11033 * SD_CHAIN_USCSI 11034 * SD_CHAIN_DIRECT 11035 * SD_CHAIN_DIRECT_PRIORITY 11036 * pktinfop - ptr to private data struct for scsi_pkt(9S) 11037 * initialization; may be NULL if none. 11038 * 11039 * Context: Kernel thread context 11040 */ 11041 11042 static void 11043 sd_xbuf_init(struct sd_lun *un, struct buf *bp, struct sd_xbuf *xp, 11044 uchar_t chain_type, void *pktinfop) 11045 { 11046 int index; 11047 11048 ASSERT(un != NULL); 11049 ASSERT(bp != NULL); 11050 ASSERT(xp != NULL); 11051 11052 SD_INFO(SD_LOG_IO, un, "sd_xbuf_init: buf:0x%p chain type:0x%x\n", 11053 bp, chain_type); 11054 11055 xp->xb_un = un; 11056 xp->xb_pktp = NULL; 11057 xp->xb_pktinfo = pktinfop; 11058 xp->xb_private = bp->b_private; 11059 xp->xb_blkno = (daddr_t)bp->b_blkno; 11060 11061 /* 11062 * Set up the iostart and iodone chain indexes in the xbuf, based 11063 * upon the specified chain type to use. 11064 */ 11065 switch (chain_type) { 11066 case SD_CHAIN_NULL: 11067 /* 11068 * Fall thru to just use the values for the buf type, even 11069 * tho for the NULL chain these values will never be used. 11070 */ 11071 /* FALLTHRU */ 11072 case SD_CHAIN_BUFIO: 11073 index = un->un_buf_chain_type; 11074 break; 11075 case SD_CHAIN_USCSI: 11076 index = un->un_uscsi_chain_type; 11077 break; 11078 case SD_CHAIN_DIRECT: 11079 index = un->un_direct_chain_type; 11080 break; 11081 case SD_CHAIN_DIRECT_PRIORITY: 11082 index = un->un_priority_chain_type; 11083 break; 11084 default: 11085 /* We're really broken if we ever get here... */ 11086 panic("sd_xbuf_init: illegal chain type!"); 11087 /*NOTREACHED*/ 11088 } 11089 11090 xp->xb_chain_iostart = sd_chain_index_map[index].sci_iostart_index; 11091 xp->xb_chain_iodone = sd_chain_index_map[index].sci_iodone_index; 11092 11093 /* 11094 * It might be a bit easier to simply bzero the entire xbuf above, 11095 * but it turns out that since we init a fair number of members anyway, 11096 * we save a fair number cycles by doing explicit assignment of zero. 
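	 *
	 * (That is, rather than a bzero(xp, sizeof (struct sd_xbuf))
	 * followed by re-initializing the members assigned above, we
	 * zero only the members that actually need it.)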
11097 */ 11098 xp->xb_pkt_flags = 0; 11099 xp->xb_dma_resid = 0; 11100 xp->xb_retry_count = 0; 11101 xp->xb_victim_retry_count = 0; 11102 xp->xb_ua_retry_count = 0; 11103 xp->xb_nr_retry_count = 0; 11104 xp->xb_sense_bp = NULL; 11105 xp->xb_sense_status = 0; 11106 xp->xb_sense_state = 0; 11107 xp->xb_sense_resid = 0; 11108 xp->xb_ena = 0; 11109 11110 bp->b_private = xp; 11111 bp->b_flags &= ~(B_DONE | B_ERROR); 11112 bp->b_resid = 0; 11113 bp->av_forw = NULL; 11114 bp->av_back = NULL; 11115 bioerror(bp, 0); 11116 11117 SD_INFO(SD_LOG_IO, un, "sd_xbuf_init: done.\n"); 11118 } 11119 11120 11121 /* 11122 * Function: sd_uscsi_strategy 11123 * 11124 * Description: Wrapper for calling into the USCSI chain via physio(9F) 11125 * 11126 * Arguments: bp - buf struct ptr 11127 * 11128 * Return Code: Always returns 0 11129 * 11130 * Context: Kernel thread context 11131 */ 11132 11133 static int 11134 sd_uscsi_strategy(struct buf *bp) 11135 { 11136 struct sd_lun *un; 11137 struct sd_uscsi_info *uip; 11138 struct sd_xbuf *xp; 11139 uchar_t chain_type; 11140 uchar_t cmd; 11141 11142 ASSERT(bp != NULL); 11143 11144 un = ddi_get_soft_state(sd_state, SD_GET_INSTANCE_FROM_BUF(bp)); 11145 if (un == NULL) { 11146 bioerror(bp, EIO); 11147 bp->b_resid = bp->b_bcount; 11148 biodone(bp); 11149 return (0); 11150 } 11151 11152 ASSERT(!mutex_owned(SD_MUTEX(un))); 11153 11154 SD_TRACE(SD_LOG_IO, un, "sd_uscsi_strategy: entry: buf:0x%p\n", bp); 11155 11156 /* 11157 * A pointer to a struct sd_uscsi_info is expected in bp->b_private 11158 */ 11159 ASSERT(bp->b_private != NULL); 11160 uip = (struct sd_uscsi_info *)bp->b_private; 11161 cmd = ((struct uscsi_cmd *)(uip->ui_cmdp))->uscsi_cdb[0]; 11162 11163 mutex_enter(SD_MUTEX(un)); 11164 /* 11165 * atapi: Since we are running the CD for now in PIO mode we need to 11166 * call bp_mapin here to avoid bp_mapin called interrupt context under 11167 * the HBA's init_pkt routine. 11168 */ 11169 if (un->un_f_cfg_is_atapi == TRUE) { 11170 mutex_exit(SD_MUTEX(un)); 11171 bp_mapin(bp); 11172 mutex_enter(SD_MUTEX(un)); 11173 } 11174 un->un_ncmds_in_driver++; 11175 SD_INFO(SD_LOG_IO, un, "sd_uscsi_strategy: un_ncmds_in_driver = %ld\n", 11176 un->un_ncmds_in_driver); 11177 11178 if ((bp->b_flags & B_WRITE) && (bp->b_bcount != 0) && 11179 (cmd != SCMD_MODE_SELECT) && (cmd != SCMD_MODE_SELECT_G1)) 11180 un->un_f_sync_cache_required = TRUE; 11181 11182 mutex_exit(SD_MUTEX(un)); 11183 11184 switch (uip->ui_flags) { 11185 case SD_PATH_DIRECT: 11186 chain_type = SD_CHAIN_DIRECT; 11187 break; 11188 case SD_PATH_DIRECT_PRIORITY: 11189 chain_type = SD_CHAIN_DIRECT_PRIORITY; 11190 break; 11191 default: 11192 chain_type = SD_CHAIN_USCSI; 11193 break; 11194 } 11195 11196 /* 11197 * We may allocate extra buf for external USCSI commands. If the 11198 * application asks for bigger than 20-byte sense data via USCSI, 11199 * SCSA layer will allocate 252 bytes sense buf for that command. 
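	 *
	 * (An assumption worth noting: the sizing arithmetic that follows
	 * relies on the sense buffer being the final member of struct
	 * sd_xbuf. Allocating sizeof (struct sd_xbuf) - SENSE_LENGTH +
	 * MAX_SENSE_LENGTH then simply grows that trailing buffer from
	 * SENSE_LENGTH (20 bytes) to MAX_SENSE_LENGTH (252 bytes).)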
 */
	if (((struct uscsi_cmd *)(uip->ui_cmdp))->uscsi_rqlen >
	    SENSE_LENGTH) {
		xp = kmem_zalloc(sizeof (struct sd_xbuf) - SENSE_LENGTH +
		    MAX_SENSE_LENGTH, KM_SLEEP);
	} else {
		xp = kmem_zalloc(sizeof (struct sd_xbuf), KM_SLEEP);
	}

	sd_xbuf_init(un, bp, xp, chain_type, uip->ui_cmdp);

	/* Use the index obtained within xbuf_init */
	SD_BEGIN_IOSTART(xp->xb_chain_iostart, un, bp);

	SD_TRACE(SD_LOG_IO, un, "sd_uscsi_strategy: exit: buf:0x%p\n", bp);

	return (0);
}

/*
 * Function: sd_send_scsi_cmd
 *
 * Description: Runs a USCSI command for user (when called through sdioctl),
 *		or for the driver.
 *
 * Arguments: dev - the dev_t for the device
 *	      incmd - ptr to a valid uscsi_cmd struct
 *	      flag - bit flag, indicating open settings, 32/64 bit type
 *	      dataspace - UIO_USERSPACE or UIO_SYSSPACE
 *	      path_flag - SD_PATH_DIRECT to use the USCSI "direct" chain and
 *			  the normal command waitq, or SD_PATH_DIRECT_PRIORITY
 *			  to use the USCSI "direct" chain and bypass the
 *			  normal command waitq.
 *
 * Return Code: 0 - successful completion of the given command
 *		EIO - scsi_uscsi_handle_command() failed
 *		ENXIO - soft state not found for specified dev
 *		EINVAL
 *		EFAULT - copyin/copyout error
 *		return code of scsi_uscsi_handle_command():
 *			EIO
 *			ENXIO
 *			EACCES
 *
 * Context: Waits for command to complete. Can sleep.
 */

static int
sd_send_scsi_cmd(dev_t dev, struct uscsi_cmd *incmd, int flag,
    enum uio_seg dataspace, int path_flag)
{
	struct sd_lun	*un;
	sd_ssc_t	*ssc;
	int		rval;

	un = ddi_get_soft_state(sd_state, SDUNIT(dev));
	if (un == NULL) {
		return (ENXIO);
	}

	/*
	 * Use sd_ssc_send to handle the uscsi cmd.
	 */
	ssc = sd_ssc_init(un);
	rval = sd_ssc_send(ssc, incmd, flag, dataspace, path_flag);
	sd_ssc_fini(ssc);

	return (rval);
}

/*
 * Function: sd_ssc_init
 *
 * Description: Callers that issue uscsi commands use this function to
 *		allocate and initialize the necessary structures, namely
 *		the uscsi_cmd and sd_uscsi_info structs.
 *
 *		The return value of sd_send_scsi_cmd is treated as a
 *		fault in various conditions, yet even when it is non-zero
 *		some callers may ignore it. In other words, we cannot
 *		make an accurate assessment in sdintr: a command failing
 *		in sdintr does not necessarily mean that the caller of
 *		sd_send_scsi_cmd will treat it as a real failure.
 *
 *		To avoid printing too many error logs for a failed uscsi
 *		packet that the caller may not treat as a failure, sd
 *		keeps silent while handling all uscsi commands.
 *
 *		However, during detach->attach and attach->open, for some
 *		types of problems the driver should provide information
 *		about the problem encountered. Devices use USCSI_SILENT,
 *		which suppresses all driver information; the result is
 *		that no information about the problem is available. Being
 *		completely silent during this time is inappropriate. The
 *		driver needs a more selective filter than USCSI_SILENT, so
 *		that information related to faults is still provided.
 *
 *		To make an accurate assessment, the caller of
 *		sd_send_scsi_USCSI_CMD should take ownership and gather
 *		the information necessary to print error messages.
 *
 *		To print useful information about a uscsi command, we
 *		need to keep the uscsi_cmd and sd_uscsi_info structs
 *		until the assessment can be made. sd_ssc_init allocates
 *		the structs needed for sending a uscsi command, and the
 *		caller is responsible for freeing that memory by calling
 *		sd_ssc_fini.
 *
 *		The calling sequence looks like:
 *		sd_ssc_init->
 *
 *			...
 *
 *			sd_send_scsi_USCSI_CMD->
 *				sd_ssc_send-> - - - sdintr
 *			...
 *
 *			if we think the return value should be treated as
 *			a failure, we make the assessment here and print
 *			out the necessary information by retrieving the
 *			uscsi_cmd and sd_uscsi_info structs
 *
 *			...
 *
 *		sd_ssc_fini
 *
 *
 * Arguments: un - pointer to driver soft state (unit) structure for this
 *		   target.
 *
 * Return code: sd_ssc_t - pointer to allocated sd_ssc_t struct; it contains
 *			   the uscsi_cmd and sd_uscsi_info structs.
 *		NULL - if memory for the sd_ssc_t struct cannot be allocated
 *
 * Context: Kernel Thread.
 */
static sd_ssc_t *
sd_ssc_init(struct sd_lun *un)
{
	sd_ssc_t		*ssc;
	struct uscsi_cmd	*ucmdp;
	struct sd_uscsi_info	*uip;

	ASSERT(un != NULL);
	ASSERT(!mutex_owned(SD_MUTEX(un)));

	/*
	 * Allocate sd_ssc_t structure
	 */
	ssc = kmem_zalloc(sizeof (sd_ssc_t), KM_SLEEP);

	/*
	 * Allocate uscsi_cmd by calling scsi_uscsi_alloc common routine
	 */
	ucmdp = scsi_uscsi_alloc();

	/*
	 * Allocate sd_uscsi_info structure
	 */
	uip = kmem_zalloc(sizeof (struct sd_uscsi_info), KM_SLEEP);

	ssc->ssc_uscsi_cmd = ucmdp;
	ssc->ssc_uscsi_info = uip;
	ssc->ssc_un = un;

	return (ssc);
}

/*
 * Function: sd_ssc_fini
 *
 * Description: Free the sd_ssc_t struct and everything hanging off it.
 *
 * Arguments: ssc - struct pointer of sd_ssc_t.
 */
static void
sd_ssc_fini(sd_ssc_t *ssc)
{
	scsi_uscsi_free(ssc->ssc_uscsi_cmd);

	if (ssc->ssc_uscsi_info != NULL) {
		kmem_free(ssc->ssc_uscsi_info, sizeof (struct sd_uscsi_info));
		ssc->ssc_uscsi_info = NULL;
	}

	kmem_free(ssc, sizeof (sd_ssc_t));
	ssc = NULL;
}

/*
 * Function: sd_ssc_send
 *
 * Description: Runs a USCSI command for user when called through sdioctl,
 *		or for the driver.
 *
 * Arguments: ssc - the sd_ssc_t struct that carries the uscsi_cmd and
 *		    sd_uscsi_info structs.
 *	      incmd - ptr to a valid uscsi_cmd struct
 *	      flag - bit flag, indicating open settings, 32/64 bit type
 *	      dataspace - UIO_USERSPACE or UIO_SYSSPACE
 *	      path_flag - SD_PATH_DIRECT to use the USCSI "direct" chain and
 *			  the normal command waitq, or SD_PATH_DIRECT_PRIORITY
 *			  to use the USCSI "direct" chain and bypass the
 *			  normal command waitq.
 *
 * Return Code: 0 - successful completion of the given command
 *		EIO - scsi_uscsi_handle_command() failed
 *		ENXIO - soft state not found for specified dev
 *		EINVAL
 *		EFAULT - copyin/copyout error
 *		return code of scsi_uscsi_handle_command():
 *			EIO
 *			ENXIO
 *			EACCES
 *
 * Context: Kernel Thread;
 *	    Waits for command to complete. Can sleep.
 */
static int
sd_ssc_send(sd_ssc_t *ssc, struct uscsi_cmd *incmd, int flag,
    enum uio_seg dataspace, int path_flag)
{
	struct sd_uscsi_info	*uip;
	struct uscsi_cmd	*uscmd;
	struct sd_lun		*un;
	dev_t			dev;

	int	format = 0;
	int	rval;

	ASSERT(ssc != NULL);
	un = ssc->ssc_un;
	ASSERT(un != NULL);
	uscmd = ssc->ssc_uscsi_cmd;
	ASSERT(uscmd != NULL);
	ASSERT(!mutex_owned(SD_MUTEX(un)));
	if (ssc->ssc_flags & SSC_FLAGS_NEED_ASSESSMENT) {
		/*
		 * If we enter here, it indicates that the previous uscsi
		 * command has not been processed by sd_ssc_assessment.
		 * This violates our rules of FMA telemetry processing, so
		 * we print this message along with the last undisposed
		 * uscsi command.
		 */
		if (uscmd->uscsi_cdb != NULL) {
			SD_INFO(SD_LOG_SDTEST, un,
			    "sd_ssc_send is missing the alternative "
			    "sd_ssc_assessment when running command 0x%x.\n",
			    uscmd->uscsi_cdb[0]);
		}
		/*
		 * Set the ssc_flags to SSC_FLAGS_UNKNOWN, which is the
		 * initial status.
		 */
		ssc->ssc_flags = SSC_FLAGS_UNKNOWN;
	}

	/*
	 * We need to make sure that sd_ssc_send is always followed by a
	 * matching sd_ssc_assessment, to avoid missing FMA telemetry.
	 */
	ssc->ssc_flags |= SSC_FLAGS_NEED_ASSESSMENT;

#ifdef SDDEBUG
	switch (dataspace) {
	case UIO_USERSPACE:
		SD_TRACE(SD_LOG_IO, un,
		    "sd_ssc_send: entry: un:0x%p UIO_USERSPACE\n", un);
		break;
	case UIO_SYSSPACE:
		SD_TRACE(SD_LOG_IO, un,
		    "sd_ssc_send: entry: un:0x%p UIO_SYSSPACE\n", un);
		break;
	default:
		SD_TRACE(SD_LOG_IO, un,
		    "sd_ssc_send: entry: un:0x%p UNEXPECTED SPACE\n", un);
		break;
	}
#endif

	rval = scsi_uscsi_copyin((intptr_t)incmd, flag,
	    SD_ADDRESS(un), &uscmd);
	if (rval != 0) {
		SD_TRACE(SD_LOG_IO, un, "sd_ssc_send: "
		    "scsi_uscsi_copyin failed\n");
		return (rval);
	}

	if ((uscmd->uscsi_cdb != NULL) &&
	    (uscmd->uscsi_cdb[0] == SCMD_FORMAT)) {
		mutex_enter(SD_MUTEX(un));
		un->un_f_format_in_progress = TRUE;
		mutex_exit(SD_MUTEX(un));
		format = 1;
	}

	/*
	 * Fill in the sd_uscsi_info struct (hanging off the ssc) with the
	 * info needed by sd_initpkt_for_uscsi().  Then put the pointer into
	 * b_private in the buf for sd_initpkt_for_uscsi().  Note that
	 * since we allocate the buf here in this function, we do not
	 * need to preserve the prior contents of b_private.
	 * The sd_uscsi_info struct is also used by sd_uscsi_strategy()
	 */
	uip = ssc->ssc_uscsi_info;
	uip->ui_flags = path_flag;
	uip->ui_cmdp = uscmd;

	/*
	 * Commands sent with priority are intended for error recovery
	 * situations, and do not have retries performed.
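	 *
	 * (USCSI_DIAGNOSE, set below for priority commands, is the flag
	 * that marks a command as non-retryable; sd_ssc_assessment()
	 * later skips commands carrying it for the same reason.)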
11509 */ 11510 if (path_flag == SD_PATH_DIRECT_PRIORITY) { 11511 uscmd->uscsi_flags |= USCSI_DIAGNOSE; 11512 } 11513 uscmd->uscsi_flags &= ~USCSI_NOINTR; 11514 11515 dev = SD_GET_DEV(un); 11516 rval = scsi_uscsi_handle_cmd(dev, dataspace, uscmd, 11517 sd_uscsi_strategy, NULL, uip); 11518 11519 /* 11520 * mark ssc_flags right after handle_cmd to make sure 11521 * the uscsi has been sent 11522 */ 11523 ssc->ssc_flags |= SSC_FLAGS_CMD_ISSUED; 11524 11525 #ifdef SDDEBUG 11526 SD_INFO(SD_LOG_IO, un, "sd_ssc_send: " 11527 "uscsi_status: 0x%02x uscsi_resid:0x%x\n", 11528 uscmd->uscsi_status, uscmd->uscsi_resid); 11529 if (uscmd->uscsi_bufaddr != NULL) { 11530 SD_INFO(SD_LOG_IO, un, "sd_ssc_send: " 11531 "uscmd->uscsi_bufaddr: 0x%p uscmd->uscsi_buflen:%d\n", 11532 uscmd->uscsi_bufaddr, uscmd->uscsi_buflen); 11533 if (dataspace == UIO_SYSSPACE) { 11534 SD_DUMP_MEMORY(un, SD_LOG_IO, 11535 "data", (uchar_t *)uscmd->uscsi_bufaddr, 11536 uscmd->uscsi_buflen, SD_LOG_HEX); 11537 } 11538 } 11539 #endif 11540 11541 if (format == 1) { 11542 mutex_enter(SD_MUTEX(un)); 11543 un->un_f_format_in_progress = FALSE; 11544 mutex_exit(SD_MUTEX(un)); 11545 } 11546 11547 (void) scsi_uscsi_copyout((intptr_t)incmd, uscmd); 11548 11549 return (rval); 11550 } 11551 11552 /* 11553 * Function: sd_ssc_print 11554 * 11555 * Description: Print information available to the console. 11556 * 11557 * Arguments: ssc - the struct of sd_ssc_t will bring uscsi_cmd and 11558 * sd_uscsi_info in. 11559 * sd_severity - log level. 11560 * Context: Kernel thread or interrupt context. 11561 */ 11562 static void 11563 sd_ssc_print(sd_ssc_t *ssc, int sd_severity) 11564 { 11565 struct uscsi_cmd *ucmdp; 11566 struct scsi_device *devp; 11567 dev_info_t *devinfo; 11568 uchar_t *sensep; 11569 int senlen; 11570 union scsi_cdb *cdbp; 11571 uchar_t com; 11572 extern struct scsi_key_strings scsi_cmds[]; 11573 11574 ASSERT(ssc != NULL); 11575 11576 ucmdp = ssc->ssc_uscsi_cmd; 11577 devp = SD_SCSI_DEVP(ssc->ssc_un); 11578 devinfo = SD_DEVINFO(ssc->ssc_un); 11579 ASSERT(ucmdp != NULL); 11580 ASSERT(devp != NULL); 11581 ASSERT(devinfo != NULL); 11582 sensep = (uint8_t *)ucmdp->uscsi_rqbuf; 11583 senlen = ucmdp->uscsi_rqlen - ucmdp->uscsi_rqresid; 11584 cdbp = (union scsi_cdb *)ucmdp->uscsi_cdb; 11585 11586 /* In certain case (like DOORLOCK), the cdb could be NULL. */ 11587 if (cdbp == NULL) 11588 return; 11589 /* We don't print log if no sense data available. */ 11590 if (senlen == 0) 11591 sensep = NULL; 11592 com = cdbp->scc_cmd; 11593 scsi_generic_errmsg(devp, sd_label, sd_severity, 0, 0, com, 11594 scsi_cmds, sensep, ssc->ssc_un->un_additional_codes, NULL); 11595 } 11596 11597 /* 11598 * Function: sd_ssc_assessment 11599 * 11600 * Description: We use this function to make an assessment at the point 11601 * where SD driver may encounter a potential error. 11602 * 11603 * Arguments: ssc - the struct of sd_ssc_t will bring uscsi_cmd and 11604 * sd_uscsi_info in. 11605 * tp_assess - a hint of strategy for ereport posting. 11606 * Possible values of tp_assess include: 11607 * SD_FMT_IGNORE - we don't post any ereport because we're 11608 * sure that it is ok to ignore the underlying problems. 11609 * SD_FMT_IGNORE_COMPROMISE - we don't post any ereport for now 11610 * but it might be not correct to ignore the underlying hardware 11611 * error. 11612 * SD_FMT_STATUS_CHECK - we will post an ereport with the 11613 * payload driver-assessment of value "fail" or 11614 * "fatal"(depending on what information we have here). 
This
 *		assessment value is usually set when the SD driver thinks
 *		a potential error has occurred (typically, when the return
 *		value of the SCSI command is EIO).
 *		SD_FMT_STANDARD - we will post an ereport with the payload
 *		driver-assessment of value "info". This assessment value is
 *		set when the SCSI command returned successfully and with
 *		sense data sent back.
 *
 * Context: Kernel thread.
 */
static void
sd_ssc_assessment(sd_ssc_t *ssc, enum sd_type_assessment tp_assess)
{
	int		senlen = 0;
	struct uscsi_cmd *ucmdp = NULL;
	struct sd_lun	*un;

	ASSERT(ssc != NULL);
	un = ssc->ssc_un;
	ASSERT(un != NULL);
	ucmdp = ssc->ssc_uscsi_cmd;
	ASSERT(ucmdp != NULL);

	if (ssc->ssc_flags & SSC_FLAGS_NEED_ASSESSMENT) {
		ssc->ssc_flags &= ~SSC_FLAGS_NEED_ASSESSMENT;
	} else {
		/*
		 * If we enter here, it indicates a wrong calling sequence of
		 * sd_ssc_send and sd_ssc_assessment, which should always be
		 * called as a pair; otherwise FMA telemetry may be lost.
		 */
		if (ucmdp->uscsi_cdb != NULL) {
			SD_INFO(SD_LOG_SDTEST, un,
			    "sd_ssc_assessment is missing the "
			    "alternative sd_ssc_send when running 0x%x, "
			    "or there are superfluous sd_ssc_assessment for "
			    "the same sd_ssc_send.\n",
			    ucmdp->uscsi_cdb[0]);
		}
		/*
		 * Set the ssc_flags to the initial value to avoid passing
		 * down dirty flags to the following sd_ssc_send function.
		 */
		ssc->ssc_flags = SSC_FLAGS_UNKNOWN;
		return;
	}

	/*
	 * We don't handle non-disk drives (CD-ROM, removable media).
	 * Clear the ssc_flags before returning, in case we've set
	 * SSC_FLAGS_INVALID_DATA, which should be skipped for a non-disk
	 * drive.
	 */
	if (ISCD(un) || un->un_f_has_removable_media) {
		ssc->ssc_flags = SSC_FLAGS_UNKNOWN;
		return;
	}

	/*
	 * Only handle an issued command which is waiting for assessment.
	 * A command which has not been issued will not have
	 * SSC_FLAGS_INVALID_DATA set, so it's OK to just return here.
	 */
	if (!(ssc->ssc_flags & SSC_FLAGS_CMD_ISSUED)) {
		sd_ssc_print(ssc, SCSI_ERR_INFO);
		return;
	} else {
		/*
		 * For an issued command, we should clear this flag so
		 * that the sd_ssc_t structure can be reused across
		 * multiple uscsi commands.
		 */
		ssc->ssc_flags &= ~SSC_FLAGS_CMD_ISSUED;
	}

	/*
	 * We will not deal with non-retryable (flag USCSI_DIAGNOSE set)
	 * commands here; we should clear the ssc_flags before returning.
	 */
	if (ucmdp->uscsi_flags & USCSI_DIAGNOSE) {
		ssc->ssc_flags = SSC_FLAGS_UNKNOWN;
		return;
	}

	switch (tp_assess) {
	case SD_FMT_IGNORE:
	case SD_FMT_IGNORE_COMPROMISE:
		break;
	case SD_FMT_STATUS_CHECK:
		/*
		 * For a failed command (including a succeeded command
		 * with invalid data sent back).
		 */
		sd_ssc_post(ssc, SD_FM_DRV_FATAL);
		break;
	case SD_FMT_STANDARD:
		/*
		 * Always for succeeded commands, probably with sense
		 * data sent back.
		 * Limitation:
		 *	We can only handle a succeeded command with sense
		 *	data sent back when auto-request-sense is enabled.
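		 *
		 * (senlen computed below is the number of valid sense
		 * bytes actually returned: the requested uscsi_rqlen
		 * minus the untransferred residual uscsi_rqresid.)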
11718 */ 11719 senlen = ssc->ssc_uscsi_cmd->uscsi_rqlen - 11720 ssc->ssc_uscsi_cmd->uscsi_rqresid; 11721 if ((ssc->ssc_uscsi_info->ui_pkt_state & STATE_ARQ_DONE) && 11722 (un->un_f_arq_enabled == TRUE) && 11723 senlen > 0 && 11724 ssc->ssc_uscsi_cmd->uscsi_rqbuf != NULL) { 11725 sd_ssc_post(ssc, SD_FM_DRV_NOTICE); 11726 } 11727 break; 11728 default: 11729 /* 11730 * Should not have other type of assessment. 11731 */ 11732 scsi_log(SD_DEVINFO(un), sd_label, CE_CONT, 11733 "sd_ssc_assessment got wrong " 11734 "sd_type_assessment %d.\n", tp_assess); 11735 break; 11736 } 11737 /* 11738 * Clear up the ssc_flags before return. 11739 */ 11740 ssc->ssc_flags = SSC_FLAGS_UNKNOWN; 11741 } 11742 11743 /* 11744 * Function: sd_ssc_post 11745 * 11746 * Description: 1. read the driver property to get fm-scsi-log flag. 11747 * 2. print log if fm_log_capable is non-zero. 11748 * 3. call sd_ssc_ereport_post to post ereport if possible. 11749 * 11750 * Context: May be called from kernel thread or interrupt context. 11751 */ 11752 static void 11753 sd_ssc_post(sd_ssc_t *ssc, enum sd_driver_assessment sd_assess) 11754 { 11755 struct sd_lun *un; 11756 int fm_scsi_log = 0; 11757 int sd_severity; 11758 11759 ASSERT(ssc != NULL); 11760 un = ssc->ssc_un; 11761 ASSERT(un != NULL); 11762 11763 fm_scsi_log = ddi_prop_get_int(DDI_DEV_T_ANY, SD_DEVINFO(un), 11764 DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, "fm-scsi-log", 0); 11765 11766 if (fm_scsi_log != 0) { 11767 switch (sd_assess) { 11768 case SD_FM_DRV_FATAL: 11769 sd_severity = SCSI_ERR_FATAL; 11770 break; 11771 case SD_FM_DRV_RECOVERY: 11772 sd_severity = SCSI_ERR_RECOVERED; 11773 break; 11774 case SD_FM_DRV_RETRY: 11775 sd_severity = SCSI_ERR_RETRYABLE; 11776 break; 11777 case SD_FM_DRV_NOTICE: 11778 sd_severity = SCSI_ERR_INFO; 11779 break; 11780 default: 11781 sd_severity = SCSI_ERR_UNKNOWN; 11782 } 11783 /* print log */ 11784 sd_ssc_print(ssc, sd_severity); 11785 } 11786 11787 /* always post ereport */ 11788 sd_ssc_ereport_post(ssc, sd_assess); 11789 } 11790 11791 /* 11792 * Function: sd_ssc_set_info 11793 * 11794 * Description: Mark ssc_flags and set ssc_info which would be the 11795 * payload of uderr ereport. This function will cause 11796 * sd_ssc_ereport_post to post uderr ereport only. 11797 * 11798 * Context: Kernel thread or interrupt context 11799 */ 11800 static void 11801 sd_ssc_set_info(sd_ssc_t *ssc, int ssc_flags, const char *fmt, ...) 11802 { 11803 va_list ap; 11804 11805 ASSERT(ssc != NULL); 11806 11807 ssc->ssc_flags |= ssc_flags; 11808 va_start(ap, fmt); 11809 (void) vsnprintf(ssc->ssc_info, sizeof (ssc->ssc_info), fmt, ap); 11810 va_end(ap); 11811 } 11812 11813 /* 11814 * Function: sd_buf_iodone 11815 * 11816 * Description: Frees the sd_xbuf & returns the buf to its originator. 11817 * 11818 * Context: May be called from interrupt context. 11819 */ 11820 /* ARGSUSED */ 11821 static void 11822 sd_buf_iodone(int index, struct sd_lun *un, struct buf *bp) 11823 { 11824 struct sd_xbuf *xp; 11825 11826 ASSERT(un != NULL); 11827 ASSERT(bp != NULL); 11828 ASSERT(!mutex_owned(SD_MUTEX(un))); 11829 11830 SD_TRACE(SD_LOG_IO_CORE, un, "sd_buf_iodone: entry.\n"); 11831 11832 xp = SD_GET_XBUF(bp); 11833 ASSERT(xp != NULL); 11834 11835 mutex_enter(SD_MUTEX(un)); 11836 11837 /* 11838 * Grab time when the cmd completed. 11839 * This is used for determining if the system has been 11840 * idle long enough to make it idle to the PM framework. 11841 * This is for lowering the overhead, and therefore improving 11842 * performance per I/O operation. 
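	 *
	 * (As a hedged note: the PM code is expected to compare
	 * un_pm_idle_time against the current time when deciding
	 * whether the device has been idle long enough to power down.)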
11843 */ 11844 un->un_pm_idle_time = ddi_get_time(); 11845 11846 un->un_ncmds_in_driver--; 11847 ASSERT(un->un_ncmds_in_driver >= 0); 11848 SD_INFO(SD_LOG_IO, un, "sd_buf_iodone: un_ncmds_in_driver = %ld\n", 11849 un->un_ncmds_in_driver); 11850 11851 mutex_exit(SD_MUTEX(un)); 11852 11853 ddi_xbuf_done(bp, un->un_xbuf_attr); /* xbuf is gone after this */ 11854 biodone(bp); /* bp is gone after this */ 11855 11856 SD_TRACE(SD_LOG_IO_CORE, un, "sd_buf_iodone: exit.\n"); 11857 } 11858 11859 11860 /* 11861 * Function: sd_uscsi_iodone 11862 * 11863 * Description: Frees the sd_xbuf & returns the buf to its originator. 11864 * 11865 * Context: May be called from interrupt context. 11866 */ 11867 /* ARGSUSED */ 11868 static void 11869 sd_uscsi_iodone(int index, struct sd_lun *un, struct buf *bp) 11870 { 11871 struct sd_xbuf *xp; 11872 11873 ASSERT(un != NULL); 11874 ASSERT(bp != NULL); 11875 11876 xp = SD_GET_XBUF(bp); 11877 ASSERT(xp != NULL); 11878 ASSERT(!mutex_owned(SD_MUTEX(un))); 11879 11880 SD_INFO(SD_LOG_IO, un, "sd_uscsi_iodone: entry.\n"); 11881 11882 bp->b_private = xp->xb_private; 11883 11884 mutex_enter(SD_MUTEX(un)); 11885 11886 /* 11887 * Grab time when the cmd completed. 11888 * This is used for determining if the system has been 11889 * idle long enough to make it idle to the PM framework. 11890 * This is for lowering the overhead, and therefore improving 11891 * performance per I/O operation. 11892 */ 11893 un->un_pm_idle_time = ddi_get_time(); 11894 11895 un->un_ncmds_in_driver--; 11896 ASSERT(un->un_ncmds_in_driver >= 0); 11897 SD_INFO(SD_LOG_IO, un, "sd_uscsi_iodone: un_ncmds_in_driver = %ld\n", 11898 un->un_ncmds_in_driver); 11899 11900 mutex_exit(SD_MUTEX(un)); 11901 11902 if (((struct uscsi_cmd *)(xp->xb_pktinfo))->uscsi_rqlen > 11903 SENSE_LENGTH) { 11904 kmem_free(xp, sizeof (struct sd_xbuf) - SENSE_LENGTH + 11905 MAX_SENSE_LENGTH); 11906 } else { 11907 kmem_free(xp, sizeof (struct sd_xbuf)); 11908 } 11909 11910 biodone(bp); 11911 11912 SD_INFO(SD_LOG_IO, un, "sd_uscsi_iodone: exit.\n"); 11913 } 11914 11915 11916 /* 11917 * Function: sd_mapblockaddr_iostart 11918 * 11919 * Description: Verify request lies within the partition limits for 11920 * the indicated minor device. Issue "overrun" buf if 11921 * request would exceed partition range. Converts 11922 * partition-relative block address to absolute. 11923 * 11924 * Context: Can sleep 11925 * 11926 * Issues: This follows what the old code did, in terms of accessing 11927 * some of the partition info in the unit struct without holding 11928 * the mutext. This is a general issue, if the partition info 11929 * can be altered while IO is in progress... as soon as we send 11930 * a buf, its partitioning can be invalid before it gets to the 11931 * device. Probably the right fix is to move partitioning out 11932 * of the driver entirely. 
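 *
 * Example of the overrun handling below, with hypothetical numbers:
 * for a partition of nblocks = 1000 and a request starting at
 * blocknum = 990 for 20 blocks, requested_nblocks (20) exceeds
 * available_nblocks (10), so an "overrun" buf is cloned that carries
 * only the first 10 blocks; the other 10 blocks' worth of bytes shows
 * up in b_resid when the IO completes.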
 */

static void
sd_mapblockaddr_iostart(int index, struct sd_lun *un, struct buf *bp)
{
	diskaddr_t	nblocks;	/* #blocks in the given partition */
	daddr_t		blocknum;	/* Block number specified by the buf */
	size_t		requested_nblocks;
	size_t		available_nblocks;
	int		partition;
	diskaddr_t	partition_offset;
	struct sd_xbuf *xp;

	ASSERT(un != NULL);
	ASSERT(bp != NULL);
	ASSERT(!mutex_owned(SD_MUTEX(un)));

	SD_TRACE(SD_LOG_IO_PARTITION, un,
	    "sd_mapblockaddr_iostart: entry: buf:0x%p\n", bp);

	xp = SD_GET_XBUF(bp);
	ASSERT(xp != NULL);

	/*
	 * If the geometry is not indicated as valid, attempt to access
	 * the unit & verify the geometry/label. This can be the case for
	 * removable-media devices, or if the device was opened in
	 * NDELAY/NONBLOCK mode.
	 */
	partition = SDPART(bp->b_edev);

	if (!SD_IS_VALID_LABEL(un)) {
		sd_ssc_t *ssc;
		/*
		 * Initialize sd_ssc_t for internal uscsi commands.
		 * To avoid a potential performance hit, allocate this
		 * memory only when the label is invalid.
		 */
		ssc = sd_ssc_init(un);

		if (sd_ready_and_valid(ssc, partition) != SD_READY_VALID) {
			/*
			 * For removable devices it is possible to start an
			 * I/O without a media by opening the device in nodelay
			 * mode. Also for writable CDs there can be many
			 * scenarios where there is no geometry yet but volume
			 * manager is trying to issue a read() just because
			 * it can see TOC on the CD. So do not print a message
			 * for removables.
			 */
			if (!un->un_f_has_removable_media) {
				scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
				    "i/o to invalid geometry\n");
			}
			bioerror(bp, EIO);
			bp->b_resid = bp->b_bcount;
			SD_BEGIN_IODONE(index, un, bp);

			sd_ssc_fini(ssc);
			return;
		}
		sd_ssc_fini(ssc);
	}

	nblocks = 0;
	(void) cmlb_partinfo(un->un_cmlbhandle, partition,
	    &nblocks, &partition_offset, NULL, NULL, (void *)SD_PATH_DIRECT);

	/*
	 * blocknum is the starting block number of the request. At this
	 * point it is still relative to the start of the minor device.
	 */
	blocknum = xp->xb_blkno;

	/*
	 * Legacy: If the starting block number is one past the last block
	 * in the partition, do not set B_ERROR in the buf.
	 */
	if (blocknum == nblocks)  {
		goto error_exit;
	}

	/*
	 * Confirm that the first block of the request lies within the
	 * partition limits. Also the requested number of bytes must be
	 * a multiple of the system block size.
	 */
	if ((blocknum < 0) || (blocknum >= nblocks) ||
	    ((bp->b_bcount & (un->un_sys_blocksize - 1)) != 0)) {
		bp->b_flags |= B_ERROR;
		goto error_exit;
	}

	/*
	 * If the requested # blocks exceeds the available # blocks, that
	 * is an overrun of the partition.
	 */
	requested_nblocks = SD_BYTES2SYSBLOCKS(un, bp->b_bcount);
	available_nblocks = (size_t)(nblocks - blocknum);
	ASSERT(nblocks >= blocknum);

	if (requested_nblocks > available_nblocks) {
		/*
		 * Allocate an "overrun" buf to allow the request to proceed
		 * for the amount of space available in the partition. The
		 * amount not transferred will be added into the b_resid
		 * when the operation is complete.
The overrun buf 12040 * replaces the original buf here, and the original buf 12041 * is saved inside the overrun buf, for later use. 12042 */ 12043 size_t resid = SD_SYSBLOCKS2BYTES(un, 12044 (offset_t)(requested_nblocks - available_nblocks)); 12045 size_t count = bp->b_bcount - resid; 12046 /* 12047 * Note: count is an unsigned entity thus it'll NEVER 12048 * be less than 0 so ASSERT the original values are 12049 * correct. 12050 */ 12051 ASSERT(bp->b_bcount >= resid); 12052 12053 bp = sd_bioclone_alloc(bp, count, blocknum, 12054 (int (*)(struct buf *)) sd_mapblockaddr_iodone); 12055 xp = SD_GET_XBUF(bp); /* Update for 'new' bp! */ 12056 ASSERT(xp != NULL); 12057 } 12058 12059 /* At this point there should be no residual for this buf. */ 12060 ASSERT(bp->b_resid == 0); 12061 12062 /* Convert the block number to an absolute address. */ 12063 xp->xb_blkno += partition_offset; 12064 12065 SD_NEXT_IOSTART(index, un, bp); 12066 12067 SD_TRACE(SD_LOG_IO_PARTITION, un, 12068 "sd_mapblockaddr_iostart: exit 0: buf:0x%p\n", bp); 12069 12070 return; 12071 12072 error_exit: 12073 bp->b_resid = bp->b_bcount; 12074 SD_BEGIN_IODONE(index, un, bp); 12075 SD_TRACE(SD_LOG_IO_PARTITION, un, 12076 "sd_mapblockaddr_iostart: exit 1: buf:0x%p\n", bp); 12077 } 12078 12079 12080 /* 12081 * Function: sd_mapblockaddr_iodone 12082 * 12083 * Description: Completion-side processing for partition management. 12084 * 12085 * Context: May be called under interrupt context 12086 */ 12087 12088 static void 12089 sd_mapblockaddr_iodone(int index, struct sd_lun *un, struct buf *bp) 12090 { 12091 /* int partition; */ /* Not used, see below. */ 12092 ASSERT(un != NULL); 12093 ASSERT(bp != NULL); 12094 ASSERT(!mutex_owned(SD_MUTEX(un))); 12095 12096 SD_TRACE(SD_LOG_IO_PARTITION, un, 12097 "sd_mapblockaddr_iodone: entry: buf:0x%p\n", bp); 12098 12099 if (bp->b_iodone == (int (*)(struct buf *)) sd_mapblockaddr_iodone) { 12100 /* 12101 * We have an "overrun" buf to deal with... 12102 */ 12103 struct sd_xbuf *xp; 12104 struct buf *obp; /* ptr to the original buf */ 12105 12106 xp = SD_GET_XBUF(bp); 12107 ASSERT(xp != NULL); 12108 12109 /* Retrieve the pointer to the original buf */ 12110 obp = (struct buf *)xp->xb_private; 12111 ASSERT(obp != NULL); 12112 12113 obp->b_resid = obp->b_bcount - (bp->b_bcount - bp->b_resid); 12114 bioerror(obp, bp->b_error); 12115 12116 sd_bioclone_free(bp); 12117 12118 /* 12119 * Get back the original buf. 12120 * Note that since the restoration of xb_blkno below 12121 * was removed, the sd_xbuf is not needed. 12122 */ 12123 bp = obp; 12124 /* 12125 * xp = SD_GET_XBUF(bp); 12126 * ASSERT(xp != NULL); 12127 */ 12128 } 12129 12130 /* 12131 * Convert sd->xb_blkno back to a minor-device relative value. 12132 * Note: this has been commented out, as it is not needed in the 12133 * current implementation of the driver (ie, since this function 12134 * is at the top of the layering chains, so the info will be 12135 * discarded) and it is in the "hot" IO path. 12136 * 12137 * partition = getminor(bp->b_edev) & SDPART_MASK; 12138 * xp->xb_blkno -= un->un_offset[partition]; 12139 */ 12140 12141 SD_NEXT_IODONE(index, un, bp); 12142 12143 SD_TRACE(SD_LOG_IO_PARTITION, un, 12144 "sd_mapblockaddr_iodone: exit: buf:0x%p\n", bp); 12145 } 12146 12147 12148 /* 12149 * Function: sd_mapblocksize_iostart 12150 * 12151 * Description: Convert between system block size (un->un_sys_blocksize) 12152 * and target block size (un->un_tgt_blocksize). 12153 * 12154 * Context: Can sleep to allocate resources. 
12155 * 12156 * Assumptions: A higher layer has already performed any partition validation, 12157 * and converted the xp->xb_blkno to an absolute value relative 12158 * to the start of the device. 12159 * 12160 * It is also assumed that the higher layer has implemented 12161 * an "overrun" mechanism for the case where the request would 12162 * read/write beyond the end of a partition. In this case we 12163 * assume (and ASSERT) that bp->b_resid == 0. 12164 * 12165 * Note: The implementation for this routine assumes the target 12166 * block size remains constant between allocation and transport. 12167 */ 12168 12169 static void 12170 sd_mapblocksize_iostart(int index, struct sd_lun *un, struct buf *bp) 12171 { 12172 struct sd_mapblocksize_info *bsp; 12173 struct sd_xbuf *xp; 12174 offset_t first_byte; 12175 daddr_t start_block, end_block; 12176 daddr_t request_bytes; 12177 ushort_t is_aligned = FALSE; 12178 12179 ASSERT(un != NULL); 12180 ASSERT(bp != NULL); 12181 ASSERT(!mutex_owned(SD_MUTEX(un))); 12182 ASSERT(bp->b_resid == 0); 12183 12184 SD_TRACE(SD_LOG_IO_RMMEDIA, un, 12185 "sd_mapblocksize_iostart: entry: buf:0x%p\n", bp); 12186 12187 /* 12188 * For a non-writable CD, a write request is an error 12189 */ 12190 if (ISCD(un) && ((bp->b_flags & B_READ) == 0) && 12191 (un->un_f_mmc_writable_media == FALSE)) { 12192 bioerror(bp, EIO); 12193 bp->b_resid = bp->b_bcount; 12194 SD_BEGIN_IODONE(index, un, bp); 12195 return; 12196 } 12197 12198 /* 12199 * We do not need a shadow buf if the device is using 12200 * un->un_sys_blocksize as its block size or if bcount == 0. 12201 * In this case there is no layer-private data block allocated. 12202 */ 12203 if ((un->un_tgt_blocksize == un->un_sys_blocksize) || 12204 (bp->b_bcount == 0)) { 12205 goto done; 12206 } 12207 12208 #if defined(__i386) || defined(__amd64) 12209 /* We do not support non-block-aligned transfers for ROD devices */ 12210 ASSERT(!ISROD(un)); 12211 #endif 12212 12213 xp = SD_GET_XBUF(bp); 12214 ASSERT(xp != NULL); 12215 12216 SD_INFO(SD_LOG_IO_RMMEDIA, un, "sd_mapblocksize_iostart: " 12217 "tgt_blocksize:0x%x sys_blocksize: 0x%x\n", 12218 un->un_tgt_blocksize, un->un_sys_blocksize); 12219 SD_INFO(SD_LOG_IO_RMMEDIA, un, "sd_mapblocksize_iostart: " 12220 "request start block:0x%x\n", xp->xb_blkno); 12221 SD_INFO(SD_LOG_IO_RMMEDIA, un, "sd_mapblocksize_iostart: " 12222 "request len:0x%x\n", bp->b_bcount); 12223 12224 /* 12225 * Allocate the layer-private data area for the mapblocksize layer. 12226 * Layers are allowed to use the xp_private member of the sd_xbuf 12227 * struct to store the pointer to their layer-private data block, but 12228 * each layer also has the responsibility of restoring the prior 12229 * contents of xb_private before returning the buf/xbuf to the 12230 * higher layer that sent it. 12231 * 12232 * Here we save the prior contents of xp->xb_private into the 12233 * bsp->mbs_oprivate field of our layer-private data area. This value 12234 * is restored by sd_mapblocksize_iodone() just prior to freeing up 12235 * the layer-private area and returning the buf/xbuf to the layer 12236 * that sent it. 12237 * 12238 * Note that here we use kmem_zalloc for the allocation as there are 12239 * parts of the mapblocksize code that expect certain fields to be 12240 * zero unless explicitly set to a required value. 
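	 *
	 * As a worked example of the conversion done below (hypothetical
	 * sizes): with un_sys_blocksize = 512, un_tgt_blocksize = 2048,
	 * xb_blkno = 3 and b_bcount = 1024:
	 *
	 *	first_byte    = 3 * 512 = 1536
	 *	start_block   = 1536 / 2048 = 0
	 *	end_block     = (1536 + 1024 + 2047) / 2048 = 2
	 *	request_bytes = (2 - 0) * 2048 = 4096
	 *
	 * and since (1536 % 2048) != 0 the request is unaligned, so a
	 * shadow buf is required.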
 */
	bsp = kmem_zalloc(sizeof (struct sd_mapblocksize_info), KM_SLEEP);
	bsp->mbs_oprivate = xp->xb_private;
	xp->xb_private = bsp;

	/*
	 * This treats the data on the disk (target) as an array of bytes.
	 * first_byte is the byte offset, from the beginning of the device,
	 * to the location of the request. This is converted from a
	 * un->un_sys_blocksize block address to a byte offset, and then back
	 * to a block address based upon a un->un_tgt_blocksize block size.
	 *
	 * xp->xb_blkno should be absolute upon entry into this function,
	 * but it is based upon partitions that use the "system"
	 * block size. It must be adjusted to reflect the block size of
	 * the target.
	 *
	 * Note that end_block is actually the block that follows the last
	 * block of the request, but that's what is needed for the computation.
	 */
	first_byte  = SD_SYSBLOCKS2BYTES(un, (offset_t)xp->xb_blkno);
	start_block = xp->xb_blkno = first_byte / un->un_tgt_blocksize;
	end_block   = (first_byte + bp->b_bcount + un->un_tgt_blocksize - 1) /
	    un->un_tgt_blocksize;

	/* request_bytes is rounded up to a multiple of the target block size */
	request_bytes = (end_block - start_block) * un->un_tgt_blocksize;

	/*
	 * See if the starting address of the request and the request
	 * length are aligned on a un->un_tgt_blocksize boundary. If aligned
	 * then we do not need to allocate a shadow buf to handle the request.
	 */
	if (((first_byte % un->un_tgt_blocksize) == 0) &&
	    ((bp->b_bcount % un->un_tgt_blocksize) == 0)) {
		is_aligned = TRUE;
	}

	if ((bp->b_flags & B_READ) == 0) {
		/*
		 * Lock the range for a write operation. An aligned request is
		 * considered a simple write; otherwise the request must be a
		 * read-modify-write.
		 */
		bsp->mbs_wmp = sd_range_lock(un, start_block, end_block - 1,
		    (is_aligned == TRUE) ? SD_WTYPE_SIMPLE : SD_WTYPE_RMW);
	}

	/*
	 * Alloc a shadow buf if the request is not aligned. Also, this is
	 * where the READ command is generated for a read-modify-write. (The
	 * write phase is deferred until after the read completes.)
	 */
	if (is_aligned == FALSE) {

		struct sd_mapblocksize_info	*shadow_bsp;
		struct sd_xbuf	*shadow_xp;
		struct buf	*shadow_bp;

		/*
		 * Allocate the shadow buf and its associated xbuf. Note that
		 * after this call the xb_blkno value in both the original
		 * buf's sd_xbuf _and_ the shadow buf's sd_xbuf will be the
		 * same: absolute relative to the start of the device, and
		 * adjusted for the target block size. The b_blkno in the
		 * shadow buf will also be set to this value. We should never
		 * change b_blkno in the original bp however.
		 *
		 * Note also that the shadow buf will always need to be a
		 * READ command, regardless of whether the incoming command
		 * is a READ or a WRITE.
		 */
		shadow_bp = sd_shadow_buf_alloc(bp, request_bytes, B_READ,
		    xp->xb_blkno,
		    (int (*)(struct buf *)) sd_mapblocksize_iodone);

		shadow_xp = SD_GET_XBUF(shadow_bp);

		/*
		 * Allocate the layer-private data for the shadow buf.
		 * (No need to preserve xb_private in the shadow xbuf.)
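		 *
		 * (Continuing the hypothetical 512/2048 example above:
		 * mbs_copy_offset computed below would be
		 * 1536 - (0 * 2048) = 1536, i.e. the user data starts
		 * 1536 bytes into the first target block returned by the
		 * shadow READ.)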
12322 */ 12323 shadow_xp->xb_private = shadow_bsp = 12324 kmem_zalloc(sizeof (struct sd_mapblocksize_info), KM_SLEEP); 12325 12326 /* 12327 * bsp->mbs_copy_offset is used later by sd_mapblocksize_iodone 12328 * to figure out where the start of the user data is (based upon 12329 * the system block size) in the data returned by the READ 12330 * command (which will be based upon the target blocksize). Note 12331 * that this is only really used if the request is unaligned. 12332 */ 12333 bsp->mbs_copy_offset = (ssize_t)(first_byte - 12334 ((offset_t)xp->xb_blkno * un->un_tgt_blocksize)); 12335 ASSERT((bsp->mbs_copy_offset >= 0) && 12336 (bsp->mbs_copy_offset < un->un_tgt_blocksize)); 12337 12338 shadow_bsp->mbs_copy_offset = bsp->mbs_copy_offset; 12339 12340 shadow_bsp->mbs_layer_index = bsp->mbs_layer_index = index; 12341 12342 /* Transfer the wmap (if any) to the shadow buf */ 12343 shadow_bsp->mbs_wmp = bsp->mbs_wmp; 12344 bsp->mbs_wmp = NULL; 12345 12346 /* 12347 * The shadow buf goes on from here in place of the 12348 * original buf. 12349 */ 12350 shadow_bsp->mbs_orig_bp = bp; 12351 bp = shadow_bp; 12352 } 12353 12354 SD_INFO(SD_LOG_IO_RMMEDIA, un, 12355 "sd_mapblocksize_iostart: tgt start block:0x%x\n", xp->xb_blkno); 12356 SD_INFO(SD_LOG_IO_RMMEDIA, un, 12357 "sd_mapblocksize_iostart: tgt request len:0x%x\n", 12358 request_bytes); 12359 SD_INFO(SD_LOG_IO_RMMEDIA, un, 12360 "sd_mapblocksize_iostart: shadow buf:0x%x\n", bp); 12361 12362 done: 12363 SD_NEXT_IOSTART(index, un, bp); 12364 12365 SD_TRACE(SD_LOG_IO_RMMEDIA, un, 12366 "sd_mapblocksize_iostart: exit: buf:0x%p\n", bp); 12367 } 12368 12369 12370 /* 12371 * Function: sd_mapblocksize_iodone 12372 * 12373 * Description: Completion side processing for block-size mapping. 12374 * 12375 * Context: May be called under interrupt context 12376 */ 12377 12378 static void 12379 sd_mapblocksize_iodone(int index, struct sd_lun *un, struct buf *bp) 12380 { 12381 struct sd_mapblocksize_info *bsp; 12382 struct sd_xbuf *xp; 12383 struct sd_xbuf *orig_xp; /* sd_xbuf for the original buf */ 12384 struct buf *orig_bp; /* ptr to the original buf */ 12385 offset_t shadow_end; 12386 offset_t request_end; 12387 offset_t shadow_start; 12388 ssize_t copy_offset; 12389 size_t copy_length; 12390 size_t shortfall; 12391 uint_t is_write; /* TRUE if this bp is a WRITE */ 12392 uint_t has_wmap; /* TRUE is this bp has a wmap */ 12393 12394 ASSERT(un != NULL); 12395 ASSERT(bp != NULL); 12396 12397 SD_TRACE(SD_LOG_IO_RMMEDIA, un, 12398 "sd_mapblocksize_iodone: entry: buf:0x%p\n", bp); 12399 12400 /* 12401 * There is no shadow buf or layer-private data if the target is 12402 * using un->un_sys_blocksize as its block size or if bcount == 0. 12403 */ 12404 if ((un->un_tgt_blocksize == un->un_sys_blocksize) || 12405 (bp->b_bcount == 0)) { 12406 goto exit; 12407 } 12408 12409 xp = SD_GET_XBUF(bp); 12410 ASSERT(xp != NULL); 12411 12412 /* Retrieve the pointer to the layer-private data area from the xbuf. */ 12413 bsp = xp->xb_private; 12414 12415 is_write = ((bp->b_flags & B_READ) == 0) ? TRUE : FALSE; 12416 has_wmap = (bsp->mbs_wmp != NULL) ? TRUE : FALSE; 12417 12418 if (is_write) { 12419 /* 12420 * For a WRITE request we must free up the block range that 12421 * we have locked up. This holds regardless of whether this is 12422 * an aligned write request or a read-modify-write request. 
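		 *
		 * (This releases the range taken by sd_range_lock() in
		 * sd_mapblocksize_iostart().)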
12423 */ 12424 sd_range_unlock(un, bsp->mbs_wmp); 12425 bsp->mbs_wmp = NULL; 12426 } 12427 12428 if ((bp->b_iodone != (int(*)(struct buf *))sd_mapblocksize_iodone)) { 12429 /* 12430 * An aligned read or write command will have no shadow buf; 12431 * there is not much else to do with it. 12432 */ 12433 goto done; 12434 } 12435 12436 orig_bp = bsp->mbs_orig_bp; 12437 ASSERT(orig_bp != NULL); 12438 orig_xp = SD_GET_XBUF(orig_bp); 12439 ASSERT(orig_xp != NULL); 12440 ASSERT(!mutex_owned(SD_MUTEX(un))); 12441 12442 if (!is_write && has_wmap) { 12443 /* 12444 * A READ with a wmap means this is the READ phase of a 12445 * read-modify-write. If an error occurred on the READ then 12446 * we do not proceed with the WRITE phase or copy any data. 12447 * Just release the write maps and return with an error. 12448 */ 12449 if ((bp->b_resid != 0) || (bp->b_error != 0)) { 12450 orig_bp->b_resid = orig_bp->b_bcount; 12451 bioerror(orig_bp, bp->b_error); 12452 sd_range_unlock(un, bsp->mbs_wmp); 12453 goto freebuf_done; 12454 } 12455 } 12456 12457 /* 12458 * Here is where we set up to copy the data from the shadow buf 12459 * into the space associated with the original buf. 12460 * 12461 * To deal with the conversion between block sizes, these 12462 * computations treat the data as an array of bytes, with the 12463 * first byte (byte 0) corresponding to the first byte in the 12464 * first block on the disk. 12465 */ 12466 12467 /* 12468 * shadow_start and shadow_len indicate the location and size of 12469 * the data returned with the shadow IO request. 12470 */ 12471 shadow_start = SD_TGTBLOCKS2BYTES(un, (offset_t)xp->xb_blkno); 12472 shadow_end = shadow_start + bp->b_bcount - bp->b_resid; 12473 12474 /* 12475 * copy_offset gives the offset (in bytes) from the start of the first 12476 * block of the READ request to the beginning of the data. We retrieve 12477 * this value from xb_pktp in the ORIGINAL xbuf, as it has been saved 12478 * there by sd_mapblockize_iostart(). copy_length gives the amount of 12479 * data to be copied (in bytes). 12480 */ 12481 copy_offset = bsp->mbs_copy_offset; 12482 ASSERT((copy_offset >= 0) && (copy_offset < un->un_tgt_blocksize)); 12483 copy_length = orig_bp->b_bcount; 12484 request_end = shadow_start + copy_offset + orig_bp->b_bcount; 12485 12486 /* 12487 * Set up the resid and error fields of orig_bp as appropriate. 12488 */ 12489 if (shadow_end >= request_end) { 12490 /* We got all the requested data; set resid to zero */ 12491 orig_bp->b_resid = 0; 12492 } else { 12493 /* 12494 * We failed to get enough data to fully satisfy the original 12495 * request. Just copy back whatever data we got and set 12496 * up the residual and error code as required. 12497 * 12498 * 'shortfall' is the amount by which the data received with the 12499 * shadow buf has "fallen short" of the requested amount. 12500 */ 12501 shortfall = (size_t)(request_end - shadow_end); 12502 12503 if (shortfall > orig_bp->b_bcount) { 12504 /* 12505 * We did not get enough data to even partially 12506 * fulfill the original request. The residual is 12507 * equal to the amount requested. 12508 */ 12509 orig_bp->b_resid = orig_bp->b_bcount; 12510 } else { 12511 /* 12512 * We did not get all the data that we requested 12513 * from the device, but we will try to return what 12514 * portion we did get. 
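		 *
		 * (For example, with hypothetical numbers: if the
		 * original request was for 4096 bytes and the shadow IO
		 * fell 512 bytes short of request_end, shortfall is 512,
		 * b_resid is set to 512, and copy_length is trimmed so
		 * that only the bytes actually received are copied back.)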
12515 */ 12516 orig_bp->b_resid = shortfall; 12517 } 12518 ASSERT(copy_length >= orig_bp->b_resid); 12519 copy_length -= orig_bp->b_resid; 12520 } 12521 12522 /* Propagate the error code from the shadow buf to the original buf */ 12523 bioerror(orig_bp, bp->b_error); 12524 12525 if (is_write) { 12526 goto freebuf_done; /* No data copying for a WRITE */ 12527 } 12528 12529 if (has_wmap) { 12530 /* 12531 * This is a READ command from the READ phase of a 12532 * read-modify-write request. We have to copy the data given 12533 * by the user OVER the data returned by the READ command, 12534 * then convert the command from a READ to a WRITE and send 12535 * it back to the target. 12536 */ 12537 bcopy(orig_bp->b_un.b_addr, bp->b_un.b_addr + copy_offset, 12538 copy_length); 12539 12540 bp->b_flags &= ~((int)B_READ); /* Convert to a WRITE */ 12541 12542 /* 12543 * Dispatch the WRITE command to the taskq thread, which 12544 * will in turn send the command to the target. When the 12545 * WRITE command completes, we (sd_mapblocksize_iodone()) 12546 * will get called again as part of the iodone chain 12547 * processing for it. Note that we will still be dealing 12548 * with the shadow buf at that point. 12549 */ 12550 if (taskq_dispatch(sd_wmr_tq, sd_read_modify_write_task, bp, 12551 KM_NOSLEEP) != 0) { 12552 /* 12553 * Dispatch was successful so we are done. Return 12554 * without going any higher up the iodone chain. Do 12555 * not free up any layer-private data until after the 12556 * WRITE completes. 12557 */ 12558 return; 12559 } 12560 12561 /* 12562 * Dispatch of the WRITE command failed; set up the error 12563 * condition and send this IO back up the iodone chain. 12564 */ 12565 bioerror(orig_bp, EIO); 12566 orig_bp->b_resid = orig_bp->b_bcount; 12567 12568 } else { 12569 /* 12570 * This is a regular READ request (ie, not a RMW). Copy the 12571 * data from the shadow buf into the original buf. The 12572 * copy_offset compensates for any "misalignment" between the 12573 * shadow buf (with its un->un_tgt_blocksize blocks) and the 12574 * original buf (with its un->un_sys_blocksize blocks). 12575 */ 12576 bcopy(bp->b_un.b_addr + copy_offset, orig_bp->b_un.b_addr, 12577 copy_length); 12578 } 12579 12580 freebuf_done: 12581 12582 /* 12583 * At this point we still have both the shadow buf AND the original 12584 * buf to deal with, as well as the layer-private data area in each. 12585 * Local variables are as follows: 12586 * 12587 * bp -- points to shadow buf 12588 * xp -- points to xbuf of shadow buf 12589 * bsp -- points to layer-private data area of shadow buf 12590 * orig_bp -- points to original buf 12591 * 12592 * First free the shadow buf and its associated xbuf, then free the 12593 * layer-private data area from the shadow buf. There is no need to 12594 * restore xb_private in the shadow xbuf. 12595 */ 12596 sd_shadow_buf_free(bp); 12597 kmem_free(bsp, sizeof (struct sd_mapblocksize_info)); 12598 12599 /* 12600 * Now update the local variables to point to the original buf, xbuf, 12601 * and layer-private area. 12602 */ 12603 bp = orig_bp; 12604 xp = SD_GET_XBUF(bp); 12605 ASSERT(xp != NULL); 12606 ASSERT(xp == orig_xp); 12607 bsp = xp->xb_private; 12608 ASSERT(bsp != NULL); 12609 12610 done: 12611 /* 12612 * Restore xb_private to whatever it was set to by the next higher 12613 * layer in the chain, then free the layer-private data area. 
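	 *
	 * (This undoes the xp->xb_private = bsp swap performed by
	 * sd_mapblocksize_iostart(), handing back whatever pointer the
	 * higher layer had stored there.)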
12614 */ 12615 xp->xb_private = bsp->mbs_oprivate; 12616 kmem_free(bsp, sizeof (struct sd_mapblocksize_info)); 12617 12618 exit: 12619 SD_TRACE(SD_LOG_IO_RMMEDIA, SD_GET_UN(bp), 12620 "sd_mapblocksize_iodone: calling SD_NEXT_IODONE: buf:0x%p\n", bp); 12621 12622 SD_NEXT_IODONE(index, un, bp); 12623 } 12624 12625 12626 /* 12627 * Function: sd_checksum_iostart 12628 * 12629 * Description: A stub function for a layer that's currently not used. 12630 * For now just a placeholder. 12631 * 12632 * Context: Kernel thread context 12633 */ 12634 12635 static void 12636 sd_checksum_iostart(int index, struct sd_lun *un, struct buf *bp) 12637 { 12638 ASSERT(un != NULL); 12639 ASSERT(bp != NULL); 12640 ASSERT(!mutex_owned(SD_MUTEX(un))); 12641 SD_NEXT_IOSTART(index, un, bp); 12642 } 12643 12644 12645 /* 12646 * Function: sd_checksum_iodone 12647 * 12648 * Description: A stub function for a layer that's currently not used. 12649 * For now just a placeholder. 12650 * 12651 * Context: May be called under interrupt context 12652 */ 12653 12654 static void 12655 sd_checksum_iodone(int index, struct sd_lun *un, struct buf *bp) 12656 { 12657 ASSERT(un != NULL); 12658 ASSERT(bp != NULL); 12659 ASSERT(!mutex_owned(SD_MUTEX(un))); 12660 SD_NEXT_IODONE(index, un, bp); 12661 } 12662 12663 12664 /* 12665 * Function: sd_checksum_uscsi_iostart 12666 * 12667 * Description: A stub function for a layer that's currently not used. 12668 * For now just a placeholder. 12669 * 12670 * Context: Kernel thread context 12671 */ 12672 12673 static void 12674 sd_checksum_uscsi_iostart(int index, struct sd_lun *un, struct buf *bp) 12675 { 12676 ASSERT(un != NULL); 12677 ASSERT(bp != NULL); 12678 ASSERT(!mutex_owned(SD_MUTEX(un))); 12679 SD_NEXT_IOSTART(index, un, bp); 12680 } 12681 12682 12683 /* 12684 * Function: sd_checksum_uscsi_iodone 12685 * 12686 * Description: A stub function for a layer that's currently not used. 12687 * For now just a placeholder. 12688 * 12689 * Context: May be called under interrupt context 12690 */ 12691 12692 static void 12693 sd_checksum_uscsi_iodone(int index, struct sd_lun *un, struct buf *bp) 12694 { 12695 ASSERT(un != NULL); 12696 ASSERT(bp != NULL); 12697 ASSERT(!mutex_owned(SD_MUTEX(un))); 12698 SD_NEXT_IODONE(index, un, bp); 12699 } 12700 12701 12702 /* 12703 * Function: sd_pm_iostart 12704 * 12705 * Description: iostart-side routine for Power management. 12706 * 12707 * Context: Kernel thread context 12708 */ 12709 12710 static void 12711 sd_pm_iostart(int index, struct sd_lun *un, struct buf *bp) 12712 { 12713 ASSERT(un != NULL); 12714 ASSERT(bp != NULL); 12715 ASSERT(!mutex_owned(SD_MUTEX(un))); 12716 ASSERT(!mutex_owned(&un->un_pm_mutex)); 12717 12718 SD_TRACE(SD_LOG_IO_PM, un, "sd_pm_iostart: entry\n"); 12719 12720 if (sd_pm_entry(un) != DDI_SUCCESS) { 12721 /* 12722 * Set up to return the failed buf back up the 'iodone' 12723 * side of the calling chain. 12724 */ 12725 bioerror(bp, EIO); 12726 bp->b_resid = bp->b_bcount; 12727 12728 SD_BEGIN_IODONE(index, un, bp); 12729 12730 SD_TRACE(SD_LOG_IO_PM, un, "sd_pm_iostart: exit\n"); 12731 return; 12732 } 12733 12734 SD_NEXT_IOSTART(index, un, bp); 12735 12736 SD_TRACE(SD_LOG_IO_PM, un, "sd_pm_iostart: exit\n"); 12737 } 12738 12739 12740 /* 12741 * Function: sd_pm_iodone 12742 * 12743 * Description: iodone-side routine for power management.
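 * Note: when power management is enabled, this balances the sd_pm_entry() call made in sd_pm_iostart() by invoking sd_pm_exit() as the command completes.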
12744 * 12745 * Context: may be called from interrupt context 12746 */ 12747 12748 static void 12749 sd_pm_iodone(int index, struct sd_lun *un, struct buf *bp) 12750 { 12751 ASSERT(un != NULL); 12752 ASSERT(bp != NULL); 12753 ASSERT(!mutex_owned(&un->un_pm_mutex)); 12754 12755 SD_TRACE(SD_LOG_IO_PM, un, "sd_pm_iodone: entry\n"); 12756 12757 /* 12758 * After attach the following flag is only read, so don't 12759 * take the penalty of acquiring a mutex for it. 12760 */ 12761 if (un->un_f_pm_is_enabled == TRUE) { 12762 sd_pm_exit(un); 12763 } 12764 12765 SD_NEXT_IODONE(index, un, bp); 12766 12767 SD_TRACE(SD_LOG_IO_PM, un, "sd_pm_iodone: exit\n"); 12768 } 12769 12770 12771 /* 12772 * Function: sd_core_iostart 12773 * 12774 * Description: Primary driver function for enqueuing buf(9S) structs from 12775 * the system and initiating IO to the target device 12776 * 12777 * Context: Kernel thread context. Can sleep. 12778 * 12779 * Assumptions: - The given xp->xb_blkno is absolute 12780 * (ie, relative to the start of the device). 12781 * - The IO is to be done using the native blocksize of 12782 * the device, as specified in un->un_tgt_blocksize. 12783 */ 12784 /* ARGSUSED */ 12785 static void 12786 sd_core_iostart(int index, struct sd_lun *un, struct buf *bp) 12787 { 12788 struct sd_xbuf *xp; 12789 12790 ASSERT(un != NULL); 12791 ASSERT(bp != NULL); 12792 ASSERT(!mutex_owned(SD_MUTEX(un))); 12793 ASSERT(bp->b_resid == 0); 12794 12795 SD_TRACE(SD_LOG_IO_CORE, un, "sd_core_iostart: entry: bp:0x%p\n", bp); 12796 12797 xp = SD_GET_XBUF(bp); 12798 ASSERT(xp != NULL); 12799 12800 mutex_enter(SD_MUTEX(un)); 12801 12802 /* 12803 * If we are currently in the failfast state, fail any new IO 12804 * that has B_FAILFAST set, then return. 12805 */ 12806 if ((bp->b_flags & B_FAILFAST) && 12807 (un->un_failfast_state == SD_FAILFAST_ACTIVE)) { 12808 mutex_exit(SD_MUTEX(un)); 12809 bioerror(bp, EIO); 12810 bp->b_resid = bp->b_bcount; 12811 SD_BEGIN_IODONE(index, un, bp); 12812 return; 12813 } 12814 12815 if (SD_IS_DIRECT_PRIORITY(xp)) { 12816 /* 12817 * Priority command -- transport it immediately. 12818 * 12819 * Note: We may want to assert that USCSI_DIAGNOSE is set, 12820 * because all direct priority commands should be associated 12821 * with error recovery actions which we don't want to retry. 12822 */ 12823 sd_start_cmds(un, bp); 12824 } else { 12825 /* 12826 * Normal command -- add it to the wait queue, then start 12827 * transporting commands from the wait queue. 12828 */ 12829 sd_add_buf_to_waitq(un, bp); 12830 SD_UPDATE_KSTATS(un, kstat_waitq_enter, bp); 12831 sd_start_cmds(un, NULL); 12832 } 12833 12834 mutex_exit(SD_MUTEX(un)); 12835 12836 SD_TRACE(SD_LOG_IO_CORE, un, "sd_core_iostart: exit: bp:0x%p\n", bp); 12837 } 12838 12839 12840 /* 12841 * Function: sd_init_cdb_limits 12842 * 12843 * Description: This is to handle scsi_pkt initialization differences 12844 * between the driver platforms. 12845 * 12846 * Legacy behaviors: 12847 * 12848 * If the block number or the sector count exceeds the 12849 * capabilities of a Group 0 command, shift over to a 12850 * Group 1 command. We don't blindly use Group 1 12851 * commands because a) some drives (CDC Wren IVs) get a 12852 * bit confused, and b) there is probably a fair amount 12853 * of speed difference for a target to receive and decode 12854 * a 10 byte command instead of a 6 byte command. 12855 * 12856 * The xfer time difference of 6 vs 10 byte CDBs is 12857 * still significant so this code is still worthwhile. 
10 byte CDBs are very inefficient with the fas HBA driver 12859 * and older disks. Each CDB byte took 1 usec with some 12860 * popular disks. 12861 * 12862 * Context: Must be called at attach time 12863 */ 12864 12865 static void 12866 sd_init_cdb_limits(struct sd_lun *un) 12867 { 12868 int hba_cdb_limit; 12869 12870 /* 12871 * Use CDB_GROUP1 commands for most devices except for 12872 * parallel SCSI fixed drives in which case we get better 12873 * performance using CDB_GROUP0 commands (where applicable). 12874 */ 12875 un->un_mincdb = SD_CDB_GROUP1; 12876 #if !defined(__fibre) 12877 if (!un->un_f_is_fibre && !un->un_f_cfg_is_atapi && !ISROD(un) && 12878 !un->un_f_has_removable_media) { 12879 un->un_mincdb = SD_CDB_GROUP0; 12880 } 12881 #endif 12882 12883 /* 12884 * Try to read the max-cdb-length supported by HBA. 12885 */ 12886 un->un_max_hba_cdb = scsi_ifgetcap(SD_ADDRESS(un), "max-cdb-length", 1); 12887 if (0 >= un->un_max_hba_cdb) { 12888 un->un_max_hba_cdb = CDB_GROUP4; 12889 hba_cdb_limit = SD_CDB_GROUP4; 12890 } else if (0 < un->un_max_hba_cdb && 12891 un->un_max_hba_cdb < CDB_GROUP1) { 12892 hba_cdb_limit = SD_CDB_GROUP0; 12893 } else if (CDB_GROUP1 <= un->un_max_hba_cdb && 12894 un->un_max_hba_cdb < CDB_GROUP5) { 12895 hba_cdb_limit = SD_CDB_GROUP1; 12896 } else if (CDB_GROUP5 <= un->un_max_hba_cdb && 12897 un->un_max_hba_cdb < CDB_GROUP4) { 12898 hba_cdb_limit = SD_CDB_GROUP5; 12899 } else { 12900 hba_cdb_limit = SD_CDB_GROUP4; 12901 } 12902 12903 /* 12904 * Use CDB_GROUP5 commands for removable devices. Use CDB_GROUP4 12905 * commands for fixed disks unless we are building for a 32 bit 12906 * kernel. 12907 */ 12908 #ifdef _LP64 12909 un->un_maxcdb = (un->un_f_has_removable_media) ? SD_CDB_GROUP5 : 12910 min(hba_cdb_limit, SD_CDB_GROUP4); 12911 #else 12912 un->un_maxcdb = (un->un_f_has_removable_media) ? SD_CDB_GROUP5 : 12913 min(hba_cdb_limit, SD_CDB_GROUP1); 12914 #endif 12915 12916 un->un_status_len = (int)((un->un_f_arq_enabled == TRUE) 12917 ? sizeof (struct scsi_arq_status) : 1); 12918 un->un_cmd_timeout = (ushort_t)sd_io_time; 12919 un->un_uscsi_timeout = ((ISCD(un)) ? 2 : 1) * un->un_cmd_timeout; 12920 } 12921 12922 12923 /* 12924 * Function: sd_initpkt_for_buf 12925 * 12926 * Description: Allocate and initialize for transport a scsi_pkt struct, 12927 * based upon the info specified in the given buf struct. 12928 * 12929 * Assumes the xb_blkno in the request is absolute (ie, 12930 * relative to the start of the device, NOT the partition!). 12931 * Also assumes that the request is using the native block 12932 * size of the device (as returned by the READ CAPACITY 12933 * command). 12934 * 12935 * Return Code: SD_PKT_ALLOC_SUCCESS 12936 * SD_PKT_ALLOC_FAILURE 12937 * SD_PKT_ALLOC_FAILURE_NO_DMA 12938 * SD_PKT_ALLOC_FAILURE_CDB_TOO_SMALL 12939 * 12940 * Context: Kernel thread and may be called from software interrupt context 12941 * as part of a sdrunout callback.
This function may not block or 12942 * call routines that block 12943 */ 12944 12945 static int 12946 sd_initpkt_for_buf(struct buf *bp, struct scsi_pkt **pktpp) 12947 { 12948 struct sd_xbuf *xp; 12949 struct scsi_pkt *pktp = NULL; 12950 struct sd_lun *un; 12951 size_t blockcount; 12952 daddr_t startblock; 12953 int rval; 12954 int cmd_flags; 12955 12956 ASSERT(bp != NULL); 12957 ASSERT(pktpp != NULL); 12958 xp = SD_GET_XBUF(bp); 12959 ASSERT(xp != NULL); 12960 un = SD_GET_UN(bp); 12961 ASSERT(un != NULL); 12962 ASSERT(mutex_owned(SD_MUTEX(un))); 12963 ASSERT(bp->b_resid == 0); 12964 12965 SD_TRACE(SD_LOG_IO_CORE, un, 12966 "sd_initpkt_for_buf: entry: buf:0x%p\n", bp); 12967 12968 mutex_exit(SD_MUTEX(un)); 12969 12970 #if defined(__i386) || defined(__amd64) /* DMAFREE for x86 only */ 12971 if (xp->xb_pkt_flags & SD_XB_DMA_FREED) { 12972 /* 12973 * Already have a scsi_pkt -- just need DMA resources. 12974 * We must recompute the CDB in case the mapping returns 12975 * a nonzero pkt_resid. 12976 * Note: if this is a portion of a PKT_DMA_PARTIAL transfer 12977 * that is being retried, the unmap/remap of the DMA resources 12978 * will result in the entire transfer starting over again 12979 * from the very first block. 12980 */ 12981 ASSERT(xp->xb_pktp != NULL); 12982 pktp = xp->xb_pktp; 12983 } else { 12984 pktp = NULL; 12985 } 12986 #endif /* __i386 || __amd64 */ 12987 12988 startblock = xp->xb_blkno; /* Absolute block num. */ 12989 blockcount = SD_BYTES2TGTBLOCKS(un, bp->b_bcount); 12990 12991 cmd_flags = un->un_pkt_flags | (xp->xb_pkt_flags & SD_XB_INITPKT_MASK); 12992 12993 /* 12994 * sd_setup_rw_pkt will determine the appropriate CDB group to use, 12995 * call scsi_init_pkt, and build the CDB. 12996 */ 12997 rval = sd_setup_rw_pkt(un, &pktp, bp, 12998 cmd_flags, sdrunout, (caddr_t)un, 12999 startblock, blockcount); 13000 13001 if (rval == 0) { 13002 /* 13003 * Success. 13004 * 13005 * If partial DMA is being used and required for this transfer, 13006 * set it up here. 13007 */ 13008 if ((un->un_pkt_flags & PKT_DMA_PARTIAL) != 0 && 13009 (pktp->pkt_resid != 0)) { 13010 13011 /* 13012 * Save the CDB length and pkt_resid for the 13013 * next xfer 13014 */ 13015 xp->xb_dma_resid = pktp->pkt_resid; 13016 13017 /* rezero resid */ 13018 pktp->pkt_resid = 0; 13019 13020 } else { 13021 xp->xb_dma_resid = 0; 13022 } 13023 13024 pktp->pkt_flags = un->un_tagflags; 13025 pktp->pkt_time = un->un_cmd_timeout; 13026 pktp->pkt_comp = sdintr; 13027 13028 pktp->pkt_private = bp; 13029 *pktpp = pktp; 13030 13031 SD_TRACE(SD_LOG_IO_CORE, un, 13032 "sd_initpkt_for_buf: exit: buf:0x%p\n", bp); 13033 13034 #if defined(__i386) || defined(__amd64) /* DMAFREE for x86 only */ 13035 xp->xb_pkt_flags &= ~SD_XB_DMA_FREED; 13036 #endif 13037 13038 mutex_enter(SD_MUTEX(un)); 13039 return (SD_PKT_ALLOC_SUCCESS); 13040 13041 } 13042 13043 /* 13044 * SD_PKT_ALLOC_FAILURE is the only expected failure code 13045 * from sd_setup_rw_pkt. 13046 */ 13047 ASSERT(rval == SD_PKT_ALLOC_FAILURE); 13048 13049 if (rval == SD_PKT_ALLOC_FAILURE) { 13050 *pktpp = NULL; 13051 /* 13052 * Set the driver state to RWAIT to indicate the driver 13053 * is waiting on resource allocations. The driver will not 13054 * suspend, pm_suspend, or detach while the state is RWAIT. 13055 */ 13056 mutex_enter(SD_MUTEX(un)); 13057 New_state(un, SD_STATE_RWAIT); 13058 13059 SD_ERROR(SD_LOG_IO_CORE, un, 13060 "sd_initpkt_for_buf: No pktp.
exit bp:0x%p\n", bp); 13061 13062 if ((bp->b_flags & B_ERROR) != 0) { 13063 return (SD_PKT_ALLOC_FAILURE_NO_DMA); 13064 } 13065 return (SD_PKT_ALLOC_FAILURE); 13066 } else { 13067 /* 13068 * SD_PKT_ALLOC_FAILURE_CDB_TOO_SMALL 13069 * 13070 * This should never happen. Maybe someone messed with the 13071 * kernel's minphys? 13072 */ 13073 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 13074 "Request rejected: too large for CDB: " 13075 "lba:0x%08lx len:0x%08lx\n", startblock, blockcount); 13076 SD_ERROR(SD_LOG_IO_CORE, un, 13077 "sd_initpkt_for_buf: No cp. exit bp:0x%p\n", bp); 13078 mutex_enter(SD_MUTEX(un)); 13079 return (SD_PKT_ALLOC_FAILURE_CDB_TOO_SMALL); 13080 13081 } 13082 } 13083 13084 13085 /* 13086 * Function: sd_destroypkt_for_buf 13087 * 13088 * Description: Free the scsi_pkt(9S) for the given bp (buf IO processing). 13089 * 13090 * Context: Kernel thread or interrupt context 13091 */ 13092 13093 static void 13094 sd_destroypkt_for_buf(struct buf *bp) 13095 { 13096 ASSERT(bp != NULL); 13097 ASSERT(SD_GET_UN(bp) != NULL); 13098 13099 SD_TRACE(SD_LOG_IO_CORE, SD_GET_UN(bp), 13100 "sd_destroypkt_for_buf: entry: buf:0x%p\n", bp); 13101 13102 ASSERT(SD_GET_PKTP(bp) != NULL); 13103 scsi_destroy_pkt(SD_GET_PKTP(bp)); 13104 13105 SD_TRACE(SD_LOG_IO_CORE, SD_GET_UN(bp), 13106 "sd_destroypkt_for_buf: exit: buf:0x%p\n", bp); 13107 } 13108 13109 /* 13110 * Function: sd_setup_rw_pkt 13111 * 13112 * Description: Determines appropriate CDB group for the requested LBA 13113 * and transfer length, calls scsi_init_pkt, and builds 13114 * the CDB. Do not use for partial DMA transfers except 13115 * for the initial transfer since the CDB size must 13116 * remain constant. 13117 * 13118 * Context: Kernel thread and may be called from software interrupt 13119 * context as part of a sdrunout callback. This function may not 13120 * block or call routines that block 13121 */ 13122 13123 13124 int 13125 sd_setup_rw_pkt(struct sd_lun *un, 13126 struct scsi_pkt **pktpp, struct buf *bp, int flags, 13127 int (*callback)(caddr_t), caddr_t callback_arg, 13128 diskaddr_t lba, uint32_t blockcount) 13129 { 13130 struct scsi_pkt *return_pktp; 13131 union scsi_cdb *cdbp; 13132 struct sd_cdbinfo *cp = NULL; 13133 int i; 13134 13135 /* 13136 * See which size CDB to use, based upon the request. 13137 */ 13138 for (i = un->un_mincdb; i <= un->un_maxcdb; i++) { 13139 13140 /* 13141 * Check lba and block count against sd_cdbtab limits. 13142 * In the partial DMA case, we have to use the same size 13143 * CDB for all the transfers. Check lba + blockcount 13144 * against the max LBA so we know that segment of the 13145 * transfer can use the CDB we select. 13146 */ 13147 if ((lba + blockcount - 1 <= sd_cdbtab[i].sc_maxlba) && 13148 (blockcount <= sd_cdbtab[i].sc_maxlen)) { 13149 13150 /* 13151 * The command will fit into the CDB type 13152 * specified by sd_cdbtab[i]. 13153 */ 13154 cp = sd_cdbtab + i; 13155 13156 /* 13157 * Call scsi_init_pkt so we can fill in the 13158 * CDB. 13159 */ 13160 return_pktp = scsi_init_pkt(SD_ADDRESS(un), *pktpp, 13161 bp, cp->sc_grpcode, un->un_status_len, 0, 13162 flags, callback, callback_arg); 13163 13164 if (return_pktp != NULL) { 13165 13166 /* 13167 * Return new value of pkt 13168 */ 13169 *pktpp = return_pktp; 13170 13171 /* 13172 * To be safe, zero the CDB, ensuring there is 13173 * no leftover data from a previous command.
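 * (cp->sc_grpcode doubles as the CDB length in bytes: 6 for Group 0, 10 for Group 1, 12 for Group 5 and 16 for Group 4, so the bzero() below clears exactly one CDB.)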
13174 */ 13175 bzero(return_pktp->pkt_cdbp, cp->sc_grpcode); 13176 13177 /* 13178 * Handle partial DMA mapping 13179 */ 13180 if (return_pktp->pkt_resid != 0) { 13181 13182 /* 13183 * Not going to xfer as many blocks as 13184 * originally expected 13185 */ 13186 blockcount -= 13187 SD_BYTES2TGTBLOCKS(un, 13188 return_pktp->pkt_resid); 13189 } 13190 13191 cdbp = (union scsi_cdb *)return_pktp->pkt_cdbp; 13192 13193 /* 13194 * Set command byte based on the CDB 13195 * type we matched. 13196 */ 13197 cdbp->scc_cmd = cp->sc_grpmask | 13198 ((bp->b_flags & B_READ) ? 13199 SCMD_READ : SCMD_WRITE); 13200 13201 SD_FILL_SCSI1_LUN(un, return_pktp); 13202 13203 /* 13204 * Fill in LBA and length 13205 */ 13206 ASSERT((cp->sc_grpcode == CDB_GROUP1) || 13207 (cp->sc_grpcode == CDB_GROUP4) || 13208 (cp->sc_grpcode == CDB_GROUP0) || 13209 (cp->sc_grpcode == CDB_GROUP5)); 13210 13211 if (cp->sc_grpcode == CDB_GROUP1) { 13212 FORMG1ADDR(cdbp, lba); 13213 FORMG1COUNT(cdbp, blockcount); 13214 return (0); 13215 } else if (cp->sc_grpcode == CDB_GROUP4) { 13216 FORMG4LONGADDR(cdbp, lba); 13217 FORMG4COUNT(cdbp, blockcount); 13218 return (0); 13219 } else if (cp->sc_grpcode == CDB_GROUP0) { 13220 FORMG0ADDR(cdbp, lba); 13221 FORMG0COUNT(cdbp, blockcount); 13222 return (0); 13223 } else if (cp->sc_grpcode == CDB_GROUP5) { 13224 FORMG5ADDR(cdbp, lba); 13225 FORMG5COUNT(cdbp, blockcount); 13226 return (0); 13227 } 13228 13229 /* 13230 * It should be impossible to not match one 13231 * of the CDB types above, so we should never 13232 * reach this point. Set the CDB command byte 13233 * to test-unit-ready to avoid writing 13234 * to somewhere we don't intend. 13235 */ 13236 cdbp->scc_cmd = SCMD_TEST_UNIT_READY; 13237 return (SD_PKT_ALLOC_FAILURE_CDB_TOO_SMALL); 13238 } else { 13239 /* 13240 * Couldn't get scsi_pkt 13241 */ 13242 return (SD_PKT_ALLOC_FAILURE); 13243 } 13244 } 13245 } 13246 13247 /* 13248 * None of the available CDB types were suitable. This really 13249 * should never happen: on a 64 bit system we support 13250 * READ16/WRITE16 which will hold an entire 64 bit disk address 13251 * and on a 32 bit system we will refuse to bind to a device 13252 * larger than 2TB so addresses will never be larger than 32 bits. 13253 */ 13254 return (SD_PKT_ALLOC_FAILURE_CDB_TOO_SMALL); 13255 } 13256 13257 /* 13258 * Function: sd_setup_next_rw_pkt 13259 * 13260 * Description: Setup packet for partial DMA transfers, except for the 13261 * initial transfer. sd_setup_rw_pkt should be used for 13262 * the initial transfer. 13263 * 13264 * Context: Kernel thread and may be called from interrupt context. 13265 */ 13266 13267 int 13268 sd_setup_next_rw_pkt(struct sd_lun *un, 13269 struct scsi_pkt *pktp, struct buf *bp, 13270 diskaddr_t lba, uint32_t blockcount) 13271 { 13272 uchar_t com; 13273 union scsi_cdb *cdbp; 13274 uchar_t cdb_group_id; 13275 13276 ASSERT(pktp != NULL); 13277 ASSERT(pktp->pkt_cdbp != NULL); 13278 13279 cdbp = (union scsi_cdb *)pktp->pkt_cdbp; 13280 com = cdbp->scc_cmd; 13281 cdb_group_id = CDB_GROUPID(com); 13282 13283 ASSERT((cdb_group_id == CDB_GROUPID_0) || 13284 (cdb_group_id == CDB_GROUPID_1) || 13285 (cdb_group_id == CDB_GROUPID_4) || 13286 (cdb_group_id == CDB_GROUPID_5)); 13287 13288 /* 13289 * Move pkt to the next portion of the xfer. 13290 * func is NULL_FUNC so we do not have to release 13291 * the disk mutex here. 13292 */ 13293 if (scsi_init_pkt(SD_ADDRESS(un), pktp, bp, 0, 0, 0, 0, 13294 NULL_FUNC, NULL) == pktp) { 13295 /* Success. 
Handle partial DMA */ 13296 if (pktp->pkt_resid != 0) { 13297 blockcount -= 13298 SD_BYTES2TGTBLOCKS(un, pktp->pkt_resid); 13299 } 13300 13301 cdbp->scc_cmd = com; 13302 SD_FILL_SCSI1_LUN(un, pktp); 13303 if (cdb_group_id == CDB_GROUPID_1) { 13304 FORMG1ADDR(cdbp, lba); 13305 FORMG1COUNT(cdbp, blockcount); 13306 return (0); 13307 } else if (cdb_group_id == CDB_GROUPID_4) { 13308 FORMG4LONGADDR(cdbp, lba); 13309 FORMG4COUNT(cdbp, blockcount); 13310 return (0); 13311 } else if (cdb_group_id == CDB_GROUPID_0) { 13312 FORMG0ADDR(cdbp, lba); 13313 FORMG0COUNT(cdbp, blockcount); 13314 return (0); 13315 } else if (cdb_group_id == CDB_GROUPID_5) { 13316 FORMG5ADDR(cdbp, lba); 13317 FORMG5COUNT(cdbp, blockcount); 13318 return (0); 13319 } 13320 13321 /* Unreachable */ 13322 return (SD_PKT_ALLOC_FAILURE_CDB_TOO_SMALL); 13323 } 13324 13325 /* 13326 * Error setting up next portion of cmd transfer. 13327 * Something is definitely very wrong and this 13328 * should not happen. 13329 */ 13330 return (SD_PKT_ALLOC_FAILURE); 13331 } 13332 13333 /* 13334 * Function: sd_initpkt_for_uscsi 13335 * 13336 * Description: Allocate and initialize for transport a scsi_pkt struct, 13337 * based upon the info specified in the given uscsi_cmd struct. 13338 * 13339 * Return Code: SD_PKT_ALLOC_SUCCESS 13340 * SD_PKT_ALLOC_FAILURE 13341 * SD_PKT_ALLOC_FAILURE_NO_DMA 13342 * SD_PKT_ALLOC_FAILURE_CDB_TOO_SMALL 13343 * 13344 * Context: Kernel thread and may be called from software interrupt context 13345 * as part of a sdrunout callback. This function may not block or 13346 * call routines that block 13347 */ 13348 13349 static int 13350 sd_initpkt_for_uscsi(struct buf *bp, struct scsi_pkt **pktpp) 13351 { 13352 struct uscsi_cmd *uscmd; 13353 struct sd_xbuf *xp; 13354 struct scsi_pkt *pktp; 13355 struct sd_lun *un; 13356 uint32_t flags = 0; 13357 13358 ASSERT(bp != NULL); 13359 ASSERT(pktpp != NULL); 13360 xp = SD_GET_XBUF(bp); 13361 ASSERT(xp != NULL); 13362 un = SD_GET_UN(bp); 13363 ASSERT(un != NULL); 13364 ASSERT(mutex_owned(SD_MUTEX(un))); 13365 13366 /* The pointer to the uscsi_cmd struct is expected in xb_pktinfo */ 13367 uscmd = (struct uscsi_cmd *)xp->xb_pktinfo; 13368 ASSERT(uscmd != NULL); 13369 13370 SD_TRACE(SD_LOG_IO_CORE, un, 13371 "sd_initpkt_for_uscsi: entry: buf:0x%p\n", bp); 13372 13373 /* 13374 * Allocate the scsi_pkt for the command. 13375 * Note: If PKT_DMA_PARTIAL flag is set, scsi_vhci binds a path 13376 * during scsi_init_pkt time and will continue to use the 13377 * same path as long as the same scsi_pkt is used without 13378 * intervening scsi_dma_free(). Since a uscsi command does 13379 * not call scsi_dmafree() before retrying a failed command, it 13380 * is necessary to make sure PKT_DMA_PARTIAL flag is NOT 13381 * set, so that scsi_vhci can use another available path for 13382 * retry. Besides, a uscsi command does not allow DMA breakup, 13383 * so there is no need to set PKT_DMA_PARTIAL flag. 13384 */ 13385 if (uscmd->uscsi_rqlen > SENSE_LENGTH) { 13386 pktp = scsi_init_pkt(SD_ADDRESS(un), NULL, 13387 ((bp->b_bcount != 0) ? bp : NULL), uscmd->uscsi_cdblen, 13388 ((int)(uscmd->uscsi_rqlen) + sizeof (struct scsi_arq_status) 13389 - sizeof (struct scsi_extended_sense)), 0, 13390 (un->un_pkt_flags & ~PKT_DMA_PARTIAL) | PKT_XARQ, 13391 sdrunout, (caddr_t)un); 13392 } else { 13393 pktp = scsi_init_pkt(SD_ADDRESS(un), NULL, 13394 ((bp->b_bcount != 0) ?
bp : NULL), uscmd->uscsi_cdblen, 13395 sizeof (struct scsi_arq_status), 0, 13396 (un->un_pkt_flags & ~PKT_DMA_PARTIAL), 13397 sdrunout, (caddr_t)un); 13398 } 13399 13400 if (pktp == NULL) { 13401 *pktpp = NULL; 13402 /* 13403 * Set the driver state to RWAIT to indicate the driver 13404 * is waiting on resource allocations. The driver will not 13405 * suspend, pm_suspend, or detach while the state is RWAIT. 13406 */ 13407 New_state(un, SD_STATE_RWAIT); 13408 13409 SD_ERROR(SD_LOG_IO_CORE, un, 13410 "sd_initpkt_for_uscsi: No pktp. exit bp:0x%p\n", bp); 13411 13412 if ((bp->b_flags & B_ERROR) != 0) { 13413 return (SD_PKT_ALLOC_FAILURE_NO_DMA); 13414 } 13415 return (SD_PKT_ALLOC_FAILURE); 13416 } 13417 13418 /* 13419 * We do not do DMA breakup for USCSI commands, so return failure 13420 * here if all the needed DMA resources were not allocated. 13421 */ 13422 if ((un->un_pkt_flags & PKT_DMA_PARTIAL) && 13423 (bp->b_bcount != 0) && (pktp->pkt_resid != 0)) { 13424 scsi_destroy_pkt(pktp); 13425 SD_ERROR(SD_LOG_IO_CORE, un, "sd_initpkt_for_uscsi: " 13426 "No partial DMA for USCSI. exit: buf:0x%p\n", bp); 13427 return (SD_PKT_ALLOC_FAILURE_PKT_TOO_SMALL); 13428 } 13429 13430 /* Init the cdb from the given uscsi struct */ 13431 (void) scsi_setup_cdb((union scsi_cdb *)pktp->pkt_cdbp, 13432 uscmd->uscsi_cdb[0], 0, 0, 0); 13433 13434 SD_FILL_SCSI1_LUN(un, pktp); 13435 13436 /* 13437 * Set up the optional USCSI flags. See the uscsi (7I) man page 13438 * for listing of the supported flags. 13439 */ 13440 13441 if (uscmd->uscsi_flags & USCSI_SILENT) { 13442 flags |= FLAG_SILENT; 13443 } 13444 13445 if (uscmd->uscsi_flags & USCSI_DIAGNOSE) { 13446 flags |= FLAG_DIAGNOSE; 13447 } 13448 13449 if (uscmd->uscsi_flags & USCSI_ISOLATE) { 13450 flags |= FLAG_ISOLATE; 13451 } 13452 13453 if (un->un_f_is_fibre == FALSE) { 13454 if (uscmd->uscsi_flags & USCSI_RENEGOT) { 13455 flags |= FLAG_RENEGOTIATE_WIDE_SYNC; 13456 } 13457 } 13458 13459 /* 13460 * Set the pkt flags here so we save time later. 13461 * Note: These flags are NOT in the uscsi man page!!! 13462 */ 13463 if (uscmd->uscsi_flags & USCSI_HEAD) { 13464 flags |= FLAG_HEAD; 13465 } 13466 13467 if (uscmd->uscsi_flags & USCSI_NOINTR) { 13468 flags |= FLAG_NOINTR; 13469 } 13470 13471 /* 13472 * For tagged queueing, things get a bit complicated. 13473 * Check first for head of queue and last for ordered queue. 13474 * If neither head nor order, use the default driver tag flags. 13475 */ 13476 if ((uscmd->uscsi_flags & USCSI_NOTAG) == 0) { 13477 if (uscmd->uscsi_flags & USCSI_HTAG) { 13478 flags |= FLAG_HTAG; 13479 } else if (uscmd->uscsi_flags & USCSI_OTAG) { 13480 flags |= FLAG_OTAG; 13481 } else { 13482 flags |= un->un_tagflags & FLAG_TAGMASK; 13483 } 13484 } 13485 13486 if (uscmd->uscsi_flags & USCSI_NODISCON) { 13487 flags = (flags & ~FLAG_TAGMASK) | FLAG_NODISCON; 13488 } 13489 13490 pktp->pkt_flags = flags; 13491 13492 /* Transfer uscsi information to scsi_pkt */ 13493 (void) scsi_uscsi_pktinit(uscmd, pktp); 13494 13495 /* Copy the caller's CDB into the pkt...
*/ 13496 bcopy(uscmd->uscsi_cdb, pktp->pkt_cdbp, uscmd->uscsi_cdblen); 13497 13498 if (uscmd->uscsi_timeout == 0) { 13499 pktp->pkt_time = un->un_uscsi_timeout; 13500 } else { 13501 pktp->pkt_time = uscmd->uscsi_timeout; 13502 } 13503 13504 /* need it later to identify USCSI request in sdintr */ 13505 xp->xb_pkt_flags |= SD_XB_USCSICMD; 13506 13507 xp->xb_sense_resid = uscmd->uscsi_rqresid; 13508 13509 pktp->pkt_private = bp; 13510 pktp->pkt_comp = sdintr; 13511 *pktpp = pktp; 13512 13513 SD_TRACE(SD_LOG_IO_CORE, un, 13514 "sd_initpkt_for_uscsi: exit: buf:0x%p\n", bp); 13515 13516 return (SD_PKT_ALLOC_SUCCESS); 13517 } 13518 13519 13520 /* 13521 * Function: sd_destroypkt_for_uscsi 13522 * 13523 * Description: Free the scsi_pkt(9S) struct for the given bp, for uscsi 13524 * IOs. Also saves relevant info into the associated uscsi_cmd 13525 * struct. 13526 * 13527 * Context: May be called under interrupt context 13528 */ 13529 13530 static void 13531 sd_destroypkt_for_uscsi(struct buf *bp) 13532 { 13533 struct uscsi_cmd *uscmd; 13534 struct sd_xbuf *xp; 13535 struct scsi_pkt *pktp; 13536 struct sd_lun *un; 13537 struct sd_uscsi_info *suip; 13538 13539 ASSERT(bp != NULL); 13540 xp = SD_GET_XBUF(bp); 13541 ASSERT(xp != NULL); 13542 un = SD_GET_UN(bp); 13543 ASSERT(un != NULL); 13544 ASSERT(!mutex_owned(SD_MUTEX(un))); 13545 pktp = SD_GET_PKTP(bp); 13546 ASSERT(pktp != NULL); 13547 13548 SD_TRACE(SD_LOG_IO_CORE, un, 13549 "sd_destroypkt_for_uscsi: entry: buf:0x%p\n", bp); 13550 13551 /* The pointer to the uscsi_cmd struct is expected in xb_pktinfo */ 13552 uscmd = (struct uscsi_cmd *)xp->xb_pktinfo; 13553 ASSERT(uscmd != NULL); 13554 13555 /* Save the status and the residual into the uscsi_cmd struct */ 13556 uscmd->uscsi_status = ((*(pktp)->pkt_scbp) & STATUS_MASK); 13557 uscmd->uscsi_resid = bp->b_resid; 13558 13559 /* Transfer scsi_pkt information to uscsi */ 13560 (void) scsi_uscsi_pktfini(pktp, uscmd); 13561 13562 /* 13563 * If enabled, copy any saved sense data into the area specified 13564 * by the uscsi command. 13565 */ 13566 if (((uscmd->uscsi_flags & USCSI_RQENABLE) != 0) && 13567 (uscmd->uscsi_rqlen != 0) && (uscmd->uscsi_rqbuf != NULL)) { 13568 /* 13569 * Note: uscmd->uscsi_rqbuf should always point to a buffer 13570 * at least SENSE_LENGTH bytes in size (see sd_send_scsi_cmd()) 13571 */ 13572 uscmd->uscsi_rqstatus = xp->xb_sense_status; 13573 uscmd->uscsi_rqresid = xp->xb_sense_resid; 13574 if (uscmd->uscsi_rqlen > SENSE_LENGTH) { 13575 bcopy(xp->xb_sense_data, uscmd->uscsi_rqbuf, 13576 MAX_SENSE_LENGTH); 13577 } else { 13578 bcopy(xp->xb_sense_data, uscmd->uscsi_rqbuf, 13579 SENSE_LENGTH); 13580 } 13581 } 13582 /* 13583 * The following assignments are for SCSI FMA. 13584 */ 13585 ASSERT(xp->xb_private != NULL); 13586 suip = (struct sd_uscsi_info *)xp->xb_private; 13587 suip->ui_pkt_reason = pktp->pkt_reason; 13588 suip->ui_pkt_state = pktp->pkt_state; 13589 suip->ui_pkt_statistics = pktp->pkt_statistics; 13590 suip->ui_lba = (uint64_t)SD_GET_BLKNO(bp); 13591 13592 /* We are done with the scsi_pkt; free it now */ 13593 ASSERT(SD_GET_PKTP(bp) != NULL); 13594 scsi_destroy_pkt(SD_GET_PKTP(bp)); 13595 13596 SD_TRACE(SD_LOG_IO_CORE, un, 13597 "sd_destroypkt_for_uscsi: exit: buf:0x%p\n", bp); 13598 } 13599 13600 13601 /* 13602 * Function: sd_bioclone_alloc 13603 * 13604 * Description: Allocate a buf(9S) and init it as per the given buf 13605 * and the various arguments. The associated sd_xbuf 13606 * struct is (nearly) duplicated.
The struct buf *bp 13607 * argument is saved in new_xp->xb_private. 13608 * 13609 * Arguments: bp - ptr to the buf(9S) to be "shadowed" 13610 * datalen - size of data area for the shadow bp 13611 * blkno - starting LBA 13612 * func - function pointer for b_iodone in the shadow buf. (May 13613 * be NULL if none.) 13614 * 13615 * Return Code: Pointer to the allocated buf(9S) struct 13616 * 13617 * Context: Can sleep. 13618 */ 13619 13620 static struct buf * 13621 sd_bioclone_alloc(struct buf *bp, size_t datalen, 13622 daddr_t blkno, int (*func)(struct buf *)) 13623 { 13624 struct sd_lun *un; 13625 struct sd_xbuf *xp; 13626 struct sd_xbuf *new_xp; 13627 struct buf *new_bp; 13628 13629 ASSERT(bp != NULL); 13630 xp = SD_GET_XBUF(bp); 13631 ASSERT(xp != NULL); 13632 un = SD_GET_UN(bp); 13633 ASSERT(un != NULL); 13634 ASSERT(!mutex_owned(SD_MUTEX(un))); 13635 13636 new_bp = bioclone(bp, 0, datalen, SD_GET_DEV(un), blkno, func, 13637 NULL, KM_SLEEP); 13638 13639 new_bp->b_lblkno = blkno; 13640 13641 /* 13642 * Allocate an xbuf for the shadow bp and copy the contents of the 13643 * original xbuf into it. 13644 */ 13645 new_xp = kmem_alloc(sizeof (struct sd_xbuf), KM_SLEEP); 13646 bcopy(xp, new_xp, sizeof (struct sd_xbuf)); 13647 13648 /* 13649 * The given bp is automatically saved in the xb_private member 13650 * of the new xbuf. Callers are allowed to depend on this. 13651 */ 13652 new_xp->xb_private = bp; 13653 13654 new_bp->b_private = new_xp; 13655 13656 return (new_bp); 13657 } 13658 13659 /* 13660 * Function: sd_shadow_buf_alloc 13661 * 13662 * Description: Allocate a buf(9S) and init it as per the given buf 13663 * and the various arguments. The associated sd_xbuf 13664 * struct is (nearly) duplicated. The struct buf *bp 13665 * argument is saved in new_xp->xb_private. 13666 * 13667 * Arguments: bp - ptr to the buf(9S) to be "shadowed" 13668 * datalen - size of data area for the shadow bp 13669 * bflags - B_READ or B_WRITE (pseudo flag) 13670 * blkno - starting LBA 13671 * func - function pointer for b_iodone in the shadow buf. (May 13672 * be NULL if none.) 13673 * 13674 * Return Code: Pointer to the allocated buf(9S) struct 13675 * 13676 * Context: Can sleep.
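 * Illustrative call (argument names are hypothetical): the mapblocksize layer obtains the shadow buf for the READ phase of a read-modify-write roughly as sd_shadow_buf_alloc(bp, tgt_byte_count, B_READ, tgt_blkno, sd_mapblocksize_iodone);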
13677 */ 13678 13679 static struct buf * 13680 sd_shadow_buf_alloc(struct buf *bp, size_t datalen, uint_t bflags, 13681 daddr_t blkno, int (*func)(struct buf *)) 13682 { 13683 struct sd_lun *un; 13684 struct sd_xbuf *xp; 13685 struct sd_xbuf *new_xp; 13686 struct buf *new_bp; 13687 13688 ASSERT(bp != NULL); 13689 xp = SD_GET_XBUF(bp); 13690 ASSERT(xp != NULL); 13691 un = SD_GET_UN(bp); 13692 ASSERT(un != NULL); 13693 ASSERT(!mutex_owned(SD_MUTEX(un))); 13694 13695 if (bp->b_flags & (B_PAGEIO | B_PHYS)) { 13696 bp_mapin(bp); 13697 } 13698 13699 bflags &= (B_READ | B_WRITE); 13700 #if defined(__i386) || defined(__amd64) 13701 new_bp = getrbuf(KM_SLEEP); 13702 new_bp->b_un.b_addr = kmem_zalloc(datalen, KM_SLEEP); 13703 new_bp->b_bcount = datalen; 13704 new_bp->b_flags = bflags | 13705 (bp->b_flags & ~(B_PAGEIO | B_PHYS | B_REMAPPED | B_SHADOW)); 13706 #else 13707 new_bp = scsi_alloc_consistent_buf(SD_ADDRESS(un), NULL, 13708 datalen, bflags, SLEEP_FUNC, NULL); 13709 #endif 13710 new_bp->av_forw = NULL; 13711 new_bp->av_back = NULL; 13712 new_bp->b_dev = bp->b_dev; 13713 new_bp->b_blkno = blkno; 13714 new_bp->b_iodone = func; 13715 new_bp->b_edev = bp->b_edev; 13716 new_bp->b_resid = 0; 13717 13718 /* We need to preserve the B_FAILFAST flag */ 13719 if (bp->b_flags & B_FAILFAST) { 13720 new_bp->b_flags |= B_FAILFAST; 13721 } 13722 13723 /* 13724 * Allocate an xbuf for the shadow bp and copy the contents of the 13725 * original xbuf into it. 13726 */ 13727 new_xp = kmem_alloc(sizeof (struct sd_xbuf), KM_SLEEP); 13728 bcopy(xp, new_xp, sizeof (struct sd_xbuf)); 13729 13730 /* Need later to copy data between the shadow buf & original buf! */ 13731 new_xp->xb_pkt_flags |= PKT_CONSISTENT; 13732 13733 /* 13734 * The given bp is automatically saved in the xb_private member 13735 * of the new xbuf. Callers are allowed to depend on this. 13736 */ 13737 new_xp->xb_private = bp; 13738 13739 new_bp->b_private = new_xp; 13740 13741 return (new_bp); 13742 } 13743 13744 /* 13745 * Function: sd_bioclone_free 13746 * 13747 * Description: Deallocate a buf(9S) that was used for 'shadow' IO operations 13748 * in the larger than partition operation. 13749 * 13750 * Context: May be called under interrupt context 13751 */ 13752 13753 static void 13754 sd_bioclone_free(struct buf *bp) 13755 { 13756 struct sd_xbuf *xp; 13757 13758 ASSERT(bp != NULL); 13759 xp = SD_GET_XBUF(bp); 13760 ASSERT(xp != NULL); 13761 13762 /* 13763 * Call bp_mapout() before freeing the buf, in case a lower 13764 * layer or HBA had done a bp_mapin(). We must do this here 13765 * as we are the "originator" of the shadow buf. 13766 */ 13767 bp_mapout(bp); 13768 13769 /* 13770 * Null out b_iodone before freeing the bp, to ensure that the driver 13771 * never gets confused by a stale value in this field. (Just a little 13772 * extra defensiveness here.) 13773 */ 13774 bp->b_iodone = NULL; 13775 13776 freerbuf(bp); 13777 13778 kmem_free(xp, sizeof (struct sd_xbuf)); 13779 } 13780 13781 /* 13782 * Function: sd_shadow_buf_free 13783 * 13784 * Description: Deallocate a buf(9S) that was used for 'shadow' IO operations. 13785 * 13786 * Context: May be called under interrupt context 13787 */ 13788 13789 static void 13790 sd_shadow_buf_free(struct buf *bp) 13791 { 13792 struct sd_xbuf *xp; 13793 13794 ASSERT(bp != NULL); 13795 xp = SD_GET_XBUF(bp); 13796 ASSERT(xp != NULL); 13797 13798 #if defined(__sparc) 13799 /* 13800 * Call bp_mapout() before freeing the buf, in case a lower 13801 * layer or HBA had done a bp_mapin().
We must do this here 13802 * as we are the "originator" of the shadow buf. 13803 */ 13804 bp_mapout(bp); 13805 #endif 13806 13807 /* 13808 * Null out b_iodone before freeing the bp, to ensure that the driver 13809 * never gets confused by a stale value in this field. (Just a little 13810 * extra defensiveness here.) 13811 */ 13812 bp->b_iodone = NULL; 13813 13814 #if defined(__i386) || defined(__amd64) 13815 kmem_free(bp->b_un.b_addr, bp->b_bcount); 13816 freerbuf(bp); 13817 #else 13818 scsi_free_consistent_buf(bp); 13819 #endif 13820 13821 kmem_free(xp, sizeof (struct sd_xbuf)); 13822 } 13823 13824 13825 /* 13826 * Function: sd_print_transport_rejected_message 13827 * 13828 * Description: This implements the ludicrously complex rules for printing 13829 * a "transport rejected" message. This is to address the 13830 * specific problem of having a flood of this error message 13831 * produced when a failover occurs. 13832 * 13833 * Context: Any. 13834 */ 13835 13836 static void 13837 sd_print_transport_rejected_message(struct sd_lun *un, struct sd_xbuf *xp, 13838 int code) 13839 { 13840 ASSERT(un != NULL); 13841 ASSERT(mutex_owned(SD_MUTEX(un))); 13842 ASSERT(xp != NULL); 13843 13844 /* 13845 * Print the "transport rejected" message under the following 13846 * conditions: 13847 * 13848 * - Whenever the SD_LOGMASK_DIAG bit of sd_level_mask is set 13849 * - The error code from scsi_transport() is NOT a TRAN_FATAL_ERROR. 13850 * - If the error code IS a TRAN_FATAL_ERROR, then the message is 13851 * printed the FIRST time a TRAN_FATAL_ERROR is returned from 13852 * scsi_transport(9F) (which indicates that the target might have 13853 * gone off-line). This uses the un->un_tran_fatal_count 13854 * count, which is incremented whenever a TRAN_FATAL_ERROR is 13855 * received, and reset to zero whenever a TRAN_ACCEPT is returned 13856 * from scsi_transport(). 13857 * 13858 * The FLAG_SILENT in the scsi_pkt must be CLEARED in ALL of 13859 * the preceding cases in order for the message to be printed. 13860 */ 13861 if ((xp->xb_pktp->pkt_flags & FLAG_SILENT) == 0) { 13862 if ((sd_level_mask & SD_LOGMASK_DIAG) || 13863 (code != TRAN_FATAL_ERROR) || 13864 (un->un_tran_fatal_count == 1)) { 13865 switch (code) { 13866 case TRAN_BADPKT: 13867 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 13868 "transport rejected bad packet\n"); 13869 break; 13870 case TRAN_FATAL_ERROR: 13871 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 13872 "transport rejected fatal error\n"); 13873 break; 13874 default: 13875 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 13876 "transport rejected (%d)\n", code); 13877 break; 13878 } 13879 } 13880 } 13881 } 13882 13883 13884 /* 13885 * Function: sd_add_buf_to_waitq 13886 * 13887 * Description: Add the given buf(9S) struct to the wait queue for the 13888 * instance. If sorting is enabled, then the buf is added 13889 * to the queue via an elevator sort algorithm (a la 13890 * disksort(9F)). The SD_GET_BLKNO(bp) is used as the sort key. 13891 * If sorting is not enabled, then the buf is just added 13892 * to the end of the wait queue. 13893 * 13894 * Return Code: void 13895 * 13896 * Context: Does not sleep/block, therefore technically can be called 13897 * from any context. However if sorting is enabled then the 13898 * execution time is indeterminate, and may be long if 13899 * the wait queue grows large.
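 * Example (illustrative block numbers): with a waitq of 40, 70, 90, 10, 20 (the drop from 90 to 10 marks the start of the second, wrapped list), a new request for blkno 50 sorts between 40 and 70, while one for blkno 15 is placed between 10 and 20 on the second list.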
13900 */ 13901 13902 static void 13903 sd_add_buf_to_waitq(struct sd_lun *un, struct buf *bp) 13904 { 13905 struct buf *ap; 13906 13907 ASSERT(bp != NULL); 13908 ASSERT(un != NULL); 13909 ASSERT(mutex_owned(SD_MUTEX(un))); 13910 13911 /* If the queue is empty, add the buf as the only entry & return. */ 13912 if (un->un_waitq_headp == NULL) { 13913 ASSERT(un->un_waitq_tailp == NULL); 13914 un->un_waitq_headp = un->un_waitq_tailp = bp; 13915 bp->av_forw = NULL; 13916 return; 13917 } 13918 13919 ASSERT(un->un_waitq_tailp != NULL); 13920 13921 /* 13922 * If sorting is disabled, just add the buf to the tail end of 13923 * the wait queue and return. 13924 */ 13925 if (un->un_f_disksort_disabled) { 13926 un->un_waitq_tailp->av_forw = bp; 13927 un->un_waitq_tailp = bp; 13928 bp->av_forw = NULL; 13929 return; 13930 } 13931 13932 /* 13933 * Sort through the list of requests currently on the wait queue 13934 * and add the new buf request at the appropriate position. 13935 * 13936 * The un->un_waitq_headp is an activity chain pointer on which 13937 * we keep two queues, sorted in ascending SD_GET_BLKNO() order. The 13938 * first queue holds those requests which are positioned after 13939 * the current SD_GET_BLKNO() (in the first request); the second holds 13940 * requests which came in after their SD_GET_BLKNO() number was passed. 13941 * Thus we implement a one way scan, retracting after reaching 13942 * the end of the drive to the first request on the second 13943 * queue, at which time it becomes the first queue. 13944 * A one-way scan is natural because of the way UNIX read-ahead 13945 * blocks are allocated. 13946 * 13947 * If we lie after the first request, then we must locate the 13948 * second request list and add ourselves to it. 13949 */ 13950 ap = un->un_waitq_headp; 13951 if (SD_GET_BLKNO(bp) < SD_GET_BLKNO(ap)) { 13952 while (ap->av_forw != NULL) { 13953 /* 13954 * Look for an "inversion" in the (normally 13955 * ascending) block numbers. This indicates 13956 * the start of the second request list. 13957 */ 13958 if (SD_GET_BLKNO(ap->av_forw) < SD_GET_BLKNO(ap)) { 13959 /* 13960 * Search the second request list for the 13961 * first request at a larger block number. 13962 * We go before that; however if there is 13963 * no such request, we go at the end. 13964 */ 13965 do { 13966 if (SD_GET_BLKNO(bp) < 13967 SD_GET_BLKNO(ap->av_forw)) { 13968 goto insert; 13969 } 13970 ap = ap->av_forw; 13971 } while (ap->av_forw != NULL); 13972 goto insert; /* after last */ 13973 } 13974 ap = ap->av_forw; 13975 } 13976 13977 /* 13978 * No inversions... we will go after the last, and 13979 * be the first request in the second request list. 13980 */ 13981 goto insert; 13982 } 13983 13984 /* 13985 * Request is at/after the current request... 13986 * sort in the first request list. 13987 */ 13988 while (ap->av_forw != NULL) { 13989 /* 13990 * We want to go after the current request (1) if 13991 * there is an inversion after it (i.e. it is the end 13992 * of the first request list), or (2) if the next 13993 * request is a larger block no. than our request. 13994 */ 13995 if ((SD_GET_BLKNO(ap->av_forw) < SD_GET_BLKNO(ap)) || 13996 (SD_GET_BLKNO(bp) < SD_GET_BLKNO(ap->av_forw))) { 13997 goto insert; 13998 } 13999 ap = ap->av_forw; 14000 } 14001 14002 /* 14003 * Neither a second list nor a larger request, therefore 14004 * we go at the end of the first list (which is the same 14005 * as the end of the whole shebang).
14006 */ 14007 insert: 14008 bp->av_forw = ap->av_forw; 14009 ap->av_forw = bp; 14010 14011 /* 14012 * If we inserted onto the tail end of the waitq, make sure the 14013 * tail pointer is updated. 14014 */ 14015 if (ap == un->un_waitq_tailp) { 14016 un->un_waitq_tailp = bp; 14017 } 14018 } 14019 14020 14021 /* 14022 * Function: sd_start_cmds 14023 * 14024 * Description: Remove and transport cmds from the driver queues. 14025 * 14026 * Arguments: un - pointer to the unit (soft state) struct for the target. 14027 * 14028 * immed_bp - ptr to a buf to be transported immediately. Only 14029 * the immed_bp is transported; bufs on the waitq are not 14030 * processed and the un_retry_bp is not checked. If immed_bp is 14031 * NULL, then normal queue processing is performed. 14032 * 14033 * Context: May be called from kernel thread context, interrupt context, 14034 * or runout callback context. This function may not block or 14035 * call routines that block. 14036 */ 14037 14038 static void 14039 sd_start_cmds(struct sd_lun *un, struct buf *immed_bp) 14040 { 14041 struct sd_xbuf *xp; 14042 struct buf *bp; 14043 void (*statp)(kstat_io_t *); 14044 #if defined(__i386) || defined(__amd64) /* DMAFREE for x86 only */ 14045 void (*saved_statp)(kstat_io_t *); 14046 #endif 14047 int rval; 14048 struct sd_fm_internal *sfip = NULL; 14049 14050 ASSERT(un != NULL); 14051 ASSERT(mutex_owned(SD_MUTEX(un))); 14052 ASSERT(un->un_ncmds_in_transport >= 0); 14053 ASSERT(un->un_throttle >= 0); 14054 14055 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sd_start_cmds: entry\n"); 14056 14057 do { 14058 #if defined(__i386) || defined(__amd64) /* DMAFREE for x86 only */ 14059 saved_statp = NULL; 14060 #endif 14061 14062 /* 14063 * If we are syncing or dumping, fail the command to 14064 * avoid recursively calling back into scsi_transport(). 14065 * The dump I/O itself uses a separate code path so this 14066 * only prevents non-dump I/O from being sent while dumping. 14067 * File system sync takes place before dumping begins. 14068 * During panic, filesystem I/O is allowed provided 14069 * un_in_callback is <= 1. This is to prevent recursion 14070 * such as sd_start_cmds -> scsi_transport -> sdintr -> 14071 * sd_start_cmds and so on. See panic.c for more information 14072 * about the states the system can be in during panic. 14073 */ 14074 if ((un->un_state == SD_STATE_DUMPING) || 14075 (ddi_in_panic() && (un->un_in_callback > 1))) { 14076 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14077 "sd_start_cmds: panicking\n"); 14078 goto exit; 14079 } 14080 14081 if ((bp = immed_bp) != NULL) { 14082 /* 14083 * We have a bp that must be transported immediately. 14084 * It's OK to transport the immed_bp here without doing 14085 * the throttle limit check because the immed_bp is 14086 * always used in a retry/recovery case. This means 14087 * that we know we are not at the throttle limit by 14088 * virtue of the fact that to get here we must have 14089 * already gotten a command back via sdintr(). This also 14090 * relies on (1) the command on un_retry_bp preventing 14091 * further commands from the waitq from being issued; 14092 * and (2) the code in sd_retry_command checking the 14093 * throttle limit before issuing a delayed or immediate 14094 * retry. This holds even if the throttle limit is 14095 * currently ratcheted down from its maximum value. 
14096 */ 14097 statp = kstat_runq_enter; 14098 if (bp == un->un_retry_bp) { 14099 ASSERT((un->un_retry_statp == NULL) || 14100 (un->un_retry_statp == kstat_waitq_enter) || 14101 (un->un_retry_statp == 14102 kstat_runq_back_to_waitq)); 14103 /* 14104 * If the waitq kstat was incremented when 14105 * sd_set_retry_bp() queued this bp for a retry, 14106 * then we must set up statp so that the waitq 14107 * count will get decremented correctly below. 14108 * Also we must clear un->un_retry_statp to 14109 * ensure that we do not act on a stale value 14110 * in this field. 14111 */ 14112 if ((un->un_retry_statp == kstat_waitq_enter) || 14113 (un->un_retry_statp == 14114 kstat_runq_back_to_waitq)) { 14115 statp = kstat_waitq_to_runq; 14116 } 14117 #if defined(__i386) || defined(__amd64) /* DMAFREE for x86 only */ 14118 saved_statp = un->un_retry_statp; 14119 #endif 14120 un->un_retry_statp = NULL; 14121 14122 SD_TRACE(SD_LOG_IO | SD_LOG_ERROR, un, 14123 "sd_start_cmds: un:0x%p: GOT retry_bp:0x%p " 14124 "un_throttle:%d un_ncmds_in_transport:%d\n", 14125 un, un->un_retry_bp, un->un_throttle, 14126 un->un_ncmds_in_transport); 14127 } else { 14128 SD_TRACE(SD_LOG_IO_CORE, un, "sd_start_cmds: " 14129 "processing priority bp:0x%p\n", bp); 14130 } 14131 14132 } else if ((bp = un->un_waitq_headp) != NULL) { 14133 /* 14134 * A command on the waitq is ready to go, but do not 14135 * send it if: 14136 * 14137 * (1) the throttle limit has been reached, or 14138 * (2) a retry is pending, or 14139 * (3) a START_STOP_UNIT callback is pending, or 14140 * (4) a callback for a SD_PATH_DIRECT_PRIORITY 14141 * command is pending. 14142 * 14143 * For all of these conditions, IO processing will 14144 * restart after the condition is cleared. 14145 */ 14146 if (un->un_ncmds_in_transport >= un->un_throttle) { 14147 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14148 "sd_start_cmds: exiting, " 14149 "throttle limit reached!\n"); 14150 goto exit; 14151 } 14152 if (un->un_retry_bp != NULL) { 14153 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14154 "sd_start_cmds: exiting, retry pending!\n"); 14155 goto exit; 14156 } 14157 if (un->un_startstop_timeid != NULL) { 14158 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14159 "sd_start_cmds: exiting, " 14160 "START_STOP pending!\n"); 14161 goto exit; 14162 } 14163 if (un->un_direct_priority_timeid != NULL) { 14164 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14165 "sd_start_cmds: exiting, " 14166 "SD_PATH_DIRECT_PRIORITY cmd. pending!\n"); 14167 goto exit; 14168 } 14169 14170 /* Dequeue the command */ 14171 un->un_waitq_headp = bp->av_forw; 14172 if (un->un_waitq_headp == NULL) { 14173 un->un_waitq_tailp = NULL; 14174 } 14175 bp->av_forw = NULL; 14176 statp = kstat_waitq_to_runq; 14177 SD_TRACE(SD_LOG_IO_CORE, un, 14178 "sd_start_cmds: processing waitq bp:0x%p\n", bp); 14179 14180 } else { 14181 /* No work to do so bail out now */ 14182 SD_TRACE(SD_LOG_IO_CORE, un, 14183 "sd_start_cmds: no more work, exiting!\n"); 14184 goto exit; 14185 } 14186 14187 /* 14188 * Reset the state to normal. This is the mechanism by which 14189 * the state transitions from either SD_STATE_RWAIT or 14190 * SD_STATE_OFFLINE to SD_STATE_NORMAL. 14191 * If state is SD_STATE_PM_CHANGING then this command is 14192 * part of the device power control and the state must 14193 * not be put back to normal. Doing so would 14194 * allow new commands to proceed when they shouldn't, as 14195 * the device may be going off.
14196 */ 14197 if ((un->un_state != SD_STATE_SUSPENDED) && 14198 (un->un_state != SD_STATE_PM_CHANGING)) { 14199 New_state(un, SD_STATE_NORMAL); 14200 } 14201 14202 xp = SD_GET_XBUF(bp); 14203 ASSERT(xp != NULL); 14204 14205 #if defined(__i386) || defined(__amd64) /* DMAFREE for x86 only */ 14206 /* 14207 * Allocate the scsi_pkt if we need one, or attach DMA 14208 * resources if we have a scsi_pkt that needs them. The 14209 * latter should only occur for commands that are being 14210 * retried. 14211 */ 14212 if ((xp->xb_pktp == NULL) || 14213 ((xp->xb_pkt_flags & SD_XB_DMA_FREED) != 0)) { 14214 #else 14215 if (xp->xb_pktp == NULL) { 14216 #endif 14217 /* 14218 * There is no scsi_pkt allocated for this buf. Call 14219 * the initpkt function to allocate & init one. 14220 * 14221 * The scsi_init_pkt runout callback functionality is 14222 * implemented as follows: 14223 * 14224 * 1) The initpkt function always calls 14225 * scsi_init_pkt(9F) with sdrunout specified as the 14226 * callback routine. 14227 * 2) A successful packet allocation is initialized and 14228 * the I/O is transported. 14229 * 3) The I/O associated with an allocation resource 14230 * failure is left on its queue to be retried via 14231 * runout or the next I/O. 14232 * 4) The I/O associated with a DMA error is removed 14233 * from the queue and failed with EIO. Processing of 14234 * the transport queues is also halted to be 14235 * restarted via runout or the next I/O. 14236 * 5) The I/O associated with a CDB size or packet 14237 * size error is removed from the queue and failed 14238 * with EIO. Processing of the transport queues is 14239 * continued. 14240 * 14241 * Note: there is no interface for canceling a runout 14242 * callback. To prevent the driver from detaching or 14243 * suspending while a runout is pending the driver 14244 * state is set to SD_STATE_RWAIT 14245 * 14246 * Note: using the scsi_init_pkt callback facility can 14247 * result in an I/O request persisting at the head of 14248 * the list which cannot be satisfied even after 14249 * multiple retries. In the future the driver may 14250 * implement some kind of maximum runout count before 14251 * failing an I/O. 14252 * 14253 * Note: the use of funcp below may seem superfluous, 14254 * but it helps warlock figure out the correct 14255 * initpkt function calls (see [s]sd.wlcmd). 14256 */ 14257 struct scsi_pkt *pktp; 14258 int (*funcp)(struct buf *bp, struct scsi_pkt **pktp); 14259 14260 ASSERT(bp != un->un_rqs_bp); 14261 14262 funcp = sd_initpkt_map[xp->xb_chain_iostart]; 14263 switch ((*funcp)(bp, &pktp)) { 14264 case SD_PKT_ALLOC_SUCCESS: 14265 xp->xb_pktp = pktp; 14266 SD_TRACE(SD_LOG_IO_CORE, un, 14267 "sd_start_cmd: SD_PKT_ALLOC_SUCCESS 0x%p\n", 14268 pktp); 14269 goto got_pkt; 14270 14271 case SD_PKT_ALLOC_FAILURE: 14272 /* 14273 * Temporary (hopefully) resource depletion. 14274 * Since retries and RQS commands always have a 14275 * scsi_pkt allocated, these cases should never 14276 * get here. So the only cases this needs to 14277 * handle is a bp from the waitq (which we put 14278 * back onto the waitq for sdrunout), or a bp 14279 * sent as an immed_bp (which we just fail). 14280 */ 14281 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14282 "sd_start_cmds: SD_PKT_ALLOC_FAILURE\n"); 14283 14284 #if defined(__i386) || defined(__amd64) /* DMAFREE for x86 only */ 14285 14286 if (bp == immed_bp) { 14287 /* 14288 * If SD_XB_DMA_FREED is clear, then 14289 * this is a failure to allocate a 14290 * scsi_pkt, and we must fail the 14291 * command. 
14292 */ 14293 if ((xp->xb_pkt_flags & 14294 SD_XB_DMA_FREED) == 0) { 14295 break; 14296 } 14297 14298 /* 14299 * If this immediate command is NOT our 14300 * un_retry_bp, then we must fail it. 14301 */ 14302 if (bp != un->un_retry_bp) { 14303 break; 14304 } 14305 14306 /* 14307 * We get here if this cmd is our 14308 * un_retry_bp that was DMAFREED, but 14309 * scsi_init_pkt() failed to reallocate 14310 * DMA resources when we attempted to 14311 * retry it. This can happen when an 14312 * mpxio failover is in progress, but 14313 * we don't want to just fail the 14314 * command in this case. 14315 * 14316 * Use timeout(9F) to restart it after 14317 * a 100ms delay. We don't want to 14318 * let sdrunout() restart it, because 14319 * sdrunout() is just supposed to start 14320 * commands that are sitting on the 14321 * wait queue. The un_retry_bp stays 14322 * set until the command completes, but 14323 * sdrunout can be called many times 14324 * before that happens. Since sdrunout 14325 * cannot tell if the un_retry_bp is 14326 * already in the transport, it could 14327 * end up calling scsi_transport() for 14328 * the un_retry_bp multiple times. 14329 * 14330 * Also: don't schedule the callback 14331 * if some other callback is already 14332 * pending. 14333 */ 14334 if (un->un_retry_statp == NULL) { 14335 /* 14336 * restore the kstat pointer to 14337 * keep kstat counts coherent 14338 * when we do retry the command. 14339 */ 14340 un->un_retry_statp = 14341 saved_statp; 14342 } 14343 14344 if ((un->un_startstop_timeid == NULL) && 14345 (un->un_retry_timeid == NULL) && 14346 (un->un_direct_priority_timeid == 14347 NULL)) { 14348 14349 un->un_retry_timeid = 14350 timeout( 14351 sd_start_retry_command, 14352 un, SD_RESTART_TIMEOUT); 14353 } 14354 goto exit; 14355 } 14356 14357 #else 14358 if (bp == immed_bp) { 14359 break; /* Just fail the command */ 14360 } 14361 #endif 14362 14363 /* Add the buf back to the head of the waitq */ 14364 bp->av_forw = un->un_waitq_headp; 14365 un->un_waitq_headp = bp; 14366 if (un->un_waitq_tailp == NULL) { 14367 un->un_waitq_tailp = bp; 14368 } 14369 goto exit; 14370 14371 case SD_PKT_ALLOC_FAILURE_NO_DMA: 14372 /* 14373 * HBA DMA resource failure. Fail the command 14374 * and continue processing of the queues. 14375 */ 14376 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14377 "sd_start_cmds: " 14378 "SD_PKT_ALLOC_FAILURE_NO_DMA\n"); 14379 break; 14380 14381 case SD_PKT_ALLOC_FAILURE_PKT_TOO_SMALL: 14382 /* 14383 * Note:x86: Partial DMA mapping not supported 14384 * for USCSI commands, and all the needed DMA 14385 * resources were not allocated. 14386 */ 14387 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14388 "sd_start_cmds: " 14389 "SD_PKT_ALLOC_FAILURE_PKT_TOO_SMALL\n"); 14390 break; 14391 14392 case SD_PKT_ALLOC_FAILURE_CDB_TOO_SMALL: 14393 /* 14394 * Note:x86: Request cannot fit into CDB based 14395 * on lba and len. 14396 */ 14397 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14398 "sd_start_cmds: " 14399 "SD_PKT_ALLOC_FAILURE_CDB_TOO_SMALL\n"); 14400 break; 14401 14402 default: 14403 /* Should NEVER get here! */ 14404 panic("scsi_initpkt error"); 14405 /*NOTREACHED*/ 14406 } 14407 14408 /* 14409 * Fatal error in allocating a scsi_pkt for this buf. 14410 * Update kstats & return the buf with an error code. 14411 * We must use sd_return_failed_command_no_restart() to 14412 * avoid a recursive call back into sd_start_cmds(). 14413 * However this also means that we must keep processing 14414 * the waitq here in order to avoid stalling. 
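 * (Hence the "continue" below, rather than a "return", once the failed command has been handed back.)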
14415 */ 14416 if (statp == kstat_waitq_to_runq) { 14417 SD_UPDATE_KSTATS(un, kstat_waitq_exit, bp); 14418 } 14419 sd_return_failed_command_no_restart(un, bp, EIO); 14420 if (bp == immed_bp) { 14421 /* immed_bp is gone by now, so clear this */ 14422 immed_bp = NULL; 14423 } 14424 continue; 14425 } 14426 got_pkt: 14427 if (bp == immed_bp) { 14428 /* goto the head of the class.... */ 14429 xp->xb_pktp->pkt_flags |= FLAG_HEAD; 14430 } 14431 14432 un->un_ncmds_in_transport++; 14433 SD_UPDATE_KSTATS(un, statp, bp); 14434 14435 /* 14436 * Call scsi_transport() to send the command to the target. 14437 * According to SCSA architecture, we must drop the mutex here 14438 * before calling scsi_transport() in order to avoid deadlock. 14439 * Note that the scsi_pkt's completion routine can be executed 14440 * (from interrupt context) even before the call to 14441 * scsi_transport() returns. 14442 */ 14443 SD_TRACE(SD_LOG_IO_CORE, un, 14444 "sd_start_cmds: calling scsi_transport()\n"); 14445 DTRACE_PROBE1(scsi__transport__dispatch, struct buf *, bp); 14446 14447 mutex_exit(SD_MUTEX(un)); 14448 rval = scsi_transport(xp->xb_pktp); 14449 mutex_enter(SD_MUTEX(un)); 14450 14451 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14452 "sd_start_cmds: scsi_transport() returned %d\n", rval); 14453 14454 switch (rval) { 14455 case TRAN_ACCEPT: 14456 /* Clear this with every pkt accepted by the HBA */ 14457 un->un_tran_fatal_count = 0; 14458 break; /* Success; try the next cmd (if any) */ 14459 14460 case TRAN_BUSY: 14461 un->un_ncmds_in_transport--; 14462 ASSERT(un->un_ncmds_in_transport >= 0); 14463 14464 /* 14465 * Don't retry request sense; the sense data 14466 * is lost when another request is sent. 14467 * Free up the rqs buf and retry 14468 * the original failed cmd. Update kstat. 14469 */ 14470 if (bp == un->un_rqs_bp) { 14471 SD_UPDATE_KSTATS(un, kstat_runq_exit, bp); 14472 bp = sd_mark_rqs_idle(un, xp); 14473 sd_retry_command(un, bp, SD_RETRIES_STANDARD, 14474 NULL, NULL, EIO, un->un_busy_timeout / 500, 14475 kstat_waitq_enter); 14476 goto exit; 14477 } 14478 14479 #if defined(__i386) || defined(__amd64) /* DMAFREE for x86 only */ 14480 /* 14481 * Free the DMA resources for the scsi_pkt. This will 14482 * allow mpxio to select another path the next time 14483 * we call scsi_transport() with this scsi_pkt. 14484 * See sdintr() for the rationale behind this. 14485 */ 14486 if ((un->un_f_is_fibre == TRUE) && 14487 ((xp->xb_pkt_flags & SD_XB_USCSICMD) == 0) && 14488 ((xp->xb_pktp->pkt_flags & FLAG_SENSING) == 0)) { 14489 scsi_dmafree(xp->xb_pktp); 14490 xp->xb_pkt_flags |= SD_XB_DMA_FREED; 14491 } 14492 #endif 14493 14494 if (SD_IS_DIRECT_PRIORITY(SD_GET_XBUF(bp))) { 14495 /* 14496 * Commands that are SD_PATH_DIRECT_PRIORITY 14497 * are for error recovery situations. These do 14498 * not use the normal command waitq, so if they 14499 * get a TRAN_BUSY we cannot put them back onto 14500 * the waitq for later retry. One possible 14501 * problem is that there could already be some 14502 * other command on un_retry_bp that is waiting 14503 * for this one to complete, so we would be 14504 * deadlocked if we put this command back onto 14505 * the waitq for later retry (since un_retry_bp 14506 * must complete before the driver gets back to 14507 * commands on the waitq). 14508 * 14509 * To avoid deadlock we must schedule a callback 14510 * that will restart this command after a set 14511 * interval.
This should keep retrying for as 14512 * long as the underlying transport keeps 14513 * returning TRAN_BUSY (just like for other 14514 * commands). Use the same timeout interval as 14515 * for the ordinary TRAN_BUSY retry. 14516 */ 14517 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14518 "sd_start_cmds: scsi_transport() returned " 14519 "TRAN_BUSY for DIRECT_PRIORITY cmd!\n"); 14520 14521 SD_UPDATE_KSTATS(un, kstat_runq_exit, bp); 14522 un->un_direct_priority_timeid = 14523 timeout(sd_start_direct_priority_command, 14524 bp, un->un_busy_timeout / 500); 14525 14526 goto exit; 14527 } 14528 14529 /* 14530 * For TRAN_BUSY, we want to reduce the throttle value, 14531 * unless we are retrying a command. 14532 */ 14533 if (bp != un->un_retry_bp) { 14534 sd_reduce_throttle(un, SD_THROTTLE_TRAN_BUSY); 14535 } 14536 14537 /* 14538 * Set up the bp to be tried again 10 ms later. 14539 * Note:x86: Is there a timeout value in the sd_lun 14540 * for this condition? 14541 */ 14542 sd_set_retry_bp(un, bp, un->un_busy_timeout / 500, 14543 kstat_runq_back_to_waitq); 14544 goto exit; 14545 14546 case TRAN_FATAL_ERROR: 14547 un->un_tran_fatal_count++; 14548 /* FALLTHRU */ 14549 14550 case TRAN_BADPKT: 14551 default: 14552 un->un_ncmds_in_transport--; 14553 ASSERT(un->un_ncmds_in_transport >= 0); 14554 14555 /* 14556 * If this is our REQUEST SENSE command with a 14557 * transport error, we must get back the pointers 14558 * to the original buf, and mark the REQUEST 14559 * SENSE command as "available". 14560 */ 14561 if (bp == un->un_rqs_bp) { 14562 bp = sd_mark_rqs_idle(un, xp); 14563 xp = SD_GET_XBUF(bp); 14564 } else { 14565 /* 14566 * Legacy behavior: do not update transport 14567 * error count for request sense commands. 14568 */ 14569 SD_UPDATE_ERRSTATS(un, sd_transerrs); 14570 } 14571 14572 SD_UPDATE_KSTATS(un, kstat_runq_exit, bp); 14573 sd_print_transport_rejected_message(un, xp, rval); 14574 14575 /* 14576 * This command will be terminated by SD driver due 14577 * to a fatal transport error. We should post 14578 * ereport.io.scsi.cmd.disk.tran with driver-assessment 14579 * of "fail" for any command to indicate this 14580 * situation. 14581 */ 14582 if (xp->xb_ena > 0) { 14583 ASSERT(un->un_fm_private != NULL); 14584 sfip = un->un_fm_private; 14585 sfip->fm_ssc.ssc_flags |= SSC_FLAGS_TRAN_ABORT; 14586 sd_ssc_extract_info(&sfip->fm_ssc, un, 14587 xp->xb_pktp, bp, xp); 14588 sd_ssc_post(&sfip->fm_ssc, SD_FM_DRV_FATAL); 14589 } 14590 14591 /* 14592 * We must use sd_return_failed_command_no_restart() to 14593 * avoid a recursive call back into sd_start_cmds(). 14594 * However this also means that we must keep processing 14595 * the waitq here in order to avoid stalling. 14596 */ 14597 sd_return_failed_command_no_restart(un, bp, EIO); 14598 14599 /* 14600 * Notify any threads waiting in sd_ddi_suspend() that 14601 * a command completion has occurred. 14602 */ 14603 if (un->un_state == SD_STATE_SUSPENDED) { 14604 cv_broadcast(&un->un_disk_busy_cv); 14605 } 14606 14607 if (bp == immed_bp) { 14608 /* immed_bp is gone by now, so clear this */ 14609 immed_bp = NULL; 14610 } 14611 break; 14612 } 14613 14614 } while (immed_bp == NULL); 14615 14616 exit: 14617 ASSERT(mutex_owned(SD_MUTEX(un))); 14618 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sd_start_cmds: exit\n"); 14619 } 14620 14621 14622 /* 14623 * Function: sd_return_command 14624 * 14625 * Description: Returns a command to its originator (with or without an 14626 * error). Also starts commands waiting to be transported 14627 * to the target. 
14628 * 14629 * Context: May be called from interrupt, kernel, or timeout context 14630 */ 14631 14632 static void 14633 sd_return_command(struct sd_lun *un, struct buf *bp) 14634 { 14635 struct sd_xbuf *xp; 14636 struct scsi_pkt *pktp; 14637 struct sd_fm_internal *sfip; 14638 14639 ASSERT(bp != NULL); 14640 ASSERT(un != NULL); 14641 ASSERT(mutex_owned(SD_MUTEX(un))); 14642 ASSERT(bp != un->un_rqs_bp); 14643 xp = SD_GET_XBUF(bp); 14644 ASSERT(xp != NULL); 14645 14646 pktp = SD_GET_PKTP(bp); 14647 sfip = (struct sd_fm_internal *)un->un_fm_private; 14648 ASSERT(sfip != NULL); 14649 14650 SD_TRACE(SD_LOG_IO_CORE, un, "sd_return_command: entry\n"); 14651 14652 /* 14653 * Note: check for the "sdrestart failed" case. 14654 */ 14655 if ((un->un_partial_dma_supported == 1) && 14656 ((xp->xb_pkt_flags & SD_XB_USCSICMD) != SD_XB_USCSICMD) && 14657 (geterror(bp) == 0) && (xp->xb_dma_resid != 0) && 14658 (xp->xb_pktp->pkt_resid == 0)) { 14659 14660 if (sd_setup_next_xfer(un, bp, pktp, xp) != 0) { 14661 /* 14662 * Successfully set up next portion of cmd 14663 * transfer, try sending it 14664 */ 14665 sd_retry_command(un, bp, SD_RETRIES_NOCHECK, 14666 NULL, NULL, 0, (clock_t)0, NULL); 14667 sd_start_cmds(un, NULL); 14668 return; /* Note:x86: need a return here? */ 14669 } 14670 } 14671 14672 /* 14673 * If this is the failfast bp, clear it from un_failfast_bp. This 14674 * can happen if upon being re-tried the failfast bp either 14675 * succeeded or encountered another error (possibly even a different 14676 * error than the one that precipitated the failfast state, but in 14677 * that case it would have had to exhaust retries as well). Regardless, 14678 * this should not occur whenever the instance is in the active 14679 * failfast state. 14680 */ 14681 if (bp == un->un_failfast_bp) { 14682 ASSERT(un->un_failfast_state == SD_FAILFAST_INACTIVE); 14683 un->un_failfast_bp = NULL; 14684 } 14685 14686 /* 14687 * Clear the failfast state upon successful completion of ANY cmd. 14688 */ 14689 if (bp->b_error == 0) { 14690 un->un_failfast_state = SD_FAILFAST_INACTIVE; 14691 /* 14692 * If this is a successful command, but used to be retried, 14693 * we will take it as a recovered command and post an 14694 * ereport with driver-assessment of "recovered". 14695 */ 14696 if (xp->xb_ena > 0) { 14697 sd_ssc_extract_info(&sfip->fm_ssc, un, pktp, bp, xp); 14698 sd_ssc_post(&sfip->fm_ssc, SD_FM_DRV_RECOVERY); 14699 } 14700 } else { 14701 /* 14702 * If this is a failed non-USCSI command we will post an 14703 * ereport with driver-assessment set accordingly("fail" or 14704 * "fatal"). 14705 */ 14706 if (!(xp->xb_pkt_flags & SD_XB_USCSICMD)) { 14707 sd_ssc_extract_info(&sfip->fm_ssc, un, pktp, bp, xp); 14708 sd_ssc_post(&sfip->fm_ssc, SD_FM_DRV_FATAL); 14709 } 14710 } 14711 14712 /* 14713 * This is used if the command was retried one or more times. Show that 14714 * we are done with it, and allow processing of the waitq to resume. 14715 */ 14716 if (bp == un->un_retry_bp) { 14717 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14718 "sd_return_command: un:0x%p: " 14719 "RETURNING retry_bp:0x%p\n", un, un->un_retry_bp); 14720 un->un_retry_bp = NULL; 14721 un->un_retry_statp = NULL; 14722 } 14723 14724 SD_UPDATE_RDWR_STATS(un, bp); 14725 SD_UPDATE_PARTITION_STATS(un, bp); 14726 14727 switch (un->un_state) { 14728 case SD_STATE_SUSPENDED: 14729 /* 14730 * Notify any threads waiting in sd_ddi_suspend() that 14731 * a command completion has occurred. 
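 *
 * Editor's sketch (simplified, hypothetical names; assumes the DDI
 * kmutex/kcondvar interfaces): the suspend handshake in miniature.
 * Each completion broadcasts on a condition variable while the
 * suspend path sleeps until the in-transport count drains to zero.
 */
#if 0	/* illustrative sketch only; not part of the original driver */
#define	U_SUSPENDED	1		/* stand-in for SD_STATE_SUSPENDED */

struct unit {				/* hypothetical softstate */
	kmutex_t	mu;
	kcondvar_t	busy_cv;
	int		ncmds_in_transport;
	int		state;
};

/* Completion side: wake the suspend path once a command finishes. */
static void
command_done(struct unit *un)
{
	mutex_enter(&un->mu);
	un->ncmds_in_transport--;
	if (un->state == U_SUSPENDED)
		cv_broadcast(&un->busy_cv);
	mutex_exit(&un->mu);
}

/* Suspend side: block until every in-flight command has drained. */
static void
wait_for_drain(struct unit *un)
{
	mutex_enter(&un->mu);
	un->state = U_SUSPENDED;
	while (un->ncmds_in_transport > 0)
		cv_wait(&un->busy_cv, &un->mu);
	mutex_exit(&un->mu);
}
#endif
/*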
14732 */ 14733 cv_broadcast(&un->un_disk_busy_cv); 14734 break; 14735 default: 14736 sd_start_cmds(un, NULL); 14737 break; 14738 } 14739 14740 /* Return this command up the iodone chain to its originator. */ 14741 mutex_exit(SD_MUTEX(un)); 14742 14743 (*(sd_destroypkt_map[xp->xb_chain_iodone]))(bp); 14744 xp->xb_pktp = NULL; 14745 14746 SD_BEGIN_IODONE(xp->xb_chain_iodone, un, bp); 14747 14748 ASSERT(!mutex_owned(SD_MUTEX(un))); 14749 mutex_enter(SD_MUTEX(un)); 14750 14751 SD_TRACE(SD_LOG_IO_CORE, un, "sd_return_command: exit\n"); 14752 } 14753 14754 14755 /* 14756 * Function: sd_return_failed_command 14757 * 14758 * Description: Command completion when an error occurred. 14759 * 14760 * Context: May be called from interrupt context 14761 */ 14762 14763 static void 14764 sd_return_failed_command(struct sd_lun *un, struct buf *bp, int errcode) 14765 { 14766 ASSERT(bp != NULL); 14767 ASSERT(un != NULL); 14768 ASSERT(mutex_owned(SD_MUTEX(un))); 14769 14770 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14771 "sd_return_failed_command: entry\n"); 14772 14773 /* 14774 * b_resid could already be nonzero due to a partial data 14775 * transfer, so do not change it here. 14776 */ 14777 SD_BIOERROR(bp, errcode); 14778 14779 sd_return_command(un, bp); 14780 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14781 "sd_return_failed_command: exit\n"); 14782 } 14783 14784 14785 /* 14786 * Function: sd_return_failed_command_no_restart 14787 * 14788 * Description: Same as sd_return_failed_command, but ensures that no 14789 * call back into sd_start_cmds will be issued. 14790 * 14791 * Context: May be called from interrupt context 14792 */ 14793 14794 static void 14795 sd_return_failed_command_no_restart(struct sd_lun *un, struct buf *bp, 14796 int errcode) 14797 { 14798 struct sd_xbuf *xp; 14799 14800 ASSERT(bp != NULL); 14801 ASSERT(un != NULL); 14802 ASSERT(mutex_owned(SD_MUTEX(un))); 14803 xp = SD_GET_XBUF(bp); 14804 ASSERT(xp != NULL); 14805 ASSERT(errcode != 0); 14806 14807 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14808 "sd_return_failed_command_no_restart: entry\n"); 14809 14810 /* 14811 * b_resid could already be nonzero due to a partial data 14812 * transfer, so do not change it here. 14813 */ 14814 SD_BIOERROR(bp, errcode); 14815 14816 /* 14817 * If this is the failfast bp, clear it. This can happen if the 14818 * failfast bp encountered a fatal error when we attempted to 14819 * re-try it (such as a scsi_transport(9F) failure). However 14820 * we should NOT be in an active failfast state if the failfast 14821 * bp is not NULL. 14822 */ 14823 if (bp == un->un_failfast_bp) { 14824 ASSERT(un->un_failfast_state == SD_FAILFAST_INACTIVE); 14825 un->un_failfast_bp = NULL; 14826 } 14827 14828 if (bp == un->un_retry_bp) { 14829 /* 14830 * This command was retried one or more times. Show that we are 14831 * done with it, and allow processing of the waitq to resume.
14832 */ 14833 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14834 "sd_return_failed_command_no_restart: " 14835 " un:0x%p: RETURNING retry_bp:0x%p\n", un, un->un_retry_bp); 14836 un->un_retry_bp = NULL; 14837 un->un_retry_statp = NULL; 14838 } 14839 14840 SD_UPDATE_RDWR_STATS(un, bp); 14841 SD_UPDATE_PARTITION_STATS(un, bp); 14842 14843 mutex_exit(SD_MUTEX(un)); 14844 14845 if (xp->xb_pktp != NULL) { 14846 (*(sd_destroypkt_map[xp->xb_chain_iodone]))(bp); 14847 xp->xb_pktp = NULL; 14848 } 14849 14850 SD_BEGIN_IODONE(xp->xb_chain_iodone, un, bp); 14851 14852 mutex_enter(SD_MUTEX(un)); 14853 14854 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14855 "sd_return_failed_command_no_restart: exit\n"); 14856 } 14857 14858 14859 /* 14860 * Function: sd_retry_command 14861 * 14862 * Description: Queue up a command for retry, or (optionally) fail it 14863 * if retry counts are exhausted. 14864 * 14865 * Arguments: un - Pointer to the sd_lun struct for the target. 14866 * 14867 * bp - Pointer to the buf for the command to be retried. 14868 * 14869 * retry_check_flag - Flag to see which (if any) of the retry 14870 * counts should be decremented/checked. If the indicated 14871 * retry count is exhausted, then the command will not be 14872 * retried; it will be failed instead. This should use a 14873 * value equal to one of the following: 14874 * 14875 * SD_RETRIES_NOCHECK 14876 * SD_RETRIES_STANDARD 14877 * SD_RETRIES_VICTIM 14878 * 14879 * Optionally may be bitwise-OR'ed with SD_RETRIES_ISOLATE 14880 * if the check should be made to see if FLAG_ISOLATE is set 14881 * in the pkt. If FLAG_ISOLATE is set, then the command is 14882 * not retried; it is simply failed. 14883 * 14884 * user_funcp - Ptr to function to call before dispatching the 14885 * command. May be NULL if no action needs to be performed. 14886 * (Primarily intended for printing messages.) 14887 * 14888 * user_arg - Optional argument to be passed along to 14889 * the user_funcp call. 14890 * 14891 * failure_code - errno return code to set in the bp if the 14892 * command is going to be failed. 14893 * 14894 * retry_delay - Retry delay interval in (clock_t) units. May 14895 * be zero which indicates that the command should be retried 14896 * immediately (ie, without an intervening delay). 14897 * 14898 * statp - Ptr to kstat function to be updated if the command 14899 * is queued for a delayed retry. May be NULL if no kstat 14900 * update is desired. 14901 * 14902 * Context: May be called from interrupt context. 14903 */ 14904 14905 static void 14906 sd_retry_command(struct sd_lun *un, struct buf *bp, int retry_check_flag, 14907 void (*user_funcp)(struct sd_lun *un, struct buf *bp, void *argp, int 14908 code), void *user_arg, int failure_code, clock_t retry_delay, 14909 void (*statp)(kstat_io_t *)) 14910 { 14911 struct sd_xbuf *xp; 14912 struct scsi_pkt *pktp; 14913 struct sd_fm_internal *sfip; 14914 14915 ASSERT(un != NULL); 14916 ASSERT(mutex_owned(SD_MUTEX(un))); 14917 ASSERT(bp != NULL); 14918 xp = SD_GET_XBUF(bp); 14919 ASSERT(xp != NULL); 14920 pktp = SD_GET_PKTP(bp); 14921 ASSERT(pktp != NULL); 14922 14923 sfip = (struct sd_fm_internal *)un->un_fm_private; 14924 ASSERT(sfip != NULL); 14925 14926 SD_TRACE(SD_LOG_IO | SD_LOG_ERROR, un, 14927 "sd_retry_command: entry: bp:0x%p xp:0x%p\n", bp, xp); 14928 14929 /* 14930 * If we are syncing or dumping, fail the command to avoid 14931 * recursively calling back into scsi_transport().
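 *
 * Editor's sketch (values and names invented for illustration): how a
 * flag word of the shape documented above decodes -- a retry category
 * in the low bits plus optional modifier bits OR'ed on top, as with
 * SD_RETRIES_ISOLATE.
 */
#if 0	/* illustrative sketch only; not part of the original driver */
#define	RETRIES_MASK		0x0f	/* low bits: which count to check */
#define	RETRIES_ISOLATE		0x10	/* modifier: honor FLAG_ISOLATE */
#define	PKT_FLAG_ISOLATE	0x01	/* stand-in for the real pkt flag */

/* Nonzero means fail the command outright instead of retrying it. */
static int
isolate_says_fail(int retry_check_flag, int pkt_flags)
{
	return (((retry_check_flag & RETRIES_ISOLATE) != 0) &&
	    ((pkt_flags & PKT_FLAG_ISOLATE) != 0));
}
#endif
/*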
14932 */ 14933 if (ddi_in_panic()) { 14934 goto fail_command_no_log; 14935 } 14936 14937 /* 14938 * We should never be retrying a command with FLAG_DIAGNOSE set, so 14939 * log an error and fail the command. 14940 */ 14941 if ((pktp->pkt_flags & FLAG_DIAGNOSE) != 0) { 14942 scsi_log(SD_DEVINFO(un), sd_label, CE_NOTE, 14943 "ERROR, retrying FLAG_DIAGNOSE command.\n"); 14944 sd_dump_memory(un, SD_LOG_IO, "CDB", 14945 (uchar_t *)pktp->pkt_cdbp, CDB_SIZE, SD_LOG_HEX); 14946 sd_dump_memory(un, SD_LOG_IO, "Sense Data", 14947 (uchar_t *)xp->xb_sense_data, SENSE_LENGTH, SD_LOG_HEX); 14948 goto fail_command; 14949 } 14950 14951 /* 14952 * If we are suspended, then put the command onto the head of the 14953 * wait queue since we don't want to start more commands, and 14954 * clear the un_retry_bp. Next time we are resumed, we will 14955 * handle the command in the wait queue. 14956 */ 14957 switch (un->un_state) { 14958 case SD_STATE_SUSPENDED: 14959 case SD_STATE_DUMPING: 14960 bp->av_forw = un->un_waitq_headp; 14961 un->un_waitq_headp = bp; 14962 if (un->un_waitq_tailp == NULL) { 14963 un->un_waitq_tailp = bp; 14964 } 14965 if (bp == un->un_retry_bp) { 14966 un->un_retry_bp = NULL; 14967 un->un_retry_statp = NULL; 14968 } 14969 SD_UPDATE_KSTATS(un, kstat_waitq_enter, bp); 14970 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sd_retry_command: " 14971 "exiting; cmd bp:0x%p requeued for SUSPEND/DUMP\n", bp); 14972 return; 14973 default: 14974 break; 14975 } 14976 14977 /* 14978 * If the caller wants us to check FLAG_ISOLATE, then see if that 14979 * is set; if it is then we do not want to retry the command. 14980 * Normally, FLAG_ISOLATE is only used with USCSI cmds. 14981 */ 14982 if ((retry_check_flag & SD_RETRIES_ISOLATE) != 0) { 14983 if ((pktp->pkt_flags & FLAG_ISOLATE) != 0) { 14984 goto fail_command; 14985 } 14986 } 14987 14988 14989 /* 14990 * If SD_RETRIES_FAILFAST is set, it indicates that either a 14991 * command timeout or a selection timeout has occurred. This means 14992 * that we were unable to establish any kind of communication with 14993 * the target, and subsequent retries and/or commands are likely 14994 * to encounter similar results and take a long time to complete. 14995 * 14996 * If this is a failfast error condition, we need to update the 14997 * failfast state, even if this bp does not have B_FAILFAST set. 14998 */ 14999 if (retry_check_flag & SD_RETRIES_FAILFAST) { 15000 if (un->un_failfast_state == SD_FAILFAST_ACTIVE) { 15001 ASSERT(un->un_failfast_bp == NULL); 15002 /* 15003 * If we are already in the active failfast state, and 15004 * another failfast error condition has been detected, 15005 * then fail this command if it has B_FAILFAST set. 15006 * If B_FAILFAST is clear, then maintain the legacy 15007 * behavior of retrying heroically, even though this will 15008 * take a lot more time to fail the command. 15009 */ 15010 if (bp->b_flags & B_FAILFAST) { 15011 goto fail_command; 15012 } 15013 } else { 15014 /* 15015 * We're not in the active failfast state, but we 15016 * have a failfast error condition, so we must begin 15017 * transition to the next state. We do this regardless 15018 * of whether or not this bp has B_FAILFAST set. 15019 */ 15020 if (un->un_failfast_bp == NULL) { 15021 /* 15022 * This is the first bp to meet a failfast 15023 * condition so save it on un_failfast_bp & 15024 * do normal retry processing. Do not enter 15025 * active failfast state yet. This marks 15026 * entry into the "failfast pending" state.
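 *
 * Editor's sketch of the three-way failfast progression described
 * above. The driver tracks "pending" implicitly through a non-NULL
 * un_failfast_bp; the model below makes that state explicit. Names
 * are hypothetical.
 */
#if 0	/* illustrative sketch only; not part of the original driver */
enum ff_state { FF_INACTIVE, FF_PENDING, FF_ACTIVE };

struct ff {
	enum ff_state	state;
	void		*pending_bp;	/* first bp to hit the condition */
};

/*
 * Feed in one failfast error condition; a nonzero return means the
 * caller should flush the queues (the pending -> active transition).
 */
static int
ff_error_seen(struct ff *f, void *bp)
{
	if (f->pending_bp == NULL) {
		f->pending_bp = bp;	/* inactive -> pending */
		f->state = FF_PENDING;
		return (0);
	}
	if (f->pending_bp == bp) {	/* same bp failed a second time */
		f->state = FF_ACTIVE;
		f->pending_bp = NULL;
		return (1);
	}
	return (0);	/* another bp is pending; retry this one normally */
}

/* A non-failfast error condition aggressively clears the state. */
static void
ff_ok_seen(struct ff *f)
{
	f->state = FF_INACTIVE;
}
#endif
/*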
15027 */ 15028 un->un_failfast_bp = bp; 15029 15030 } else if (un->un_failfast_bp == bp) { 15031 /* 15032 * This is the second time *this* bp has 15033 * encountered a failfast error condition, 15034 * so enter active failfast state & flush 15035 * queues as appropriate. 15036 */ 15037 un->un_failfast_state = SD_FAILFAST_ACTIVE; 15038 un->un_failfast_bp = NULL; 15039 sd_failfast_flushq(un); 15040 15041 /* 15042 * Fail this bp now if B_FAILFAST set; 15043 * otherwise continue with retries. (It would 15044 * be pretty ironic if this bp succeeded on a 15045 * subsequent retry after we just flushed all 15046 * the queues). 15047 */ 15048 if (bp->b_flags & B_FAILFAST) { 15049 goto fail_command; 15050 } 15051 15052 #if !defined(lint) && !defined(__lint) 15053 } else { 15054 /* 15055 * If neither of the preceding conditionals 15056 * was true, it means that there is some 15057 * *other* bp that has met an initial failfast 15058 * condition and is currently either being 15059 * retried or is waiting to be retried. In 15060 * that case we should perform normal retry 15061 * processing on *this* bp, since there is a 15062 * chance that the current failfast condition 15063 * is transient and recoverable. If that does 15064 * not turn out to be the case, then retries 15065 * will be cleared when the wait queue is 15066 * flushed anyway. 15067 */ 15068 #endif 15069 } 15070 } 15071 } else { 15072 /* 15073 * SD_RETRIES_FAILFAST is clear, which indicates that we 15074 * likely were able to at least establish some level of 15075 * communication with the target and subsequent commands 15076 * and/or retries are likely to get through to the target. 15077 * In this case we want to be aggressive about clearing 15078 * the failfast state. Note that this does not affect 15079 * the "failfast pending" condition. 15080 */ 15081 un->un_failfast_state = SD_FAILFAST_INACTIVE; 15082 } 15083 15084 15085 /* 15086 * Check the specified retry count to see if we can still do 15087 * any retries with this pkt before we should fail it. 15088 */ 15089 switch (retry_check_flag & SD_RETRIES_MASK) { 15090 case SD_RETRIES_VICTIM: 15091 /* 15092 * Check the victim retry count. If exhausted, then fall 15093 * thru & check against the standard retry count. 15094 */ 15095 if (xp->xb_victim_retry_count < un->un_victim_retry_count) { 15096 /* Increment count & proceed with the retry */ 15097 xp->xb_victim_retry_count++; 15098 break; 15099 } 15100 /* Victim retries exhausted, fall back to std. retries... */ 15101 /* FALLTHRU */ 15102 15103 case SD_RETRIES_STANDARD: 15104 if (xp->xb_retry_count >= un->un_retry_count) { 15105 /* Retries exhausted, fail the command */ 15106 SD_TRACE(SD_LOG_IO_CORE, un, 15107 "sd_retry_command: retries exhausted!\n"); 15108 /* 15109 * update b_resid for failed SCMD_READ & SCMD_WRITE 15110 * commands with nonzero pkt_resid.
15111 */ 15112 if ((pktp->pkt_reason == CMD_CMPLT) && 15113 (SD_GET_PKT_STATUS(pktp) == STATUS_GOOD) && 15114 (pktp->pkt_resid != 0)) { 15115 uchar_t op = SD_GET_PKT_OPCODE(pktp) & 0x1F; 15116 if ((op == SCMD_READ) || (op == SCMD_WRITE)) { 15117 SD_UPDATE_B_RESID(bp, pktp); 15118 } 15119 } 15120 goto fail_command; 15121 } 15122 xp->xb_retry_count++; 15123 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15124 "sd_retry_command: retry count:%d\n", xp->xb_retry_count); 15125 break; 15126 15127 case SD_RETRIES_UA: 15128 if (xp->xb_ua_retry_count >= sd_ua_retry_count) { 15129 /* Retries exhausted, fail the command */ 15130 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 15131 "Unit Attention retries exhausted. " 15132 "Check the target.\n"); 15133 goto fail_command; 15134 } 15135 xp->xb_ua_retry_count++; 15136 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15137 "sd_retry_command: retry count:%d\n", 15138 xp->xb_ua_retry_count); 15139 break; 15140 15141 case SD_RETRIES_BUSY: 15142 if (xp->xb_retry_count >= un->un_busy_retry_count) { 15143 /* Retries exhausted, fail the command */ 15144 SD_TRACE(SD_LOG_IO_CORE, un, 15145 "sd_retry_command: retries exhausted!\n"); 15146 goto fail_command; 15147 } 15148 xp->xb_retry_count++; 15149 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15150 "sd_retry_command: retry count:%d\n", xp->xb_retry_count); 15151 break; 15152 15153 case SD_RETRIES_NOCHECK: 15154 default: 15155 /* No retry count to check. Just proceed with the retry */ 15156 break; 15157 } 15158 15159 xp->xb_pktp->pkt_flags |= FLAG_HEAD; 15160 15161 /* 15162 * If this is a non-USCSI command being retried 15163 * during execution last time, we should post an ereport with 15164 * driver-assessment of the value "retry". 15165 * For partial DMA, request sense and STATUS_QFULL, there are no 15166 * hardware errors, we bypass ereport posting. 15167 */ 15168 if (failure_code != 0) { 15169 if (!(xp->xb_pkt_flags & SD_XB_USCSICMD)) { 15170 sd_ssc_extract_info(&sfip->fm_ssc, un, pktp, bp, xp); 15171 sd_ssc_post(&sfip->fm_ssc, SD_FM_DRV_RETRY); 15172 } 15173 } 15174 15175 /* 15176 * If we were given a zero timeout, we must attempt to retry the 15177 * command immediately (ie, without a delay). 15178 */ 15179 if (retry_delay == 0) { 15180 /* 15181 * Check some limiting conditions to see if we can actually 15182 * do the immediate retry. If we cannot, then we must 15183 * fall back to queueing up a delayed retry. 15184 */ 15185 if (un->un_ncmds_in_transport >= un->un_throttle) { 15186 /* 15187 * We are at the throttle limit for the target, 15188 * fall back to delayed retry. 15189 */ 15190 retry_delay = un->un_busy_timeout; 15191 statp = kstat_waitq_enter; 15192 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15193 "sd_retry_command: immed. retry hit " 15194 "throttle!\n"); 15195 } else { 15196 /* 15197 * We're clear to proceed with the immediate retry. 15198 * First call the user-provided function (if any) 15199 */ 15200 if (user_funcp != NULL) { 15201 (*user_funcp)(un, bp, user_arg, 15202 SD_IMMEDIATE_RETRY_ISSUED); 15203 #ifdef __lock_lint 15204 sd_print_incomplete_msg(un, bp, user_arg, 15205 SD_IMMEDIATE_RETRY_ISSUED); 15206 sd_print_cmd_incomplete_msg(un, bp, user_arg, 15207 SD_IMMEDIATE_RETRY_ISSUED); 15208 sd_print_sense_failed_msg(un, bp, user_arg, 15209 SD_IMMEDIATE_RETRY_ISSUED); 15210 #endif 15211 } 15212 15213 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15214 "sd_retry_command: issuing immediate retry\n"); 15215 15216 /* 15217 * Call sd_start_cmds() to transport the command to 15218 * the target. 
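 *
 * Editor's sketch (hypothetical names) of the immediate-vs-delayed
 * decision above: a zero delay asks for an immediate retry, but the
 * throttle check can demote it to a delayed one.
 */
#if 0	/* illustrative sketch only; not part of the original driver */
struct unit {				/* simplified softstate */
	int	ncmds_in_transport;
	int	throttle;
	long	busy_timeout;
};

extern void start_now(struct unit *, void *);		/* immediate path */
extern void queue_delayed(struct unit *, void *, long);	/* timeout path */

static void
dispatch_retry(struct unit *un, void *bp, long delay)
{
	if ((delay == 0) && (un->ncmds_in_transport >= un->throttle))
		delay = un->busy_timeout;	/* fall back to delayed */

	if (delay == 0)
		start_now(un, bp);
	else
		queue_delayed(un, bp, delay);
}
#endif
/*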
15219 */ 15220 sd_start_cmds(un, bp); 15221 15222 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15223 "sd_retry_command exit\n"); 15224 return; 15225 } 15226 } 15227 15228 /* 15229 * Set up to retry the command after a delay. 15230 * First call the user-provided function (if any) 15231 */ 15232 if (user_funcp != NULL) { 15233 (*user_funcp)(un, bp, user_arg, SD_DELAYED_RETRY_ISSUED); 15234 } 15235 15236 sd_set_retry_bp(un, bp, retry_delay, statp); 15237 15238 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sd_retry_command: exit\n"); 15239 return; 15240 15241 fail_command: 15242 15243 if (user_funcp != NULL) { 15244 (*user_funcp)(un, bp, user_arg, SD_NO_RETRY_ISSUED); 15245 } 15246 15247 fail_command_no_log: 15248 15249 SD_INFO(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15250 "sd_retry_command: returning failed command\n"); 15251 15252 sd_return_failed_command(un, bp, failure_code); 15253 15254 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sd_retry_command: exit\n"); 15255 } 15256 15257 15258 /* 15259 * Function: sd_set_retry_bp 15260 * 15261 * Description: Set up the given bp for retry. 15262 * 15263 * Arguments: un - ptr to associated softstate 15264 * bp - ptr to buf(9S) for the command 15265 * retry_delay - time interval before issuing retry (may be 0) 15266 * statp - optional pointer to kstat function 15267 * 15268 * Context: May be called under interrupt context 15269 */ 15270 15271 static void 15272 sd_set_retry_bp(struct sd_lun *un, struct buf *bp, clock_t retry_delay, 15273 void (*statp)(kstat_io_t *)) 15274 { 15275 ASSERT(un != NULL); 15276 ASSERT(mutex_owned(SD_MUTEX(un))); 15277 ASSERT(bp != NULL); 15278 15279 SD_TRACE(SD_LOG_IO | SD_LOG_ERROR, un, 15280 "sd_set_retry_bp: entry: un:0x%p bp:0x%p\n", un, bp); 15281 15282 /* 15283 * Indicate that the command is being retried. This will not allow any 15284 * other commands on the wait queue to be transported to the target 15285 * until this command has been completed (success or failure). The 15286 * "retry command" is not transported to the target until the given 15287 * time delay expires, unless the user specified a 0 retry_delay. 15288 * 15289 * Note: the timeout(9F) callback routine is what actually calls 15290 * sd_start_cmds() to transport the command, with the exception of a 15291 * zero retry_delay. The only current implementor of a zero retry delay 15292 * is the case where a START_STOP_UNIT is sent to spin-up a device. 15293 */ 15294 if (un->un_retry_bp == NULL) { 15295 ASSERT(un->un_retry_statp == NULL); 15296 un->un_retry_bp = bp; 15297 15298 /* 15299 * If the user has not specified a delay the command should 15300 * be queued and no timeout should be scheduled. 15301 */ 15302 if (retry_delay == 0) { 15303 /* 15304 * Save the kstat pointer that will be used in the 15305 * call to SD_UPDATE_KSTATS() below, so that 15306 * sd_start_cmds() can correctly decrement the waitq 15307 * count when it is time to transport this command. 15308 */ 15309 un->un_retry_statp = statp; 15310 goto done; 15311 } 15312 } 15313 15314 if (un->un_retry_bp == bp) { 15315 /* 15316 * Save the kstat pointer that will be used in the call to 15317 * SD_UPDATE_KSTATS() below, so that sd_start_cmds() can 15318 * correctly decrement the waitq count when it is time to 15319 * transport this command. 15320 */ 15321 un->un_retry_statp = statp; 15322 15323 /* 15324 * Schedule a timeout if: 15325 * 1) The user has specified a delay. 15326 * 2) There is not a START_STOP_UNIT callback pending. 
15327 * 15328 * If no delay has been specified, then it is up to the caller 15329 * to ensure that IO processing continues without stalling. 15330 * Effectively, this means that the caller will issue the 15331 * required call to sd_start_cmds(). The START_STOP_UNIT 15332 * callback does this after the START STOP UNIT command has 15333 * completed. In either of these cases we should not schedule 15334 * a timeout callback here. Also don't schedule the timeout if 15335 * an SD_PATH_DIRECT_PRIORITY command is waiting to restart. 15336 */ 15337 if ((retry_delay != 0) && (un->un_startstop_timeid == NULL) && 15338 (un->un_direct_priority_timeid == NULL)) { 15339 un->un_retry_timeid = 15340 timeout(sd_start_retry_command, un, retry_delay); 15341 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15342 "sd_set_retry_bp: setting timeout: un: 0x%p" 15343 " bp:0x%p un_retry_timeid:0x%p\n", 15344 un, bp, un->un_retry_timeid); 15345 } 15346 } else { 15347 /* 15348 * We only get in here if there is already another command 15349 * waiting to be retried. In this case, we just put the 15350 * given command onto the wait queue, so it can be transported 15351 * after the current retry command has completed. 15352 * 15353 * Also we have to make sure that if the command at the head 15354 * of the wait queue is the un_failfast_bp, that we do not 15355 * put ahead of it any other commands that are to be retried. 15356 */ 15357 if ((un->un_failfast_bp != NULL) && 15358 (un->un_failfast_bp == un->un_waitq_headp)) { 15359 /* 15360 * Enqueue this command AFTER the first command on 15361 * the wait queue (which is also un_failfast_bp). 15362 */ 15363 bp->av_forw = un->un_waitq_headp->av_forw; 15364 un->un_waitq_headp->av_forw = bp; 15365 if (un->un_waitq_headp == un->un_waitq_tailp) { 15366 un->un_waitq_tailp = bp; 15367 } 15368 } else { 15369 /* Enqueue this command at the head of the waitq. */ 15370 bp->av_forw = un->un_waitq_headp; 15371 un->un_waitq_headp = bp; 15372 if (un->un_waitq_tailp == NULL) { 15373 un->un_waitq_tailp = bp; 15374 } 15375 } 15376 15377 if (statp == NULL) { 15378 statp = kstat_waitq_enter; 15379 } 15380 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15381 "sd_set_retry_bp: un:0x%p already delayed retry\n", un); 15382 } 15383 15384 done: 15385 if (statp != NULL) { 15386 SD_UPDATE_KSTATS(un, statp, bp); 15387 } 15388 15389 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15390 "sd_set_retry_bp: exit un:0x%p\n", un); 15391 } 15392 15393 15394 /* 15395 * Function: sd_start_retry_command 15396 * 15397 * Description: Start the command that has been waiting on the target's 15398 * retry queue. Called from timeout(9F) context after the 15399 * retry delay interval has expired. 15400 * 15401 * Arguments: arg - pointer to associated softstate for the device. 15402 * 15403 * Context: timeout(9F) thread context. May not sleep. 
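 *
 * Editor's sketch (hypothetical names; timeout_id_t stands in for the
 * DDI type) of the callback shape sd_start_retry_command() follows:
 * retake the lock and clear the saved timeout id before doing any
 * work, so cancel paths never untimeout a stale id.
 */
#if 0	/* illustrative sketch only; not part of the original driver */
typedef void *timeout_id_t;

struct unit {				/* simplified softstate */
	kmutex_t	mu;
	timeout_id_t	retry_timeid;
	void		*retry_bp;
};

extern void start_cmds(struct unit *, void *);

static void
retry_cb(void *arg)
{
	struct unit *un = arg;

	mutex_enter(&un->mu);
	un->retry_timeid = NULL;	/* clear first, then act */
	if (un->retry_bp != NULL)
		start_cmds(un, un->retry_bp);
	mutex_exit(&un->mu);
}
#endif
/*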
15404 */ 15405 15406 static void 15407 sd_start_retry_command(void *arg) 15408 { 15409 struct sd_lun *un = arg; 15410 15411 ASSERT(un != NULL); 15412 ASSERT(!mutex_owned(SD_MUTEX(un))); 15413 15414 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15415 "sd_start_retry_command: entry\n"); 15416 15417 mutex_enter(SD_MUTEX(un)); 15418 15419 un->un_retry_timeid = NULL; 15420 15421 if (un->un_retry_bp != NULL) { 15422 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15423 "sd_start_retry_command: un:0x%p STARTING bp:0x%p\n", 15424 un, un->un_retry_bp); 15425 sd_start_cmds(un, un->un_retry_bp); 15426 } 15427 15428 mutex_exit(SD_MUTEX(un)); 15429 15430 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15431 "sd_start_retry_command: exit\n"); 15432 } 15433 15434 15435 /* 15436 * Function: sd_start_direct_priority_command 15437 * 15438 * Description: Used to re-start an SD_PATH_DIRECT_PRIORITY command that had 15439 * received TRAN_BUSY when we called scsi_transport() to send it 15440 * to the underlying HBA. This function is called from timeout(9F) 15441 * context after the delay interval has expired. 15442 * 15443 * Arguments: arg - pointer to associated buf(9S) to be restarted. 15444 * 15445 * Context: timeout(9F) thread context. May not sleep. 15446 */ 15447 15448 static void 15449 sd_start_direct_priority_command(void *arg) 15450 { 15451 struct buf *priority_bp = arg; 15452 struct sd_lun *un; 15453 15454 ASSERT(priority_bp != NULL); 15455 un = SD_GET_UN(priority_bp); 15456 ASSERT(un != NULL); 15457 ASSERT(!mutex_owned(SD_MUTEX(un))); 15458 15459 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15460 "sd_start_direct_priority_command: entry\n"); 15461 15462 mutex_enter(SD_MUTEX(un)); 15463 un->un_direct_priority_timeid = NULL; 15464 sd_start_cmds(un, priority_bp); 15465 mutex_exit(SD_MUTEX(un)); 15466 15467 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15468 "sd_start_direct_priority_command: exit\n"); 15469 } 15470 15471 15472 /* 15473 * Function: sd_send_request_sense_command 15474 * 15475 * Description: Sends a REQUEST SENSE command to the target 15476 * 15477 * Context: May be called from interrupt context. 15478 */ 15479 15480 static void 15481 sd_send_request_sense_command(struct sd_lun *un, struct buf *bp, 15482 struct scsi_pkt *pktp) 15483 { 15484 ASSERT(bp != NULL); 15485 ASSERT(un != NULL); 15486 ASSERT(mutex_owned(SD_MUTEX(un))); 15487 15488 SD_TRACE(SD_LOG_IO | SD_LOG_ERROR, un, "sd_send_request_sense_command: " 15489 "entry: buf:0x%p\n", bp); 15490 15491 /* 15492 * If we are syncing or dumping, then fail the command to avoid a 15493 * recursive callback into scsi_transport(). Also fail the command 15494 * if we are suspended (legacy behavior). 15495 */ 15496 if (ddi_in_panic() || (un->un_state == SD_STATE_SUSPENDED) || 15497 (un->un_state == SD_STATE_DUMPING)) { 15498 sd_return_failed_command(un, bp, EIO); 15499 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15500 "sd_send_request_sense_command: syncing/dumping, exit\n"); 15501 return; 15502 } 15503 15504 /* 15505 * Retry the failed command and don't issue the request sense if: 15506 * 1) the sense buf is busy 15507 * 2) we have 1 or more outstanding commands on the target 15508 * (the sense data will be cleared or invalidated anyway) 15509 * 15510 * Note: There could be an issue with not checking a retry limit here; 15511 * the problem is determining which retry limit to check.
15512 */ 15513 if ((un->un_sense_isbusy != 0) || (un->un_ncmds_in_transport > 0)) { 15514 /* Don't retry if the command is flagged as non-retryable */ 15515 if ((pktp->pkt_flags & FLAG_DIAGNOSE) == 0) { 15516 sd_retry_command(un, bp, SD_RETRIES_NOCHECK, 15517 NULL, NULL, 0, un->un_busy_timeout, 15518 kstat_waitq_enter); 15519 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15520 "sd_send_request_sense_command: " 15521 "at full throttle, retrying exit\n"); 15522 } else { 15523 sd_return_failed_command(un, bp, EIO); 15524 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15525 "sd_send_request_sense_command: " 15526 "at full throttle, non-retryable exit\n"); 15527 } 15528 return; 15529 } 15530 15531 sd_mark_rqs_busy(un, bp); 15532 sd_start_cmds(un, un->un_rqs_bp); 15533 15534 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15535 "sd_send_request_sense_command: exit\n"); 15536 } 15537 15538 15539 /* 15540 * Function: sd_mark_rqs_busy 15541 * 15542 * Description: Indicate that the request sense bp for this instance is 15543 * in use. 15544 * 15545 * Context: May be called under interrupt context 15546 */ 15547 15548 static void 15549 sd_mark_rqs_busy(struct sd_lun *un, struct buf *bp) 15550 { 15551 struct sd_xbuf *sense_xp; 15552 15553 ASSERT(un != NULL); 15554 ASSERT(bp != NULL); 15555 ASSERT(mutex_owned(SD_MUTEX(un))); 15556 ASSERT(un->un_sense_isbusy == 0); 15557 15558 SD_TRACE(SD_LOG_IO_CORE, un, "sd_mark_rqs_busy: entry: " 15559 "buf:0x%p xp:0x%p un:0x%p\n", bp, SD_GET_XBUF(bp), un); 15560 15561 sense_xp = SD_GET_XBUF(un->un_rqs_bp); 15562 ASSERT(sense_xp != NULL); 15563 15564 SD_INFO(SD_LOG_IO, un, 15565 "sd_mark_rqs_busy: entry: sense_xp:0x%p\n", sense_xp); 15566 15567 ASSERT(sense_xp->xb_pktp != NULL); 15568 ASSERT((sense_xp->xb_pktp->pkt_flags & (FLAG_SENSING | FLAG_HEAD)) 15569 == (FLAG_SENSING | FLAG_HEAD)); 15570 15571 un->un_sense_isbusy = 1; 15572 un->un_rqs_bp->b_resid = 0; 15573 sense_xp->xb_pktp->pkt_resid = 0; 15574 sense_xp->xb_pktp->pkt_reason = 0; 15575 15576 /* So we can get back the bp at interrupt time! */ 15577 sense_xp->xb_sense_bp = bp; 15578 15579 bzero(un->un_rqs_bp->b_un.b_addr, SENSE_LENGTH); 15580 15581 /* 15582 * Mark this buf as awaiting sense data. (This is already set in 15583 * the pkt_flags for the RQS packet.) 15584 */ 15585 ((SD_GET_XBUF(bp))->xb_pktp)->pkt_flags |= FLAG_SENSING; 15586 15587 /* Request sense down same path */ 15588 if (scsi_pkt_allocated_correctly((SD_GET_XBUF(bp))->xb_pktp) && 15589 ((SD_GET_XBUF(bp))->xb_pktp)->pkt_path_instance) 15590 sense_xp->xb_pktp->pkt_path_instance = 15591 ((SD_GET_XBUF(bp))->xb_pktp)->pkt_path_instance; 15592 15593 sense_xp->xb_retry_count = 0; 15594 sense_xp->xb_victim_retry_count = 0; 15595 sense_xp->xb_ua_retry_count = 0; 15596 sense_xp->xb_nr_retry_count = 0; 15597 sense_xp->xb_dma_resid = 0; 15598 15599 /* Clean up the fields for auto-request sense */ 15600 sense_xp->xb_sense_status = 0; 15601 sense_xp->xb_sense_state = 0; 15602 sense_xp->xb_sense_resid = 0; 15603 bzero(sense_xp->xb_sense_data, sizeof (sense_xp->xb_sense_data)); 15604 15605 SD_TRACE(SD_LOG_IO_CORE, un, "sd_mark_rqs_busy: exit\n"); 15606 } 15607 15608 15609 /* 15610 * Function: sd_mark_rqs_idle 15611 * 15612 * Description: SD_MUTEX must be held continuously through this routine 15613 * to prevent reuse of the rqs struct before the caller can 15614 * complete its processing.
15615 * 15616 * Return Code: Pointer to the RQS buf 15617 * 15618 * Context: May be called under interrupt context 15619 */ 15620 15621 static struct buf * 15622 sd_mark_rqs_idle(struct sd_lun *un, struct sd_xbuf *sense_xp) 15623 { 15624 struct buf *bp; 15625 ASSERT(un != NULL); 15626 ASSERT(sense_xp != NULL); 15627 ASSERT(mutex_owned(SD_MUTEX(un))); 15628 ASSERT(un->un_sense_isbusy != 0); 15629 15630 un->un_sense_isbusy = 0; 15631 bp = sense_xp->xb_sense_bp; 15632 sense_xp->xb_sense_bp = NULL; 15633 15634 /* This pkt is no longer interested in getting sense data */ 15635 ((SD_GET_XBUF(bp))->xb_pktp)->pkt_flags &= ~FLAG_SENSING; 15636 15637 return (bp); 15638 } 15639 15640 15641 15642 /* 15643 * Function: sd_alloc_rqs 15644 * 15645 * Description: Set up the unit to receive auto request sense data 15646 * 15647 * Return Code: DDI_SUCCESS or DDI_FAILURE 15648 * 15649 * Context: Called under attach(9E) context 15650 */ 15651 15652 static int 15653 sd_alloc_rqs(struct scsi_device *devp, struct sd_lun *un) 15654 { 15655 struct sd_xbuf *xp; 15656 15657 ASSERT(un != NULL); 15658 ASSERT(!mutex_owned(SD_MUTEX(un))); 15659 ASSERT(un->un_rqs_bp == NULL); 15660 ASSERT(un->un_rqs_pktp == NULL); 15661 15662 /* 15663 * First allocate the required buf and scsi_pkt structs, then set up 15664 * the CDB in the scsi_pkt for a REQUEST SENSE command. 15665 */ 15666 un->un_rqs_bp = scsi_alloc_consistent_buf(&devp->sd_address, NULL, 15667 MAX_SENSE_LENGTH, B_READ, SLEEP_FUNC, NULL); 15668 if (un->un_rqs_bp == NULL) { 15669 return (DDI_FAILURE); 15670 } 15671 15672 un->un_rqs_pktp = scsi_init_pkt(&devp->sd_address, NULL, un->un_rqs_bp, 15673 CDB_GROUP0, 1, 0, PKT_CONSISTENT, SLEEP_FUNC, NULL); 15674 15675 if (un->un_rqs_pktp == NULL) { 15676 sd_free_rqs(un); 15677 return (DDI_FAILURE); 15678 } 15679 15680 /* Set up the CDB in the scsi_pkt for a REQUEST SENSE command. */ 15681 (void) scsi_setup_cdb((union scsi_cdb *)un->un_rqs_pktp->pkt_cdbp, 15682 SCMD_REQUEST_SENSE, 0, MAX_SENSE_LENGTH, 0); 15683 15684 SD_FILL_SCSI1_LUN(un, un->un_rqs_pktp); 15685 15686 /* Set up the other needed members in the ARQ scsi_pkt. */ 15687 un->un_rqs_pktp->pkt_comp = sdintr; 15688 un->un_rqs_pktp->pkt_time = sd_io_time; 15689 un->un_rqs_pktp->pkt_flags |= 15690 (FLAG_SENSING | FLAG_HEAD); /* (1222170) */ 15691 15692 /* 15693 * Allocate & init the sd_xbuf struct for the RQS command. Do not 15694 * provide any intpkt, destroypkt routines as we take care of 15695 * scsi_pkt allocation/freeing here and in sd_free_rqs(). 15696 */ 15697 xp = kmem_alloc(sizeof (struct sd_xbuf), KM_SLEEP); 15698 sd_xbuf_init(un, un->un_rqs_bp, xp, SD_CHAIN_NULL, NULL); 15699 xp->xb_pktp = un->un_rqs_pktp; 15700 SD_INFO(SD_LOG_ATTACH_DETACH, un, 15701 "sd_alloc_rqs: un 0x%p, rqs xp 0x%p, pkt 0x%p, buf 0x%p\n", 15702 un, xp, un->un_rqs_pktp, un->un_rqs_bp); 15703 15704 /* 15705 * Save the pointer to the request sense private bp so it can 15706 * be retrieved in sdintr. 15707 */ 15708 un->un_rqs_pktp->pkt_private = un->un_rqs_bp; 15709 ASSERT(un->un_rqs_bp->b_private == xp); 15710 15711 /* 15712 * See if the HBA supports auto-request sense for the specified 15713 * target/lun. If it does, then try to enable it (if not already 15714 * enabled). 15715 * 15716 * Note: For some HBAs (ifp & sf), scsi_ifsetcap will always return 15717 * failure, while for other HBAs (pln) scsi_ifsetcap will always 15718 * return success. However, in both of these cases ARQ is always 15719 * enabled and scsi_ifgetcap will always return true. 
The best approach 15720 * is to issue the scsi_ifgetcap() first, then try the scsi_ifsetcap(). 15721 * 15722 * The 3rd case is an HBA (adp) that always returns enabled on 15723 * scsi_ifgetcap even when ARQ is not enabled; the best approach 15724 * there is to issue a scsi_ifsetcap followed by a scsi_ifgetcap. 15725 * Note: this case is to circumvent the Adaptec bug. (x86 only) 15726 */ 15727 15728 if (un->un_f_is_fibre == TRUE) { 15729 un->un_f_arq_enabled = TRUE; 15730 } else { 15731 #if defined(__i386) || defined(__amd64) 15732 /* 15733 * Circumvent the Adaptec bug, remove this code when 15734 * the bug is fixed 15735 */ 15736 (void) scsi_ifsetcap(SD_ADDRESS(un), "auto-rqsense", 1, 1); 15737 #endif 15738 switch (scsi_ifgetcap(SD_ADDRESS(un), "auto-rqsense", 1)) { 15739 case 0: 15740 SD_INFO(SD_LOG_ATTACH_DETACH, un, 15741 "sd_alloc_rqs: HBA supports ARQ\n"); 15742 /* 15743 * ARQ is supported by this HBA but currently is not 15744 * enabled. Attempt to enable it and if successful then 15745 * mark this instance as ARQ enabled. 15746 */ 15747 if (scsi_ifsetcap(SD_ADDRESS(un), "auto-rqsense", 1, 1) 15748 == 1) { 15749 /* Successfully enabled ARQ in the HBA */ 15750 SD_INFO(SD_LOG_ATTACH_DETACH, un, 15751 "sd_alloc_rqs: ARQ enabled\n"); 15752 un->un_f_arq_enabled = TRUE; 15753 } else { 15754 /* Could not enable ARQ in the HBA */ 15755 SD_INFO(SD_LOG_ATTACH_DETACH, un, 15756 "sd_alloc_rqs: failed ARQ enable\n"); 15757 un->un_f_arq_enabled = FALSE; 15758 } 15759 break; 15760 case 1: 15761 /* 15762 * ARQ is supported by this HBA and is already enabled. 15763 * Just mark ARQ as enabled for this instance. 15764 */ 15765 SD_INFO(SD_LOG_ATTACH_DETACH, un, 15766 "sd_alloc_rqs: ARQ already enabled\n"); 15767 un->un_f_arq_enabled = TRUE; 15768 break; 15769 default: 15770 /* 15771 * ARQ is not supported by this HBA; disable it for this 15772 * instance. 15773 */ 15774 SD_INFO(SD_LOG_ATTACH_DETACH, un, 15775 "sd_alloc_rqs: HBA does not support ARQ\n"); 15776 un->un_f_arq_enabled = FALSE; 15777 break; 15778 } 15779 } 15780 15781 return (DDI_SUCCESS); 15782 } 15783 15784 15785 /* 15786 * Function: sd_free_rqs 15787 * 15788 * Description: Cleanup for the per-instance RQS command. 15789 * 15790 * Context: Kernel thread context 15791 */ 15792 15793 static void 15794 sd_free_rqs(struct sd_lun *un) 15795 { 15796 ASSERT(un != NULL); 15797 15798 SD_TRACE(SD_LOG_IO_CORE, un, "sd_free_rqs: entry\n"); 15799 15800 /* 15801 * If consistent memory is bound to a scsi_pkt, the pkt 15802 * has to be destroyed *before* freeing the consistent memory. 15803 * Don't change the sequence of these operations. 15804 * scsi_destroy_pkt() might access memory, which isn't allowed, 15805 * after it was freed in scsi_free_consistent_buf(). 15806 */ 15807 if (un->un_rqs_pktp != NULL) { 15808 scsi_destroy_pkt(un->un_rqs_pktp); 15809 un->un_rqs_pktp = NULL; 15810 } 15811 15812 if (un->un_rqs_bp != NULL) { 15813 struct sd_xbuf *xp = SD_GET_XBUF(un->un_rqs_bp); 15814 if (xp != NULL) { 15815 kmem_free(xp, sizeof (struct sd_xbuf)); 15816 } 15817 scsi_free_consistent_buf(un->un_rqs_bp); 15818 un->un_rqs_bp = NULL; 15819 } 15820 SD_TRACE(SD_LOG_IO_CORE, un, "sd_free_rqs: exit\n"); 15821 } 15822 15823 15824 15825 /* 15826 * Function: sd_reduce_throttle 15827 * 15828 * Description: Reduces the maximum # of outstanding commands on a 15829 * target to the current number of outstanding commands. 15830 * Queues a timeout(9F) callback to restore the limit 15831 * after a specified interval has elapsed.
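 *
 * Editor's sketch (hypothetical names; an approximate model, not the
 * exact driver arithmetic) of the reduce/restore pair: TRAN_BUSY
 * remembers the old limit and snaps back to it, while QFULL rebuilds
 * the limit in ~10% increments up to the saved maximum.
 */
#if 0	/* illustrative sketch only; not part of the original driver */
struct unit {				/* simplified softstate */
	int	throttle;		/* current outstanding-cmd limit */
	int	saved_throttle;		/* configured maximum */
	int	busy_throttle;		/* limit saved at TRAN_BUSY time */
	int	ncmds_in_transport;
};

/* TRAN_BUSY: remember the old limit; QFULL: rebuild incrementally. */
static void
reduce_throttle(struct unit *un, int qfull)
{
	if (un->throttle <= 1)
		return;
	if (qfull)
		un->busy_throttle = 0;
	else if (un->busy_throttle == 0)
		un->busy_throttle = un->throttle;
	un->throttle = (un->ncmds_in_transport > 0) ?
	    un->ncmds_in_transport : 1;
}

/* Timeout callback: snap back (TRAN_BUSY) or ratchet up 10% (QFULL). */
static void
restore_throttle(struct unit *un)
{
	if (un->busy_throttle > 0) {
		un->throttle = un->busy_throttle;
		un->busy_throttle = 0;
	} else {
		int step = un->throttle / 10;

		un->throttle += (step > 1) ? step : 1;
		if (un->throttle > un->saved_throttle)
			un->throttle = un->saved_throttle;
	}
}
#endif
/*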
15832 * Typically used when we get a TRAN_BUSY return code 15833 * back from scsi_transport(). 15834 * 15835 * Arguments: un - ptr to the sd_lun softstate struct 15836 * throttle_type: SD_THROTTLE_TRAN_BUSY or SD_THROTTLE_QFULL 15837 * 15838 * Context: May be called from interrupt context 15839 */ 15840 15841 static void 15842 sd_reduce_throttle(struct sd_lun *un, int throttle_type) 15843 { 15844 ASSERT(un != NULL); 15845 ASSERT(mutex_owned(SD_MUTEX(un))); 15846 ASSERT(un->un_ncmds_in_transport >= 0); 15847 15848 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sd_reduce_throttle: " 15849 "entry: un:0x%p un_throttle:%d un_ncmds_in_transport:%d\n", 15850 un, un->un_throttle, un->un_ncmds_in_transport); 15851 15852 if (un->un_throttle > 1) { 15853 if (un->un_f_use_adaptive_throttle == TRUE) { 15854 switch (throttle_type) { 15855 case SD_THROTTLE_TRAN_BUSY: 15856 if (un->un_busy_throttle == 0) { 15857 un->un_busy_throttle = un->un_throttle; 15858 } 15859 break; 15860 case SD_THROTTLE_QFULL: 15861 un->un_busy_throttle = 0; 15862 break; 15863 default: 15864 ASSERT(FALSE); 15865 } 15866 15867 if (un->un_ncmds_in_transport > 0) { 15868 un->un_throttle = un->un_ncmds_in_transport; 15869 } 15870 15871 } else { 15872 if (un->un_ncmds_in_transport == 0) { 15873 un->un_throttle = 1; 15874 } else { 15875 un->un_throttle = un->un_ncmds_in_transport; 15876 } 15877 } 15878 } 15879 15880 /* Reschedule the timeout if none is currently active */ 15881 if (un->un_reset_throttle_timeid == NULL) { 15882 un->un_reset_throttle_timeid = timeout(sd_restore_throttle, 15883 un, SD_THROTTLE_RESET_INTERVAL); 15884 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15885 "sd_reduce_throttle: timeout scheduled!\n"); 15886 } 15887 15888 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sd_reduce_throttle: " 15889 "exit: un:0x%p un_throttle:%d\n", un, un->un_throttle); 15890 } 15891 15892 15893 15894 /* 15895 * Function: sd_restore_throttle 15896 * 15897 * Description: Callback function for timeout(9F). Resets the current 15898 * value of un->un_throttle to its default. 15899 * 15900 * Arguments: arg - pointer to associated softstate for the device. 15901 * 15902 * Context: May be called from interrupt context 15903 */ 15904 15905 static void 15906 sd_restore_throttle(void *arg) 15907 { 15908 struct sd_lun *un = arg; 15909 15910 ASSERT(un != NULL); 15911 ASSERT(!mutex_owned(SD_MUTEX(un))); 15912 15913 mutex_enter(SD_MUTEX(un)); 15914 15915 SD_TRACE(SD_LOG_IO | SD_LOG_ERROR, un, "sd_restore_throttle: " 15916 "entry: un:0x%p un_throttle:%d\n", un, un->un_throttle); 15917 15918 un->un_reset_throttle_timeid = NULL; 15919 15920 if (un->un_f_use_adaptive_throttle == TRUE) { 15921 /* 15922 * If un_busy_throttle is nonzero, then it contains the 15923 * value that un_throttle was when we got a TRAN_BUSY back 15924 * from scsi_transport(). We want to revert back to this 15925 * value. 15926 * 15927 * In the QFULL case, the throttle limit will incrementally 15928 * increase until it reaches max throttle. 15929 */ 15930 if (un->un_busy_throttle > 0) { 15931 un->un_throttle = un->un_busy_throttle; 15932 un->un_busy_throttle = 0; 15933 } else { 15934 /* 15935 * increase throttle by 10% open gate slowly, schedule 15936 * another restore if saved throttle has not been 15937 * reached 15938 */ 15939 short throttle; 15940 if (sd_qfull_throttle_enable) { 15941 throttle = un->un_throttle + 15942 max((un->un_throttle / 10), 1); 15943 un->un_throttle = 15944 (throttle < un->un_saved_throttle) ? 
throttle : un->un_saved_throttle; 15946 if (un->un_throttle < un->un_saved_throttle) { 15947 un->un_reset_throttle_timeid = 15948 timeout(sd_restore_throttle, 15949 un, 15950 SD_QFULL_THROTTLE_RESET_INTERVAL); 15951 } 15952 } 15953 } 15954 15955 /* 15956 * If un_throttle has fallen below the low-water mark, we 15957 * restore the maximum value here (and allow it to ratchet 15958 * down again if necessary). 15959 */ 15960 if (un->un_throttle < un->un_min_throttle) { 15961 un->un_throttle = un->un_saved_throttle; 15962 } 15963 } else { 15964 SD_TRACE(SD_LOG_IO | SD_LOG_ERROR, un, "sd_restore_throttle: " 15965 "restoring limit from 0x%x to 0x%x\n", 15966 un->un_throttle, un->un_saved_throttle); 15967 un->un_throttle = un->un_saved_throttle; 15968 } 15969 15970 SD_TRACE(SD_LOG_IO | SD_LOG_ERROR, un, 15971 "sd_restore_throttle: calling sd_start_cmds!\n"); 15972 15973 sd_start_cmds(un, NULL); 15974 15975 SD_TRACE(SD_LOG_IO | SD_LOG_ERROR, un, 15976 "sd_restore_throttle: exit: un:0x%p un_throttle:%d\n", 15977 un, un->un_throttle); 15978 15979 mutex_exit(SD_MUTEX(un)); 15980 15981 SD_TRACE(SD_LOG_IO | SD_LOG_ERROR, un, "sd_restore_throttle: exit\n"); 15982 } 15983 15984 /* 15985 * Function: sdrunout 15986 * 15987 * Description: Callback routine for scsi_init_pkt when a resource allocation 15988 * fails. 15989 * 15990 * Arguments: arg - a pointer to the sd_lun unit struct for the particular 15991 * soft state instance. 15992 * 15993 * Return Code: The scsi_init_pkt routine allows for the callback function to 15994 * return a 0 indicating the callback should be rescheduled or a 1 15995 * indicating not to reschedule. This routine always returns 1 15996 * because the driver always provides a callback function to 15997 * scsi_init_pkt. This results in a callback always being scheduled 15998 * (via the scsi_init_pkt callback implementation) if a resource 15999 * failure occurs. 16000 * 16001 * Context: This callback function may not block or call routines that block 16002 * 16003 * Note: Using the scsi_init_pkt callback facility can result in an I/O 16004 * request persisting at the head of the list which cannot be 16005 * satisfied even after multiple retries. In the future the driver 16006 * may implement some type of maximum runout count before failing 16007 * an I/O. 16008 */ 16009 16010 static int 16011 sdrunout(caddr_t arg) 16012 { 16013 struct sd_lun *un = (struct sd_lun *)arg; 16014 16015 ASSERT(un != NULL); 16016 ASSERT(!mutex_owned(SD_MUTEX(un))); 16017 16018 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sdrunout: entry\n"); 16019 16020 mutex_enter(SD_MUTEX(un)); 16021 sd_start_cmds(un, NULL); 16022 mutex_exit(SD_MUTEX(un)); 16023 /* 16024 * This callback routine always returns 1 (i.e. do not reschedule) 16025 * because we always specify sdrunout as the callback handler for 16026 * scsi_init_pkt inside the call to sd_start_cmds. 16027 */ 16028 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sdrunout: exit\n"); 16029 return (1); 16030 } 16031 16032 16033 /* 16034 * Function: sdintr 16035 * 16036 * Description: Completion callback routine for scsi_pkt(9S) structs 16037 * sent to the HBA driver via scsi_transport(9F).
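 *
 * Editor's sketch (hypothetical types): the completion-callback
 * convention sdintr() relies on. The HBA hands back only the pkt;
 * all per-command state is recovered from pkt_private, where it was
 * stashed when the pkt was prepared.
 */
#if 0	/* illustrative sketch only; not part of the original driver */
struct unit;				/* opaque softstate */

struct cmd {				/* hypothetical per-command state */
	struct unit	*un;
	int		retries;
};

struct pkt {				/* stand-in for scsi_pkt(9S) */
	void	*pkt_private;		/* caller's per-command cookie */
	int	pkt_reason;
};

extern void classify_and_finish(struct cmd *, int);

static void
done_cb(struct pkt *p)
{
	struct cmd *c = p->pkt_private;	/* stashed at pkt-init time */

	classify_and_finish(c, p->pkt_reason);	/* retry or complete */
}
#endif
/*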
16038 * 16039 * Context: Interrupt context 16040 */ 16041 16042 static void 16043 sdintr(struct scsi_pkt *pktp) 16044 { 16045 struct buf *bp; 16046 struct sd_xbuf *xp; 16047 struct sd_lun *un; 16048 size_t actual_len; 16049 sd_ssc_t *sscp; 16050 16051 ASSERT(pktp != NULL); 16052 bp = (struct buf *)pktp->pkt_private; 16053 ASSERT(bp != NULL); 16054 xp = SD_GET_XBUF(bp); 16055 ASSERT(xp != NULL); 16056 ASSERT(xp->xb_pktp != NULL); 16057 un = SD_GET_UN(bp); 16058 ASSERT(un != NULL); 16059 ASSERT(!mutex_owned(SD_MUTEX(un))); 16060 16061 #ifdef SD_FAULT_INJECTION 16062 16063 SD_INFO(SD_LOG_IOERR, un, "sdintr: sdintr calling Fault injection\n"); 16064 /* SD FaultInjection */ 16065 sd_faultinjection(pktp); 16066 16067 #endif /* SD_FAULT_INJECTION */ 16068 16069 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sdintr: entry: buf:0x%p," 16070 " xp:0x%p, un:0x%p\n", bp, xp, un); 16071 16072 mutex_enter(SD_MUTEX(un)); 16073 16074 ASSERT(un->un_fm_private != NULL); 16075 sscp = &((struct sd_fm_internal *)(un->un_fm_private))->fm_ssc; 16076 ASSERT(sscp != NULL); 16077 16078 /* Reduce the count of the #commands currently in transport */ 16079 un->un_ncmds_in_transport--; 16080 ASSERT(un->un_ncmds_in_transport >= 0); 16081 16082 /* Increment counter to indicate that the callback routine is active */ 16083 un->un_in_callback++; 16084 16085 SD_UPDATE_KSTATS(un, kstat_runq_exit, bp); 16086 16087 #ifdef SDDEBUG 16088 if (bp == un->un_retry_bp) { 16089 SD_TRACE(SD_LOG_IO | SD_LOG_ERROR, un, "sdintr: " 16090 "un:0x%p: GOT retry_bp:0x%p un_ncmds_in_transport:%d\n", 16091 un, un->un_retry_bp, un->un_ncmds_in_transport); 16092 } 16093 #endif 16094 16095 /* 16096 * If pkt_reason is CMD_DEV_GONE, fail the command, and update the media 16097 * state if needed. 16098 */ 16099 if (pktp->pkt_reason == CMD_DEV_GONE) { 16100 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 16101 "Command failed to complete...Device is gone\n"); 16102 if (un->un_mediastate != DKIO_DEV_GONE) { 16103 un->un_mediastate = DKIO_DEV_GONE; 16104 cv_broadcast(&un->un_state_cv); 16105 } 16106 sd_return_failed_command(un, bp, EIO); 16107 goto exit; 16108 } 16109 16110 if (pktp->pkt_state & STATE_XARQ_DONE) { 16111 SD_TRACE(SD_LOG_COMMON, un, 16112 "sdintr: extra sense data received. pkt=%p\n", pktp); 16113 } 16114 16115 /* 16116 * First see if the pkt has auto-request sense data with it.... 16117 * Look at the packet state first so we don't take a performance 16118 * hit looking at the arq enabled flag unless absolutely necessary. 16119 */ 16120 if ((pktp->pkt_state & STATE_ARQ_DONE) && 16121 (un->un_f_arq_enabled == TRUE)) { 16122 /* 16123 * The HBA did an auto request sense for this command so check 16124 * for FLAG_DIAGNOSE. If set this indicates a uscsi or internal 16125 * driver command that should not be retried. 16126 */ 16127 if ((pktp->pkt_flags & FLAG_DIAGNOSE) != 0) { 16128 /* 16129 * Save the relevant sense info into the xp for the 16130 * original cmd. 
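 *
 * Editor's sketch of the sense-length arithmetic used just below
 * (sizes here are illustrative, not the real SENSE_LENGTH values):
 * the count of valid sense bytes is the size of the buffer the HBA
 * actually filled minus the residual it reported for the
 * request-sense transfer.
 */
#if 0	/* illustrative sketch only; not part of the original driver */
#define	SKETCH_SENSE_LEN	20
#define	SKETCH_MAX_SENSE_LEN	252

static int
valid_sense_bytes(int xarq_done, int sense_resid)
{
	if (xarq_done || (sense_resid > SKETCH_SENSE_LEN))
		return (SKETCH_MAX_SENSE_LEN - sense_resid);
	return (SKETCH_SENSE_LEN - sense_resid);
}
#endif
/*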
16131 */ 16132 struct scsi_arq_status *asp; 16133 asp = (struct scsi_arq_status *)(pktp->pkt_scbp); 16134 xp->xb_sense_status = 16135 *((uchar_t *)(&(asp->sts_rqpkt_status))); 16136 xp->xb_sense_state = asp->sts_rqpkt_state; 16137 xp->xb_sense_resid = asp->sts_rqpkt_resid; 16138 if (pktp->pkt_state & STATE_XARQ_DONE) { 16139 actual_len = MAX_SENSE_LENGTH - 16140 xp->xb_sense_resid; 16141 bcopy(&asp->sts_sensedata, xp->xb_sense_data, 16142 MAX_SENSE_LENGTH); 16143 } else { 16144 if (xp->xb_sense_resid > SENSE_LENGTH) { 16145 actual_len = MAX_SENSE_LENGTH - 16146 xp->xb_sense_resid; 16147 } else { 16148 actual_len = SENSE_LENGTH - 16149 xp->xb_sense_resid; 16150 } 16151 if (xp->xb_pkt_flags & SD_XB_USCSICMD) { 16152 if ((((struct uscsi_cmd *) 16153 (xp->xb_pktinfo))->uscsi_rqlen) > 16154 actual_len) { 16155 xp->xb_sense_resid = 16156 (((struct uscsi_cmd *) 16157 (xp->xb_pktinfo))-> 16158 uscsi_rqlen) - actual_len; 16159 } else { 16160 xp->xb_sense_resid = 0; 16161 } 16162 } 16163 bcopy(&asp->sts_sensedata, xp->xb_sense_data, 16164 SENSE_LENGTH); 16165 } 16166 16167 /* fail the command */ 16168 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 16169 "sdintr: arq done and FLAG_DIAGNOSE set\n"); 16170 sd_return_failed_command(un, bp, EIO); 16171 goto exit; 16172 } 16173 16174 #if (defined(__i386) || defined(__amd64)) /* DMAFREE for x86 only */ 16175 /* 16176 * We want to either retry or fail this command, so free 16177 * the DMA resources here. If we retry the command then 16178 * the DMA resources will be reallocated in sd_start_cmds(). 16179 * Note that when PKT_DMA_PARTIAL is used, this reallocation 16180 * causes the *entire* transfer to start over again from the 16181 * beginning of the request, even for PARTIAL chunks that 16182 * have already transferred successfully. 16183 */ 16184 if ((un->un_f_is_fibre == TRUE) && 16185 ((xp->xb_pkt_flags & SD_XB_USCSICMD) == 0) && 16186 ((pktp->pkt_flags & FLAG_SENSING) == 0)) { 16187 scsi_dmafree(pktp); 16188 xp->xb_pkt_flags |= SD_XB_DMA_FREED; 16189 } 16190 #endif 16191 16192 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 16193 "sdintr: arq done, sd_handle_auto_request_sense\n"); 16194 16195 sd_handle_auto_request_sense(un, bp, xp, pktp); 16196 goto exit; 16197 } 16198 16199 /* Next see if this is the REQUEST SENSE pkt for the instance */ 16200 if (pktp->pkt_flags & FLAG_SENSING) { 16201 /* This pktp is from the unit's REQUEST_SENSE command */ 16202 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 16203 "sdintr: sd_handle_request_sense\n"); 16204 sd_handle_request_sense(un, bp, xp, pktp); 16205 goto exit; 16206 } 16207 16208 /* 16209 * Check to see if the command successfully completed as requested; 16210 * this is the most common case (and also the hot performance path). 16211 * 16212 * Requirements for successful completion are: 16213 * pkt_reason is CMD_CMPLT and packet status is status good. 16214 * In addition: 16215 * - A residual of zero indicates successful completion no matter what 16216 * the command is. 16217 * - If the residual is not zero and the command is not a read or 16218 * write, then it's still defined as successful completion. In other 16219 * words, if the command is a read or write the residual must be 16220 * zero for successful completion. 16221 * - If the residual is not zero and the command is a read or 16222 * write, and it's a USCSICMD, then it's still defined as 16223 * successful completion. 
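 *
 * Editor's sketch: the success rules above folded into a single
 * predicate (the reason/status constants are stand-ins, not the real
 * CMD_CMPLT/STATUS_GOOD values).
 */
#if 0	/* illustrative sketch only; not part of the original driver */
#define	R_CMPLT	0			/* stand-in for CMD_CMPLT */
#define	S_GOOD	0			/* stand-in for STATUS_GOOD */

static int
completed_ok(int reason, int status, long resid, int is_rw, int is_uscsi)
{
	if ((reason != R_CMPLT) || (status != S_GOOD))
		return (0);
	if (resid == 0)
		return (1);	/* zero residual: always success */
	if (!is_rw)
		return (1);	/* residual tolerated if not read/write */
	return (is_uscsi);	/* r/w residual: success only for USCSI */
}
#endif
/*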
16224 */ 16225 if ((pktp->pkt_reason == CMD_CMPLT) && 16226 (SD_GET_PKT_STATUS(pktp) == STATUS_GOOD)) { 16227 16228 /* 16229 * Since this command is returned with a good status, we 16230 * can reset the count for Sonoma failover. 16231 */ 16232 un->un_sonoma_failure_count = 0; 16233 16234 /* 16235 * Return all USCSI commands on good status 16236 */ 16237 if (pktp->pkt_resid == 0) { 16238 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 16239 "sdintr: returning command for resid == 0\n"); 16240 } else if (((SD_GET_PKT_OPCODE(pktp) & 0x1F) != SCMD_READ) && 16241 ((SD_GET_PKT_OPCODE(pktp) & 0x1F) != SCMD_WRITE)) { 16242 SD_UPDATE_B_RESID(bp, pktp); 16243 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 16244 "sdintr: returning command for resid != 0\n"); 16245 } else if (xp->xb_pkt_flags & SD_XB_USCSICMD) { 16246 SD_UPDATE_B_RESID(bp, pktp); 16247 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 16248 "sdintr: returning uscsi command\n"); 16249 } else { 16250 goto not_successful; 16251 } 16252 sd_return_command(un, bp); 16253 16254 /* 16255 * Decrement counter to indicate that the callback routine 16256 * is done. 16257 */ 16258 un->un_in_callback--; 16259 ASSERT(un->un_in_callback >= 0); 16260 mutex_exit(SD_MUTEX(un)); 16261 16262 return; 16263 } 16264 16265 not_successful: 16266 16267 #if (defined(__i386) || defined(__amd64)) /* DMAFREE for x86 only */ 16268 /* 16269 * The following is based upon knowledge of the underlying transport 16270 * and its use of DMA resources. This code should be removed when 16271 * PKT_DMA_PARTIAL support is taken out of the disk driver in favor 16272 * of the new PKT_CMD_BREAKUP protocol. See also sd_initpkt_for_buf() 16273 * and sd_start_cmds(). 16274 * 16275 * Free any DMA resources associated with this command if there 16276 * is a chance it could be retried or enqueued for later retry. 16277 * If we keep the DMA binding then mpxio cannot reissue the 16278 * command on another path whenever a path failure occurs. 16279 * 16280 * Note that when PKT_DMA_PARTIAL is used, free/reallocation 16281 * causes the *entire* transfer to start over again from the 16282 * beginning of the request, even for PARTIAL chunks that 16283 * have already transferred successfully. 16284 * 16285 * This is only done for non-uscsi commands (and also skipped for the 16286 * driver's internal RQS command). Also just do this for Fibre Channel 16287 * devices as these are the only ones that support mpxio. 16288 */ 16289 if ((un->un_f_is_fibre == TRUE) && 16290 ((xp->xb_pkt_flags & SD_XB_USCSICMD) == 0) && 16291 ((pktp->pkt_flags & FLAG_SENSING) == 0)) { 16292 scsi_dmafree(pktp); 16293 xp->xb_pkt_flags |= SD_XB_DMA_FREED; 16294 } 16295 #endif 16296 16297 /* 16298 * The command did not successfully complete as requested so check 16299 * for FLAG_DIAGNOSE. If set this indicates a uscsi or internal 16300 * driver command that should not be retried so just return. If 16301 * FLAG_DIAGNOSE is not set the error will be processed below. 16302 */ 16303 if ((pktp->pkt_flags & FLAG_DIAGNOSE) != 0) { 16304 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 16305 "sdintr: FLAG_DIAGNOSE: sd_return_failed_command\n"); 16306 /* 16307 * Issue a request sense if a check condition caused the error 16308 * (we handle the auto request sense case above), otherwise 16309 * just fail the command. 
         */
        if ((pktp->pkt_reason == CMD_CMPLT) &&
            (SD_GET_PKT_STATUS(pktp) == STATUS_CHECK)) {
            sd_send_request_sense_command(un, bp, pktp);
        } else {
            sd_return_failed_command(un, bp, EIO);
        }
        goto exit;
    }

    /*
     * The command did not successfully complete as requested so process
     * the error, retry, and/or attempt recovery.
     */
    switch (pktp->pkt_reason) {
    case CMD_CMPLT:
        switch (SD_GET_PKT_STATUS(pktp)) {
        case STATUS_GOOD:
            /*
             * The command completed successfully with a non-zero
             * residual
             */
            SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
                "sdintr: STATUS_GOOD \n");
            sd_pkt_status_good(un, bp, xp, pktp);
            break;

        case STATUS_CHECK:
        case STATUS_TERMINATED:
            SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
                "sdintr: STATUS_TERMINATED | STATUS_CHECK\n");
            sd_pkt_status_check_condition(un, bp, xp, pktp);
            break;

        case STATUS_BUSY:
            SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
                "sdintr: STATUS_BUSY\n");
            sd_pkt_status_busy(un, bp, xp, pktp);
            break;

        case STATUS_RESERVATION_CONFLICT:
            SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
                "sdintr: STATUS_RESERVATION_CONFLICT\n");
            sd_pkt_status_reservation_conflict(un, bp, xp, pktp);
            break;

        case STATUS_QFULL:
            SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
                "sdintr: STATUS_QFULL\n");
            sd_pkt_status_qfull(un, bp, xp, pktp);
            break;

        case STATUS_MET:
        case STATUS_INTERMEDIATE:
        case STATUS_SCSI2:
        case STATUS_INTERMEDIATE_MET:
        case STATUS_ACA_ACTIVE:
            scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
                "Unexpected SCSI status received: 0x%x\n",
                SD_GET_PKT_STATUS(pktp));
            /*
             * Mark the ssc_flags when an invalid status code is
             * detected for a non-USCSI command.
             */
            if (!(xp->xb_pkt_flags & SD_XB_USCSICMD)) {
                sd_ssc_set_info(sscp, SSC_FLAGS_INVALID_STATUS,
                    "stat-code");
            }
            sd_return_failed_command(un, bp, EIO);
            break;

        default:
            scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
                "Invalid SCSI status received: 0x%x\n",
                SD_GET_PKT_STATUS(pktp));
            if (!(xp->xb_pkt_flags & SD_XB_USCSICMD)) {
                sd_ssc_set_info(sscp, SSC_FLAGS_INVALID_STATUS,
                    "stat-code");
            }
            sd_return_failed_command(un, bp, EIO);
            break;

        }
        break;

    case CMD_INCOMPLETE:
        SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
            "sdintr: CMD_INCOMPLETE\n");
        sd_pkt_reason_cmd_incomplete(un, bp, xp, pktp);
        break;
    case CMD_TRAN_ERR:
        SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
            "sdintr: CMD_TRAN_ERR\n");
        sd_pkt_reason_cmd_tran_err(un, bp, xp, pktp);
        break;
    case CMD_RESET:
        SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
            "sdintr: CMD_RESET \n");
        sd_pkt_reason_cmd_reset(un, bp, xp, pktp);
        break;
    case CMD_ABORTED:
        SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
            "sdintr: CMD_ABORTED \n");
        sd_pkt_reason_cmd_aborted(un, bp, xp, pktp);
        break;
    case CMD_TIMEOUT:
        SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
            "sdintr: CMD_TIMEOUT\n");
        sd_pkt_reason_cmd_timeout(un, bp, xp, pktp);
        break;
    case CMD_UNX_BUS_FREE:
        SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
            "sdintr: CMD_UNX_BUS_FREE \n");
        sd_pkt_reason_cmd_unx_bus_free(un, bp, xp, pktp);
        break;
    case CMD_TAG_REJECT:
        SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
            "sdintr: CMD_TAG_REJECT\n");
        sd_pkt_reason_cmd_tag_reject(un, bp, xp, pktp);
        break;
    default:
        SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
            "sdintr: default\n");
        /*
         * Mark the ssc_flags for detecting an invalid pkt_reason.
         */
        if (!(xp->xb_pkt_flags & SD_XB_USCSICMD)) {
            sd_ssc_set_info(sscp, SSC_FLAGS_INVALID_PKT_REASON,
                "pkt-reason");
        }
        sd_pkt_reason_default(un, bp, xp, pktp);
        break;
    }

exit:
    SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sdintr: exit\n");

    /* Decrement counter to indicate that the callback routine is done. */
    un->un_in_callback--;
    ASSERT(un->un_in_callback >= 0);

    /*
     * At this point, the pkt has been dispatched, ie, it is either
     * being re-tried or has been returned to its caller and should
     * not be referenced.
     */

    mutex_exit(SD_MUTEX(un));
}


/*
 * Function: sd_print_incomplete_msg
 *
 * Description: Prints the error message for a CMD_INCOMPLETE error.
 *
 * Arguments: un - ptr to associated softstate for the device.
 *            bp - ptr to the buf(9S) for the command.
 *            arg - message string ptr
 *            code - SD_DELAYED_RETRY_ISSUED, SD_IMMEDIATE_RETRY_ISSUED,
 *                   or SD_NO_RETRY_ISSUED.
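 *
 * For example, a failed command that will not be retried is logged
 * as "incomplete read- giving up" (assuming arg points to "read").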
 *
 * Context: May be called under interrupt context
 */

static void
sd_print_incomplete_msg(struct sd_lun *un, struct buf *bp, void *arg, int code)
{
    struct scsi_pkt *pktp;
    char *msgp;
    char *cmdp = arg;

    ASSERT(un != NULL);
    ASSERT(mutex_owned(SD_MUTEX(un)));
    ASSERT(bp != NULL);
    ASSERT(arg != NULL);
    pktp = SD_GET_PKTP(bp);
    ASSERT(pktp != NULL);

    switch (code) {
    case SD_DELAYED_RETRY_ISSUED:
    case SD_IMMEDIATE_RETRY_ISSUED:
        msgp = "retrying";
        break;
    case SD_NO_RETRY_ISSUED:
    default:
        msgp = "giving up";
        break;
    }

    if ((pktp->pkt_flags & FLAG_SILENT) == 0) {
        scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
            "incomplete %s- %s\n", cmdp, msgp);
    }
}


/*
 * Function: sd_pkt_status_good
 *
 * Description: Processing for a STATUS_GOOD code in pkt_status.
 *
 * Context: May be called under interrupt context
 */

static void
sd_pkt_status_good(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp)
{
    char *cmdp;

    ASSERT(un != NULL);
    ASSERT(mutex_owned(SD_MUTEX(un)));
    ASSERT(bp != NULL);
    ASSERT(xp != NULL);
    ASSERT(pktp != NULL);
    ASSERT(pktp->pkt_reason == CMD_CMPLT);
    ASSERT(SD_GET_PKT_STATUS(pktp) == STATUS_GOOD);
    ASSERT(pktp->pkt_resid != 0);

    SD_TRACE(SD_LOG_IO_CORE, un, "sd_pkt_status_good: entry\n");

    SD_UPDATE_ERRSTATS(un, sd_harderrs);
    switch (SD_GET_PKT_OPCODE(pktp) & 0x1F) {
    case SCMD_READ:
        cmdp = "read";
        break;
    case SCMD_WRITE:
        cmdp = "write";
        break;
    default:
        SD_UPDATE_B_RESID(bp, pktp);
        sd_return_command(un, bp);
        SD_TRACE(SD_LOG_IO_CORE, un, "sd_pkt_status_good: exit\n");
        return;
    }

    /*
     * See if we can retry the read/write, preferably immediately.
     * If retries are exhausted, then sd_retry_command() will update
     * the b_resid count.
     */
    sd_retry_command(un, bp, SD_RETRIES_STANDARD, sd_print_incomplete_msg,
        cmdp, EIO, (clock_t)0, NULL);

    SD_TRACE(SD_LOG_IO_CORE, un, "sd_pkt_status_good: exit\n");
}


/*
 * Function: sd_handle_request_sense
 *
 * Description: Processing for non-auto Request Sense command.
 *
 * Arguments: un - ptr to associated softstate
 *            sense_bp - ptr to buf(9S) for the RQS command
 *            sense_xp - ptr to the sd_xbuf for the RQS command
 *            sense_pktp - ptr to the scsi_pkt(9S) for the RQS command
 *
 * Context: May be called under interrupt context
 */

static void
sd_handle_request_sense(struct sd_lun *un, struct buf *sense_bp,
    struct sd_xbuf *sense_xp, struct scsi_pkt *sense_pktp)
{
    struct buf *cmd_bp;            /* buf for the original command */
    struct sd_xbuf *cmd_xp;        /* sd_xbuf for the original command */
    struct scsi_pkt *cmd_pktp;     /* pkt for the original command */
    size_t actual_len;             /* actual sense data length */

    ASSERT(un != NULL);
    ASSERT(mutex_owned(SD_MUTEX(un)));
    ASSERT(sense_bp != NULL);
    ASSERT(sense_xp != NULL);
    ASSERT(sense_pktp != NULL);

    /*
     * Note the sense_bp, sense_xp, and sense_pktp here are for the
     * RQS command and not the original command.
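     *
     * (The ASSERTs below verify that this really is the unit's
     * dedicated RQS packet and that the original command is still
     * marked as being sensed.)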
     */
    ASSERT(sense_pktp == un->un_rqs_pktp);
    ASSERT(sense_bp == un->un_rqs_bp);
    ASSERT((sense_pktp->pkt_flags & (FLAG_SENSING | FLAG_HEAD)) ==
        (FLAG_SENSING | FLAG_HEAD));
    ASSERT((((SD_GET_XBUF(sense_xp->xb_sense_bp))->xb_pktp->pkt_flags) &
        FLAG_SENSING) == FLAG_SENSING);

    /* These are the bp, xp, and pktp for the original command */
    cmd_bp = sense_xp->xb_sense_bp;
    cmd_xp = SD_GET_XBUF(cmd_bp);
    cmd_pktp = SD_GET_PKTP(cmd_bp);

    if (sense_pktp->pkt_reason != CMD_CMPLT) {
        /*
         * The REQUEST SENSE command failed.  Release the REQUEST
         * SENSE command for re-use, get back the bp for the original
         * command, and attempt to re-try the original command if
         * FLAG_DIAGNOSE is not set in the original packet.
         */
        SD_UPDATE_ERRSTATS(un, sd_harderrs);
        if ((cmd_pktp->pkt_flags & FLAG_DIAGNOSE) == 0) {
            cmd_bp = sd_mark_rqs_idle(un, sense_xp);
            sd_retry_command(un, cmd_bp, SD_RETRIES_STANDARD,
                NULL, NULL, EIO, (clock_t)0, NULL);
            return;
        }
    }

    /*
     * Save the relevant sense info into the xp for the original cmd.
     *
     * Note: if the request sense failed the state info will be zero
     * as set in sd_mark_rqs_busy()
     */
    cmd_xp->xb_sense_status = *(sense_pktp->pkt_scbp);
    cmd_xp->xb_sense_state = sense_pktp->pkt_state;
    actual_len = MAX_SENSE_LENGTH - sense_pktp->pkt_resid;
    if ((cmd_xp->xb_pkt_flags & SD_XB_USCSICMD) &&
        (((struct uscsi_cmd *)cmd_xp->xb_pktinfo)->uscsi_rqlen >
        SENSE_LENGTH)) {
        bcopy(sense_bp->b_un.b_addr, cmd_xp->xb_sense_data,
            MAX_SENSE_LENGTH);
        cmd_xp->xb_sense_resid = sense_pktp->pkt_resid;
    } else {
        bcopy(sense_bp->b_un.b_addr, cmd_xp->xb_sense_data,
            SENSE_LENGTH);
        if (actual_len < SENSE_LENGTH) {
            cmd_xp->xb_sense_resid = SENSE_LENGTH - actual_len;
        } else {
            cmd_xp->xb_sense_resid = 0;
        }
    }

    /*
     * Free up the RQS command....
     * NOTE:
     *   Must do this BEFORE calling sd_validate_sense_data!
     *   sd_validate_sense_data may return the original command in
     *   which case the pkt will be freed and the flags can no
     *   longer be touched.
     *   SD_MUTEX is held through this process until the command
     *   is dispatched based upon the sense data, so there are
     *   no race conditions.
     */
    (void) sd_mark_rqs_idle(un, sense_xp);

    /*
     * For a retryable command see if we have valid sense data, if so then
     * turn it over to sd_decode_sense() to figure out the right course of
     * action. Just fail a non-retryable command.
     */
    if ((cmd_pktp->pkt_flags & FLAG_DIAGNOSE) == 0) {
        if (sd_validate_sense_data(un, cmd_bp, cmd_xp, actual_len) ==
            SD_SENSE_DATA_IS_VALID) {
            sd_decode_sense(un, cmd_bp, cmd_xp, cmd_pktp);
        }
    } else {
        SD_DUMP_MEMORY(un, SD_LOG_IO_CORE, "Failed CDB",
            (uchar_t *)cmd_pktp->pkt_cdbp, CDB_SIZE, SD_LOG_HEX);
        SD_DUMP_MEMORY(un, SD_LOG_IO_CORE, "Sense Data",
            (uchar_t *)cmd_xp->xb_sense_data, SENSE_LENGTH, SD_LOG_HEX);
        sd_return_failed_command(un, cmd_bp, EIO);
    }
}


/*
 * Function: sd_handle_auto_request_sense
 *
 * Description: Processing for auto-request sense information.
 *
 * Arguments: un - ptr to associated softstate
 *            bp - ptr to buf(9S) for the command
 *            xp - ptr to the sd_xbuf for the command
 *            pktp - ptr to the scsi_pkt(9S) for the command
 *
 * Context: May be called under interrupt context
 */

static void
sd_handle_auto_request_sense(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp)
{
    struct scsi_arq_status *asp;
    size_t actual_len;

    ASSERT(un != NULL);
    ASSERT(mutex_owned(SD_MUTEX(un)));
    ASSERT(bp != NULL);
    ASSERT(xp != NULL);
    ASSERT(pktp != NULL);
    ASSERT(pktp != un->un_rqs_pktp);
    ASSERT(bp != un->un_rqs_bp);

    /*
     * For auto-request sense, we get a scsi_arq_status back from
     * the HBA, with the sense data in the sts_sensedata member.
     * The pkt_scbp of the packet points to this scsi_arq_status.
     */
    asp = (struct scsi_arq_status *)(pktp->pkt_scbp);

    if (asp->sts_rqpkt_reason != CMD_CMPLT) {
        /*
         * The auto REQUEST SENSE failed; see if we can re-try
         * the original command.
         */
        scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
            "auto request sense failed (reason=%s)\n",
            scsi_rname(asp->sts_rqpkt_reason));

        sd_reset_target(un, pktp);

        sd_retry_command(un, bp, SD_RETRIES_STANDARD,
            NULL, NULL, EIO, (clock_t)0, NULL);
        return;
    }

    /* Save the relevant sense info into the xp for the original cmd. */
    xp->xb_sense_status = *((uchar_t *)(&(asp->sts_rqpkt_status)));
    xp->xb_sense_state = asp->sts_rqpkt_state;
    xp->xb_sense_resid = asp->sts_rqpkt_resid;
    if (xp->xb_sense_state & STATE_XARQ_DONE) {
        actual_len = MAX_SENSE_LENGTH - xp->xb_sense_resid;
        bcopy(&asp->sts_sensedata, xp->xb_sense_data,
            MAX_SENSE_LENGTH);
    } else {
        if (xp->xb_sense_resid > SENSE_LENGTH) {
            actual_len = MAX_SENSE_LENGTH - xp->xb_sense_resid;
        } else {
            actual_len = SENSE_LENGTH - xp->xb_sense_resid;
        }
        if (xp->xb_pkt_flags & SD_XB_USCSICMD) {
            if ((((struct uscsi_cmd *)
                (xp->xb_pktinfo))->uscsi_rqlen) > actual_len) {
                xp->xb_sense_resid = (((struct uscsi_cmd *)
                    (xp->xb_pktinfo))->uscsi_rqlen) -
                    actual_len;
            } else {
                xp->xb_sense_resid = 0;
            }
        }
        bcopy(&asp->sts_sensedata, xp->xb_sense_data, SENSE_LENGTH);
    }

    /*
     * See if we have valid sense data, if so then turn it over to
     * sd_decode_sense() to figure out the right course of action.
     */
    if (sd_validate_sense_data(un, bp, xp, actual_len) ==
        SD_SENSE_DATA_IS_VALID) {
        sd_decode_sense(un, bp, xp, pktp);
    }
}


/*
 * Function: sd_print_sense_failed_msg
 *
 * Description: Print log message when RQS has failed.
 *
 * Arguments: un - ptr to associated softstate
 *            bp - ptr to buf(9S) for the command
 *            arg - generic message string ptr
 *            code - SD_IMMEDIATE_RETRY_ISSUED, SD_DELAYED_RETRY_ISSUED,
 *                   or SD_NO_RETRY_ISSUED
 *
 * Context: May be called from interrupt context
 */

static void
sd_print_sense_failed_msg(struct sd_lun *un, struct buf *bp, void *arg,
    int code)
{
    char *msgp = arg;

    ASSERT(un != NULL);
    ASSERT(mutex_owned(SD_MUTEX(un)));
    ASSERT(bp != NULL);

    if ((code == SD_NO_RETRY_ISSUED) && (msgp != NULL)) {
        scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, msgp);
    }
}


/*
 * Function: sd_validate_sense_data
 *
 * Description: Check the given sense data for validity.
 *              If the sense data is not valid, the command will
 *              be either failed or retried!
 *
 * Return Code: SD_SENSE_DATA_IS_INVALID
 *              SD_SENSE_DATA_IS_VALID
 *
 * Context: May be called from interrupt context
 */

static int
sd_validate_sense_data(struct sd_lun *un, struct buf *bp, struct sd_xbuf *xp,
    size_t actual_len)
{
    struct scsi_extended_sense *esp;
    struct scsi_pkt *pktp;
    char *msgp = NULL;
    sd_ssc_t *sscp;

    ASSERT(un != NULL);
    ASSERT(mutex_owned(SD_MUTEX(un)));
    ASSERT(bp != NULL);
    ASSERT(bp != un->un_rqs_bp);
    ASSERT(xp != NULL);
    ASSERT(un->un_fm_private != NULL);

    pktp = SD_GET_PKTP(bp);
    ASSERT(pktp != NULL);

    sscp = &((struct sd_fm_internal *)(un->un_fm_private))->fm_ssc;
    ASSERT(sscp != NULL);

    /*
     * Check the status of the RQS command (auto or manual).
     */
    switch (xp->xb_sense_status & STATUS_MASK) {
    case STATUS_GOOD:
        break;

    case STATUS_RESERVATION_CONFLICT:
        sd_pkt_status_reservation_conflict(un, bp, xp, pktp);
        return (SD_SENSE_DATA_IS_INVALID);

    case STATUS_BUSY:
        scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
            "Busy Status on REQUEST SENSE\n");
        sd_retry_command(un, bp, SD_RETRIES_BUSY, NULL,
            NULL, EIO, un->un_busy_timeout / 500, kstat_waitq_enter);
        return (SD_SENSE_DATA_IS_INVALID);

    case STATUS_QFULL:
        scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
            "QFULL Status on REQUEST SENSE\n");
        sd_retry_command(un, bp, SD_RETRIES_STANDARD, NULL,
            NULL, EIO, un->un_busy_timeout / 500, kstat_waitq_enter);
        return (SD_SENSE_DATA_IS_INVALID);

    case STATUS_CHECK:
    case STATUS_TERMINATED:
        msgp = "Check Condition on REQUEST SENSE\n";
        goto sense_failed;

    default:
        msgp = "Not STATUS_GOOD on REQUEST_SENSE\n";
        goto sense_failed;
    }

    /*
     * See if we got the minimum required amount of sense data.
     * Note: We are assuming the returned sense data is SENSE_LENGTH bytes
     * or less.
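     *
     * (STATE_XFERRED_DATA must be set and at least SUN_MIN_SENSE_LENGTH
     * bytes must have arrived before the sense bytes are interpreted
     * below.)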
     */
    if (((xp->xb_sense_state & STATE_XFERRED_DATA) == 0) ||
        (actual_len == 0)) {
        msgp = "Request Sense couldn't get sense data\n";
        goto sense_failed;
    }

    if (actual_len < SUN_MIN_SENSE_LENGTH) {
        msgp = "Not enough sense information\n";
        /* Mark the ssc_flags for detecting invalid sense data */
        if (!(xp->xb_pkt_flags & SD_XB_USCSICMD)) {
            sd_ssc_set_info(sscp, SSC_FLAGS_INVALID_SENSE,
                "sense-data");
        }
        goto sense_failed;
    }

    /*
     * We require the extended sense data
     */
    esp = (struct scsi_extended_sense *)xp->xb_sense_data;
    if (esp->es_class != CLASS_EXTENDED_SENSE) {
        if ((pktp->pkt_flags & FLAG_SILENT) == 0) {
            static char tmp[8];
            static char buf[148];
            char *p = (char *)(xp->xb_sense_data);
            int i;

            mutex_enter(&sd_sense_mutex);
            (void) strcpy(buf, "undecodable sense information:");
            for (i = 0; i < actual_len; i++) {
                (void) sprintf(tmp, " 0x%x", *(p++)&0xff);
                (void) strcpy(&buf[strlen(buf)], tmp);
            }
            i = strlen(buf);
            (void) strcpy(&buf[i], "-(assumed fatal)\n");
            scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, buf);
            mutex_exit(&sd_sense_mutex);
        }

        /* Mark the ssc_flags for detecting invalid sense data */
        if (!(xp->xb_pkt_flags & SD_XB_USCSICMD)) {
            sd_ssc_set_info(sscp, SSC_FLAGS_INVALID_SENSE,
                "sense-data");
        }

        /* Note: Legacy behavior, fail the command with no retry */
        sd_return_failed_command(un, bp, EIO);
        return (SD_SENSE_DATA_IS_INVALID);
    }

    /*
     * Check that es_code is valid (es_class concatenated with es_code
     * makes up the "response code" field).  es_class will always be 7,
     * so make sure es_code is 0, 1, 2, 3 or 0xf; es_code indicates the
     * sense data format.
     */
    if ((esp->es_code != CODE_FMT_FIXED_CURRENT) &&
        (esp->es_code != CODE_FMT_FIXED_DEFERRED) &&
        (esp->es_code != CODE_FMT_DESCR_CURRENT) &&
        (esp->es_code != CODE_FMT_DESCR_DEFERRED) &&
        (esp->es_code != CODE_FMT_VENDOR_SPECIFIC)) {
        /* Mark the ssc_flags for detecting invalid sense data */
        if (!(xp->xb_pkt_flags & SD_XB_USCSICMD)) {
            sd_ssc_set_info(sscp, SSC_FLAGS_INVALID_SENSE,
                "sense-data");
        }
        goto sense_failed;
    }

    return (SD_SENSE_DATA_IS_VALID);

sense_failed:
    /*
     * If the request sense failed (for whatever reason), attempt
     * to retry the original command.
     */
#if defined(__i386) || defined(__amd64)
    /*
     * SD_RETRY_DELAY is conditionally compiled (#if fibre) in
     * sddef.h for the SPARC platform, while x86 uses one binary
     * for both SCSI and FC.  The delay value used here needs to be
     * adjusted whenever SD_RETRY_DELAY changes in sddef.h.
     */
    sd_retry_command(un, bp, SD_RETRIES_STANDARD,
        sd_print_sense_failed_msg, msgp, EIO,
        un->un_f_is_fibre ? drv_usectohz(100000) : (clock_t)0, NULL);
#else
    sd_retry_command(un, bp, SD_RETRIES_STANDARD,
        sd_print_sense_failed_msg, msgp, EIO, SD_RETRY_DELAY, NULL);
#endif

    return (SD_SENSE_DATA_IS_INVALID);
}

/*
 * Function: sd_decode_sense
 *
 * Description: Take recovery action(s) when SCSI Sense Data is received.
 *
 * Context: Interrupt context.
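 *
 * The sense key is extracted with scsi_sense_key() and dispatched to
 * the matching sd_sense_key_*() handler below.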
 */

static void
sd_decode_sense(struct sd_lun *un, struct buf *bp, struct sd_xbuf *xp,
    struct scsi_pkt *pktp)
{
    uint8_t sense_key;

    ASSERT(un != NULL);
    ASSERT(mutex_owned(SD_MUTEX(un)));
    ASSERT(bp != NULL);
    ASSERT(bp != un->un_rqs_bp);
    ASSERT(xp != NULL);
    ASSERT(pktp != NULL);

    sense_key = scsi_sense_key(xp->xb_sense_data);

    switch (sense_key) {
    case KEY_NO_SENSE:
        sd_sense_key_no_sense(un, bp, xp, pktp);
        break;
    case KEY_RECOVERABLE_ERROR:
        sd_sense_key_recoverable_error(un, xp->xb_sense_data,
            bp, xp, pktp);
        break;
    case KEY_NOT_READY:
        sd_sense_key_not_ready(un, xp->xb_sense_data,
            bp, xp, pktp);
        break;
    case KEY_MEDIUM_ERROR:
    case KEY_HARDWARE_ERROR:
        sd_sense_key_medium_or_hardware_error(un,
            xp->xb_sense_data, bp, xp, pktp);
        break;
    case KEY_ILLEGAL_REQUEST:
        sd_sense_key_illegal_request(un, bp, xp, pktp);
        break;
    case KEY_UNIT_ATTENTION:
        sd_sense_key_unit_attention(un, xp->xb_sense_data,
            bp, xp, pktp);
        break;
    case KEY_WRITE_PROTECT:
    case KEY_VOLUME_OVERFLOW:
    case KEY_MISCOMPARE:
        sd_sense_key_fail_command(un, bp, xp, pktp);
        break;
    case KEY_BLANK_CHECK:
        sd_sense_key_blank_check(un, bp, xp, pktp);
        break;
    case KEY_ABORTED_COMMAND:
        sd_sense_key_aborted_command(un, bp, xp, pktp);
        break;
    case KEY_VENDOR_UNIQUE:
    case KEY_COPY_ABORTED:
    case KEY_EQUAL:
    case KEY_RESERVED:
    default:
        sd_sense_key_default(un, xp->xb_sense_data,
            bp, xp, pktp);
        break;
    }
}


/*
 * Function: sd_dump_memory
 *
 * Description: Debug logging routine to print the contents of a user provided
 *              buffer.  The output of the buffer is broken up into 256 byte
 *              segments due to a size constraint of the scsi_log
 *              implementation.
 *
 * Arguments: un - ptr to softstate
 *            comp - component mask
 *            title - "title" string to precede data when printed
 *            data - ptr to data block to be printed
 *            len - size of data block to be printed
 *            fmt - SD_LOG_HEX (use 0x%02x format) or SD_LOG_CHAR (use %c)
 *
 * Context: May be called from interrupt context
 */

#define	SD_DUMP_MEMORY_BUF_SIZE	256

static char *sd_dump_format_string[] = {
    " 0x%02x",
    " %c"
};

static void
sd_dump_memory(struct sd_lun *un, uint_t comp, char *title, uchar_t *data,
    int len, int fmt)
{
    int i, j;
    int avail_count;
    int start_offset;
    int end_offset;
    size_t entry_len;
    char *bufp;
    char *local_buf;
    char *format_string;

    ASSERT((fmt == SD_LOG_HEX) || (fmt == SD_LOG_CHAR));

    /*
     * In the debug version of the driver, this function is called from a
     * number of places which are NOPs in the release driver.
     * The debug driver therefore has additional methods of filtering
     * debug output.
     */
#ifdef SDDEBUG
    /*
     * In the debug version of the driver we can reduce the amount of debug
     * messages by setting sd_error_level to something other than
     * SCSI_ERR_ALL and clearing bits in sd_level_mask and
     * sd_component_mask.
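     *
     * For example, clearing both SD_LOGMASK_DUMP_MEM and SD_LOGMASK_DIAG
     * in sd_level_mask suppresses this routine's output entirely.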
     */
    if (((sd_level_mask & (SD_LOGMASK_DUMP_MEM | SD_LOGMASK_DIAG)) == 0) ||
        (sd_error_level != SCSI_ERR_ALL)) {
        return;
    }
    if (((sd_component_mask & comp) == 0) ||
        (sd_error_level != SCSI_ERR_ALL)) {
        return;
    }
#else
    if (sd_error_level != SCSI_ERR_ALL) {
        return;
    }
#endif

    local_buf = kmem_zalloc(SD_DUMP_MEMORY_BUF_SIZE, KM_SLEEP);
    bufp = local_buf;
    /*
     * Available length is the length of local_buf[], minus the
     * length of the title string, minus one for the ":", minus
     * one for the newline, minus one for the NULL terminator.
     * This gives the #bytes available for holding the printed
     * values from the given data buffer.
     */
    if (fmt == SD_LOG_HEX) {
        format_string = sd_dump_format_string[0];
    } else /* SD_LOG_CHAR */ {
        format_string = sd_dump_format_string[1];
    }
    /*
     * Available count is the number of elements from the given
     * data buffer that we can fit into the available length.
     * This is based upon the size of the format string used.
     * Make one entry and find its size.
     */
    (void) sprintf(bufp, format_string, data[0]);
    entry_len = strlen(bufp);
    avail_count = (SD_DUMP_MEMORY_BUF_SIZE - strlen(title) - 3) / entry_len;

    j = 0;
    while (j < len) {
        bufp = local_buf;
        bzero(bufp, SD_DUMP_MEMORY_BUF_SIZE);
        start_offset = j;

        end_offset = start_offset + avail_count;

        (void) sprintf(bufp, "%s:", title);
        bufp += strlen(bufp);
        for (i = start_offset; ((i < end_offset) && (j < len));
            i++, j++) {
            (void) sprintf(bufp, format_string, data[i]);
            bufp += entry_len;
        }
        (void) sprintf(bufp, "\n");

        scsi_log(SD_DEVINFO(un), sd_label, CE_NOTE, "%s", local_buf);
    }
    kmem_free(local_buf, SD_DUMP_MEMORY_BUF_SIZE);
}

/*
 * Function: sd_print_sense_msg
 *
 * Description: Log a message based upon the given sense data.
 *
 * Arguments: un - ptr to associated softstate
 *            bp - ptr to buf(9S) for the command
 *            arg - ptr to associated sd_sense_info struct
 *            code - SD_IMMEDIATE_RETRY_ISSUED, SD_DELAYED_RETRY_ISSUED,
 *                   or SD_NO_RETRY_ISSUED
 *
 * Context: May be called from interrupt context
 */

static void
sd_print_sense_msg(struct sd_lun *un, struct buf *bp, void *arg, int code)
{
    struct sd_xbuf *xp;
    struct scsi_pkt *pktp;
    uint8_t *sensep;
    daddr_t request_blkno;
    diskaddr_t err_blkno;
    int severity;
    int pfa_flag;
    extern struct scsi_key_strings scsi_cmds[];

    ASSERT(un != NULL);
    ASSERT(mutex_owned(SD_MUTEX(un)));
    ASSERT(bp != NULL);
    xp = SD_GET_XBUF(bp);
    ASSERT(xp != NULL);
    pktp = SD_GET_PKTP(bp);
    ASSERT(pktp != NULL);
    ASSERT(arg != NULL);

    severity = ((struct sd_sense_info *)(arg))->ssi_severity;
    pfa_flag = ((struct sd_sense_info *)(arg))->ssi_pfa_flag;

    if ((code == SD_DELAYED_RETRY_ISSUED) ||
        (code == SD_IMMEDIATE_RETRY_ISSUED)) {
        severity = SCSI_ERR_RETRYABLE;
    }

    /* Use absolute block number for the request block number */
    request_blkno = xp->xb_blkno;

    /*
     * Now try to get the error block number from the sense data
     */
    sensep = xp->xb_sense_data;

    if (scsi_sense_info_uint64(sensep, SENSE_LENGTH,
        (uint64_t *)&err_blkno)) {
        /*
         * We retrieved the error block number from the information
         * portion of the sense data.
         *
         * For USCSI commands we are better off using the error
         * block no. as the requested block no. (This is the best
         * we can estimate.)
         */
        if ((SD_IS_BUFIO(xp) == FALSE) &&
            ((pktp->pkt_flags & FLAG_SILENT) == 0)) {
            request_blkno = err_blkno;
        }
    } else {
        /*
         * Without the es_valid bit set (for fixed format) or an
         * information descriptor (for descriptor format) we cannot
         * be certain of the error blkno, so just use the
         * request_blkno.
         */
        err_blkno = (diskaddr_t)request_blkno;
    }

    /*
     * The following will log the buffer contents for the release driver
     * if the SD_LOGMASK_DIAG bit of sd_level_mask is set, or the error
     * level is set to verbose.
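     *
     * ("Verbose" here means sd_error_level == SCSI_ERR_ALL; see the
     * filtering at the top of sd_dump_memory() above.)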
     */
    sd_dump_memory(un, SD_LOG_IO, "Failed CDB",
        (uchar_t *)pktp->pkt_cdbp, CDB_SIZE, SD_LOG_HEX);
    sd_dump_memory(un, SD_LOG_IO, "Sense Data",
        (uchar_t *)sensep, SENSE_LENGTH, SD_LOG_HEX);

    if (pfa_flag == FALSE) {
        /* This is normally only set for USCSI */
        if ((pktp->pkt_flags & FLAG_SILENT) != 0) {
            return;
        }

        if ((SD_IS_BUFIO(xp) == TRUE) &&
            (((sd_level_mask & SD_LOGMASK_DIAG) == 0) &&
            (severity < sd_error_level))) {
            return;
        }
    }
    /*
     * Check for Sonoma Failover and keep a count of how many failed I/O's
     */
    if ((SD_IS_LSI(un)) &&
        (scsi_sense_key(sensep) == KEY_ILLEGAL_REQUEST) &&
        (scsi_sense_asc(sensep) == 0x94) &&
        (scsi_sense_ascq(sensep) == 0x01)) {
        un->un_sonoma_failure_count++;
        if (un->un_sonoma_failure_count > 1) {
            return;
        }
    }

    scsi_vu_errmsg(SD_SCSI_DEVP(un), pktp, sd_label, severity,
        request_blkno, err_blkno, scsi_cmds,
        (struct scsi_extended_sense *)sensep,
        un->un_additional_codes, NULL);
}

/*
 * Function: sd_sense_key_no_sense
 *
 * Description: Recovery action when sense data was not received.
 *
 * Context: May be called from interrupt context
 */

static void
sd_sense_key_no_sense(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp)
{
    struct sd_sense_info si;

    ASSERT(un != NULL);
    ASSERT(mutex_owned(SD_MUTEX(un)));
    ASSERT(bp != NULL);
    ASSERT(xp != NULL);
    ASSERT(pktp != NULL);

    si.ssi_severity = SCSI_ERR_FATAL;
    si.ssi_pfa_flag = FALSE;

    SD_UPDATE_ERRSTATS(un, sd_softerrs);

    sd_retry_command(un, bp, SD_RETRIES_STANDARD, sd_print_sense_msg,
        &si, EIO, (clock_t)0, NULL);
}


/*
 * Function: sd_sense_key_recoverable_error
 *
 * Description: Recovery actions for a SCSI "Recovered Error" sense key.
 *
 * Context: May be called from interrupt context
 */

static void
sd_sense_key_recoverable_error(struct sd_lun *un,
    uint8_t *sense_datap,
    struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp)
{
    struct sd_sense_info si;
    uint8_t asc = scsi_sense_asc(sense_datap);

    ASSERT(un != NULL);
    ASSERT(mutex_owned(SD_MUTEX(un)));
    ASSERT(bp != NULL);
    ASSERT(xp != NULL);
    ASSERT(pktp != NULL);

    /*
     * 0x5D: FAILURE PREDICTION THRESHOLD EXCEEDED
     */
    if ((asc == 0x5D) && (sd_report_pfa != 0)) {
        SD_UPDATE_ERRSTATS(un, sd_rq_pfa_err);
        si.ssi_severity = SCSI_ERR_INFO;
        si.ssi_pfa_flag = TRUE;
    } else {
        SD_UPDATE_ERRSTATS(un, sd_softerrs);
        SD_UPDATE_ERRSTATS(un, sd_rq_recov_err);
        si.ssi_severity = SCSI_ERR_RECOVERED;
        si.ssi_pfa_flag = FALSE;
    }

    if (pktp->pkt_resid == 0) {
        sd_print_sense_msg(un, bp, &si, SD_NO_RETRY_ISSUED);
        sd_return_command(un, bp);
        return;
    }

    sd_retry_command(un, bp, SD_RETRIES_STANDARD, sd_print_sense_msg,
        &si, EIO, (clock_t)0, NULL);
}


/*
 * Function: sd_sense_key_not_ready
 *
 * Description: Recovery actions for a SCSI "Not Ready" sense key.
 *
 * Context: May be called from interrupt context
 */

static void
sd_sense_key_not_ready(struct sd_lun *un,
    uint8_t *sense_datap,
    struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp)
{
    struct sd_sense_info si;
    uint8_t asc = scsi_sense_asc(sense_datap);
    uint8_t ascq = scsi_sense_ascq(sense_datap);

    ASSERT(un != NULL);
    ASSERT(mutex_owned(SD_MUTEX(un)));
    ASSERT(bp != NULL);
    ASSERT(xp != NULL);
    ASSERT(pktp != NULL);

    si.ssi_severity = SCSI_ERR_FATAL;
    si.ssi_pfa_flag = FALSE;

    /*
     * Update error stats after first NOT READY error.  Disks may have
     * been powered down and may need to be restarted.  For CDROMs,
     * report NOT READY errors only if media is present.
     */
    if ((ISCD(un) && (asc == 0x3A)) ||
        (xp->xb_nr_retry_count > 0)) {
        SD_UPDATE_ERRSTATS(un, sd_harderrs);
        SD_UPDATE_ERRSTATS(un, sd_rq_ntrdy_err);
    }

    /*
     * Just fail if the "not ready" retry limit has been reached.
     */
    if (xp->xb_nr_retry_count >= un->un_notready_retry_count) {
        /* Special check for error message printing for removables. */
        if (un->un_f_has_removable_media && (asc == 0x04) &&
            (ascq >= 0x04)) {
            si.ssi_severity = SCSI_ERR_ALL;
        }
        goto fail_command;
    }

    /*
     * Check the ASC and ASCQ in the sense data as needed, to determine
     * what to do.
     */
    switch (asc) {
    case 0x04:	/* LOGICAL UNIT NOT READY */
        /*
         * disk drives that don't spin up result in a very long delay
         * in format without warning messages.  We will log a message
         * if the error level is set to verbose.
         */
        if (sd_error_level < SCSI_ERR_RETRYABLE) {
            scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
                "logical unit not ready, resetting disk\n");
        }

        /*
         * There are different requirements for CDROMs and disks for
         * the number of retries.  If a CD-ROM is giving this, it is
         * probably reading TOC and is in the process of getting
         * ready, so we should keep on trying for a long time to make
         * sure that all types of media are taken into account (for
         * some media the drive takes a long time to read TOC).  For
         * disks we do not want to retry this too many times as this
         * can cause a long hang in format when the drive refuses to
         * spin up (a very common failure).
         */
        switch (ascq) {
        case 0x00:	/* LUN NOT READY, CAUSE NOT REPORTABLE */
            /*
             * Disk drives frequently refuse to spin up which
             * results in a very long hang in format without
             * warning messages.
             *
             * Note: This code preserves the legacy behavior of
             * comparing xb_nr_retry_count against zero for fibre
             * channel targets instead of comparing against the
             * un_reset_retry_count value.  The reason for this
             * discrepancy has been so utterly lost beneath the
             * Sands of Time that even Indiana Jones could not
             * find it.
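             *
             * (As coded below, the practical effect is that fibre
             * channel targets are reset on the second NOT READY
             * retry, while other targets wait until the retry
             * count exceeds un_reset_retry_count.)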
             */
            if (un->un_f_is_fibre == TRUE) {
                if (((sd_level_mask & SD_LOGMASK_DIAG) ||
                    (xp->xb_nr_retry_count > 0)) &&
                    (un->un_startstop_timeid == NULL)) {
                    scsi_log(SD_DEVINFO(un), sd_label,
                        CE_WARN, "logical unit not ready, "
                        "resetting disk\n");
                    sd_reset_target(un, pktp);
                }
            } else {
                if (((sd_level_mask & SD_LOGMASK_DIAG) ||
                    (xp->xb_nr_retry_count >
                    un->un_reset_retry_count)) &&
                    (un->un_startstop_timeid == NULL)) {
                    scsi_log(SD_DEVINFO(un), sd_label,
                        CE_WARN, "logical unit not ready, "
                        "resetting disk\n");
                    sd_reset_target(un, pktp);
                }
            }
            break;

        case 0x01:	/* LUN IS IN PROCESS OF BECOMING READY */
            /*
             * If the target is in the process of becoming
             * ready, just proceed with the retry.  This can
             * happen with CD-ROMs that take a long time to
             * read TOC after a power cycle or reset.
             */
            goto do_retry;

        case 0x02:	/* LUN NOT READY, INITIALIZING CMD REQUIRED */
            break;

        case 0x03:	/* LUN NOT READY, MANUAL INTERVENTION REQUIRED */
            /*
             * Retries cannot help here so just fail right away.
             */
            goto fail_command;

        case 0x88:
            /*
             * Vendor-unique code for T3/T4: it indicates a
             * path problem in a multipathed config, but as far as
             * the target driver is concerned it equates to a fatal
             * error, so we should just fail the command right away
             * (without printing anything to the console).  If this
             * is not a T3/T4, fall thru to the default recovery
             * action.
             * T3/T4 is FC only, don't need to check is_fibre
             */
            if (SD_IS_T3(un) || SD_IS_T4(un)) {
                sd_return_failed_command(un, bp, EIO);
                return;
            }
            /* FALLTHRU */

        case 0x04:	/* LUN NOT READY, FORMAT IN PROGRESS */
        case 0x05:	/* LUN NOT READY, REBUILD IN PROGRESS */
        case 0x06:	/* LUN NOT READY, RECALCULATION IN PROGRESS */
        case 0x07:	/* LUN NOT READY, OPERATION IN PROGRESS */
        case 0x08:	/* LUN NOT READY, LONG WRITE IN PROGRESS */
        default:	/* Possible future codes in SCSI spec? */
            /*
             * For removable-media devices, do not retry if
             * ASCQ > 2 as these result mostly from USCSI commands
             * on MMC devices issued to check status of an
             * operation initiated in immediate mode.  Also for
             * ASCQ >= 4 do not print console messages as these
             * mainly represent a user-initiated operation
             * instead of a system failure.
             */
            if (un->un_f_has_removable_media) {
                si.ssi_severity = SCSI_ERR_ALL;
                goto fail_command;
            }
            break;
        }

        /*
         * As part of our recovery attempt for the NOT READY
         * condition, we issue a START STOP UNIT command.  However
         * we want to wait for a short delay before attempting this
         * as there may still be more commands coming back from the
         * target with the check condition.  To do this we use
         * timeout(9F) to call sd_start_stop_unit_callback() after
         * the delay interval expires.  (sd_start_stop_unit_callback()
         * dispatches sd_start_stop_unit_task(), which will issue
         * the actual START STOP UNIT command.)  The delay interval
         * is one-half of the delay that we will use to retry the
         * command that generated the NOT READY condition.
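         *
         * (Concretely: the timeout below is scheduled for
         * un_busy_timeout / 2 ticks, one-half of the un_busy_timeout
         * delay used when retrying NOT READY commands.)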
         *
         * Note that we could just dispatch sd_start_stop_unit_task()
         * from here and allow it to sleep for the delay interval,
         * but then we would be tying up the taskq thread
         * unnecessarily for the duration of the delay.
         *
         * Do not issue the START STOP UNIT if the current command
         * is already a START STOP UNIT.
         */
        if (pktp->pkt_cdbp[0] == SCMD_START_STOP) {
            break;
        }

        /*
         * Do not schedule the timeout if one is already pending.
         */
        if (un->un_startstop_timeid != NULL) {
            SD_INFO(SD_LOG_ERROR, un,
                "sd_sense_key_not_ready: restart already issued to"
                " %s%d\n", ddi_driver_name(SD_DEVINFO(un)),
                ddi_get_instance(SD_DEVINFO(un)));
            break;
        }

        /*
         * Schedule the START STOP UNIT command, then queue the command
         * for a retry.
         *
         * Note: A timeout is not scheduled for this retry because we
         * want the retry to be serial with the START_STOP_UNIT.  The
         * retry will be started when the START_STOP_UNIT is completed
         * in sd_start_stop_unit_task.
         */
        un->un_startstop_timeid = timeout(sd_start_stop_unit_callback,
            un, un->un_busy_timeout / 2);
        xp->xb_nr_retry_count++;
        sd_set_retry_bp(un, bp, 0, kstat_waitq_enter);
        return;

    case 0x05:	/* LOGICAL UNIT DOES NOT RESPOND TO SELECTION */
        if (sd_error_level < SCSI_ERR_RETRYABLE) {
            scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
                "unit does not respond to selection\n");
        }
        break;

    case 0x3A:	/* MEDIUM NOT PRESENT */
        if (sd_error_level >= SCSI_ERR_FATAL) {
            scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
                "Caddy not inserted in drive\n");
        }

        sr_ejected(un);
        un->un_mediastate = DKIO_EJECTED;
        /* The state has changed, inform the media watch routines */
        cv_broadcast(&un->un_state_cv);
        /* Just fail if no media is present in the drive. */
        goto fail_command;

    default:
        if (sd_error_level < SCSI_ERR_RETRYABLE) {
            scsi_log(SD_DEVINFO(un), sd_label, CE_NOTE,
                "Unit not Ready. Additional sense code 0x%x\n",
                asc);
        }
        break;
    }

do_retry:

    /*
     * Retry the command, as some targets may report NOT READY for
     * several seconds after being reset.
     */
    xp->xb_nr_retry_count++;
    si.ssi_severity = SCSI_ERR_RETRYABLE;
    sd_retry_command(un, bp, SD_RETRIES_NOCHECK, sd_print_sense_msg,
        &si, EIO, un->un_busy_timeout, NULL);

    return;

fail_command:
    sd_print_sense_msg(un, bp, &si, SD_NO_RETRY_ISSUED);
    sd_return_failed_command(un, bp, EIO);
}


/*
 * Function: sd_sense_key_medium_or_hardware_error
 *
 * Description: Recovery actions for a SCSI "Medium Error" or "Hardware Error"
 *              sense key.
 *
 * Context: May be called from interrupt context
 */

static void
sd_sense_key_medium_or_hardware_error(struct sd_lun *un,
    uint8_t *sense_datap,
    struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp)
{
    struct sd_sense_info si;
    uint8_t sense_key = scsi_sense_key(sense_datap);
    uint8_t asc = scsi_sense_asc(sense_datap);

    ASSERT(un != NULL);
    ASSERT(mutex_owned(SD_MUTEX(un)));
    ASSERT(bp != NULL);
    ASSERT(xp != NULL);
    ASSERT(pktp != NULL);

    si.ssi_severity = SCSI_ERR_FATAL;
    si.ssi_pfa_flag = FALSE;

    if (sense_key == KEY_MEDIUM_ERROR) {
        SD_UPDATE_ERRSTATS(un, sd_rq_media_err);
    }

    SD_UPDATE_ERRSTATS(un, sd_harderrs);

    if ((un->un_reset_retry_count != 0) &&
        (xp->xb_retry_count == un->un_reset_retry_count)) {
        mutex_exit(SD_MUTEX(un));
        /* Do NOT do a RESET_ALL here: too intrusive. (4112858) */
        if (un->un_f_allow_bus_device_reset == TRUE) {

            boolean_t try_resetting_target = B_TRUE;

            /*
             * We need to be able to handle specific ASC values when
             * we are handling a KEY_HARDWARE_ERROR.  In particular,
             * taking the default action of resetting the target may
             * not be the appropriate way to attempt recovery.
             * Resetting a target because of a single LUN failure
             * victimizes all LUNs on that target.
             *
             * This is true for the LSI arrays: if an LSI array
             * controller returns an ASC of 0x84 (LUN Dead), we
             * should trust it.
             */

            if (sense_key == KEY_HARDWARE_ERROR) {
                switch (asc) {
                case 0x84:
                    if (SD_IS_LSI(un)) {
                        try_resetting_target = B_FALSE;
                    }
                    break;
                default:
                    break;
                }
            }

            if (try_resetting_target == B_TRUE) {
                int reset_retval = 0;
                if (un->un_f_lun_reset_enabled == TRUE) {
                    SD_TRACE(SD_LOG_IO_CORE, un,
                        "sd_sense_key_medium_or_hardware_"
                        "error: issuing RESET_LUN\n");
                    reset_retval =
                        scsi_reset(SD_ADDRESS(un),
                        RESET_LUN);
                }
                if (reset_retval == 0) {
                    SD_TRACE(SD_LOG_IO_CORE, un,
                        "sd_sense_key_medium_or_hardware_"
                        "error: issuing RESET_TARGET\n");
                    (void) scsi_reset(SD_ADDRESS(un),
                        RESET_TARGET);
                }
            }
        }
        mutex_enter(SD_MUTEX(un));
    }

    /*
     * This really ought to be a fatal error, but we will retry anyway
     * as some drives report this as a spurious error.
     */
    sd_retry_command(un, bp, SD_RETRIES_STANDARD, sd_print_sense_msg,
        &si, EIO, (clock_t)0, NULL);
}


/*
 * Function: sd_sense_key_illegal_request
 *
 * Description: Recovery actions for a SCSI "Illegal Request" sense key.
 *
 * Context: May be called from interrupt context
 */

static void
sd_sense_key_illegal_request(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp)
{
    struct sd_sense_info si;

    ASSERT(un != NULL);
    ASSERT(mutex_owned(SD_MUTEX(un)));
    ASSERT(bp != NULL);
    ASSERT(xp != NULL);
    ASSERT(pktp != NULL);

    SD_UPDATE_ERRSTATS(un, sd_rq_illrq_err);

    si.ssi_severity = SCSI_ERR_INFO;
    si.ssi_pfa_flag = FALSE;

    /* Pointless to retry if the target thinks it's an illegal request */
    sd_print_sense_msg(un, bp, &si, SD_NO_RETRY_ISSUED);
    sd_return_failed_command(un, bp, EIO);
}


/*
 * Function: sd_sense_key_unit_attention
 *
 * Description: Recovery actions for a SCSI "Unit Attention" sense key.
 *
 * Context: May be called from interrupt context
 */

static void
sd_sense_key_unit_attention(struct sd_lun *un,
    uint8_t *sense_datap,
    struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp)
{
    /*
     * For UNIT ATTENTION we allow retries for one minute.  Devices
     * like Sonoma can return UNIT ATTENTION close to a minute
     * under certain conditions.
     */
    int retry_check_flag = SD_RETRIES_UA;
    boolean_t kstat_updated = B_FALSE;
    struct sd_sense_info si;
    uint8_t asc = scsi_sense_asc(sense_datap);
    uint8_t ascq = scsi_sense_ascq(sense_datap);

    ASSERT(un != NULL);
    ASSERT(mutex_owned(SD_MUTEX(un)));
    ASSERT(bp != NULL);
    ASSERT(xp != NULL);
    ASSERT(pktp != NULL);

    si.ssi_severity = SCSI_ERR_INFO;
    si.ssi_pfa_flag = FALSE;


    switch (asc) {
    case 0x5D:	/* FAILURE PREDICTION THRESHOLD EXCEEDED */
        if (sd_report_pfa != 0) {
            SD_UPDATE_ERRSTATS(un, sd_rq_pfa_err);
            si.ssi_pfa_flag = TRUE;
            retry_check_flag = SD_RETRIES_STANDARD;
            goto do_retry;
        }

        break;

    case 0x29:	/* POWER ON, RESET, OR BUS DEVICE RESET OCCURRED */
        if ((un->un_resvd_status & SD_RESERVE) == SD_RESERVE) {
            un->un_resvd_status |=
                (SD_LOST_RESERVE | SD_WANT_RESERVE);
        }
#ifdef _LP64
        if (un->un_blockcount + 1 > SD_GROUP1_MAX_ADDRESS) {
            if (taskq_dispatch(sd_tq, sd_reenable_dsense_task,
                un, KM_NOSLEEP) == 0) {
                /*
                 * If we can't dispatch the task we'll just
                 * live without descriptor sense.  We can
                 * try again on the next "unit attention"
                 */
                SD_ERROR(SD_LOG_ERROR, un,
                    "sd_sense_key_unit_attention: "
                    "Could not dispatch "
                    "sd_reenable_dsense_task\n");
            }
        }
#endif	/* _LP64 */
        /* FALLTHRU */

    case 0x28:	/* NOT READY TO READY CHANGE, MEDIUM MAY HAVE CHANGED */
        if (!un->un_f_has_removable_media) {
            break;
        }

        /*
         * When we get a unit attention from a removable-media device,
         * it may be in a state that will take a long time to recover
         * (e.g., from a reset).  Since we are executing in interrupt
         * context here, we cannot wait around for the device to come
         * back.  So hand this command off to sd_media_change_task()
         * for deferred processing under taskq thread context.  (Note
         * that the command still may be failed if a problem is
         * encountered at a later time.)
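         *
         * sd_media_change_task() receives the scsi_pkt pointer as its
         * argument and is responsible for retrying or failing the
         * original command itself.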
         */
        if (taskq_dispatch(sd_tq, sd_media_change_task, pktp,
            KM_NOSLEEP) == 0) {
            /*
             * Cannot dispatch the request so fail the command.
             */
            SD_UPDATE_ERRSTATS(un, sd_harderrs);
            SD_UPDATE_ERRSTATS(un, sd_rq_nodev_err);
            si.ssi_severity = SCSI_ERR_FATAL;
            sd_print_sense_msg(un, bp, &si, SD_NO_RETRY_ISSUED);
            sd_return_failed_command(un, bp, EIO);
        }

        /*
         * If the dispatch of sd_media_change_task() failed, the
         * kstats have already been updated above.  If the dispatch
         * succeeded, the kstats will be updated later if an error is
         * encountered.  Either way, set the kstat_updated flag here.
         */
        kstat_updated = B_TRUE;

        /*
         * Either the command has been successfully dispatched to a
         * task Q for retrying, or the dispatch failed.  In either case
         * do NOT retry again by calling sd_retry_command.  This sets up
         * two retries of the same command and when one completes and
         * frees the resources the other will access freed memory,
         * a bad thing.
         */
        return;

    default:
        break;
    }

    /*
     * ASC  ASCQ
     *  2A   09	Capacity data has changed
     *  2A   01	Mode parameters changed
     *  3F   0E	Reported luns data has changed
     * Arrays that support logical unit expansion should report
     * capacity changes (2Ah/09).  "Mode parameters changed" and
     * "Reported luns data has changed" are approximations.
     */
    if (((asc == 0x2a) && (ascq == 0x09)) ||
        ((asc == 0x2a) && (ascq == 0x01)) ||
        ((asc == 0x3f) && (ascq == 0x0e))) {
        if (taskq_dispatch(sd_tq, sd_target_change_task, un,
            KM_NOSLEEP) == 0) {
            SD_ERROR(SD_LOG_ERROR, un,
                "sd_sense_key_unit_attention: "
                "Could not dispatch sd_target_change_task\n");
        }
    }

    /*
     * Update kstat if we haven't done that.
     */
    if (!kstat_updated) {
        SD_UPDATE_ERRSTATS(un, sd_harderrs);
        SD_UPDATE_ERRSTATS(un, sd_rq_nodev_err);
    }

do_retry:
    sd_retry_command(un, bp, retry_check_flag, sd_print_sense_msg, &si,
        EIO, SD_UA_RETRY_DELAY, NULL);
}


/*
 * Function: sd_sense_key_fail_command
 *
 * Description: Used to fail a command when we don't like the sense key that
 *              was returned.
 *
 * Context: May be called from interrupt context
 */

static void
sd_sense_key_fail_command(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp)
{
    struct sd_sense_info si;

    ASSERT(un != NULL);
    ASSERT(mutex_owned(SD_MUTEX(un)));
    ASSERT(bp != NULL);
    ASSERT(xp != NULL);
    ASSERT(pktp != NULL);

    si.ssi_severity = SCSI_ERR_FATAL;
    si.ssi_pfa_flag = FALSE;

    sd_print_sense_msg(un, bp, &si, SD_NO_RETRY_ISSUED);
    sd_return_failed_command(un, bp, EIO);
}


/*
 * Function: sd_sense_key_blank_check
 *
 * Description: Recovery actions for a SCSI "Blank Check" sense key.
 *              Has no monetary connotation.
 *
 * Context: May be called from interrupt context
 */

static void
sd_sense_key_blank_check(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp)
{
    struct sd_sense_info si;

    ASSERT(un != NULL);
    ASSERT(mutex_owned(SD_MUTEX(un)));
    ASSERT(bp != NULL);
    ASSERT(xp != NULL);
    ASSERT(pktp != NULL);

    /*
     * Blank check is not fatal for removable devices, therefore
     * it does not require a console message.
     */
    si.ssi_severity = (un->un_f_has_removable_media) ? SCSI_ERR_ALL :
        SCSI_ERR_FATAL;
    si.ssi_pfa_flag = FALSE;

    sd_print_sense_msg(un, bp, &si, SD_NO_RETRY_ISSUED);
    sd_return_failed_command(un, bp, EIO);
}


/*
 * Function: sd_sense_key_aborted_command
 *
 * Description: Recovery actions for a SCSI "Aborted Command" sense key.
 *
 * Context: May be called from interrupt context
 */

static void
sd_sense_key_aborted_command(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp)
{
    struct sd_sense_info si;

    ASSERT(un != NULL);
    ASSERT(mutex_owned(SD_MUTEX(un)));
    ASSERT(bp != NULL);
    ASSERT(xp != NULL);
    ASSERT(pktp != NULL);

    si.ssi_severity = SCSI_ERR_FATAL;
    si.ssi_pfa_flag = FALSE;

    SD_UPDATE_ERRSTATS(un, sd_harderrs);

    /*
     * This really ought to be a fatal error, but we will retry anyway
     * as some drives report this as a spurious error.
     */
    sd_retry_command(un, bp, SD_RETRIES_STANDARD, sd_print_sense_msg,
        &si, EIO, drv_usectohz(100000), NULL);
}


/*
 * Function: sd_sense_key_default
 *
 * Description: Default recovery action for several SCSI sense keys (basically
 *              attempts a retry).
 *
 * Context: May be called from interrupt context
 */

static void
sd_sense_key_default(struct sd_lun *un,
    uint8_t *sense_datap,
    struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp)
{
    struct sd_sense_info si;
    uint8_t sense_key = scsi_sense_key(sense_datap);

    ASSERT(un != NULL);
    ASSERT(mutex_owned(SD_MUTEX(un)));
    ASSERT(bp != NULL);
    ASSERT(xp != NULL);
    ASSERT(pktp != NULL);

    SD_UPDATE_ERRSTATS(un, sd_harderrs);

    /*
     * Undecoded sense key.  Attempt retries and hope that will fix
     * the problem.  Otherwise, we're dead.
     */
    if ((pktp->pkt_flags & FLAG_SILENT) == 0) {
        scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
            "Unhandled Sense Key '%s'\n", sense_keys[sense_key]);
    }

    si.ssi_severity = SCSI_ERR_FATAL;
    si.ssi_pfa_flag = FALSE;

    sd_retry_command(un, bp, SD_RETRIES_STANDARD, sd_print_sense_msg,
        &si, EIO, (clock_t)0, NULL);
}


/*
 * Function: sd_print_retry_msg
 *
 * Description: Print a message indicating the retry action being taken.
 *
 * Arguments: un - ptr to associated softstate
 *            bp - ptr to buf(9S) for the command
 *            arg - not used.
 *            flag - SD_IMMEDIATE_RETRY_ISSUED, SD_DELAYED_RETRY_ISSUED,
 *                   or SD_NO_RETRY_ISSUED
 *
 * Context: May be called from interrupt context
 */
/* ARGSUSED */
static void
sd_print_retry_msg(struct sd_lun *un, struct buf *bp, void *arg, int flag)
{
    struct sd_xbuf *xp;
    struct scsi_pkt *pktp;
    char *reasonp;
    char *msgp;

    ASSERT(un != NULL);
    ASSERT(mutex_owned(SD_MUTEX(un)));
    ASSERT(bp != NULL);
    pktp = SD_GET_PKTP(bp);
    ASSERT(pktp != NULL);
    xp = SD_GET_XBUF(bp);
    ASSERT(xp != NULL);

    ASSERT(!mutex_owned(&un->un_pm_mutex));
    mutex_enter(&un->un_pm_mutex);
    if ((un->un_state == SD_STATE_SUSPENDED) ||
        (SD_DEVICE_IS_IN_LOW_POWER(un)) ||
        (pktp->pkt_flags & FLAG_SILENT)) {
        mutex_exit(&un->un_pm_mutex);
        goto update_pkt_reason;
    }
    mutex_exit(&un->un_pm_mutex);

    /*
     * Suppress messages if they are all the same pkt_reason; with
     * TQ, many (up to 256) are returned with the same pkt_reason.
     * If we are in panic, then suppress the retry messages.
     */
    switch (flag) {
    case SD_NO_RETRY_ISSUED:
        msgp = "giving up";
        break;
    case SD_IMMEDIATE_RETRY_ISSUED:
    case SD_DELAYED_RETRY_ISSUED:
        if (ddi_in_panic() || (un->un_state == SD_STATE_OFFLINE) ||
            ((pktp->pkt_reason == un->un_last_pkt_reason) &&
            (sd_error_level != SCSI_ERR_ALL))) {
            return;
        }
        msgp = "retrying command";
        break;
    default:
        goto update_pkt_reason;
    }

    reasonp = (((pktp->pkt_statistics & STAT_PERR) != 0) ? "parity error" :
        scsi_rname(pktp->pkt_reason));

    scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
        "SCSI transport failed: reason '%s': %s\n", reasonp, msgp);

update_pkt_reason:
    /*
     * Update un->un_last_pkt_reason with the value in pktp->pkt_reason.
     * This is to prevent multiple console messages for the same failure
     * condition.  Note that un->un_last_pkt_reason is NOT restored if &
     * when the command is retried successfully because there still may be
     * more commands coming back with the same value of pktp->pkt_reason.
     */
    if ((pktp->pkt_reason != CMD_CMPLT) || (xp->xb_retry_count == 0)) {
        un->un_last_pkt_reason = pktp->pkt_reason;
    }
}


/*
 * Function: sd_print_cmd_incomplete_msg
 *
 * Description: Message logging fn. for a SCSA "CMD_INCOMPLETE" pkt_reason.
 *
 * Arguments: un - ptr to associated softstate
 *            bp - ptr to buf(9S) for the command
 *            arg - passed to sd_print_retry_msg()
 *            code - SD_IMMEDIATE_RETRY_ISSUED, SD_DELAYED_RETRY_ISSUED,
 *                   or SD_NO_RETRY_ISSUED
 *
 * Context: May be called from interrupt context
 */

static void
sd_print_cmd_incomplete_msg(struct sd_lun *un, struct buf *bp, void *arg,
    int code)
{
    dev_info_t *dip;

    ASSERT(un != NULL);
    ASSERT(mutex_owned(SD_MUTEX(un)));
    ASSERT(bp != NULL);

    switch (code) {
    case SD_NO_RETRY_ISSUED:
        /*
         * Command was failed.  Someone turned off this target?
*/ 18156 if (un->un_state != SD_STATE_OFFLINE) { 18157 /* 18158 * Suppress message if we are detaching and 18159 * device has been disconnected 18160 * Note that DEVI_IS_DEVICE_REMOVED is a consolidation 18161 * private interface and not part of the DDI 18162 */ 18163 dip = un->un_sd->sd_dev; 18164 if (!(DEVI_IS_DETACHING(dip) && 18165 DEVI_IS_DEVICE_REMOVED(dip))) { 18166 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 18167 "disk not responding to selection\n"); 18168 } 18169 New_state(un, SD_STATE_OFFLINE); 18170 } 18171 break; 18172 18173 case SD_DELAYED_RETRY_ISSUED: 18174 case SD_IMMEDIATE_RETRY_ISSUED: 18175 default: 18176 /* Command was successfully queued for retry */ 18177 sd_print_retry_msg(un, bp, arg, code); 18178 break; 18179 } 18180 } 18181 18182 18183 /* 18184 * Function: sd_pkt_reason_cmd_incomplete 18185 * 18186 * Description: Recovery actions for a SCSA "CMD_INCOMPLETE" pkt_reason. 18187 * 18188 * Context: May be called from interrupt context 18189 */ 18190 18191 static void 18192 sd_pkt_reason_cmd_incomplete(struct sd_lun *un, struct buf *bp, 18193 struct sd_xbuf *xp, struct scsi_pkt *pktp) 18194 { 18195 int flag = SD_RETRIES_STANDARD | SD_RETRIES_ISOLATE; 18196 18197 ASSERT(un != NULL); 18198 ASSERT(mutex_owned(SD_MUTEX(un))); 18199 ASSERT(bp != NULL); 18200 ASSERT(xp != NULL); 18201 ASSERT(pktp != NULL); 18202 18203 /* Do not do a reset if selection did not complete */ 18204 /* Note: Should this not just check the bit? */ 18205 if (pktp->pkt_state != STATE_GOT_BUS) { 18206 SD_UPDATE_ERRSTATS(un, sd_transerrs); 18207 sd_reset_target(un, pktp); 18208 } 18209 18210 /* 18211 * If the target was not successfully selected, then set 18212 * SD_RETRIES_FAILFAST to indicate that we lost communication 18213 * with the target, and further retries and/or commands are 18214 * likely to take a long time. 18215 */ 18216 if ((pktp->pkt_state & STATE_GOT_TARGET) == 0) { 18217 flag |= SD_RETRIES_FAILFAST; 18218 } 18219 18220 SD_UPDATE_RESERVATION_STATUS(un, pktp); 18221 18222 sd_retry_command(un, bp, flag, 18223 sd_print_cmd_incomplete_msg, NULL, EIO, SD_RESTART_TIMEOUT, NULL); 18224 } 18225 18226 18227 18228 /* 18229 * Function: sd_pkt_reason_cmd_tran_err 18230 * 18231 * Description: Recovery actions for a SCSA "CMD_TRAN_ERR" pkt_reason. 18232 * 18233 * Context: May be called from interrupt context 18234 */ 18235 18236 static void 18237 sd_pkt_reason_cmd_tran_err(struct sd_lun *un, struct buf *bp, 18238 struct sd_xbuf *xp, struct scsi_pkt *pktp) 18239 { 18240 ASSERT(un != NULL); 18241 ASSERT(mutex_owned(SD_MUTEX(un))); 18242 ASSERT(bp != NULL); 18243 ASSERT(xp != NULL); 18244 ASSERT(pktp != NULL); 18245 18246 /* 18247 * Do not reset if we got a parity error, or if 18248 * selection did not complete. 18249 */ 18250 SD_UPDATE_ERRSTATS(un, sd_harderrs); 18251 /* Note: Should this not just check the bit for pkt_state? */ 18252 if (((pktp->pkt_statistics & STAT_PERR) == 0) && 18253 (pktp->pkt_state != STATE_GOT_BUS)) { 18254 SD_UPDATE_ERRSTATS(un, sd_transerrs); 18255 sd_reset_target(un, pktp); 18256 } 18257 18258 SD_UPDATE_RESERVATION_STATUS(un, pktp); 18259 18260 sd_retry_command(un, bp, (SD_RETRIES_STANDARD | SD_RETRIES_ISOLATE), 18261 sd_print_retry_msg, NULL, EIO, SD_RESTART_TIMEOUT, NULL); 18262 } 18263 18264 18265 18266 /* 18267 * Function: sd_pkt_reason_cmd_reset 18268 * 18269 * Description: Recovery actions for a SCSA "CMD_RESET" pkt_reason. 
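 *		The command is retried as a "victim": SD_RETRIES_VICTIM
 *		applies the driver's victim retry limit rather than the
 *		standard one, on the theory that this command was
 *		collateral damage from a reset aimed at another target
 *		(see the note in the function body below).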
18270 * 18271 * Context: May be called from interrupt context 18272 */ 18273 18274 static void 18275 sd_pkt_reason_cmd_reset(struct sd_lun *un, struct buf *bp, 18276 struct sd_xbuf *xp, struct scsi_pkt *pktp) 18277 { 18278 ASSERT(un != NULL); 18279 ASSERT(mutex_owned(SD_MUTEX(un))); 18280 ASSERT(bp != NULL); 18281 ASSERT(xp != NULL); 18282 ASSERT(pktp != NULL); 18283 18284 /* The target may still be running the command, so try to reset. */ 18285 SD_UPDATE_ERRSTATS(un, sd_transerrs); 18286 sd_reset_target(un, pktp); 18287 18288 SD_UPDATE_RESERVATION_STATUS(un, pktp); 18289 18290 /* 18291 * If pkt_reason is CMD_RESET chances are that this pkt got 18292 * reset because another target on this bus caused it. The target 18293 * that caused it should get CMD_TIMEOUT with pkt_statistics 18294 * of STAT_TIMEOUT/STAT_DEV_RESET. 18295 */ 18296 18297 sd_retry_command(un, bp, (SD_RETRIES_VICTIM | SD_RETRIES_ISOLATE), 18298 sd_print_retry_msg, NULL, EIO, SD_RESTART_TIMEOUT, NULL); 18299 } 18300 18301 18302 18303 18304 /* 18305 * Function: sd_pkt_reason_cmd_aborted 18306 * 18307 * Description: Recovery actions for a SCSA "CMD_ABORTED" pkt_reason. 18308 * 18309 * Context: May be called from interrupt context 18310 */ 18311 18312 static void 18313 sd_pkt_reason_cmd_aborted(struct sd_lun *un, struct buf *bp, 18314 struct sd_xbuf *xp, struct scsi_pkt *pktp) 18315 { 18316 ASSERT(un != NULL); 18317 ASSERT(mutex_owned(SD_MUTEX(un))); 18318 ASSERT(bp != NULL); 18319 ASSERT(xp != NULL); 18320 ASSERT(pktp != NULL); 18321 18322 /* The target may still be running the command, so try to reset. */ 18323 SD_UPDATE_ERRSTATS(un, sd_transerrs); 18324 sd_reset_target(un, pktp); 18325 18326 SD_UPDATE_RESERVATION_STATUS(un, pktp); 18327 18328 /* 18329 * If pkt_reason is CMD_ABORTED chances are that this pkt got 18330 * aborted because another target on this bus caused it. The target 18331 * that caused it should get CMD_TIMEOUT with pkt_statistics 18332 * of STAT_TIMEOUT/STAT_DEV_RESET. 18333 */ 18334 18335 sd_retry_command(un, bp, (SD_RETRIES_VICTIM | SD_RETRIES_ISOLATE), 18336 sd_print_retry_msg, NULL, EIO, SD_RESTART_TIMEOUT, NULL); 18337 } 18338 18339 18340 18341 /* 18342 * Function: sd_pkt_reason_cmd_timeout 18343 * 18344 * Description: Recovery actions for a SCSA "CMD_TIMEOUT" pkt_reason. 18345 * 18346 * Context: May be called from interrupt context 18347 */ 18348 18349 static void 18350 sd_pkt_reason_cmd_timeout(struct sd_lun *un, struct buf *bp, 18351 struct sd_xbuf *xp, struct scsi_pkt *pktp) 18352 { 18353 ASSERT(un != NULL); 18354 ASSERT(mutex_owned(SD_MUTEX(un))); 18355 ASSERT(bp != NULL); 18356 ASSERT(xp != NULL); 18357 ASSERT(pktp != NULL); 18358 18359 18360 SD_UPDATE_ERRSTATS(un, sd_transerrs); 18361 sd_reset_target(un, pktp); 18362 18363 SD_UPDATE_RESERVATION_STATUS(un, pktp); 18364 18365 /* 18366 * A command timeout indicates that we could not establish 18367 * communication with the target, so set SD_RETRIES_FAILFAST 18368 * as further retries/commands are likely to take a long time. 18369 */ 18370 sd_retry_command(un, bp, 18371 (SD_RETRIES_STANDARD | SD_RETRIES_ISOLATE | SD_RETRIES_FAILFAST), 18372 sd_print_retry_msg, NULL, EIO, SD_RESTART_TIMEOUT, NULL); 18373 } 18374 18375 18376 18377 /* 18378 * Function: sd_pkt_reason_cmd_unx_bus_free 18379 * 18380 * Description: Recovery actions for a SCSA "CMD_UNX_BUS_FREE" pkt_reason. 
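 *		The retry is logged via sd_print_retry_msg() unless the
 *		unexpected bus free was accompanied by a parity error
 *		(STAT_PERR), in which case the retry is issued silently.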
18381 * 18382 * Context: May be called from interrupt context 18383 */ 18384 18385 static void 18386 sd_pkt_reason_cmd_unx_bus_free(struct sd_lun *un, struct buf *bp, 18387 struct sd_xbuf *xp, struct scsi_pkt *pktp) 18388 { 18389 void (*funcp)(struct sd_lun *un, struct buf *bp, void *arg, int code); 18390 18391 ASSERT(un != NULL); 18392 ASSERT(mutex_owned(SD_MUTEX(un))); 18393 ASSERT(bp != NULL); 18394 ASSERT(xp != NULL); 18395 ASSERT(pktp != NULL); 18396 18397 SD_UPDATE_ERRSTATS(un, sd_harderrs); 18398 SD_UPDATE_RESERVATION_STATUS(un, pktp); 18399 18400 funcp = ((pktp->pkt_statistics & STAT_PERR) == 0) ? 18401 sd_print_retry_msg : NULL; 18402 18403 sd_retry_command(un, bp, (SD_RETRIES_STANDARD | SD_RETRIES_ISOLATE), 18404 funcp, NULL, EIO, SD_RESTART_TIMEOUT, NULL); 18405 } 18406 18407 18408 /* 18409 * Function: sd_pkt_reason_cmd_tag_reject 18410 * 18411 * Description: Recovery actions for a SCSA "CMD_TAG_REJECT" pkt_reason. 18412 * 18413 * Context: May be called from interrupt context 18414 */ 18415 18416 static void 18417 sd_pkt_reason_cmd_tag_reject(struct sd_lun *un, struct buf *bp, 18418 struct sd_xbuf *xp, struct scsi_pkt *pktp) 18419 { 18420 ASSERT(un != NULL); 18421 ASSERT(mutex_owned(SD_MUTEX(un))); 18422 ASSERT(bp != NULL); 18423 ASSERT(xp != NULL); 18424 ASSERT(pktp != NULL); 18425 18426 SD_UPDATE_ERRSTATS(un, sd_harderrs); 18427 pktp->pkt_flags = 0; 18428 un->un_tagflags = 0; 18429 if (un->un_f_opt_queueing == TRUE) { 18430 un->un_throttle = min(un->un_throttle, 3); 18431 } else { 18432 un->un_throttle = 1; 18433 } 18434 mutex_exit(SD_MUTEX(un)); 18435 (void) scsi_ifsetcap(SD_ADDRESS(un), "tagged-qing", 0, 1); 18436 mutex_enter(SD_MUTEX(un)); 18437 18438 SD_UPDATE_RESERVATION_STATUS(un, pktp); 18439 18440 /* Legacy behavior not to check retry counts here. */ 18441 sd_retry_command(un, bp, (SD_RETRIES_NOCHECK | SD_RETRIES_ISOLATE), 18442 sd_print_retry_msg, NULL, EIO, SD_RESTART_TIMEOUT, NULL); 18443 } 18444 18445 18446 /* 18447 * Function: sd_pkt_reason_default 18448 * 18449 * Description: Default recovery actions for SCSA pkt_reason values that 18450 * do not have more explicit recovery actions. 18451 * 18452 * Context: May be called from interrupt context 18453 */ 18454 18455 static void 18456 sd_pkt_reason_default(struct sd_lun *un, struct buf *bp, 18457 struct sd_xbuf *xp, struct scsi_pkt *pktp) 18458 { 18459 ASSERT(un != NULL); 18460 ASSERT(mutex_owned(SD_MUTEX(un))); 18461 ASSERT(bp != NULL); 18462 ASSERT(xp != NULL); 18463 ASSERT(pktp != NULL); 18464 18465 SD_UPDATE_ERRSTATS(un, sd_transerrs); 18466 sd_reset_target(un, pktp); 18467 18468 SD_UPDATE_RESERVATION_STATUS(un, pktp); 18469 18470 sd_retry_command(un, bp, (SD_RETRIES_STANDARD | SD_RETRIES_ISOLATE), 18471 sd_print_retry_msg, NULL, EIO, SD_RESTART_TIMEOUT, NULL); 18472 } 18473 18474 18475 18476 /* 18477 * Function: sd_pkt_status_check_condition 18478 * 18479 * Description: Recovery actions for a "STATUS_CHECK" SCSI command status. 
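 *		If auto request sense (ARQ) is disabled, a REQUEST SENSE
 *		command must first be issued to retrieve the sense data
 *		for the failed command; with ARQ enabled the sense data
 *		arrived with the packet, so the command is simply retried.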
18480 *
18481 * Context: May be called from interrupt context
18482 */
18483
18484 static void
18485 sd_pkt_status_check_condition(struct sd_lun *un, struct buf *bp,
18486 struct sd_xbuf *xp, struct scsi_pkt *pktp)
18487 {
18488 ASSERT(un != NULL);
18489 ASSERT(mutex_owned(SD_MUTEX(un)));
18490 ASSERT(bp != NULL);
18491 ASSERT(xp != NULL);
18492 ASSERT(pktp != NULL);
18493
18494 SD_TRACE(SD_LOG_IO, un, "sd_pkt_status_check_condition: "
18495 "entry: buf:0x%p xp:0x%p\n", bp, xp);
18496
18497 /*
18498 * If ARQ is NOT enabled, then issue a REQUEST SENSE command (the
18499 * command will be retried after the request sense). Otherwise, retry
18500 * the command. Note: we are issuing the request sense even though the
18501 * retry limit may have been reached for the failed command.
18502 */
18503 if (un->un_f_arq_enabled == FALSE) {
18504 SD_INFO(SD_LOG_IO_CORE, un, "sd_pkt_status_check_condition: "
18505 "no ARQ, sending request sense command\n");
18506 sd_send_request_sense_command(un, bp, pktp);
18507 } else {
18508 SD_INFO(SD_LOG_IO_CORE, un, "sd_pkt_status_check_condition: "
18509 "ARQ, retrying command\n");
18510 #if defined(__i386) || defined(__amd64)
18511 /*
18512 * The SD_RETRY_DELAY value needs to be adjusted here
18513 * if SD_RETRY_DELAY changes in sddef.h
18514 */
18515 sd_retry_command(un, bp, SD_RETRIES_STANDARD, NULL, NULL, EIO,
18516 un->un_f_is_fibre ? drv_usectohz(100000) : (clock_t)0,
18517 NULL);
18518 #else
18519 sd_retry_command(un, bp, SD_RETRIES_STANDARD, NULL, NULL,
18520 EIO, SD_RETRY_DELAY, NULL);
18521 #endif
18522 }
18523
18524 SD_TRACE(SD_LOG_IO_CORE, un, "sd_pkt_status_check_condition: exit\n");
18525 }
18526
18527
18528 /*
18529 * Function: sd_pkt_status_busy
18530 *
18531 * Description: Recovery actions for a "STATUS_BUSY" SCSI command status.
18532 *
18533 * Context: May be called from interrupt context
18534 */
18535
18536 static void
18537 sd_pkt_status_busy(struct sd_lun *un, struct buf *bp, struct sd_xbuf *xp,
18538 struct scsi_pkt *pktp)
18539 {
18540 ASSERT(un != NULL);
18541 ASSERT(mutex_owned(SD_MUTEX(un)));
18542 ASSERT(bp != NULL);
18543 ASSERT(xp != NULL);
18544 ASSERT(pktp != NULL);
18545
18546 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
18547 "sd_pkt_status_busy: entry\n");
18548
18549 /* If retries are exhausted, just fail the command. */
18550 if (xp->xb_retry_count >= un->un_busy_retry_count) {
18551 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
18552 "device busy too long\n");
18553 sd_return_failed_command(un, bp, EIO);
18554 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
18555 "sd_pkt_status_busy: exit\n");
18556 return;
18557 }
18558 xp->xb_retry_count++;
18559
18560 /*
18561 * Try to reset the target. However, we do not want to perform
18562 * more than one reset if the device continues to fail. The reset
18563 * will be performed when the retry count reaches the reset
18564 * threshold. This threshold should be set such that at least
18565 * one retry is issued before the reset is performed.
18566 */
18567 if (xp->xb_retry_count ==
18568 ((un->un_reset_retry_count < 2) ? 2 : un->un_reset_retry_count)) {
18569 int rval = 0;
18570 mutex_exit(SD_MUTEX(un));
18571 if (un->un_f_allow_bus_device_reset == TRUE) {
18572 /*
18573 * First try to reset the LUN; if we cannot then
18574 * try to reset the target.
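 * The escalation sequence below (LUN reset if enabled, then
 * target reset, then RESET_ALL as a last resort) mirrors the
 * one centralized in sd_reset_target().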
18575 */ 18576 if (un->un_f_lun_reset_enabled == TRUE) { 18577 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 18578 "sd_pkt_status_busy: RESET_LUN\n"); 18579 rval = scsi_reset(SD_ADDRESS(un), RESET_LUN); 18580 } 18581 if (rval == 0) { 18582 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 18583 "sd_pkt_status_busy: RESET_TARGET\n"); 18584 rval = scsi_reset(SD_ADDRESS(un), RESET_TARGET); 18585 } 18586 } 18587 if (rval == 0) { 18588 /* 18589 * If the RESET_LUN and/or RESET_TARGET failed, 18590 * try RESET_ALL 18591 */ 18592 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 18593 "sd_pkt_status_busy: RESET_ALL\n"); 18594 rval = scsi_reset(SD_ADDRESS(un), RESET_ALL); 18595 } 18596 mutex_enter(SD_MUTEX(un)); 18597 if (rval == 0) { 18598 /* 18599 * The RESET_LUN, RESET_TARGET, and/or RESET_ALL failed. 18600 * At this point we give up & fail the command. 18601 */ 18602 sd_return_failed_command(un, bp, EIO); 18603 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 18604 "sd_pkt_status_busy: exit (failed cmd)\n"); 18605 return; 18606 } 18607 } 18608 18609 /* 18610 * Retry the command. Be sure to specify SD_RETRIES_NOCHECK as 18611 * we have already checked the retry counts above. 18612 */ 18613 sd_retry_command(un, bp, SD_RETRIES_NOCHECK, NULL, NULL, 18614 EIO, un->un_busy_timeout, NULL); 18615 18616 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 18617 "sd_pkt_status_busy: exit\n"); 18618 } 18619 18620 18621 /* 18622 * Function: sd_pkt_status_reservation_conflict 18623 * 18624 * Description: Recovery actions for a "STATUS_RESERVATION_CONFLICT" SCSI 18625 * command status. 18626 * 18627 * Context: May be called from interrupt context 18628 */ 18629 18630 static void 18631 sd_pkt_status_reservation_conflict(struct sd_lun *un, struct buf *bp, 18632 struct sd_xbuf *xp, struct scsi_pkt *pktp) 18633 { 18634 ASSERT(un != NULL); 18635 ASSERT(mutex_owned(SD_MUTEX(un))); 18636 ASSERT(bp != NULL); 18637 ASSERT(xp != NULL); 18638 ASSERT(pktp != NULL); 18639 18640 /* 18641 * If the command was PERSISTENT_RESERVATION_[IN|OUT] then reservation 18642 * conflict could be due to various reasons like incorrect keys, not 18643 * registered or not reserved etc. So, we return EACCES to the caller. 18644 */ 18645 if (un->un_reservation_type == SD_SCSI3_RESERVATION) { 18646 int cmd = SD_GET_PKT_OPCODE(pktp); 18647 if ((cmd == SCMD_PERSISTENT_RESERVE_IN) || 18648 (cmd == SCMD_PERSISTENT_RESERVE_OUT)) { 18649 sd_return_failed_command(un, bp, EACCES); 18650 return; 18651 } 18652 } 18653 18654 un->un_resvd_status |= SD_RESERVATION_CONFLICT; 18655 18656 if ((un->un_resvd_status & SD_FAILFAST) != 0) { 18657 if (sd_failfast_enable != 0) { 18658 /* By definition, we must panic here.... */ 18659 sd_panic_for_res_conflict(un); 18660 /*NOTREACHED*/ 18661 } 18662 SD_ERROR(SD_LOG_IO, un, 18663 "sd_handle_resv_conflict: Disk Reserved\n"); 18664 sd_return_failed_command(un, bp, EACCES); 18665 return; 18666 } 18667 18668 /* 18669 * 1147670: retry only if sd_retry_on_reservation_conflict 18670 * property is set (default is 1). Retries will not succeed 18671 * on a disk reserved by another initiator. HA systems 18672 * may reset this via sd.conf to avoid these retries. 18673 * 18674 * Note: The legacy return code for this failure is EIO, however EACCES 18675 * seems more appropriate for a reservation conflict. 
18676 */ 18677 if (sd_retry_on_reservation_conflict == 0) { 18678 SD_ERROR(SD_LOG_IO, un, 18679 "sd_handle_resv_conflict: Device Reserved\n"); 18680 sd_return_failed_command(un, bp, EIO); 18681 return; 18682 } 18683 18684 /* 18685 * Retry the command if we can. 18686 * 18687 * Note: The legacy return code for this failure is EIO, however EACCES 18688 * seems more appropriate for a reservation conflict. 18689 */ 18690 sd_retry_command(un, bp, SD_RETRIES_STANDARD, NULL, NULL, EIO, 18691 (clock_t)2, NULL); 18692 } 18693 18694 18695 18696 /* 18697 * Function: sd_pkt_status_qfull 18698 * 18699 * Description: Handle a QUEUE FULL condition from the target. This can 18700 * occur if the HBA does not handle the queue full condition. 18701 * (Basically this means third-party HBAs as Sun HBAs will 18702 * handle the queue full condition.) Note that if there are 18703 * some commands already in the transport, then the queue full 18704 * has occurred because the queue for this nexus is actually 18705 * full. If there are no commands in the transport, then the 18706 * queue full is resulting from some other initiator or lun 18707 * consuming all the resources at the target. 18708 * 18709 * Context: May be called from interrupt context 18710 */ 18711 18712 static void 18713 sd_pkt_status_qfull(struct sd_lun *un, struct buf *bp, 18714 struct sd_xbuf *xp, struct scsi_pkt *pktp) 18715 { 18716 ASSERT(un != NULL); 18717 ASSERT(mutex_owned(SD_MUTEX(un))); 18718 ASSERT(bp != NULL); 18719 ASSERT(xp != NULL); 18720 ASSERT(pktp != NULL); 18721 18722 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 18723 "sd_pkt_status_qfull: entry\n"); 18724 18725 /* 18726 * Just lower the QFULL throttle and retry the command. Note that 18727 * we do not limit the number of retries here. 18728 */ 18729 sd_reduce_throttle(un, SD_THROTTLE_QFULL); 18730 sd_retry_command(un, bp, SD_RETRIES_NOCHECK, NULL, NULL, 0, 18731 SD_RESTART_TIMEOUT, NULL); 18732 18733 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 18734 "sd_pkt_status_qfull: exit\n"); 18735 } 18736 18737 18738 /* 18739 * Function: sd_reset_target 18740 * 18741 * Description: Issue a scsi_reset(9F), with either RESET_LUN, 18742 * RESET_TARGET, or RESET_ALL. 18743 * 18744 * Context: May be called under interrupt context. 18745 */ 18746 18747 static void 18748 sd_reset_target(struct sd_lun *un, struct scsi_pkt *pktp) 18749 { 18750 int rval = 0; 18751 18752 ASSERT(un != NULL); 18753 ASSERT(mutex_owned(SD_MUTEX(un))); 18754 ASSERT(pktp != NULL); 18755 18756 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sd_reset_target: entry\n"); 18757 18758 /* 18759 * No need to reset if the transport layer has already done so. 
18760 */
18761 if ((pktp->pkt_statistics &
18762 (STAT_BUS_RESET | STAT_DEV_RESET | STAT_ABORTED)) != 0) {
18763 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
18764 "sd_reset_target: no reset\n");
18765 return;
18766 }
18767
18768 mutex_exit(SD_MUTEX(un));
18769
18770 if (un->un_f_allow_bus_device_reset == TRUE) {
18771 if (un->un_f_lun_reset_enabled == TRUE) {
18772 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
18773 "sd_reset_target: RESET_LUN\n");
18774 rval = scsi_reset(SD_ADDRESS(un), RESET_LUN);
18775 }
18776 if (rval == 0) {
18777 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
18778 "sd_reset_target: RESET_TARGET\n");
18779 rval = scsi_reset(SD_ADDRESS(un), RESET_TARGET);
18780 }
18781 }
18782
18783 if (rval == 0) {
18784 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
18785 "sd_reset_target: RESET_ALL\n");
18786 (void) scsi_reset(SD_ADDRESS(un), RESET_ALL);
18787 }
18788
18789 mutex_enter(SD_MUTEX(un));
18790
18791 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sd_reset_target: exit\n");
18792 }
18793
18794 /*
18795 * Function: sd_target_change_task
18796 *
18797 * Description: Handle a dynamic target change
18798 *
18799 * Context: Executes in a taskq() thread context
18800 */
18801 static void
18802 sd_target_change_task(void *arg)
18803 {
18804 struct sd_lun *un = arg;
18805 uint64_t capacity;
18806 diskaddr_t label_cap;
18807 uint_t lbasize;
18808 sd_ssc_t *ssc;
18809
18810 ASSERT(un != NULL);
18811 ASSERT(!mutex_owned(SD_MUTEX(un)));
18812
18813 if ((un->un_f_blockcount_is_valid == FALSE) ||
18814 (un->un_f_tgt_blocksize_is_valid == FALSE)) {
18815 return;
18816 }
18817
18818 ssc = sd_ssc_init(un);
18819
18820 if (sd_send_scsi_READ_CAPACITY(ssc, &capacity,
18821 &lbasize, SD_PATH_DIRECT) != 0) {
18822 SD_ERROR(SD_LOG_ERROR, un,
18823 "sd_target_change_task: failed to read capacity\n");
18824 sd_ssc_assessment(ssc, SD_FMT_IGNORE);
18825 goto task_exit;
18826 }
18827
18828 mutex_enter(SD_MUTEX(un));
18829 if (capacity <= un->un_blockcount) {
18830 mutex_exit(SD_MUTEX(un));
18831 goto task_exit;
18832 }
18833
18834 sd_update_block_info(un, lbasize, capacity);
18835 mutex_exit(SD_MUTEX(un));
18836
18837 /*
18838 * If the LUN is EFI-labeled and the LUN capacity is greater than the
18839 * capacity contained in the label, log a sysevent.
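 * The ESC_DEV_DLE sysevent logged by sd_log_lun_expansion_event()
 * below is how userland consumers learn that the LUN was grown
 * on the storage side (dynamic LUN expansion).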
18840 */
18841 if (cmlb_efi_label_capacity(un->un_cmlbhandle, &label_cap,
18842 (void*)SD_PATH_DIRECT) == 0) {
18843 mutex_enter(SD_MUTEX(un));
18844 if (un->un_f_blockcount_is_valid &&
18845 un->un_blockcount > label_cap) {
18846 mutex_exit(SD_MUTEX(un));
18847 sd_log_lun_expansion_event(un, KM_SLEEP);
18848 } else {
18849 mutex_exit(SD_MUTEX(un));
18850 }
18851 }
18852
18853 task_exit:
18854 sd_ssc_fini(ssc);
18855 }
18856
18857 /*
18858 * Function: sd_log_lun_expansion_event
18859 *
18860 * Description: Log a LUN expansion sysevent
18861 *
18862 * Context: Never called from interrupt context
18863 */
18864 static void
18865 sd_log_lun_expansion_event(struct sd_lun *un, int km_flag)
18866 {
18867 int err;
18868 char *path;
18869 nvlist_t *dle_attr_list;
18870
18871 /* Allocate and build the sysevent attribute list */
18872 err = nvlist_alloc(&dle_attr_list, NV_UNIQUE_NAME_TYPE, km_flag);
18873 if (err != 0) {
18874 SD_ERROR(SD_LOG_ERROR, un,
18875 "sd_log_lun_expansion_event: failed to allocate space\n");
18876 return;
18877 }
18878
18879 path = kmem_alloc(MAXPATHLEN, km_flag);
18880 if (path == NULL) {
18881 nvlist_free(dle_attr_list);
18882 SD_ERROR(SD_LOG_ERROR, un,
18883 "sd_log_lun_expansion_event: failed to allocate space\n");
18884 return;
18885 }
18886 /*
18887 * Add a path attribute to identify the LUN.
18888 * We are using minor node 'a' as the sysevent attribute.
18889 */
18890 (void) snprintf(path, MAXPATHLEN, "/devices");
18891 (void) ddi_pathname(SD_DEVINFO(un), path + strlen(path));
18892 (void) snprintf(path + strlen(path), MAXPATHLEN - strlen(path),
18893 ":a");
18894
18895 err = nvlist_add_string(dle_attr_list, DEV_PHYS_PATH, path);
18896 if (err != 0) {
18897 nvlist_free(dle_attr_list);
18898 kmem_free(path, MAXPATHLEN);
18899 SD_ERROR(SD_LOG_ERROR, un,
18900 "sd_log_lun_expansion_event: failed to add attribute\n");
18901 return;
18902 }
18903
18904 /* Log the dynamic LUN expansion sysevent */
18905 err = ddi_log_sysevent(SD_DEVINFO(un), SUNW_VENDOR, EC_DEV_STATUS,
18906 ESC_DEV_DLE, dle_attr_list, NULL, km_flag);
18907 if (err != DDI_SUCCESS) {
18908 SD_ERROR(SD_LOG_ERROR, un,
18909 "sd_log_lun_expansion_event: failed to log sysevent\n");
18910 }
18911
18912 nvlist_free(dle_attr_list);
18913 kmem_free(path, MAXPATHLEN);
18914 }
18915
18916 /*
18917 * Function: sd_media_change_task
18918 *
18919 * Description: Recovery action for CDROM to become available.
18920 *
18921 * Context: Executes in a taskq() thread context
18922 */
18923
18924 static void
18925 sd_media_change_task(void *arg)
18926 {
18927 struct scsi_pkt *pktp = arg;
18928 struct sd_lun *un;
18929 struct buf *bp;
18930 struct sd_xbuf *xp;
18931 int err = 0;
18932 int retry_count = 0;
18933 int retry_limit = SD_UNIT_ATTENTION_RETRY/10;
18934 struct sd_sense_info si;
18935
18936 ASSERT(pktp != NULL);
18937 bp = (struct buf *)pktp->pkt_private;
18938 ASSERT(bp != NULL);
18939 xp = SD_GET_XBUF(bp);
18940 ASSERT(xp != NULL);
18941 un = SD_GET_UN(bp);
18942 ASSERT(un != NULL);
18943 ASSERT(!mutex_owned(SD_MUTEX(un)));
18944 ASSERT(un->un_f_monitor_media_state);
18945
18946 si.ssi_severity = SCSI_ERR_INFO;
18947 si.ssi_pfa_flag = FALSE;
18948
18949 /*
18950 * When a reset is issued on a CDROM, it takes a long time to
18951 * recover. The first few attempts to read the capacity and other
18952 * things related to handling the unit attention fail (with an ASC
18953 * of 0x4 and an ASCQ of 0x1). In that case we want to do enough
18954 * retries, while limiting the retries in other cases of genuine
18955 * failure, such as no media in the drive.
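 *
 * Example: assuming SD_UNIT_ATTENTION_RETRY is 40 (see sddef.h),
 * the loop below starts with a budget of 4 attempts spaced 0.5 sec
 * apart (about 2 seconds) for ordinary failures, and widens the
 * budget to the full 40 attempts (about 20 seconds) only when
 * sd_handle_mchange() returns EAGAIN, i.e. while the drive is
 * still becoming ready after a reset.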
18956 */ 18957 while (retry_count++ < retry_limit) { 18958 if ((err = sd_handle_mchange(un)) == 0) { 18959 break; 18960 } 18961 if (err == EAGAIN) { 18962 retry_limit = SD_UNIT_ATTENTION_RETRY; 18963 } 18964 /* Sleep for 0.5 sec. & try again */ 18965 delay(drv_usectohz(500000)); 18966 } 18967 18968 /* 18969 * Dispatch (retry or fail) the original command here, 18970 * along with appropriate console messages.... 18971 * 18972 * Must grab the mutex before calling sd_retry_command, 18973 * sd_print_sense_msg and sd_return_failed_command. 18974 */ 18975 mutex_enter(SD_MUTEX(un)); 18976 if (err != SD_CMD_SUCCESS) { 18977 SD_UPDATE_ERRSTATS(un, sd_harderrs); 18978 SD_UPDATE_ERRSTATS(un, sd_rq_nodev_err); 18979 si.ssi_severity = SCSI_ERR_FATAL; 18980 sd_print_sense_msg(un, bp, &si, SD_NO_RETRY_ISSUED); 18981 sd_return_failed_command(un, bp, EIO); 18982 } else { 18983 sd_retry_command(un, bp, SD_RETRIES_NOCHECK, sd_print_sense_msg, 18984 &si, EIO, (clock_t)0, NULL); 18985 } 18986 mutex_exit(SD_MUTEX(un)); 18987 } 18988 18989 18990 18991 /* 18992 * Function: sd_handle_mchange 18993 * 18994 * Description: Perform geometry validation & other recovery when CDROM 18995 * has been removed from drive. 18996 * 18997 * Return Code: 0 for success 18998 * errno-type return code of either sd_send_scsi_DOORLOCK() or 18999 * sd_send_scsi_READ_CAPACITY() 19000 * 19001 * Context: Executes in a taskq() thread context 19002 */ 19003 19004 static int 19005 sd_handle_mchange(struct sd_lun *un) 19006 { 19007 uint64_t capacity; 19008 uint32_t lbasize; 19009 int rval; 19010 sd_ssc_t *ssc; 19011 19012 ASSERT(!mutex_owned(SD_MUTEX(un))); 19013 ASSERT(un->un_f_monitor_media_state); 19014 19015 ssc = sd_ssc_init(un); 19016 rval = sd_send_scsi_READ_CAPACITY(ssc, &capacity, &lbasize, 19017 SD_PATH_DIRECT_PRIORITY); 19018 19019 if (rval != 0) 19020 goto failed; 19021 19022 mutex_enter(SD_MUTEX(un)); 19023 sd_update_block_info(un, lbasize, capacity); 19024 19025 if (un->un_errstats != NULL) { 19026 struct sd_errstats *stp = 19027 (struct sd_errstats *)un->un_errstats->ks_data; 19028 stp->sd_capacity.value.ui64 = (uint64_t) 19029 ((uint64_t)un->un_blockcount * 19030 (uint64_t)un->un_tgt_blocksize); 19031 } 19032 19033 /* 19034 * Check if the media in the device is writable or not 19035 */ 19036 if (ISCD(un)) { 19037 sd_check_for_writable_cd(ssc, SD_PATH_DIRECT_PRIORITY); 19038 } 19039 19040 /* 19041 * Note: Maybe let the strategy/partitioning chain worry about getting 19042 * valid geometry. 19043 */ 19044 mutex_exit(SD_MUTEX(un)); 19045 cmlb_invalidate(un->un_cmlbhandle, (void *)SD_PATH_DIRECT_PRIORITY); 19046 19047 19048 if (cmlb_validate(un->un_cmlbhandle, 0, 19049 (void *)SD_PATH_DIRECT_PRIORITY) != 0) { 19050 sd_ssc_fini(ssc); 19051 return (EIO); 19052 } else { 19053 if (un->un_f_pkstats_enabled) { 19054 sd_set_pstats(un); 19055 SD_TRACE(SD_LOG_IO_PARTITION, un, 19056 "sd_handle_mchange: un:0x%p pstats created and " 19057 "set\n", un); 19058 } 19059 } 19060 19061 /* 19062 * Try to lock the door 19063 */ 19064 rval = sd_send_scsi_DOORLOCK(ssc, SD_REMOVAL_PREVENT, 19065 SD_PATH_DIRECT_PRIORITY); 19066 failed: 19067 if (rval != 0) 19068 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 19069 sd_ssc_fini(ssc); 19070 return (rval); 19071 } 19072 19073 19074 /* 19075 * Function: sd_send_scsi_DOORLOCK 19076 * 19077 * Description: Issue the scsi DOOR LOCK command 19078 * 19079 * Arguments: ssc - ssc contains pointer to driver soft state (unit) 19080 * structure for this target. 
19081 * flag - SD_REMOVAL_ALLOW 19082 * SD_REMOVAL_PREVENT 19083 * path_flag - SD_PATH_DIRECT to use the USCSI "direct" chain and 19084 * the normal command waitq, or SD_PATH_DIRECT_PRIORITY 19085 * to use the USCSI "direct" chain and bypass the normal 19086 * command waitq. SD_PATH_DIRECT_PRIORITY is used when this 19087 * command is issued as part of an error recovery action. 19088 * 19089 * Return Code: 0 - Success 19090 * errno return code from sd_ssc_send() 19091 * 19092 * Context: Can sleep. 19093 */ 19094 19095 static int 19096 sd_send_scsi_DOORLOCK(sd_ssc_t *ssc, int flag, int path_flag) 19097 { 19098 struct scsi_extended_sense sense_buf; 19099 union scsi_cdb cdb; 19100 struct uscsi_cmd ucmd_buf; 19101 int status; 19102 struct sd_lun *un; 19103 19104 ASSERT(ssc != NULL); 19105 un = ssc->ssc_un; 19106 ASSERT(un != NULL); 19107 ASSERT(!mutex_owned(SD_MUTEX(un))); 19108 19109 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_DOORLOCK: entry: un:0x%p\n", un); 19110 19111 /* already determined doorlock is not supported, fake success */ 19112 if (un->un_f_doorlock_supported == FALSE) { 19113 return (0); 19114 } 19115 19116 /* 19117 * If we are ejecting and see an SD_REMOVAL_PREVENT 19118 * ignore the command so we can complete the eject 19119 * operation. 19120 */ 19121 if (flag == SD_REMOVAL_PREVENT) { 19122 mutex_enter(SD_MUTEX(un)); 19123 if (un->un_f_ejecting == TRUE) { 19124 mutex_exit(SD_MUTEX(un)); 19125 return (EAGAIN); 19126 } 19127 mutex_exit(SD_MUTEX(un)); 19128 } 19129 19130 bzero(&cdb, sizeof (cdb)); 19131 bzero(&ucmd_buf, sizeof (ucmd_buf)); 19132 19133 cdb.scc_cmd = SCMD_DOORLOCK; 19134 cdb.cdb_opaque[4] = (uchar_t)flag; 19135 19136 ucmd_buf.uscsi_cdb = (char *)&cdb; 19137 ucmd_buf.uscsi_cdblen = CDB_GROUP0; 19138 ucmd_buf.uscsi_bufaddr = NULL; 19139 ucmd_buf.uscsi_buflen = 0; 19140 ucmd_buf.uscsi_rqbuf = (caddr_t)&sense_buf; 19141 ucmd_buf.uscsi_rqlen = sizeof (sense_buf); 19142 ucmd_buf.uscsi_flags = USCSI_RQENABLE | USCSI_SILENT; 19143 ucmd_buf.uscsi_timeout = 15; 19144 19145 SD_TRACE(SD_LOG_IO, un, 19146 "sd_send_scsi_DOORLOCK: returning sd_ssc_send\n"); 19147 19148 status = sd_ssc_send(ssc, &ucmd_buf, FKIOCTL, 19149 UIO_SYSSPACE, path_flag); 19150 19151 if (status == 0) 19152 sd_ssc_assessment(ssc, SD_FMT_STANDARD); 19153 19154 if ((status == EIO) && (ucmd_buf.uscsi_status == STATUS_CHECK) && 19155 (ucmd_buf.uscsi_rqstatus == STATUS_GOOD) && 19156 (scsi_sense_key((uint8_t *)&sense_buf) == KEY_ILLEGAL_REQUEST)) { 19157 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 19158 19159 /* fake success and skip subsequent doorlock commands */ 19160 un->un_f_doorlock_supported = FALSE; 19161 return (0); 19162 } 19163 19164 return (status); 19165 } 19166 19167 /* 19168 * Function: sd_send_scsi_READ_CAPACITY 19169 * 19170 * Description: This routine uses the scsi READ CAPACITY command to determine 19171 * the device capacity in number of blocks and the device native 19172 * block size. If this function returns a failure, then the 19173 * values in *capp and *lbap are undefined. If the capacity 19174 * returned is 0xffffffff then the lun is too large for a 19175 * normal READ CAPACITY command and the results of a 19176 * READ CAPACITY 16 will be used instead. 19177 * 19178 * Arguments: ssc - ssc contains ptr to soft state struct for the target 19179 * capp - ptr to unsigned 64-bit variable to receive the 19180 * capacity value from the command. 
19181 * lbap - ptr to unsigned 32-bit variable to receive the
19182 * block size value from the command
19183 * path_flag - SD_PATH_DIRECT to use the USCSI "direct" chain and
19184 * the normal command waitq, or SD_PATH_DIRECT_PRIORITY
19185 * to use the USCSI "direct" chain and bypass the normal
19186 * command waitq. SD_PATH_DIRECT_PRIORITY is used when this
19187 * command is issued as part of an error recovery action.
19188 *
19189 * Return Code: 0 - Success
19190 * EIO - IO error
19191 * EACCES - Reservation conflict detected
19192 * EAGAIN - Device is becoming ready
19193 * errno return code from sd_ssc_send()
19194 *
19195 * Context: Can sleep. Blocks until command completes.
19196 */
19197
19198 #define SD_CAPACITY_SIZE sizeof (struct scsi_capacity)
19199
19200 static int
19201 sd_send_scsi_READ_CAPACITY(sd_ssc_t *ssc, uint64_t *capp, uint32_t *lbap,
19202 int path_flag)
19203 {
19204 struct scsi_extended_sense sense_buf;
19205 struct uscsi_cmd ucmd_buf;
19206 union scsi_cdb cdb;
19207 uint32_t *capacity_buf;
19208 uint64_t capacity;
19209 uint32_t lbasize;
19210 int status;
19211 struct sd_lun *un;
19212
19213 ASSERT(ssc != NULL);
19214
19215 un = ssc->ssc_un;
19216 ASSERT(un != NULL);
19217 ASSERT(!mutex_owned(SD_MUTEX(un)));
19218 ASSERT(capp != NULL);
19219 ASSERT(lbap != NULL);
19220
19221 SD_TRACE(SD_LOG_IO, un,
19222 "sd_send_scsi_READ_CAPACITY: entry: un:0x%p\n", un);
19223
19224 /*
19225 * First send a READ_CAPACITY command to the target.
19226 * (This command is mandatory under SCSI-2.)
19227 *
19228 * Set up the CDB for the READ_CAPACITY command. The Partial
19229 * Medium Indicator bit is cleared. The address field must be
19230 * zero if the PMI bit is zero.
19231 */
19232 bzero(&cdb, sizeof (cdb));
19233 bzero(&ucmd_buf, sizeof (ucmd_buf));
19234
19235 capacity_buf = kmem_zalloc(SD_CAPACITY_SIZE, KM_SLEEP);
19236
19237 cdb.scc_cmd = SCMD_READ_CAPACITY;
19238
19239 ucmd_buf.uscsi_cdb = (char *)&cdb;
19240 ucmd_buf.uscsi_cdblen = CDB_GROUP1;
19241 ucmd_buf.uscsi_bufaddr = (caddr_t)capacity_buf;
19242 ucmd_buf.uscsi_buflen = SD_CAPACITY_SIZE;
19243 ucmd_buf.uscsi_rqbuf = (caddr_t)&sense_buf;
19244 ucmd_buf.uscsi_rqlen = sizeof (sense_buf);
19245 ucmd_buf.uscsi_flags = USCSI_RQENABLE | USCSI_READ | USCSI_SILENT;
19246 ucmd_buf.uscsi_timeout = 60;
19247
19248 status = sd_ssc_send(ssc, &ucmd_buf, FKIOCTL,
19249 UIO_SYSSPACE, path_flag);
19250
19251 switch (status) {
19252 case 0:
19253 /* Return failure if we did not get valid capacity data. */
19254 if (ucmd_buf.uscsi_resid != 0) {
19255 sd_ssc_set_info(ssc, SSC_FLAGS_INVALID_DATA,
19256 "sd_send_scsi_READ_CAPACITY received "
19257 "invalid capacity data");
19258 kmem_free(capacity_buf, SD_CAPACITY_SIZE);
19259 return (EIO);
19260 }
19261
19262 /*
19263 * Read capacity and block size from the READ CAPACITY 10 data.
19264 * This data may be adjusted later due to device-specific
19265 * issues.
19266 *
19267 * According to the SCSI spec, the READ CAPACITY 10
19268 * command returns the following:
19269 *
19270 * bytes 0-3: Maximum logical block address available.
19271 * (MSB in byte:0 & LSB in byte:3)
19272 *
19273 * bytes 4-7: Block length in bytes
19274 * (MSB in byte:4 & LSB in byte:7)
19275 *
19276 */
19277 capacity = BE_32(capacity_buf[0]);
19278 lbasize = BE_32(capacity_buf[1]);
19279
19280 /*
19281 * Done with capacity_buf
19282 */
19283 kmem_free(capacity_buf, SD_CAPACITY_SIZE);
19284
19285 /*
19286 * If the reported capacity is set to all 0xf's, then
19287 * this disk is too large and requires SBC-2 commands.
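 * (With 512-byte blocks this happens at the 2 TB boundary, i.e.
 * 2^32 blocks: a disk that large or larger cannot represent its
 * highest LBA in the 4-byte READ CAPACITY 10 field, so it reports
 * 0xffffffff instead.)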
19288 * Reissue the request using READ CAPACITY 16.
19289 */
19290 if (capacity == 0xffffffff) {
19291 sd_ssc_assessment(ssc, SD_FMT_IGNORE);
19292 status = sd_send_scsi_READ_CAPACITY_16(ssc, &capacity,
19293 &lbasize, path_flag);
19294 if (status != 0) {
19295 return (status);
19296 }
19297 }
19298 break; /* Success! */
19299 case EIO:
19300 switch (ucmd_buf.uscsi_status) {
19301 case STATUS_RESERVATION_CONFLICT:
19302 status = EACCES;
19303 break;
19304 case STATUS_CHECK:
19305 /*
19306 * Check condition; look for ASC/ASCQ of 0x04/0x01
19307 * (LOGICAL UNIT IS IN PROCESS OF BECOMING READY)
19308 */
19309 if ((ucmd_buf.uscsi_rqstatus == STATUS_GOOD) &&
19310 (scsi_sense_asc((uint8_t *)&sense_buf) == 0x04) &&
19311 (scsi_sense_ascq((uint8_t *)&sense_buf) == 0x01)) {
19312 kmem_free(capacity_buf, SD_CAPACITY_SIZE);
19313 return (EAGAIN);
19314 }
19315 break;
19316 default:
19317 break;
19318 }
19319 /* FALLTHRU */
19320 default:
19321 kmem_free(capacity_buf, SD_CAPACITY_SIZE);
19322 return (status);
19323 }
19324
19325 /*
19326 * Some ATAPI CD-ROM drives report inaccurate LBA size values
19327 * (2352 and 0 are common), so for these devices always force the
19328 * value to 2048 as required by the ATAPI specs.
19329 */
19330 if ((un->un_f_cfg_is_atapi == TRUE) && (ISCD(un))) {
19331 lbasize = 2048;
19332 }
19333
19334 /*
19335 * Get the maximum LBA value from the READ CAPACITY data.
19336 * Here we assume that the Partial Medium Indicator (PMI) bit
19337 * was cleared when issuing the command. This means that the LBA
19338 * returned from the device is the LBA of the last logical block
19339 * on the logical unit. The actual logical block count will be
19340 * this value plus one.
19341 *
19342 * Currently the capacity is saved in terms of un->un_sys_blocksize,
19343 * so scale the capacity value to reflect this.
19344 */
19345 capacity = (capacity + 1) * (lbasize / un->un_sys_blocksize);
19346
19347 /*
19348 * Copy the values from the READ CAPACITY command into the space
19349 * provided by the caller.
19350 */
19351 *capp = capacity;
19352 *lbap = lbasize;
19353
19354 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_READ_CAPACITY: "
19355 "capacity:0x%llx lbasize:0x%x\n", capacity, lbasize);
19356
19357 /*
19358 * Both the lbasize and capacity from the device must be nonzero,
19359 * otherwise we assume that the values are not valid and return
19360 * failure to the caller. (4203735)
19361 */
19362 if ((capacity == 0) || (lbasize == 0)) {
19363 sd_ssc_set_info(ssc, SSC_FLAGS_INVALID_DATA,
19364 "sd_send_scsi_READ_CAPACITY received invalid value "
19365 "capacity %llu lbasize %d", capacity, lbasize);
19366 return (EIO);
19367 }
19368 sd_ssc_assessment(ssc, SD_FMT_STANDARD);
19369 return (0);
19370 }
19371
19372 /*
19373 * Function: sd_send_scsi_READ_CAPACITY_16
19374 *
19375 * Description: This routine uses the scsi READ CAPACITY 16 command to
19376 * determine the device capacity in number of blocks and the
19377 * device native block size. If this function returns a failure,
19378 * then the values in *capp and *lbap are undefined.
19379 * This routine should only be called by
19380 * sd_send_scsi_READ_CAPACITY, which will apply any
19381 * device-specific adjustments to capacity and lbasize.
19382 *
19383 * Arguments: ssc - ssc contains ptr to soft state struct for the target
19384 * capp - ptr to unsigned 64-bit variable to receive the
19385 * capacity value from the command.
19386 * lbap - ptr to unsigned 32-bit variable to receive the
19387 * block size value from the command
19388 * path_flag - SD_PATH_DIRECT to use the USCSI "direct" chain and
19389 * the normal command waitq, or SD_PATH_DIRECT_PRIORITY
19390 * to use the USCSI "direct" chain and bypass the normal
19391 * command waitq. SD_PATH_DIRECT_PRIORITY is used when
19392 * this command is issued as part of an error recovery
19393 * action.
19394 *
19395 * Return Code: 0 - Success
19396 * EIO - IO error
19397 * EACCES - Reservation conflict detected
19398 * EAGAIN - Device is becoming ready
19399 * errno return code from sd_ssc_send()
19400 *
19401 * Context: Can sleep. Blocks until command completes.
19402 */
19403
19404 #define SD_CAPACITY_16_SIZE sizeof (struct scsi_capacity_16)
19405
19406 static int
19407 sd_send_scsi_READ_CAPACITY_16(sd_ssc_t *ssc, uint64_t *capp,
19408 uint32_t *lbap, int path_flag)
19409 {
19410 struct scsi_extended_sense sense_buf;
19411 struct uscsi_cmd ucmd_buf;
19412 union scsi_cdb cdb;
19413 uint64_t *capacity16_buf;
19414 uint64_t capacity;
19415 uint32_t lbasize;
19416 int status;
19417 struct sd_lun *un;
19418
19419 ASSERT(ssc != NULL);
19420
19421 un = ssc->ssc_un;
19422 ASSERT(un != NULL);
19423 ASSERT(!mutex_owned(SD_MUTEX(un)));
19424 ASSERT(capp != NULL);
19425 ASSERT(lbap != NULL);
19426
19427 SD_TRACE(SD_LOG_IO, un,
19428 "sd_send_scsi_READ_CAPACITY_16: entry: un:0x%p\n", un);
19429
19430 /*
19431 * First send a READ_CAPACITY_16 command to the target.
19432 *
19433 * Set up the CDB for the READ_CAPACITY_16 command. The Partial
19434 * Medium Indicator bit is cleared. The address field must be
19435 * zero if the PMI bit is zero.
19436 */
19437 bzero(&cdb, sizeof (cdb));
19438 bzero(&ucmd_buf, sizeof (ucmd_buf));
19439
19440 capacity16_buf = kmem_zalloc(SD_CAPACITY_16_SIZE, KM_SLEEP);
19441
19442 ucmd_buf.uscsi_cdb = (char *)&cdb;
19443 ucmd_buf.uscsi_cdblen = CDB_GROUP4;
19444 ucmd_buf.uscsi_bufaddr = (caddr_t)capacity16_buf;
19445 ucmd_buf.uscsi_buflen = SD_CAPACITY_16_SIZE;
19446 ucmd_buf.uscsi_rqbuf = (caddr_t)&sense_buf;
19447 ucmd_buf.uscsi_rqlen = sizeof (sense_buf);
19448 ucmd_buf.uscsi_flags = USCSI_RQENABLE | USCSI_READ | USCSI_SILENT;
19449 ucmd_buf.uscsi_timeout = 60;
19450
19451 /*
19452 * Read Capacity (16) is a Service Action In command. One
19453 * command byte (0x9E) is overloaded for multiple operations,
19454 * with the second CDB byte specifying the desired operation.
19455 */
19456 cdb.scc_cmd = SCMD_SVC_ACTION_IN_G4;
19457 cdb.cdb_opaque[1] = SSVC_ACTION_READ_CAPACITY_G4;
19458
19459 /*
19460 * Fill in the allocation length field
19461 */
19462 FORMG4COUNT(&cdb, ucmd_buf.uscsi_buflen);
19463
19464 status = sd_ssc_send(ssc, &ucmd_buf, FKIOCTL,
19465 UIO_SYSSPACE, path_flag);
19466
19467 switch (status) {
19468 case 0:
19469 /* Return failure if we did not get valid capacity data. */
19470 if (ucmd_buf.uscsi_resid > 20) {
19471 sd_ssc_set_info(ssc, SSC_FLAGS_INVALID_DATA,
19472 "sd_send_scsi_READ_CAPACITY_16 received "
19473 "invalid capacity data");
19474 kmem_free(capacity16_buf, SD_CAPACITY_16_SIZE);
19475 return (EIO);
19476 }
19477
19478 /*
19479 * Read capacity and block size from the READ CAPACITY 16 data.
19480 * This data may be adjusted later due to device-specific
19481 * issues.
19482 *
19483 * According to the SCSI spec, the READ CAPACITY 16
19484 * command returns the following:
19485 *
19486 * bytes 0-7: Maximum logical block address available.
19487 * (MSB in byte:0 & LSB in byte:7)
19488 *
19489 * bytes 8-11: Block length in bytes
19490 * (MSB in byte:8 & LSB in byte:11)
19491 *
19492 */
19493 capacity = BE_64(capacity16_buf[0]);
19494 lbasize = BE_32(*(uint32_t *)&capacity16_buf[1]);
19495
19496 /*
19497 * Done with capacity16_buf
19498 */
19499 kmem_free(capacity16_buf, SD_CAPACITY_16_SIZE);
19500
19501 /*
19502 * If the reported capacity is set to all 0xf's, then
19503 * this disk is too large. This could only happen with
19504 * a device that supports LBAs larger than 64 bits, which
19505 * are not defined by any current T10 standards.
19506 */
19507 if (capacity == 0xffffffffffffffff) {
19508 sd_ssc_set_info(ssc, SSC_FLAGS_INVALID_DATA,
19509 "disk is too large");
19510 return (EIO);
19511 }
19512 break; /* Success! */
19513 case EIO:
19514 switch (ucmd_buf.uscsi_status) {
19515 case STATUS_RESERVATION_CONFLICT:
19516 status = EACCES;
19517 break;
19518 case STATUS_CHECK:
19519 /*
19520 * Check condition; look for ASC/ASCQ of 0x04/0x01
19521 * (LOGICAL UNIT IS IN PROCESS OF BECOMING READY)
19522 */
19523 if ((ucmd_buf.uscsi_rqstatus == STATUS_GOOD) &&
19524 (scsi_sense_asc((uint8_t *)&sense_buf) == 0x04) &&
19525 (scsi_sense_ascq((uint8_t *)&sense_buf) == 0x01)) {
19526 kmem_free(capacity16_buf, SD_CAPACITY_16_SIZE);
19527 return (EAGAIN);
19528 }
19529 break;
19530 default:
19531 break;
19532 }
19533 /* FALLTHRU */
19534 default:
19535 kmem_free(capacity16_buf, SD_CAPACITY_16_SIZE);
19536 return (status);
19537 }
19538
19539 *capp = capacity;
19540 *lbap = lbasize;
19541
19542 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_READ_CAPACITY_16: "
19543 "capacity:0x%llx lbasize:0x%x\n", capacity, lbasize);
19544
19545 return (0);
19546 }
19547
19548
19549 /*
19550 * Function: sd_send_scsi_START_STOP_UNIT
19551 *
19552 * Description: Issue a scsi START STOP UNIT command to the target.
19553 *
19554 * Arguments: ssc - ssc contains pointer to driver soft state (unit)
19555 * structure for this target.
19556 * flag - SD_TARGET_START
19557 * SD_TARGET_STOP
19558 * SD_TARGET_EJECT
19559 * path_flag - SD_PATH_DIRECT to use the USCSI "direct" chain and
19560 * the normal command waitq, or SD_PATH_DIRECT_PRIORITY
19561 * to use the USCSI "direct" chain and bypass the normal
19562 * command waitq. SD_PATH_DIRECT_PRIORITY is used when this
19563 * command is issued as part of an error recovery action.
19564 *
19565 * Return Code: 0 - Success
19566 * EIO - IO error
19567 * EACCES - Reservation conflict detected
19568 * ENXIO - Not Ready, medium not present
19569 * errno return code from sd_ssc_send()
19570 *
19571 * Context: Can sleep.
19572 */
19573
19574 static int
19575 sd_send_scsi_START_STOP_UNIT(sd_ssc_t *ssc, int flag, int path_flag)
19576 {
19577 struct scsi_extended_sense sense_buf;
19578 union scsi_cdb cdb;
19579 struct uscsi_cmd ucmd_buf;
19580 int status;
19581 struct sd_lun *un;
19582
19583 ASSERT(ssc != NULL);
19584 un = ssc->ssc_un;
19585 ASSERT(un != NULL);
19586 ASSERT(!mutex_owned(SD_MUTEX(un)));
19587
19588 SD_TRACE(SD_LOG_IO, un,
19589 "sd_send_scsi_START_STOP_UNIT: entry: un:0x%p\n", un);
19590
19591 if (un->un_f_check_start_stop &&
19592 ((flag == SD_TARGET_START) || (flag == SD_TARGET_STOP)) &&
19593 (un->un_f_start_stop_supported != TRUE)) {
19594 return (0);
19595 }
19596
19597 /*
19598 * If we are performing an eject operation and
19599 * we receive any command other than SD_TARGET_EJECT,
19600 * we should immediately return.
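 *
 * Note that the flag value is written directly into byte 4 of the
 * CDB below; per the SCSI spec that byte carries the START (bit 0)
 * and LOEJ (bit 1) bits, so the SD_TARGET_* values are expected to
 * match those bit encodings (e.g. an eject is LOEJ set with START
 * clear).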
19601 */ 19602 if (flag != SD_TARGET_EJECT) { 19603 mutex_enter(SD_MUTEX(un)); 19604 if (un->un_f_ejecting == TRUE) { 19605 mutex_exit(SD_MUTEX(un)); 19606 return (EAGAIN); 19607 } 19608 mutex_exit(SD_MUTEX(un)); 19609 } 19610 19611 bzero(&cdb, sizeof (cdb)); 19612 bzero(&ucmd_buf, sizeof (ucmd_buf)); 19613 bzero(&sense_buf, sizeof (struct scsi_extended_sense)); 19614 19615 cdb.scc_cmd = SCMD_START_STOP; 19616 cdb.cdb_opaque[4] = (uchar_t)flag; 19617 19618 ucmd_buf.uscsi_cdb = (char *)&cdb; 19619 ucmd_buf.uscsi_cdblen = CDB_GROUP0; 19620 ucmd_buf.uscsi_bufaddr = NULL; 19621 ucmd_buf.uscsi_buflen = 0; 19622 ucmd_buf.uscsi_rqbuf = (caddr_t)&sense_buf; 19623 ucmd_buf.uscsi_rqlen = sizeof (struct scsi_extended_sense); 19624 ucmd_buf.uscsi_flags = USCSI_RQENABLE | USCSI_SILENT; 19625 ucmd_buf.uscsi_timeout = 200; 19626 19627 status = sd_ssc_send(ssc, &ucmd_buf, FKIOCTL, 19628 UIO_SYSSPACE, path_flag); 19629 19630 switch (status) { 19631 case 0: 19632 sd_ssc_assessment(ssc, SD_FMT_STANDARD); 19633 break; /* Success! */ 19634 case EIO: 19635 switch (ucmd_buf.uscsi_status) { 19636 case STATUS_RESERVATION_CONFLICT: 19637 status = EACCES; 19638 break; 19639 case STATUS_CHECK: 19640 if (ucmd_buf.uscsi_rqstatus == STATUS_GOOD) { 19641 switch (scsi_sense_key( 19642 (uint8_t *)&sense_buf)) { 19643 case KEY_ILLEGAL_REQUEST: 19644 status = ENOTSUP; 19645 break; 19646 case KEY_NOT_READY: 19647 if (scsi_sense_asc( 19648 (uint8_t *)&sense_buf) 19649 == 0x3A) { 19650 status = ENXIO; 19651 } 19652 break; 19653 default: 19654 break; 19655 } 19656 } 19657 break; 19658 default: 19659 break; 19660 } 19661 break; 19662 default: 19663 break; 19664 } 19665 19666 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_START_STOP_UNIT: exit\n"); 19667 19668 return (status); 19669 } 19670 19671 19672 /* 19673 * Function: sd_start_stop_unit_callback 19674 * 19675 * Description: timeout(9F) callback to begin recovery process for a 19676 * device that has spun down. 19677 * 19678 * Arguments: arg - pointer to associated softstate struct. 19679 * 19680 * Context: Executes in a timeout(9F) thread context 19681 */ 19682 19683 static void 19684 sd_start_stop_unit_callback(void *arg) 19685 { 19686 struct sd_lun *un = arg; 19687 ASSERT(un != NULL); 19688 ASSERT(!mutex_owned(SD_MUTEX(un))); 19689 19690 SD_TRACE(SD_LOG_IO, un, "sd_start_stop_unit_callback: entry\n"); 19691 19692 (void) taskq_dispatch(sd_tq, sd_start_stop_unit_task, un, KM_NOSLEEP); 19693 } 19694 19695 19696 /* 19697 * Function: sd_start_stop_unit_task 19698 * 19699 * Description: Recovery procedure when a drive is spun down. 19700 * 19701 * Arguments: arg - pointer to associated softstate struct. 19702 * 19703 * Context: Executes in a taskq() thread context 19704 */ 19705 19706 static void 19707 sd_start_stop_unit_task(void *arg) 19708 { 19709 struct sd_lun *un = arg; 19710 sd_ssc_t *ssc; 19711 int rval; 19712 19713 ASSERT(un != NULL); 19714 ASSERT(!mutex_owned(SD_MUTEX(un))); 19715 19716 SD_TRACE(SD_LOG_IO, un, "sd_start_stop_unit_task: entry\n"); 19717 19718 /* 19719 * Some unformatted drives report not ready error, no need to 19720 * restart if format has been initiated. 19721 */ 19722 mutex_enter(SD_MUTEX(un)); 19723 if (un->un_f_format_in_progress == TRUE) { 19724 mutex_exit(SD_MUTEX(un)); 19725 return; 19726 } 19727 mutex_exit(SD_MUTEX(un)); 19728 19729 /* 19730 * When a START STOP command is issued from here, it is part of a 19731 * failure recovery operation and must be issued before any other 19732 * commands, including any pending retries. 
Thus it must be sent 19733 * using SD_PATH_DIRECT_PRIORITY. It doesn't matter if the spin up 19734 * succeeds or not, we will start I/O after the attempt. 19735 */ 19736 ssc = sd_ssc_init(un); 19737 rval = sd_send_scsi_START_STOP_UNIT(ssc, SD_TARGET_START, 19738 SD_PATH_DIRECT_PRIORITY); 19739 if (rval != 0) 19740 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 19741 sd_ssc_fini(ssc); 19742 /* 19743 * The above call blocks until the START_STOP_UNIT command completes. 19744 * Now that it has completed, we must re-try the original IO that 19745 * received the NOT READY condition in the first place. There are 19746 * three possible conditions here: 19747 * 19748 * (1) The original IO is on un_retry_bp. 19749 * (2) The original IO is on the regular wait queue, and un_retry_bp 19750 * is NULL. 19751 * (3) The original IO is on the regular wait queue, and un_retry_bp 19752 * points to some other, unrelated bp. 19753 * 19754 * For each case, we must call sd_start_cmds() with un_retry_bp 19755 * as the argument. If un_retry_bp is NULL, this will initiate 19756 * processing of the regular wait queue. If un_retry_bp is not NULL, 19757 * then this will process the bp on un_retry_bp. That may or may not 19758 * be the original IO, but that does not matter: the important thing 19759 * is to keep the IO processing going at this point. 19760 * 19761 * Note: This is a very specific error recovery sequence associated 19762 * with a drive that is not spun up. We attempt a START_STOP_UNIT and 19763 * serialize the I/O with completion of the spin-up. 19764 */ 19765 mutex_enter(SD_MUTEX(un)); 19766 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 19767 "sd_start_stop_unit_task: un:0x%p starting bp:0x%p\n", 19768 un, un->un_retry_bp); 19769 un->un_startstop_timeid = NULL; /* Timeout is no longer pending */ 19770 sd_start_cmds(un, un->un_retry_bp); 19771 mutex_exit(SD_MUTEX(un)); 19772 19773 SD_TRACE(SD_LOG_IO, un, "sd_start_stop_unit_task: exit\n"); 19774 } 19775 19776 19777 /* 19778 * Function: sd_send_scsi_INQUIRY 19779 * 19780 * Description: Issue the scsi INQUIRY command. 19781 * 19782 * Arguments: ssc - ssc contains pointer to driver soft state (unit) 19783 * structure for this target. 19784 * bufaddr 19785 * buflen 19786 * evpd 19787 * page_code 19788 * page_length 19789 * 19790 * Return Code: 0 - Success 19791 * errno return code from sd_ssc_send() 19792 * 19793 * Context: Can sleep. Does not return until command is completed. 
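 *
 *		Example (a sketch of vital product data usage; the
 *		buffer size is the caller's choice, up to the 255-byte
 *		limit of the group 0 CDB allocation length):
 *
 *		    uchar_t	inq80[0xff];
 *		    size_t	resid;
 *		    int		rval;
 *
 *		    rval = sd_send_scsi_INQUIRY(ssc, inq80,
 *			sizeof (inq80), 0x01, 0x80, &resid);
 *
 *		This sends an INQUIRY with EVPD=1 requesting the Unit
 *		Serial Number VPD page (0x80); on success the page data
 *		is in inq80 and resid reflects any shortfall.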
19794 */
19795
19796 static int
19797 sd_send_scsi_INQUIRY(sd_ssc_t *ssc, uchar_t *bufaddr, size_t buflen,
19798 uchar_t evpd, uchar_t page_code, size_t *residp)
19799 {
19800 union scsi_cdb cdb;
19801 struct uscsi_cmd ucmd_buf;
19802 int status;
19803 struct sd_lun *un;
19804
19805 ASSERT(ssc != NULL);
19806 un = ssc->ssc_un;
19807 ASSERT(un != NULL);
19808 ASSERT(!mutex_owned(SD_MUTEX(un)));
19809 ASSERT(bufaddr != NULL);
19810
19811 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_INQUIRY: entry: un:0x%p\n", un);
19812
19813 bzero(&cdb, sizeof (cdb));
19814 bzero(&ucmd_buf, sizeof (ucmd_buf));
19815 bzero(bufaddr, buflen);
19816
19817 cdb.scc_cmd = SCMD_INQUIRY;
19818 cdb.cdb_opaque[1] = evpd;
19819 cdb.cdb_opaque[2] = page_code;
19820 FORMG0COUNT(&cdb, buflen);
19821
19822 ucmd_buf.uscsi_cdb = (char *)&cdb;
19823 ucmd_buf.uscsi_cdblen = CDB_GROUP0;
19824 ucmd_buf.uscsi_bufaddr = (caddr_t)bufaddr;
19825 ucmd_buf.uscsi_buflen = buflen;
19826 ucmd_buf.uscsi_rqbuf = NULL;
19827 ucmd_buf.uscsi_rqlen = 0;
19828 ucmd_buf.uscsi_flags = USCSI_READ | USCSI_SILENT;
19829 ucmd_buf.uscsi_timeout = 200; /* Excessive legacy value */
19830
19831 status = sd_ssc_send(ssc, &ucmd_buf, FKIOCTL,
19832 UIO_SYSSPACE, SD_PATH_DIRECT);
19833
19834 /*
19835 * Only handle status == 0 here; the upper-level caller will make
19836 * a different assessment based on the context.
19837 */
19838 if (status == 0)
19839 sd_ssc_assessment(ssc, SD_FMT_STANDARD);
19840
19841 if ((status == 0) && (residp != NULL)) {
19842 *residp = ucmd_buf.uscsi_resid;
19843 }
19844
19845 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_INQUIRY: exit\n");
19846
19847 return (status);
19848 }
19849
19850
19851 /*
19852 * Function: sd_send_scsi_TEST_UNIT_READY
19853 *
19854 * Description: Issue the scsi TEST UNIT READY command.
19855 * This routine can be told to set the flag USCSI_DIAGNOSE to
19856 * prevent retrying failed commands. Use this when the intent
19857 * is either to check for device readiness, to clear a Unit
19858 * Attention, or to clear any outstanding sense data.
19859 * However, under specific conditions the expected behavior
19860 * is for retries to bring a device ready, so use the flag
19861 * with caution.
19862 *
19863 * Arguments: ssc - ssc contains pointer to driver soft state (unit)
19864 * structure for this target.
19865 * flag: SD_CHECK_FOR_MEDIA: return ENXIO if no media present
19866 * SD_DONT_RETRY_TUR: include uscsi flag USCSI_DIAGNOSE.
19867 * 0: don't check for media present, do retries on cmd.
19868 *
19869 * Return Code: 0 - Success
19870 * EIO - IO error
19871 * EACCES - Reservation conflict detected
19872 * ENXIO - Not Ready, medium not present
19873 * errno return code from sd_ssc_send()
19874 *
19875 * Context: Can sleep. Does not return until command is completed.
19876 */
19877
19878 static int
19879 sd_send_scsi_TEST_UNIT_READY(sd_ssc_t *ssc, int flag)
19880 {
19881 struct scsi_extended_sense sense_buf;
19882 union scsi_cdb cdb;
19883 struct uscsi_cmd ucmd_buf;
19884 int status;
19885 struct sd_lun *un;
19886
19887 ASSERT(ssc != NULL);
19888 un = ssc->ssc_un;
19889 ASSERT(un != NULL);
19890 ASSERT(!mutex_owned(SD_MUTEX(un)));
19891
19892 SD_TRACE(SD_LOG_IO, un,
19893 "sd_send_scsi_TEST_UNIT_READY: entry: un:0x%p\n", un);
19894
19895 /*
19896 * Some Seagate elite1 TQ devices hang with disconnect/reconnect
19897 * timeouts when they receive a TUR and the queue is not empty.
Check 19898 * the configuration flag set during attach (indicating the drive has 19899 * this firmware bug) and un_ncmds_in_transport before issuing the 19900 * TUR. If there are 19901 * pending commands return success, this is a bit arbitrary but is ok 19902 * for non-removables (i.e. the eliteI disks) and non-clustering 19903 * configurations. 19904 */ 19905 if (un->un_f_cfg_tur_check == TRUE) { 19906 mutex_enter(SD_MUTEX(un)); 19907 if (un->un_ncmds_in_transport != 0) { 19908 mutex_exit(SD_MUTEX(un)); 19909 return (0); 19910 } 19911 mutex_exit(SD_MUTEX(un)); 19912 } 19913 19914 bzero(&cdb, sizeof (cdb)); 19915 bzero(&ucmd_buf, sizeof (ucmd_buf)); 19916 bzero(&sense_buf, sizeof (struct scsi_extended_sense)); 19917 19918 cdb.scc_cmd = SCMD_TEST_UNIT_READY; 19919 19920 ucmd_buf.uscsi_cdb = (char *)&cdb; 19921 ucmd_buf.uscsi_cdblen = CDB_GROUP0; 19922 ucmd_buf.uscsi_bufaddr = NULL; 19923 ucmd_buf.uscsi_buflen = 0; 19924 ucmd_buf.uscsi_rqbuf = (caddr_t)&sense_buf; 19925 ucmd_buf.uscsi_rqlen = sizeof (struct scsi_extended_sense); 19926 ucmd_buf.uscsi_flags = USCSI_RQENABLE | USCSI_SILENT; 19927 19928 /* Use flag USCSI_DIAGNOSE to prevent retries if it fails. */ 19929 if ((flag & SD_DONT_RETRY_TUR) != 0) { 19930 ucmd_buf.uscsi_flags |= USCSI_DIAGNOSE; 19931 } 19932 ucmd_buf.uscsi_timeout = 60; 19933 19934 status = sd_ssc_send(ssc, &ucmd_buf, FKIOCTL, 19935 UIO_SYSSPACE, ((flag & SD_BYPASS_PM) ? SD_PATH_DIRECT : 19936 SD_PATH_STANDARD)); 19937 19938 switch (status) { 19939 case 0: 19940 sd_ssc_assessment(ssc, SD_FMT_STANDARD); 19941 break; /* Success! */ 19942 case EIO: 19943 switch (ucmd_buf.uscsi_status) { 19944 case STATUS_RESERVATION_CONFLICT: 19945 status = EACCES; 19946 break; 19947 case STATUS_CHECK: 19948 if ((flag & SD_CHECK_FOR_MEDIA) == 0) { 19949 break; 19950 } 19951 if ((ucmd_buf.uscsi_rqstatus == STATUS_GOOD) && 19952 (scsi_sense_key((uint8_t *)&sense_buf) == 19953 KEY_NOT_READY) && 19954 (scsi_sense_asc((uint8_t *)&sense_buf) == 0x3A)) { 19955 status = ENXIO; 19956 } 19957 break; 19958 default: 19959 break; 19960 } 19961 break; 19962 default: 19963 break; 19964 } 19965 19966 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_TEST_UNIT_READY: exit\n"); 19967 19968 return (status); 19969 } 19970 19971 /* 19972 * Function: sd_send_scsi_PERSISTENT_RESERVE_IN 19973 * 19974 * Description: Issue the scsi PERSISTENT RESERVE IN command. 19975 * 19976 * Arguments: ssc - ssc contains pointer to driver soft state (unit) 19977 * structure for this target. 19978 * 19979 * Return Code: 0 - Success 19980 * EACCES 19981 * ENOTSUP 19982 * errno return code from sd_ssc_send() 19983 * 19984 * Context: Can sleep. Does not return until command is completed. 
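 *
 *		Note: usr_cmd is written directly into byte 1 of the CDB
 *		as the PERSISTENT RESERVE IN service action, so
 *		SD_READ_KEYS and SD_READ_RESV are expected to match the
 *		SPC service action codes (READ KEYS = 0x00,
 *		READ RESERVATION = 0x01).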

/*
 * Function: sd_send_scsi_PERSISTENT_RESERVE_IN
 *
 * Description: Issue the scsi PERSISTENT RESERVE IN command.
 *
 * Arguments: ssc - ssc contains pointer to driver soft state (unit)
 *		    structure for this target.
 *	      usr_cmd - SD_READ_KEYS or SD_READ_RESV, selecting the
 *		    service action.
 *	      data_len - length of the data buffer (0 if data_bufp is NULL).
 *	      data_bufp - buffer for the parameter data returned by the
 *		    device; if NULL, a default buffer is allocated.
 *
 * Return Code: 0   - Success
 *		EACCES
 *		ENOTSUP
 *		errno return code from sd_ssc_send()
 *
 * Context: Can sleep. Does not return until command is completed.
 */

static int
sd_send_scsi_PERSISTENT_RESERVE_IN(sd_ssc_t *ssc, uchar_t usr_cmd,
	uint16_t data_len, uchar_t *data_bufp)
{
	struct scsi_extended_sense	sense_buf;
	union scsi_cdb		cdb;
	struct uscsi_cmd	ucmd_buf;
	int			status;
	int			no_caller_buf = FALSE;
	struct sd_lun		*un;

	ASSERT(ssc != NULL);
	un = ssc->ssc_un;
	ASSERT(un != NULL);
	ASSERT(!mutex_owned(SD_MUTEX(un)));
	ASSERT((usr_cmd == SD_READ_KEYS) || (usr_cmd == SD_READ_RESV));

	SD_TRACE(SD_LOG_IO, un,
	    "sd_send_scsi_PERSISTENT_RESERVE_IN: entry: un:0x%p\n", un);

	bzero(&cdb, sizeof (cdb));
	bzero(&ucmd_buf, sizeof (ucmd_buf));
	bzero(&sense_buf, sizeof (struct scsi_extended_sense));
	if (data_bufp == NULL) {
		/* Allocate a default buf if the caller did not give one */
		ASSERT(data_len == 0);
		data_len  = MHIOC_RESV_KEY_SIZE;
		data_bufp = kmem_zalloc(MHIOC_RESV_KEY_SIZE, KM_SLEEP);
		no_caller_buf = TRUE;
	}

	cdb.scc_cmd = SCMD_PERSISTENT_RESERVE_IN;
	cdb.cdb_opaque[1] = usr_cmd;
	FORMG1COUNT(&cdb, data_len);

	ucmd_buf.uscsi_cdb	= (char *)&cdb;
	ucmd_buf.uscsi_cdblen	= CDB_GROUP1;
	ucmd_buf.uscsi_bufaddr	= (caddr_t)data_bufp;
	ucmd_buf.uscsi_buflen	= data_len;
	ucmd_buf.uscsi_rqbuf	= (caddr_t)&sense_buf;
	ucmd_buf.uscsi_rqlen	= sizeof (struct scsi_extended_sense);
	ucmd_buf.uscsi_flags	= USCSI_RQENABLE | USCSI_READ | USCSI_SILENT;
	ucmd_buf.uscsi_timeout	= 60;

	status = sd_ssc_send(ssc, &ucmd_buf, FKIOCTL,
	    UIO_SYSSPACE, SD_PATH_STANDARD);

	switch (status) {
	case 0:
		sd_ssc_assessment(ssc, SD_FMT_STANDARD);

		break;	/* Success! */
	case EIO:
		switch (ucmd_buf.uscsi_status) {
		case STATUS_RESERVATION_CONFLICT:
			status = EACCES;
			break;
		case STATUS_CHECK:
			if ((ucmd_buf.uscsi_rqstatus == STATUS_GOOD) &&
			    (scsi_sense_key((uint8_t *)&sense_buf) ==
			    KEY_ILLEGAL_REQUEST)) {
				status = ENOTSUP;
			}
			break;
		default:
			break;
		}
		break;
	default:
		break;
	}

	SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_PERSISTENT_RESERVE_IN: exit\n");

	if (no_caller_buf == TRUE) {
		kmem_free(data_bufp, data_len);
	}

	return (status);
}
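
/*
 * Illustrative sketch only (not part of the original driver; compiled out
 * behind the hypothetical SD_EXAMPLE_SKETCHES guard): passing a NULL
 * data_bufp makes sd_send_scsi_PERSISTENT_RESERVE_IN() allocate a minimal
 * buffer itself, which is enough to probe whether the target supports
 * SCSI-3 persistent reservations at all; ENOTSUP means it does not.
 */
#ifdef	SD_EXAMPLE_SKETCHES
static int
sd_example_prin_supported(sd_ssc_t *ssc)
{
	int	rval;

	rval = sd_send_scsi_PERSISTENT_RESERVE_IN(ssc, SD_READ_KEYS, 0, NULL);
	return ((rval == ENOTSUP) ? 0 : 1);
}
#endif	/* SD_EXAMPLE_SKETCHES */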

/*
 * Function: sd_send_scsi_PERSISTENT_RESERVE_OUT
 *
 * Description: This routine is the driver entry point for handling
 *		multi-host persistent reservation requests (MHIOCGRP_REGISTER,
 *		MHIOCGRP_RESERVE, MHIOCGRP_PREEMPTANDABORT,
 *		MHIOCGRP_REGISTERANDIGNOREKEY) by sending the SCSI-3 PROUT
 *		commands to the device.
 *
 * Arguments: ssc - ssc contains un - pointer to soft state struct
 *		    for the target.
 *	      usr_cmd - SCSI-3 reservation facility command (one of
 *		    SD_SCSI3_REGISTER, SD_SCSI3_RESERVE, SD_SCSI3_RELEASE,
 *		    SD_SCSI3_PREEMPTANDABORT, SD_SCSI3_REGISTERANDIGNOREKEY)
 *	      usr_bufp - user-provided pointer to a register, reserve
 *		    descriptor, or preempt-and-abort structure
 *		    (mhioc_register_t, mhioc_resv_desc_t,
 *		    mhioc_preemptandabort_t, mhioc_registerandignorekey_t)
 *
 * Return Code: 0   - Success
 *		EACCES
 *		ENOTSUP
 *		errno return code from sd_ssc_send()
 *
 * Context: Can sleep. Does not return until command is completed.
 */

static int
sd_send_scsi_PERSISTENT_RESERVE_OUT(sd_ssc_t *ssc, uchar_t usr_cmd,
	uchar_t *usr_bufp)
{
	struct scsi_extended_sense	sense_buf;
	union scsi_cdb		cdb;
	struct uscsi_cmd	ucmd_buf;
	int			status;
	uchar_t			data_len = sizeof (sd_prout_t);
	sd_prout_t		*prp;
	struct sd_lun		*un;

	ASSERT(ssc != NULL);
	un = ssc->ssc_un;
	ASSERT(un != NULL);
	ASSERT(!mutex_owned(SD_MUTEX(un)));
	ASSERT(data_len == 24);	/* required by scsi spec */

	SD_TRACE(SD_LOG_IO, un,
	    "sd_send_scsi_PERSISTENT_RESERVE_OUT: entry: un:0x%p\n", un);

	if (usr_bufp == NULL) {
		return (EINVAL);
	}

	bzero(&cdb, sizeof (cdb));
	bzero(&ucmd_buf, sizeof (ucmd_buf));
	bzero(&sense_buf, sizeof (struct scsi_extended_sense));
	prp = kmem_zalloc(data_len, KM_SLEEP);

	cdb.scc_cmd = SCMD_PERSISTENT_RESERVE_OUT;
	cdb.cdb_opaque[1] = usr_cmd;
	FORMG1COUNT(&cdb, data_len);

	ucmd_buf.uscsi_cdb	= (char *)&cdb;
	ucmd_buf.uscsi_cdblen	= CDB_GROUP1;
	ucmd_buf.uscsi_bufaddr	= (caddr_t)prp;
	ucmd_buf.uscsi_buflen	= data_len;
	ucmd_buf.uscsi_rqbuf	= (caddr_t)&sense_buf;
	ucmd_buf.uscsi_rqlen	= sizeof (struct scsi_extended_sense);
	ucmd_buf.uscsi_flags	= USCSI_RQENABLE | USCSI_WRITE | USCSI_SILENT;
	ucmd_buf.uscsi_timeout	= 60;

	switch (usr_cmd) {
	case SD_SCSI3_REGISTER: {
		mhioc_register_t *ptr = (mhioc_register_t *)usr_bufp;

		bcopy(ptr->oldkey.key, prp->res_key, MHIOC_RESV_KEY_SIZE);
		bcopy(ptr->newkey.key, prp->service_key,
		    MHIOC_RESV_KEY_SIZE);
		prp->aptpl = ptr->aptpl;
		break;
	}
	case SD_SCSI3_RESERVE:
	case SD_SCSI3_RELEASE: {
		mhioc_resv_desc_t *ptr = (mhioc_resv_desc_t *)usr_bufp;

		bcopy(ptr->key.key, prp->res_key, MHIOC_RESV_KEY_SIZE);
		prp->scope_address = BE_32(ptr->scope_specific_addr);
		cdb.cdb_opaque[2] = ptr->type;
		break;
	}
	case SD_SCSI3_PREEMPTANDABORT: {
		mhioc_preemptandabort_t *ptr =
		    (mhioc_preemptandabort_t *)usr_bufp;

		bcopy(ptr->resvdesc.key.key, prp->res_key, MHIOC_RESV_KEY_SIZE);
		bcopy(ptr->victim_key.key, prp->service_key,
		    MHIOC_RESV_KEY_SIZE);
		prp->scope_address = BE_32(ptr->resvdesc.scope_specific_addr);
		cdb.cdb_opaque[2] = ptr->resvdesc.type;
		ucmd_buf.uscsi_flags |= USCSI_HEAD;
		break;
	}
	case SD_SCSI3_REGISTERANDIGNOREKEY:
	{
		mhioc_registerandignorekey_t *ptr;
		ptr = (mhioc_registerandignorekey_t *)usr_bufp;
		bcopy(ptr->newkey.key,
		    prp->service_key, MHIOC_RESV_KEY_SIZE);
		prp->aptpl = ptr->aptpl;
		break;
	}
	default:
		ASSERT(FALSE);
		break;
	}

	status = sd_ssc_send(ssc, &ucmd_buf, FKIOCTL,
	    UIO_SYSSPACE, SD_PATH_STANDARD);

	switch (status) {
	case 0:
		sd_ssc_assessment(ssc, SD_FMT_STANDARD);
		break;	/* Success! */
	case EIO:
		switch (ucmd_buf.uscsi_status) {
		case STATUS_RESERVATION_CONFLICT:
			status = EACCES;
			break;
		case STATUS_CHECK:
			if ((ucmd_buf.uscsi_rqstatus == STATUS_GOOD) &&
			    (scsi_sense_key((uint8_t *)&sense_buf) ==
			    KEY_ILLEGAL_REQUEST)) {
				status = ENOTSUP;
			}
			break;
		default:
			break;
		}
		break;
	default:
		break;
	}

	kmem_free(prp, data_len);
	SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_PERSISTENT_RESERVE_OUT: exit\n");
	return (status);
}
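
/*
 * Illustrative sketch only (not part of the original driver; compiled out
 * behind the hypothetical SD_EXAMPLE_SKETCHES guard): a registration is
 * expressed as an mhioc_register_t, where a zeroed oldkey registers a new
 * key; sd_send_scsi_PERSISTENT_RESERVE_OUT() translates the struct into
 * the 24-byte PROUT parameter list.
 */
#ifdef	SD_EXAMPLE_SKETCHES
static int
sd_example_prout_register(sd_ssc_t *ssc, uchar_t *key)
{
	mhioc_register_t	reg;

	bzero(&reg, sizeof (reg));
	bcopy(key, reg.newkey.key, MHIOC_RESV_KEY_SIZE);
	reg.aptpl = 0;	/* registration does not persist across power loss */

	return (sd_send_scsi_PERSISTENT_RESERVE_OUT(ssc, SD_SCSI3_REGISTER,
	    (uchar_t *)&reg));
}
#endif	/* SD_EXAMPLE_SKETCHES */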

/*
 * Function: sd_send_scsi_SYNCHRONIZE_CACHE
 *
 * Description: Issues a scsi SYNCHRONIZE CACHE command to the target
 *
 * Arguments: un - pointer to the target's soft state struct
 *	      dkc - pointer to the callback structure
 *
 * Return Code: 0 - success
 *		errno-type error code
 *
 * Context: kernel thread context only.
 *
 *  _______________________________________________________________
 * | dkc_flag &   | dkc_callback |   DKIOCFLUSHWRITECACHE          |
 * |FLUSH_VOLATILE|              |      operation                  |
 * |______________|______________|_________________________________|
 * | 0            | NULL         | Synchronous flush on both       |
 * |              |              | volatile and non-volatile cache |
 * |______________|______________|_________________________________|
 * | 1            | NULL         | Synchronous flush on volatile   |
 * |              |              | cache; disk drivers may suppress|
 * |              |              | flush if disk table indicates   |
 * |              |              | non-volatile cache              |
 * |______________|______________|_________________________________|
 * | 0            | !NULL        | Asynchronous flush on both      |
 * |              |              | volatile and non-volatile cache;|
 * |______________|______________|_________________________________|
 * | 1            | !NULL        | Asynchronous flush on volatile  |
 * |              |              | cache; disk drivers may suppress|
 * |              |              | flush if disk table indicates   |
 * |              |              | non-volatile cache              |
 * |______________|______________|_________________________________|
 *
 */

static int
sd_send_scsi_SYNCHRONIZE_CACHE(struct sd_lun *un, struct dk_callback *dkc)
{
	struct sd_uscsi_info	*uip;
	struct uscsi_cmd	*uscmd;
	union scsi_cdb		*cdb;
	struct buf		*bp;
	int			rval = 0;
	int			is_async;

	SD_TRACE(SD_LOG_IO, un,
	    "sd_send_scsi_SYNCHRONIZE_CACHE: entry: un:0x%p\n", un);

	ASSERT(un != NULL);
	ASSERT(!mutex_owned(SD_MUTEX(un)));

	if (dkc == NULL || dkc->dkc_callback == NULL) {
		is_async = FALSE;
	} else {
		is_async = TRUE;
	}

	mutex_enter(SD_MUTEX(un));
	/* check whether cache flush should be suppressed */
	if (un->un_f_suppress_cache_flush == TRUE) {
		mutex_exit(SD_MUTEX(un));
		/*
		 * suppress the cache flush if the device is told to do
		 * so by sd.conf or disk table
		 */
		SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_SYNCHRONIZE_CACHE: \
		    skip the cache flush since suppress_cache_flush is %d!\n",
		    un->un_f_suppress_cache_flush);

		if (is_async == TRUE) {
			/* invoke callback for asynchronous flush */
			(*dkc->dkc_callback)(dkc->dkc_cookie, 0);
		}
		return (rval);
	}
	mutex_exit(SD_MUTEX(un));

	/*
	 * check dkc_flag & FLUSH_VOLATILE so SYNC_NV bit can be
	 * set properly
	 */
	cdb = kmem_zalloc(CDB_GROUP1, KM_SLEEP);
	cdb->scc_cmd = SCMD_SYNCHRONIZE_CACHE;

	mutex_enter(SD_MUTEX(un));
	if (dkc != NULL && un->un_f_sync_nv_supported &&
	    (dkc->dkc_flag & FLUSH_VOLATILE)) {
		/*
		 * if the device supports SYNC_NV bit, turn on
		 * the SYNC_NV bit to only flush volatile cache
		 */
		cdb->cdb_un.tag |= SD_SYNC_NV_BIT;
	}
	mutex_exit(SD_MUTEX(un));

	/*
	 * First get some memory for the uscsi_cmd struct and cdb
	 * and initialize for SYNCHRONIZE_CACHE cmd.
	 */
	uscmd = kmem_zalloc(sizeof (struct uscsi_cmd), KM_SLEEP);
	uscmd->uscsi_cdblen = CDB_GROUP1;
	uscmd->uscsi_cdb = (caddr_t)cdb;
	uscmd->uscsi_bufaddr = NULL;
	uscmd->uscsi_buflen = 0;
	uscmd->uscsi_rqbuf = kmem_zalloc(SENSE_LENGTH, KM_SLEEP);
	uscmd->uscsi_rqlen = SENSE_LENGTH;
	uscmd->uscsi_rqresid = SENSE_LENGTH;
	uscmd->uscsi_flags = USCSI_RQENABLE | USCSI_SILENT;
	uscmd->uscsi_timeout = sd_io_time;

	/*
	 * Allocate an sd_uscsi_info struct and fill it with the info
	 * needed by sd_initpkt_for_uscsi(). Then put the pointer into
	 * b_private in the buf for sd_initpkt_for_uscsi(). Note that
	 * since we allocate the buf here in this function, we do not
	 * need to preserve the prior contents of b_private.
	 * The sd_uscsi_info struct is also used by sd_uscsi_strategy()
	 */
	uip = kmem_zalloc(sizeof (struct sd_uscsi_info), KM_SLEEP);
	uip->ui_flags = SD_PATH_DIRECT;
	uip->ui_cmdp  = uscmd;

	bp = getrbuf(KM_SLEEP);
	bp->b_private = uip;

	/*
	 * Setup buffer to carry uscsi request.
	 */
	bp->b_flags  = B_BUSY;
	bp->b_bcount = 0;
	bp->b_blkno  = 0;

	if (is_async == TRUE) {
		bp->b_iodone = sd_send_scsi_SYNCHRONIZE_CACHE_biodone;
		uip->ui_dkc = *dkc;
	}

	bp->b_edev = SD_GET_DEV(un);
	bp->b_dev = cmpdev(bp->b_edev);	/* maybe unnecessary? */

	/*
	 * Unset un_f_sync_cache_required flag
	 */
	mutex_enter(SD_MUTEX(un));
	un->un_f_sync_cache_required = FALSE;
	mutex_exit(SD_MUTEX(un));

	(void) sd_uscsi_strategy(bp);

	/*
	 * If synchronous request, wait for completion.
	 * If async just return and let b_iodone callback
	 * cleanup.
	 * NOTE: On return, un_ncmds_in_driver will be decremented,
	 * but it was also incremented in sd_uscsi_strategy(), so
	 * we should be ok.
	 */
	if (is_async == FALSE) {
		(void) biowait(bp);
		rval = sd_send_scsi_SYNCHRONIZE_CACHE_biodone(bp);
	}

	return (rval);
}
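
/*
 * Illustrative sketch only (not part of the original driver; compiled out
 * behind the hypothetical SD_EXAMPLE_SKETCHES guard): supplying a
 * dk_callback with a non-NULL dkc_callback makes the flush asynchronous,
 * per the table above; the function returns immediately and the callback
 * fires from the biodone path with the final status.
 */
#ifdef	SD_EXAMPLE_SKETCHES
static void
sd_example_flush_done(void *cookie, int status)
{
	/* cookie is whatever the caller stashed in dkc_cookie */
	SD_TRACE(SD_LOG_IO, (struct sd_lun *)cookie,
	    "example flush completed with status %d\n", status);
}

static int
sd_example_async_flush(struct sd_lun *un)
{
	struct dk_callback	dkc;

	bzero(&dkc, sizeof (dkc));
	dkc.dkc_callback = sd_example_flush_done;
	dkc.dkc_cookie	 = un;
	dkc.dkc_flag	 = FLUSH_VOLATILE;	/* volatile cache only */

	return (sd_send_scsi_SYNCHRONIZE_CACHE(un, &dkc));
}
#endif	/* SD_EXAMPLE_SKETCHES */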

static int
sd_send_scsi_SYNCHRONIZE_CACHE_biodone(struct buf *bp)
{
	struct sd_uscsi_info	*uip;
	struct uscsi_cmd	*uscmd;
	uint8_t			*sense_buf;
	struct sd_lun		*un;
	int			status;
	union scsi_cdb		*cdb;

	uip = (struct sd_uscsi_info *)(bp->b_private);
	ASSERT(uip != NULL);

	uscmd = uip->ui_cmdp;
	ASSERT(uscmd != NULL);

	sense_buf = (uint8_t *)uscmd->uscsi_rqbuf;
	ASSERT(sense_buf != NULL);

	un = ddi_get_soft_state(sd_state, SD_GET_INSTANCE_FROM_BUF(bp));
	ASSERT(un != NULL);

	cdb = (union scsi_cdb *)uscmd->uscsi_cdb;

	status = geterror(bp);
	switch (status) {
	case 0:
		break;	/* Success! */
	case EIO:
		switch (uscmd->uscsi_status) {
		case STATUS_RESERVATION_CONFLICT:
			/* Ignore reservation conflict */
			status = 0;
			goto done;

		case STATUS_CHECK:
			if ((uscmd->uscsi_rqstatus == STATUS_GOOD) &&
			    (scsi_sense_key(sense_buf) ==
			    KEY_ILLEGAL_REQUEST)) {
				/* Ignore Illegal Request error */
				if (cdb->cdb_un.tag & SD_SYNC_NV_BIT) {
					mutex_enter(SD_MUTEX(un));
					un->un_f_sync_nv_supported = FALSE;
					mutex_exit(SD_MUTEX(un));
					status = 0;
					SD_TRACE(SD_LOG_IO, un,
					    "un_f_sync_nv_supported \
					    is set to false.\n");
					goto done;
				}

				mutex_enter(SD_MUTEX(un));
				un->un_f_sync_cache_supported = FALSE;
				mutex_exit(SD_MUTEX(un));
				SD_TRACE(SD_LOG_IO, un,
				    "sd_send_scsi_SYNCHRONIZE_CACHE_biodone: \
				    un_f_sync_cache_supported set to false \
				    with asc = %x, ascq = %x\n",
				    scsi_sense_asc(sense_buf),
				    scsi_sense_ascq(sense_buf));
				status = ENOTSUP;
				goto done;
			}
			break;
		default:
			break;
		}
		/* FALLTHRU */
	default:
		/*
		 * Turn on the un_f_sync_cache_required flag
		 * since the SYNC CACHE command failed
		 */
		mutex_enter(SD_MUTEX(un));
		un->un_f_sync_cache_required = TRUE;
		mutex_exit(SD_MUTEX(un));

		/*
		 * Don't log an error message if this device
		 * has removable media.
		 */
		if (!un->un_f_has_removable_media) {
			scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
			    "SYNCHRONIZE CACHE command failed (%d)\n", status);
		}
		break;
	}

done:
	if (uip->ui_dkc.dkc_callback != NULL) {
		(*uip->ui_dkc.dkc_callback)(uip->ui_dkc.dkc_cookie, status);
	}

	ASSERT((bp->b_flags & B_REMAPPED) == 0);
	freerbuf(bp);
	kmem_free(uip, sizeof (struct sd_uscsi_info));
	kmem_free(uscmd->uscsi_rqbuf, SENSE_LENGTH);
	kmem_free(uscmd->uscsi_cdb, (size_t)uscmd->uscsi_cdblen);
	kmem_free(uscmd, sizeof (struct uscsi_cmd));

	return (status);
}

/*
 * Function: sd_send_scsi_GET_CONFIGURATION
 *
 * Description: Issues the get configuration command to the device.
 *		Called from sd_check_for_writable_cd & sd_get_media_info.
 *		The caller must ensure that buflen = SD_PROFILE_HEADER_LEN.
 *
 * Arguments: ssc - ssc contains pointer to driver soft state (unit)
 *		    structure for this target.
 *	      ucmdbuf - uscsi command buffer to use for the request.
 *	      rqbuf - request sense buffer.
 *	      rqbuflen - size of the request sense buffer.
 *	      bufaddr - buffer for the configuration data returned.
 *	      buflen - size of the data buffer.
 *	      path_flag - SD_PATH_DIRECT or SD_PATH_STANDARD.
 *
 * Return Code: 0   - Success
 *		errno return code from sd_ssc_send()
 *
 * Context: Can sleep. Does not return until command is completed.
 *
 */

static int
sd_send_scsi_GET_CONFIGURATION(sd_ssc_t *ssc, struct uscsi_cmd *ucmdbuf,
	uchar_t *rqbuf, uint_t rqbuflen, uchar_t *bufaddr, uint_t buflen,
	int path_flag)
{
	char	cdb[CDB_GROUP1];
	int	status;
	struct sd_lun	*un;

	ASSERT(ssc != NULL);
	un = ssc->ssc_un;
	ASSERT(un != NULL);
	ASSERT(!mutex_owned(SD_MUTEX(un)));
	ASSERT(bufaddr != NULL);
	ASSERT(ucmdbuf != NULL);
	ASSERT(rqbuf != NULL);

	SD_TRACE(SD_LOG_IO, un,
	    "sd_send_scsi_GET_CONFIGURATION: entry: un:0x%p\n", un);

	bzero(cdb, sizeof (cdb));
	bzero(ucmdbuf, sizeof (struct uscsi_cmd));
	bzero(rqbuf, rqbuflen);
	bzero(bufaddr, buflen);

	/*
	 * Set up cdb field for the get configuration command.
	 */
	cdb[0] = SCMD_GET_CONFIGURATION;
	cdb[1] = 0x02;	/* Requested Type */
	cdb[8] = SD_PROFILE_HEADER_LEN;
	ucmdbuf->uscsi_cdb = cdb;
	ucmdbuf->uscsi_cdblen = CDB_GROUP1;
	ucmdbuf->uscsi_bufaddr = (caddr_t)bufaddr;
	ucmdbuf->uscsi_buflen = buflen;
	ucmdbuf->uscsi_timeout = sd_io_time;
	ucmdbuf->uscsi_rqbuf = (caddr_t)rqbuf;
	ucmdbuf->uscsi_rqlen = rqbuflen;
	ucmdbuf->uscsi_flags = USCSI_RQENABLE|USCSI_SILENT|USCSI_READ;

	status = sd_ssc_send(ssc, ucmdbuf, FKIOCTL,
	    UIO_SYSSPACE, path_flag);

	switch (status) {
	case 0:
		sd_ssc_assessment(ssc, SD_FMT_STANDARD);
		break;	/* Success! */
	case EIO:
		switch (ucmdbuf->uscsi_status) {
		case STATUS_RESERVATION_CONFLICT:
			status = EACCES;
			break;
		default:
			break;
		}
		break;
	default:
		break;
	}

	if (status == 0) {
		SD_DUMP_MEMORY(un, SD_LOG_IO,
		    "sd_send_scsi_GET_CONFIGURATION: data",
		    (uchar_t *)bufaddr, SD_PROFILE_HEADER_LEN, SD_LOG_HEX);
	}

	SD_TRACE(SD_LOG_IO, un,
	    "sd_send_scsi_GET_CONFIGURATION: exit\n");

	return (status);
}

/*
 * Function: sd_send_scsi_feature_GET_CONFIGURATION
 *
 * Description: Issues the get configuration command to the device to
 *		retrieve a specific feature. Called from
 *		sd_check_for_writable_cd & sd_set_mmc_caps.
 *
 * Arguments: ssc - ssc contains pointer to driver soft state (unit)
 *		    structure for this target.
 *	      ucmdbuf - uscsi command buffer to use for the request.
 *	      rqbuf - request sense buffer.
 *	      rqbuflen - size of the request sense buffer.
 *	      bufaddr - buffer for the feature data returned.
 *	      buflen - size of the data buffer.
 *	      feature - feature code to request.
 *	      path_flag - SD_PATH_DIRECT or SD_PATH_STANDARD.
 *
 * Return Code: 0   - Success
 *		errno return code from sd_ssc_send()
 *
 * Context: Can sleep. Does not return until command is completed.
 *
 */
static int
sd_send_scsi_feature_GET_CONFIGURATION(sd_ssc_t *ssc,
	struct uscsi_cmd *ucmdbuf, uchar_t *rqbuf, uint_t rqbuflen,
	uchar_t *bufaddr, uint_t buflen, char feature, int path_flag)
{
	char	cdb[CDB_GROUP1];
	int	status;
	struct sd_lun	*un;

	ASSERT(ssc != NULL);
	un = ssc->ssc_un;
	ASSERT(un != NULL);
	ASSERT(!mutex_owned(SD_MUTEX(un)));
	ASSERT(bufaddr != NULL);
	ASSERT(ucmdbuf != NULL);
	ASSERT(rqbuf != NULL);

	SD_TRACE(SD_LOG_IO, un,
	    "sd_send_scsi_feature_GET_CONFIGURATION: entry: un:0x%p\n", un);

	bzero(cdb, sizeof (cdb));
	bzero(ucmdbuf, sizeof (struct uscsi_cmd));
	bzero(rqbuf, rqbuflen);
	bzero(bufaddr, buflen);

	/*
	 * Set up cdb field for the get configuration command.
	 */
	cdb[0] = SCMD_GET_CONFIGURATION;
	cdb[1] = 0x02;	/* Requested Type */
	cdb[3] = feature;
	cdb[8] = buflen;
	ucmdbuf->uscsi_cdb = cdb;
	ucmdbuf->uscsi_cdblen = CDB_GROUP1;
	ucmdbuf->uscsi_bufaddr = (caddr_t)bufaddr;
	ucmdbuf->uscsi_buflen = buflen;
	ucmdbuf->uscsi_timeout = sd_io_time;
	ucmdbuf->uscsi_rqbuf = (caddr_t)rqbuf;
	ucmdbuf->uscsi_rqlen = rqbuflen;
	ucmdbuf->uscsi_flags = USCSI_RQENABLE|USCSI_SILENT|USCSI_READ;

	status = sd_ssc_send(ssc, ucmdbuf, FKIOCTL,
	    UIO_SYSSPACE, path_flag);

	switch (status) {
	case 0:

		break;	/* Success! */
	case EIO:
		switch (ucmdbuf->uscsi_status) {
		case STATUS_RESERVATION_CONFLICT:
			status = EACCES;
			break;
		default:
			break;
		}
		break;
	default:
		break;
	}

	if (status == 0) {
		SD_DUMP_MEMORY(un, SD_LOG_IO,
		    "sd_send_scsi_feature_GET_CONFIGURATION: data",
		    (uchar_t *)bufaddr, SD_PROFILE_HEADER_LEN, SD_LOG_HEX);
	}

	SD_TRACE(SD_LOG_IO, un,
	    "sd_send_scsi_feature_GET_CONFIGURATION: exit\n");

	return (status);
}
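
/*
 * Illustrative sketch only (not part of the original driver; compiled out
 * behind the hypothetical SD_EXAMPLE_SKETCHES guard). Feature code 0x01
 * (the MMC "Core" feature) is an assumption made here purely for
 * illustration; real callers pass whatever feature they are probing for.
 */
#ifdef	SD_EXAMPLE_SKETCHES
static int
sd_example_get_feature(sd_ssc_t *ssc)
{
	struct uscsi_cmd	ucmd;
	uchar_t			rqbuf[SENSE_LENGTH];
	uchar_t			buf[SD_PROFILE_HEADER_LEN];

	/* the callee zeroes ucmd, rqbuf, and buf before use */
	return (sd_send_scsi_feature_GET_CONFIGURATION(ssc, &ucmd, rqbuf,
	    SENSE_LENGTH, buf, sizeof (buf), 0x01, SD_PATH_STANDARD));
}
#endif	/* SD_EXAMPLE_SKETCHES */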

/*
 * Function: sd_send_scsi_MODE_SENSE
 *
 * Description: Utility function for issuing a scsi MODE SENSE command.
 *		Note: This routine uses a consistent implementation for Group0,
 *		Group1, and Group2 commands across all platforms. ATAPI devices
 *		use Group 1 Read/Write commands and Group 2 Mode Sense/Select
 *
 * Arguments: ssc - ssc contains pointer to driver soft state (unit)
 *		    structure for this target.
 *	      cdbsize - size CDB to be used (CDB_GROUP0 (6 byte), or
 *		    CDB_GROUP[1|2] (10 byte)).
 *	      bufaddr - buffer for page data retrieved from the target.
 *	      buflen - size of page to be retrieved.
 *	      page_code - page code of data to be retrieved from the target.
 *	      path_flag - SD_PATH_DIRECT to use the USCSI "direct" chain and
 *		    the normal command waitq, or SD_PATH_DIRECT_PRIORITY
 *		    to use the USCSI "direct" chain and bypass the normal
 *		    command waitq.
 *
 * Return Code: 0   - Success
 *		errno return code from sd_ssc_send()
 *
 * Context: Can sleep. Does not return until command is completed.
 */

static int
sd_send_scsi_MODE_SENSE(sd_ssc_t *ssc, int cdbsize, uchar_t *bufaddr,
	size_t buflen, uchar_t page_code, int path_flag)
{
	struct scsi_extended_sense	sense_buf;
	union scsi_cdb		cdb;
	struct uscsi_cmd	ucmd_buf;
	int			status;
	int			headlen;
	struct sd_lun		*un;

	ASSERT(ssc != NULL);
	un = ssc->ssc_un;
	ASSERT(un != NULL);
	ASSERT(!mutex_owned(SD_MUTEX(un)));
	ASSERT(bufaddr != NULL);
	ASSERT((cdbsize == CDB_GROUP0) || (cdbsize == CDB_GROUP1) ||
	    (cdbsize == CDB_GROUP2));

	SD_TRACE(SD_LOG_IO, un,
	    "sd_send_scsi_MODE_SENSE: entry: un:0x%p\n", un);

	bzero(&cdb, sizeof (cdb));
	bzero(&ucmd_buf, sizeof (ucmd_buf));
	bzero(&sense_buf, sizeof (struct scsi_extended_sense));
	bzero(bufaddr, buflen);

	if (cdbsize == CDB_GROUP0) {
		cdb.scc_cmd = SCMD_MODE_SENSE;
		cdb.cdb_opaque[2] = page_code;
		FORMG0COUNT(&cdb, buflen);
		headlen = MODE_HEADER_LENGTH;
	} else {
		cdb.scc_cmd = SCMD_MODE_SENSE_G1;
		cdb.cdb_opaque[2] = page_code;
		FORMG1COUNT(&cdb, buflen);
		headlen = MODE_HEADER_LENGTH_GRP2;
	}

	ASSERT(headlen <= buflen);
	SD_FILL_SCSI1_LUN_CDB(un, &cdb);

	ucmd_buf.uscsi_cdb	= (char *)&cdb;
	ucmd_buf.uscsi_cdblen	= (uchar_t)cdbsize;
	ucmd_buf.uscsi_bufaddr	= (caddr_t)bufaddr;
	ucmd_buf.uscsi_buflen	= buflen;
	ucmd_buf.uscsi_rqbuf	= (caddr_t)&sense_buf;
	ucmd_buf.uscsi_rqlen	= sizeof (struct scsi_extended_sense);
	ucmd_buf.uscsi_flags	= USCSI_RQENABLE | USCSI_READ | USCSI_SILENT;
	ucmd_buf.uscsi_timeout	= 60;

	status = sd_ssc_send(ssc, &ucmd_buf, FKIOCTL,
	    UIO_SYSSPACE, path_flag);

	switch (status) {
	case 0:
		/*
		 * sr_check_wp() uses page code 0x3f and checks the header of
		 * the mode page to determine if the target device is
		 * write-protected. But some USB devices return 0 bytes for
		 * page code 0x3f. For this case, make sure that at least the
		 * mode page header is returned.
		 */
		if (buflen - ucmd_buf.uscsi_resid < headlen) {
			status = EIO;
			sd_ssc_set_info(ssc, SSC_FLAGS_INVALID_DATA,
			    "mode page header is not returned");
		}
		break;	/* Success! */
	case EIO:
		switch (ucmd_buf.uscsi_status) {
		case STATUS_RESERVATION_CONFLICT:
			status = EACCES;
			break;
		default:
			break;
		}
		break;
	default:
		break;
	}

	if (status == 0) {
		SD_DUMP_MEMORY(un, SD_LOG_IO, "sd_send_scsi_MODE_SENSE: data",
		    (uchar_t *)bufaddr, buflen, SD_LOG_HEX);
	}
	SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_MODE_SENSE: exit\n");

	return (status);
}
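
/*
 * Illustrative sketch only (not part of the original driver; compiled out
 * behind the hypothetical SD_EXAMPLE_SKETCHES guard): fetching page code
 * 0x3F ("return all pages") with a Group 1 CDB, mirroring the sr_check_wp()
 * usage mentioned above. The device-specific byte in the returned mode
 * header carries the write-protect bit.
 */
#ifdef	SD_EXAMPLE_SKETCHES
static int
sd_example_mode_sense_all(sd_ssc_t *ssc, uchar_t *buf, size_t buflen)
{
	/* buflen must be at least MODE_HEADER_LENGTH_GRP2 for CDB_GROUP1 */
	return (sd_send_scsi_MODE_SENSE(ssc, CDB_GROUP1, buf, buflen,
	    0x3F, SD_PATH_STANDARD));
}
#endif	/* SD_EXAMPLE_SKETCHES */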

/*
 * Function: sd_send_scsi_MODE_SELECT
 *
 * Description: Utility function for issuing a scsi MODE SELECT command.
 *		Note: This routine uses a consistent implementation for Group0,
 *		Group1, and Group2 commands across all platforms. ATAPI devices
 *		use Group 1 Read/Write commands and Group 2 Mode Sense/Select
 *
 * Arguments: ssc - ssc contains pointer to driver soft state (unit)
 *		    structure for this target.
 *	      cdbsize - size CDB to be used (CDB_GROUP0 (6 byte), or
 *		    CDB_GROUP[1|2] (10 byte)).
 *	      bufaddr - buffer for page data retrieved from the target.
 *	      buflen - size of page to be retrieved.
 *	      save_page - boolean to determine if the SP bit should be set.
 *	      path_flag - SD_PATH_DIRECT to use the USCSI "direct" chain and
 *		    the normal command waitq, or SD_PATH_DIRECT_PRIORITY
 *		    to use the USCSI "direct" chain and bypass the normal
 *		    command waitq.
 *
 * Return Code: 0   - Success
 *		errno return code from sd_ssc_send()
 *
 * Context: Can sleep. Does not return until command is completed.
 */

static int
sd_send_scsi_MODE_SELECT(sd_ssc_t *ssc, int cdbsize, uchar_t *bufaddr,
	size_t buflen, uchar_t save_page, int path_flag)
{
	struct scsi_extended_sense	sense_buf;
	union scsi_cdb		cdb;
	struct uscsi_cmd	ucmd_buf;
	int			status;
	struct sd_lun		*un;

	ASSERT(ssc != NULL);
	un = ssc->ssc_un;
	ASSERT(un != NULL);
	ASSERT(!mutex_owned(SD_MUTEX(un)));
	ASSERT(bufaddr != NULL);
	ASSERT((cdbsize == CDB_GROUP0) || (cdbsize == CDB_GROUP1) ||
	    (cdbsize == CDB_GROUP2));

	SD_TRACE(SD_LOG_IO, un,
	    "sd_send_scsi_MODE_SELECT: entry: un:0x%p\n", un);

	bzero(&cdb, sizeof (cdb));
	bzero(&ucmd_buf, sizeof (ucmd_buf));
	bzero(&sense_buf, sizeof (struct scsi_extended_sense));

	/* Set the PF bit for many third party drives */
	cdb.cdb_opaque[1] = 0x10;

	/* Set the save page (SP) bit if given */
	if (save_page == SD_SAVE_PAGE) {
		cdb.cdb_opaque[1] |= 0x01;
	}

	if (cdbsize == CDB_GROUP0) {
		cdb.scc_cmd = SCMD_MODE_SELECT;
		FORMG0COUNT(&cdb, buflen);
	} else {
		cdb.scc_cmd = SCMD_MODE_SELECT_G1;
		FORMG1COUNT(&cdb, buflen);
	}

	SD_FILL_SCSI1_LUN_CDB(un, &cdb);

	ucmd_buf.uscsi_cdb	= (char *)&cdb;
	ucmd_buf.uscsi_cdblen	= (uchar_t)cdbsize;
	ucmd_buf.uscsi_bufaddr	= (caddr_t)bufaddr;
	ucmd_buf.uscsi_buflen	= buflen;
	ucmd_buf.uscsi_rqbuf	= (caddr_t)&sense_buf;
	ucmd_buf.uscsi_rqlen	= sizeof (struct scsi_extended_sense);
	ucmd_buf.uscsi_flags	= USCSI_RQENABLE | USCSI_WRITE | USCSI_SILENT;
	ucmd_buf.uscsi_timeout	= 60;

	status = sd_ssc_send(ssc, &ucmd_buf, FKIOCTL,
	    UIO_SYSSPACE, path_flag);

	switch (status) {
	case 0:
		sd_ssc_assessment(ssc, SD_FMT_STANDARD);
		break;	/* Success! */
	case EIO:
		switch (ucmd_buf.uscsi_status) {
		case STATUS_RESERVATION_CONFLICT:
			status = EACCES;
			break;
		default:
			break;
		}
		break;
	default:
		break;
	}

	if (status == 0) {
		SD_DUMP_MEMORY(un, SD_LOG_IO, "sd_send_scsi_MODE_SELECT: data",
		    (uchar_t *)bufaddr, buflen, SD_LOG_HEX);
	}
	SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_MODE_SELECT: exit\n");

	return (status);
}

/*
 * Function: sd_send_scsi_RDWR
 *
 * Description: Issue a scsi READ or WRITE command with the given parameters.
 *
 * Arguments: ssc - ssc contains pointer to driver soft state (unit)
 *		    structure for this target.
 *	      cmd: SCMD_READ or SCMD_WRITE
 *	      bufaddr: Address of caller's buffer to receive the RDWR data
 *	      buflen: Length of caller's buffer to receive the RDWR data.
 *	      start_block: Block number for the start of the RDWR operation.
 *			 (Assumes target-native block size.)
 *	      path_flag - SD_PATH_DIRECT to use the USCSI "direct" chain and
 *		    the normal command waitq, or SD_PATH_DIRECT_PRIORITY
 *		    to use the USCSI "direct" chain and bypass the normal
 *		    command waitq.
 *
 * Return Code: 0   - Success
 *		errno return code from sd_ssc_send()
 *
 * Context: Can sleep. Does not return until command is completed.
 */

static int
sd_send_scsi_RDWR(sd_ssc_t *ssc, uchar_t cmd, void *bufaddr,
	size_t buflen, daddr_t start_block, int path_flag)
{
	struct scsi_extended_sense	sense_buf;
	union scsi_cdb		cdb;
	struct uscsi_cmd	ucmd_buf;
	uint32_t		block_count;
	int			status;
	int			cdbsize;
	uchar_t			flag;
	struct sd_lun		*un;

	ASSERT(ssc != NULL);
	un = ssc->ssc_un;
	ASSERT(un != NULL);
	ASSERT(!mutex_owned(SD_MUTEX(un)));
	ASSERT(bufaddr != NULL);
	ASSERT((cmd == SCMD_READ) || (cmd == SCMD_WRITE));

	SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_RDWR: entry: un:0x%p\n", un);

	if (un->un_f_tgt_blocksize_is_valid != TRUE) {
		return (EINVAL);
	}

	mutex_enter(SD_MUTEX(un));
	block_count = SD_BYTES2TGTBLOCKS(un, buflen);
	mutex_exit(SD_MUTEX(un));

	flag = (cmd == SCMD_READ) ? USCSI_READ : USCSI_WRITE;

	SD_INFO(SD_LOG_IO, un, "sd_send_scsi_RDWR: "
	    "bufaddr:0x%p buflen:0x%x start_block:0x%p block_count:0x%x\n",
	    bufaddr, buflen, start_block, block_count);

	bzero(&cdb, sizeof (cdb));
	bzero(&ucmd_buf, sizeof (ucmd_buf));
	bzero(&sense_buf, sizeof (struct scsi_extended_sense));

	/* Compute CDB size to use */
	if (start_block > 0xffffffff)
		cdbsize = CDB_GROUP4;
	else if ((start_block & 0xFFE00000) ||
	    (un->un_f_cfg_is_atapi == TRUE))
		cdbsize = CDB_GROUP1;
	else
		cdbsize = CDB_GROUP0;

	switch (cdbsize) {
	case CDB_GROUP0:	/* 6-byte CDBs */
		cdb.scc_cmd = cmd;
		FORMG0ADDR(&cdb, start_block);
		FORMG0COUNT(&cdb, block_count);
		break;
	case CDB_GROUP1:	/* 10-byte CDBs */
		cdb.scc_cmd = cmd | SCMD_GROUP1;
		FORMG1ADDR(&cdb, start_block);
		FORMG1COUNT(&cdb, block_count);
		break;
	case CDB_GROUP4:	/* 16-byte CDBs */
		cdb.scc_cmd = cmd | SCMD_GROUP4;
		FORMG4LONGADDR(&cdb, (uint64_t)start_block);
		FORMG4COUNT(&cdb, block_count);
		break;
	case CDB_GROUP5:	/* 12-byte CDBs (currently unsupported) */
	default:
		/* All others reserved */
		return (EINVAL);
	}

	/* Set LUN bit(s) in CDB if this is a SCSI-1 device */
	SD_FILL_SCSI1_LUN_CDB(un, &cdb);

	ucmd_buf.uscsi_cdb	= (char *)&cdb;
	ucmd_buf.uscsi_cdblen	= (uchar_t)cdbsize;
	ucmd_buf.uscsi_bufaddr	= bufaddr;
	ucmd_buf.uscsi_buflen	= buflen;
	ucmd_buf.uscsi_rqbuf	= (caddr_t)&sense_buf;
	ucmd_buf.uscsi_rqlen	= sizeof (struct scsi_extended_sense);
	ucmd_buf.uscsi_flags	= flag | USCSI_RQENABLE | USCSI_SILENT;
	ucmd_buf.uscsi_timeout	= 60;
	status = sd_ssc_send(ssc, &ucmd_buf, FKIOCTL,
	    UIO_SYSSPACE, path_flag);

	switch (status) {
	case 0:
		sd_ssc_assessment(ssc, SD_FMT_STANDARD);
		break;	/* Success! */
	case EIO:
		switch (ucmd_buf.uscsi_status) {
		case STATUS_RESERVATION_CONFLICT:
			status = EACCES;
			break;
		default:
			break;
		}
		break;
	default:
		break;
	}

	if (status == 0) {
		SD_DUMP_MEMORY(un, SD_LOG_IO, "sd_send_scsi_RDWR: data",
		    (uchar_t *)bufaddr, buflen, SD_LOG_HEX);
	}

	SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_RDWR: exit\n");

	return (status);
}
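
/*
 * Illustrative sketch only (not part of the original driver; compiled out
 * behind the hypothetical SD_EXAMPLE_SKETCHES guard): reading one target
 * block at LBA 0. The buffer length is given in bytes and converted to a
 * block count internally, and the CDB group is chosen from the starting
 * block, exactly as in the function above.
 */
#ifdef	SD_EXAMPLE_SKETCHES
static int
sd_example_read_block0(sd_ssc_t *ssc, uchar_t *buf, size_t tgt_blocksize)
{
	/* buf must hold at least one target block (tgt_blocksize bytes) */
	return (sd_send_scsi_RDWR(ssc, SCMD_READ, buf, tgt_blocksize,
	    (daddr_t)0, SD_PATH_STANDARD));
}
#endif	/* SD_EXAMPLE_SKETCHES */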

/*
 * Function: sd_send_scsi_LOG_SENSE
 *
 * Description: Issue a scsi LOG_SENSE command with the given parameters.
 *
 * Arguments: ssc - ssc contains pointer to driver soft state (unit)
 *		    structure for this target.
 *	      bufaddr - buffer for the log page data.
 *	      buflen - size of the data buffer.
 *	      page_code - log page to be requested.
 *	      page_control - page control (PC) field for the CDB.
 *	      param_ptr - parameter pointer field for the CDB.
 *	      path_flag - SD_PATH_DIRECT or SD_PATH_STANDARD.
 *
 * Return Code: 0 - Success
 *		errno return code from sd_ssc_send()
 *
 * Context: Can sleep. Does not return until command is completed.
 */

static int
sd_send_scsi_LOG_SENSE(sd_ssc_t *ssc, uchar_t *bufaddr, uint16_t buflen,
	uchar_t page_code, uchar_t page_control, uint16_t param_ptr,
	int path_flag)

{
	struct scsi_extended_sense	sense_buf;
	union scsi_cdb		cdb;
	struct uscsi_cmd	ucmd_buf;
	int			status;
	struct sd_lun		*un;

	ASSERT(ssc != NULL);
	un = ssc->ssc_un;
	ASSERT(un != NULL);
	ASSERT(!mutex_owned(SD_MUTEX(un)));

	SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_LOG_SENSE: entry: un:0x%p\n", un);

	bzero(&cdb, sizeof (cdb));
	bzero(&ucmd_buf, sizeof (ucmd_buf));
	bzero(&sense_buf, sizeof (struct scsi_extended_sense));

	cdb.scc_cmd = SCMD_LOG_SENSE_G1;
	cdb.cdb_opaque[2] = (page_control << 6) | page_code;
	cdb.cdb_opaque[5] = (uchar_t)((param_ptr & 0xFF00) >> 8);
	cdb.cdb_opaque[6] = (uchar_t)(param_ptr & 0x00FF);
	FORMG1COUNT(&cdb, buflen);

	ucmd_buf.uscsi_cdb	= (char *)&cdb;
	ucmd_buf.uscsi_cdblen	= CDB_GROUP1;
	ucmd_buf.uscsi_bufaddr	= (caddr_t)bufaddr;
	ucmd_buf.uscsi_buflen	= buflen;
	ucmd_buf.uscsi_rqbuf	= (caddr_t)&sense_buf;
	ucmd_buf.uscsi_rqlen	= sizeof (struct scsi_extended_sense);
	ucmd_buf.uscsi_flags	= USCSI_RQENABLE | USCSI_READ | USCSI_SILENT;
	ucmd_buf.uscsi_timeout	= 60;

	status = sd_ssc_send(ssc, &ucmd_buf, FKIOCTL,
	    UIO_SYSSPACE, path_flag);

	switch (status) {
	case 0:
		break;
	case EIO:
		switch (ucmd_buf.uscsi_status) {
		case STATUS_RESERVATION_CONFLICT:
			status = EACCES;
			break;
		case STATUS_CHECK:
			if ((ucmd_buf.uscsi_rqstatus == STATUS_GOOD) &&
			    (scsi_sense_key((uint8_t *)&sense_buf) ==
			    KEY_ILLEGAL_REQUEST) &&
			    (scsi_sense_asc((uint8_t *)&sense_buf) == 0x24)) {
				/*
				 * ASC 0x24: INVALID FIELD IN CDB
				 */
				switch (page_code) {
				case START_STOP_CYCLE_PAGE:
					/*
					 * The start stop cycle counter is
					 * implemented as page 0x31 in earlier
					 * generation disks. In new generation
					 * disks the start stop cycle counter is
					 * implemented as page 0xE. To properly
					 * handle this case if an attempt for
					 * log page 0xE is made and fails we
					 * will try again using page 0x31.
					 *
					 * Network storage BU committed to
					 * maintain the page 0x31 for this
					 * purpose and will not have any other
					 * page implemented with page code 0x31
					 * until all disks transition to the
					 * standard page.
					 */
					mutex_enter(SD_MUTEX(un));
					un->un_start_stop_cycle_page =
					    START_STOP_CYCLE_VU_PAGE;
					cdb.cdb_opaque[2] =
					    (char)(page_control << 6) |
					    un->un_start_stop_cycle_page;
					mutex_exit(SD_MUTEX(un));
					sd_ssc_assessment(ssc, SD_FMT_IGNORE);
					status = sd_ssc_send(
					    ssc, &ucmd_buf, FKIOCTL,
					    UIO_SYSSPACE, path_flag);

					break;
				case TEMPERATURE_PAGE:
					status = ENOTTY;
					break;
				default:
					break;
				}
			}
			break;
		default:
			break;
		}
		break;
	default:
		break;
	}

	if (status == 0) {
		sd_ssc_assessment(ssc, SD_FMT_STANDARD);
		SD_DUMP_MEMORY(un, SD_LOG_IO, "sd_send_scsi_LOG_SENSE: data",
		    (uchar_t *)bufaddr, buflen, SD_LOG_HEX);
	}

	SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_LOG_SENSE: exit\n");

	return (status);
}
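
/*
 * Illustrative sketch only (not part of the original driver; compiled out
 * behind the hypothetical SD_EXAMPLE_SKETCHES guard): reading the
 * temperature log page. The page_control value of 1 (current cumulative
 * values) is an assumption made here for illustration; the page 0x31
 * fallback for the start/stop cycle page is handled inside the function
 * itself.
 */
#ifdef	SD_EXAMPLE_SKETCHES
static int
sd_example_read_temperature(sd_ssc_t *ssc, uchar_t *buf, uint16_t buflen)
{
	return (sd_send_scsi_LOG_SENSE(ssc, buf, buflen, TEMPERATURE_PAGE,
	    0x01, 0, SD_PATH_STANDARD));
}
#endif	/* SD_EXAMPLE_SKETCHES */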

/*
 * Function: sdioctl
 *
 * Description: Driver's ioctl(9e) entry point function.
 *
 * Arguments: dev     - device number
 *	      cmd     - ioctl operation to be performed
 *	      arg     - user argument, contains data to be set or reference
 *			parameter for get
 *	      flag    - bit flag, indicating open settings, 32/64 bit type
 *	      cred_p  - user credential pointer
 *	      rval_p  - calling process return value (OPT)
 *
 * Return Code: EINVAL
 *		ENOTTY
 *		ENXIO
 *		EIO
 *		EFAULT
 *		ENOTSUP
 *		EPERM
 *
 * Context: Called from the device switch at normal priority.
 */

static int
sdioctl(dev_t dev, int cmd, intptr_t arg, int flag, cred_t *cred_p, int *rval_p)
{
	struct sd_lun	*un = NULL;
	int		err = 0;
	int		i = 0;
	cred_t		*cr;
	int		tmprval = EINVAL;
	int		is_valid;
	sd_ssc_t	*ssc;

	/*
	 * All device accesses go through sdstrategy, where we check on
	 * suspend status.
	 */
	if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) {
		return (ENXIO);
	}

	ASSERT(!mutex_owned(SD_MUTEX(un)));

	/* Initialize sd_ssc_t for internal uscsi commands */
	ssc = sd_ssc_init(un);

	is_valid = SD_IS_VALID_LABEL(un);

	/*
	 * Moved this wait from sd_uscsi_strategy to here for
	 * reasons of deadlock prevention. Internal driver commands,
	 * specifically those to change a device's power level, result
	 * in a call to sd_uscsi_strategy.
	 */
	mutex_enter(SD_MUTEX(un));
	while ((un->un_state == SD_STATE_SUSPENDED) ||
	    (un->un_state == SD_STATE_PM_CHANGING)) {
		cv_wait(&un->un_suspend_cv, SD_MUTEX(un));
	}
	/*
	 * Twiddling the counter here protects commands from now
	 * through to the top of sd_uscsi_strategy. Without the
	 * counter increment, a power down, for example, could get in
	 * after the above check for state is made and before
	 * execution gets to the top of sd_uscsi_strategy.
	 * That would cause problems.
	 */
	un->un_ncmds_in_driver++;

	if (!is_valid &&
	    (flag & (FNDELAY | FNONBLOCK))) {
		switch (cmd) {
		case DKIOCGGEOM:	/* SD_PATH_DIRECT */
		case DKIOCGVTOC:
		case DKIOCGEXTVTOC:
		case DKIOCGAPART:
		case DKIOCPARTINFO:
		case DKIOCEXTPARTINFO:
		case DKIOCSGEOM:
		case DKIOCSAPART:
		case DKIOCGETEFI:
		case DKIOCPARTITION:
		case DKIOCSVTOC:
		case DKIOCSEXTVTOC:
		case DKIOCSETEFI:
		case DKIOCGMBOOT:
		case DKIOCSMBOOT:
		case DKIOCG_PHYGEOM:
		case DKIOCG_VIRTGEOM:
			/* let cmlb handle it */
			goto skip_ready_valid;

		case CDROMPAUSE:
		case CDROMRESUME:
		case CDROMPLAYMSF:
		case CDROMPLAYTRKIND:
		case CDROMREADTOCHDR:
		case CDROMREADTOCENTRY:
		case CDROMSTOP:
		case CDROMSTART:
		case CDROMVOLCTRL:
		case CDROMSUBCHNL:
		case CDROMREADMODE2:
		case CDROMREADMODE1:
		case CDROMREADOFFSET:
		case CDROMSBLKMODE:
		case CDROMGBLKMODE:
		case CDROMGDRVSPEED:
		case CDROMSDRVSPEED:
		case CDROMCDDA:
		case CDROMCDXA:
		case CDROMSUBCODE:
			if (!ISCD(un)) {
				un->un_ncmds_in_driver--;
				ASSERT(un->un_ncmds_in_driver >= 0);
				mutex_exit(SD_MUTEX(un));
				err = ENOTTY;
				goto done_without_assess;
			}
			break;
		case FDEJECT:
		case DKIOCEJECT:
		case CDROMEJECT:
			if (!un->un_f_eject_media_supported) {
				un->un_ncmds_in_driver--;
				ASSERT(un->un_ncmds_in_driver >= 0);
				mutex_exit(SD_MUTEX(un));
				err = ENOTTY;
				goto done_without_assess;
			}
			break;
		case DKIOCFLUSHWRITECACHE:
			mutex_exit(SD_MUTEX(un));
			err = sd_send_scsi_TEST_UNIT_READY(ssc, 0);
			if (err != 0) {
				mutex_enter(SD_MUTEX(un));
				un->un_ncmds_in_driver--;
				ASSERT(un->un_ncmds_in_driver >= 0);
				mutex_exit(SD_MUTEX(un));
				err = EIO;
				goto done_quick_assess;
			}
			mutex_enter(SD_MUTEX(un));
			/* FALLTHROUGH */
		case DKIOCREMOVABLE:
		case DKIOCHOTPLUGGABLE:
		case DKIOCINFO:
		case DKIOCGMEDIAINFO:
		case MHIOCENFAILFAST:
		case MHIOCSTATUS:
		case MHIOCTKOWN:
		case MHIOCRELEASE:
		case MHIOCGRP_INKEYS:
		case MHIOCGRP_INRESV:
		case MHIOCGRP_REGISTER:
		case MHIOCGRP_RESERVE:
		case MHIOCGRP_PREEMPTANDABORT:
		case MHIOCGRP_REGISTERANDIGNOREKEY:
		case CDROMCLOSETRAY:
		case USCSICMD:
			goto skip_ready_valid;
		default:
			break;
		}

		mutex_exit(SD_MUTEX(un));
		err = sd_ready_and_valid(ssc, SDPART(dev));
		mutex_enter(SD_MUTEX(un));

		if (err != SD_READY_VALID) {
			switch (cmd) {
			case DKIOCSTATE:
			case CDROMGDRVSPEED:
			case CDROMSDRVSPEED:
			case FDEJECT:	/* for eject command */
			case DKIOCEJECT:
			case CDROMEJECT:
			case DKIOCREMOVABLE:
			case DKIOCHOTPLUGGABLE:
				break;
			default:
				if (un->un_f_has_removable_media) {
					err = ENXIO;
				} else {
					/*
					 * Do not map
					 * SD_RESERVED_BY_OTHERS to EIO
					 */
					if (err == SD_RESERVED_BY_OTHERS) {
						err = EACCES;
					} else {
						err = EIO;
					}
				}
				un->un_ncmds_in_driver--;
				ASSERT(un->un_ncmds_in_driver >= 0);
				mutex_exit(SD_MUTEX(un));

				goto done_without_assess;
			}
		}
	}

skip_ready_valid:
	mutex_exit(SD_MUTEX(un));

	switch (cmd) {
	case DKIOCINFO:
		SD_TRACE(SD_LOG_IOCTL, un, "DKIOCINFO\n");
		err = sd_dkio_ctrl_info(dev, (caddr_t)arg, flag);
		break;

	case DKIOCGMEDIAINFO:
		SD_TRACE(SD_LOG_IOCTL, un, "DKIOCGMEDIAINFO\n");
		err = sd_get_media_info(dev, (caddr_t)arg, flag);
		break;

	case DKIOCGGEOM:
	case DKIOCGVTOC:
	case DKIOCGEXTVTOC:
	case DKIOCGAPART:
	case DKIOCPARTINFO:
	case DKIOCEXTPARTINFO:
	case DKIOCSGEOM:
	case DKIOCSAPART:
	case DKIOCGETEFI:
	case DKIOCPARTITION:
	case DKIOCSVTOC:
	case DKIOCSEXTVTOC:
	case DKIOCSETEFI:
	case DKIOCGMBOOT:
	case DKIOCSMBOOT:
	case DKIOCG_PHYGEOM:
	case DKIOCG_VIRTGEOM:
		SD_TRACE(SD_LOG_IOCTL, un, "DKIOC %d\n", cmd);

		/* TUR should spin up */

		if (un->un_f_has_removable_media)
			err = sd_send_scsi_TEST_UNIT_READY(ssc,
			    SD_CHECK_FOR_MEDIA);

		else
			err = sd_send_scsi_TEST_UNIT_READY(ssc, 0);

		if (err != 0)
			goto done_with_assess;

		err = cmlb_ioctl(un->un_cmlbhandle, dev,
		    cmd, arg, flag, cred_p, rval_p, (void *)SD_PATH_DIRECT);

		if ((err == 0) &&
		    ((cmd == DKIOCSETEFI) ||
		    ((un->un_f_pkstats_enabled) &&
		    (cmd == DKIOCSAPART || cmd == DKIOCSVTOC)))) {

			tmprval = cmlb_validate(un->un_cmlbhandle, CMLB_SILENT,
			    (void *)SD_PATH_DIRECT);
			if ((tmprval == 0) && un->un_f_pkstats_enabled) {
				sd_set_pstats(un);
				SD_TRACE(SD_LOG_IO_PARTITION, un,
				    "sd_ioctl: un:0x%p pstats created and "
				    "set\n", un);
			}
		}

		if ((cmd == DKIOCSVTOC) ||
		    ((cmd == DKIOCSETEFI) && (tmprval == 0))) {

			mutex_enter(SD_MUTEX(un));
			if (un->un_f_devid_supported &&
			    (un->un_f_opt_fab_devid == TRUE)) {
				if (un->un_devid == NULL) {
					sd_register_devid(ssc, SD_DEVINFO(un),
					    SD_TARGET_IS_UNRESERVED);
				} else {
					/*
					 * The device id for this disk
					 * has been fabricated. The
					 * device id must be preserved
					 * by writing it back out to
					 * disk.
					 */
					if (sd_write_deviceid(ssc) != 0) {
						ddi_devid_free(un->un_devid);
						un->un_devid = NULL;
					}
				}
			}
			mutex_exit(SD_MUTEX(un));
		}

		break;

	case DKIOCLOCK:
		SD_TRACE(SD_LOG_IOCTL, un, "DKIOCLOCK\n");
		err = sd_send_scsi_DOORLOCK(ssc, SD_REMOVAL_PREVENT,
		    SD_PATH_STANDARD);
		goto done_with_assess;

	case DKIOCUNLOCK:
		SD_TRACE(SD_LOG_IOCTL, un, "DKIOCUNLOCK\n");
		err = sd_send_scsi_DOORLOCK(ssc, SD_REMOVAL_ALLOW,
		    SD_PATH_STANDARD);
		goto done_with_assess;

	case DKIOCSTATE: {
		enum dkio_state		state;
		SD_TRACE(SD_LOG_IOCTL, un, "DKIOCSTATE\n");

		if (ddi_copyin((void *)arg, &state, sizeof (int), flag) != 0) {
			err = EFAULT;
		} else {
			err = sd_check_media(dev, state);
			if (err == 0) {
				if (ddi_copyout(&un->un_mediastate, (void *)arg,
				    sizeof (int), flag) != 0)
					err = EFAULT;
			}
		}
		break;
	}

	case DKIOCREMOVABLE:
		SD_TRACE(SD_LOG_IOCTL, un, "DKIOCREMOVABLE\n");
		i = un->un_f_has_removable_media ? 1 : 0;
		if (ddi_copyout(&i, (void *)arg, sizeof (int), flag) != 0) {
			err = EFAULT;
		} else {
			err = 0;
		}
		break;

	case DKIOCHOTPLUGGABLE:
		SD_TRACE(SD_LOG_IOCTL, un, "DKIOCHOTPLUGGABLE\n");
		i = un->un_f_is_hotpluggable ? 1 : 0;
		if (ddi_copyout(&i, (void *)arg, sizeof (int), flag) != 0) {
			err = EFAULT;
		} else {
			err = 0;
		}
		break;

	case DKIOCGTEMPERATURE:
		SD_TRACE(SD_LOG_IOCTL, un, "DKIOCGTEMPERATURE\n");
		err = sd_dkio_get_temp(dev, (caddr_t)arg, flag);
		break;

	case MHIOCENFAILFAST:
		SD_TRACE(SD_LOG_IOCTL, un, "MHIOCENFAILFAST\n");
		if ((err = drv_priv(cred_p)) == 0) {
			err = sd_mhdioc_failfast(dev, (caddr_t)arg, flag);
		}
		break;

	case MHIOCTKOWN:
		SD_TRACE(SD_LOG_IOCTL, un, "MHIOCTKOWN\n");
		if ((err = drv_priv(cred_p)) == 0) {
			err = sd_mhdioc_takeown(dev, (caddr_t)arg, flag);
		}
		break;

	case MHIOCRELEASE:
		SD_TRACE(SD_LOG_IOCTL, un, "MHIOCRELEASE\n");
		if ((err = drv_priv(cred_p)) == 0) {
			err = sd_mhdioc_release(dev);
		}
		break;

	case MHIOCSTATUS:
		SD_TRACE(SD_LOG_IOCTL, un, "MHIOCSTATUS\n");
		if ((err = drv_priv(cred_p)) == 0) {
			switch (sd_send_scsi_TEST_UNIT_READY(ssc, 0)) {
			case 0:
				err = 0;
				break;
			case EACCES:
				*rval_p = 1;
				err = 0;
				sd_ssc_assessment(ssc, SD_FMT_IGNORE);
				break;
			default:
				err = EIO;
				goto done_with_assess;
			}
		}
		break;

	case MHIOCQRESERVE:
		SD_TRACE(SD_LOG_IOCTL, un, "MHIOCQRESERVE\n");
		if ((err = drv_priv(cred_p)) == 0) {
			err = sd_reserve_release(dev, SD_RESERVE);
		}
		break;

	case MHIOCREREGISTERDEVID:
		SD_TRACE(SD_LOG_IOCTL, un, "MHIOCREREGISTERDEVID\n");
		if (drv_priv(cred_p) == EPERM) {
			err = EPERM;
		} else if (!un->un_f_devid_supported) {
			err = ENOTTY;
		} else {
			err = sd_mhdioc_register_devid(dev);
		}
		break;

	case MHIOCGRP_INKEYS:
		SD_TRACE(SD_LOG_IOCTL, un, "MHIOCGRP_INKEYS\n");
		if (((err = drv_priv(cred_p)) != EPERM) && arg != NULL) {
			if (un->un_reservation_type == SD_SCSI2_RESERVATION) {
				err = ENOTSUP;
			} else {
				err = sd_mhdioc_inkeys(dev, (caddr_t)arg,
				    flag);
			}
		}
		break;

	case MHIOCGRP_INRESV:
		SD_TRACE(SD_LOG_IOCTL, un, "MHIOCGRP_INRESV\n");
		if (((err = drv_priv(cred_p)) != EPERM) && arg != NULL) {
			if (un->un_reservation_type == SD_SCSI2_RESERVATION) {
				err = ENOTSUP;
			} else {
				err = sd_mhdioc_inresv(dev, (caddr_t)arg, flag);
			}
		}
		break;

	case MHIOCGRP_REGISTER:
		SD_TRACE(SD_LOG_IOCTL, un, "MHIOCGRP_REGISTER\n");
		if ((err = drv_priv(cred_p)) != EPERM) {
			if (un->un_reservation_type == SD_SCSI2_RESERVATION) {
				err = ENOTSUP;
			} else if (arg != NULL) {
				mhioc_register_t reg;
				if (ddi_copyin((void *)arg, &reg,
				    sizeof (mhioc_register_t), flag) != 0) {
					err = EFAULT;
				} else {
					err =
					    sd_send_scsi_PERSISTENT_RESERVE_OUT(
					    ssc, SD_SCSI3_REGISTER,
					    (uchar_t *)&reg);
					if (err != 0)
						goto done_with_assess;
				}
			}
		}
		break;

	case MHIOCGRP_RESERVE:
		SD_TRACE(SD_LOG_IOCTL, un, "MHIOCGRP_RESERVE\n");
		if ((err = drv_priv(cred_p)) != EPERM) {
			if (un->un_reservation_type == SD_SCSI2_RESERVATION) {
				err = ENOTSUP;
			} else if (arg != NULL) {
				mhioc_resv_desc_t resv_desc;
				if (ddi_copyin((void *)arg, &resv_desc,
				    sizeof (mhioc_resv_desc_t), flag) != 0) {
					err = EFAULT;
				} else {
					err =
					    sd_send_scsi_PERSISTENT_RESERVE_OUT(
					    ssc, SD_SCSI3_RESERVE,
					    (uchar_t *)&resv_desc);
					if (err != 0)
						goto done_with_assess;
				}
			}
		}
		break;

	case MHIOCGRP_PREEMPTANDABORT:
		SD_TRACE(SD_LOG_IOCTL, un, "MHIOCGRP_PREEMPTANDABORT\n");
		if ((err = drv_priv(cred_p)) != EPERM) {
			if (un->un_reservation_type == SD_SCSI2_RESERVATION) {
				err = ENOTSUP;
			} else if (arg != NULL) {
				mhioc_preemptandabort_t preempt_abort;
				if (ddi_copyin((void *)arg, &preempt_abort,
				    sizeof (mhioc_preemptandabort_t),
				    flag) != 0) {
					err = EFAULT;
				} else {
					err =
					    sd_send_scsi_PERSISTENT_RESERVE_OUT(
					    ssc, SD_SCSI3_PREEMPTANDABORT,
					    (uchar_t *)&preempt_abort);
					if (err != 0)
						goto done_with_assess;
				}
			}
		}
		break;

	case MHIOCGRP_REGISTERANDIGNOREKEY:
		SD_TRACE(SD_LOG_IOCTL, un, "MHIOCGRP_REGISTERANDIGNOREKEY\n");
		if ((err = drv_priv(cred_p)) != EPERM) {
			if (un->un_reservation_type == SD_SCSI2_RESERVATION) {
				err = ENOTSUP;
			} else if (arg != NULL) {
				mhioc_registerandignorekey_t r_and_i;
				if (ddi_copyin((void *)arg, (void *)&r_and_i,
				    sizeof (mhioc_registerandignorekey_t),
				    flag) != 0) {
					err = EFAULT;
				} else {
					err =
					    sd_send_scsi_PERSISTENT_RESERVE_OUT(
					    ssc, SD_SCSI3_REGISTERANDIGNOREKEY,
					    (uchar_t *)&r_and_i);
					if (err != 0)
						goto done_with_assess;
				}
			}
		}
		break;

	case USCSICMD:
		SD_TRACE(SD_LOG_IOCTL, un, "USCSICMD\n");
		cr = ddi_get_cred();
		if ((drv_priv(cred_p) != 0) && (drv_priv(cr) != 0)) {
			err = EPERM;
		} else {
			enum uio_seg	uioseg;

			uioseg = (flag & FKIOCTL) ? UIO_SYSSPACE :
			    UIO_USERSPACE;
			if (un->un_f_format_in_progress == TRUE) {
				err = EAGAIN;
				break;
			}

			err = sd_ssc_send(ssc,
			    (struct uscsi_cmd *)arg,
			    flag, uioseg, SD_PATH_STANDARD);
			if (err != 0)
				goto done_with_assess;
			else
				sd_ssc_assessment(ssc, SD_FMT_STANDARD);
		}
		break;

	case CDROMPAUSE:
	case CDROMRESUME:
		SD_TRACE(SD_LOG_IOCTL, un, "PAUSE-RESUME\n");
		if (!ISCD(un)) {
			err = ENOTTY;
		} else {
			err = sr_pause_resume(dev, cmd);
		}
		break;

	case CDROMPLAYMSF:
		SD_TRACE(SD_LOG_IOCTL, un, "CDROMPLAYMSF\n");
		if (!ISCD(un)) {
			err = ENOTTY;
		} else {
			err = sr_play_msf(dev, (caddr_t)arg, flag);
		}
		break;

	case CDROMPLAYTRKIND:
		SD_TRACE(SD_LOG_IOCTL, un, "CDROMPLAYTRKIND\n");
#if defined(__i386) || defined(__amd64)
		/*
		 * not supported on ATAPI CD drives, use CDROMPLAYMSF instead
		 */
		if (!ISCD(un) || (un->un_f_cfg_is_atapi == TRUE)) {
#else
		if (!ISCD(un)) {
#endif
			err = ENOTTY;
		} else {
			err = sr_play_trkind(dev, (caddr_t)arg, flag);
		}
		break;

	case CDROMREADTOCHDR:
		SD_TRACE(SD_LOG_IOCTL, un, "CDROMREADTOCHDR\n");
		if (!ISCD(un)) {
			err = ENOTTY;
		} else {
			err = sr_read_tochdr(dev, (caddr_t)arg, flag);
		}
		break;

	case CDROMREADTOCENTRY:
		SD_TRACE(SD_LOG_IOCTL, un, "CDROMREADTOCENTRY\n");
		if (!ISCD(un)) {
			err = ENOTTY;
		} else {
			err = sr_read_tocentry(dev, (caddr_t)arg, flag);
		}
		break;

	case CDROMSTOP:
		SD_TRACE(SD_LOG_IOCTL, un, "CDROMSTOP\n");
		if (!ISCD(un)) {
			err = ENOTTY;
		} else {
			err = sd_send_scsi_START_STOP_UNIT(ssc, SD_TARGET_STOP,
			    SD_PATH_STANDARD);
			goto done_with_assess;
		}
		break;

	case CDROMSTART:
		SD_TRACE(SD_LOG_IOCTL, un, "CDROMSTART\n");
		if (!ISCD(un)) {
			err = ENOTTY;
		} else {
			err = sd_send_scsi_START_STOP_UNIT(ssc, SD_TARGET_START,
			    SD_PATH_STANDARD);
			goto done_with_assess;
		}
		break;

	case CDROMCLOSETRAY:
		SD_TRACE(SD_LOG_IOCTL, un, "CDROMCLOSETRAY\n");
		if (!ISCD(un)) {
			err = ENOTTY;
		} else {
			err = sd_send_scsi_START_STOP_UNIT(ssc, SD_TARGET_CLOSE,
			    SD_PATH_STANDARD);
			goto done_with_assess;
		}
		break;

	case FDEJECT:	/* for eject command */
	case DKIOCEJECT:
	case CDROMEJECT:
		SD_TRACE(SD_LOG_IOCTL, un, "EJECT\n");
		if (!un->un_f_eject_media_supported) {
			err = ENOTTY;
		} else {
			err = sr_eject(dev);
		}
		break;

	case CDROMVOLCTRL:
		SD_TRACE(SD_LOG_IOCTL, un, "CDROMVOLCTRL\n");
		if (!ISCD(un)) {
			err = ENOTTY;
		} else {
			err = sr_volume_ctrl(dev, (caddr_t)arg, flag);
		}
		break;

	case CDROMSUBCHNL:
		SD_TRACE(SD_LOG_IOCTL, un, "CDROMSUBCHNL\n");
		if (!ISCD(un)) {
			err = ENOTTY;
		} else {
			err = sr_read_subchannel(dev, (caddr_t)arg, flag);
		}
		break;

	case CDROMREADMODE2:
		SD_TRACE(SD_LOG_IOCTL, un, "CDROMREADMODE2\n");
		if (!ISCD(un)) {
			err = ENOTTY;
		} else if (un->un_f_cfg_is_atapi == TRUE) {
			/*
			 * If the drive supports READ CD, use that instead of
			 * switching the LBA size via a MODE SELECT
			 * Block Descriptor
			 */
			err = sr_read_cd_mode2(dev, (caddr_t)arg, flag);
		} else {
			err = sr_read_mode2(dev, (caddr_t)arg, flag);
		}
		break;

	case CDROMREADMODE1:
		SD_TRACE(SD_LOG_IOCTL, un, "CDROMREADMODE1\n");
		if (!ISCD(un)) {
			err = ENOTTY;
		} else {
			err = sr_read_mode1(dev, (caddr_t)arg, flag);
		}
		break;

	case CDROMREADOFFSET:
		SD_TRACE(SD_LOG_IOCTL, un, "CDROMREADOFFSET\n");
		if (!ISCD(un)) {
			err = ENOTTY;
		} else {
			err = sr_read_sony_session_offset(dev, (caddr_t)arg,
			    flag);
		}
		break;

	case CDROMSBLKMODE:
		SD_TRACE(SD_LOG_IOCTL, un, "CDROMSBLKMODE\n");
		/*
		 * There is no means of changing the block size for ATAPI
		 * drives, so return ENOTTY if the drive type is ATAPI.
		 */
		if (!ISCD(un) || (un->un_f_cfg_is_atapi == TRUE)) {
			err = ENOTTY;
		} else if (un->un_f_mmc_cap == TRUE) {

			/*
			 * MMC Devices do not support changing the
			 * logical block size
			 *
			 * Note: EINVAL is being returned instead of ENOTTY to
			 * maintain consistency with the original mmc
			 * driver update.
			 */
			err = EINVAL;
		} else {
			mutex_enter(SD_MUTEX(un));
			if ((!(un->un_exclopen & (1<<SDPART(dev)))) ||
			    (un->un_ncmds_in_transport > 0)) {
				mutex_exit(SD_MUTEX(un));
				err = EINVAL;
			} else {
				mutex_exit(SD_MUTEX(un));
				err = sr_change_blkmode(dev, cmd, arg, flag);
			}
		}
		break;

	case CDROMGBLKMODE:
		SD_TRACE(SD_LOG_IOCTL, un, "CDROMGBLKMODE\n");
		if (!ISCD(un)) {
			err = ENOTTY;
		} else if ((un->un_f_cfg_is_atapi != FALSE) &&
		    (un->un_f_blockcount_is_valid != FALSE)) {
			/*
			 * Drive is an ATAPI drive so return target block
			 * size for ATAPI drives since we cannot change the
			 * blocksize on ATAPI drives. Used primarily to detect
			 * if an ATAPI cdrom is present.
			 */
			if (ddi_copyout(&un->un_tgt_blocksize, (void *)arg,
			    sizeof (int), flag) != 0) {
				err = EFAULT;
			} else {
				err = 0;
			}

		} else {
			/*
			 * Drive supports changing block sizes via a Mode
			 * Select.
			 */
			err = sr_change_blkmode(dev, cmd, arg, flag);
		}
		break;

	case CDROMGDRVSPEED:
	case CDROMSDRVSPEED:
		SD_TRACE(SD_LOG_IOCTL, un, "CDROMXDRVSPEED\n");
		if (!ISCD(un)) {
			err = ENOTTY;
		} else if (un->un_f_mmc_cap == TRUE) {
			/*
			 * Note: In the future the driver implementation
			 * for getting and
			 * setting cd speed should entail:
			 * 1) If non-mmc try the Toshiba mode page
			 *    (sr_change_speed)
			 * 2) If mmc but no support for Real Time Streaming try
			 *    the SET CD SPEED (0xBB) command
			 *    (sr_atapi_change_speed)
			 * 3) If mmc and support for Real Time Streaming
			 *    try the GET PERFORMANCE and SET STREAMING
			 *    commands (not yet implemented, 4380808)
			 */
			/*
			 * As per recent MMC spec, CD-ROM speed is variable
			 * and changes with LBA. Since there is no such
			 * thing as drive speed now, fail this ioctl.
			 *
			 * Note: EINVAL is returned for consistency with the
			 * original implementation, which included support
			 * for getting the drive speed of mmc devices but
			 * not setting the drive speed. Thus EINVAL would be
			 * returned if a set request was made for an mmc
			 * device. We no longer support get or set speed for
			 * mmc but need to remain consistent with regard
			 * to the error code returned.
			 */
21950 * We no longer support get or set speed for 21951 * mmc but need to remain consistent with regard 21952 * to the error code returned. 21953 */ 21954 err = EINVAL; 21955 } else if (un->un_f_cfg_is_atapi == TRUE) { 21956 err = sr_atapi_change_speed(dev, cmd, arg, flag); 21957 } else { 21958 err = sr_change_speed(dev, cmd, arg, flag); 21959 } 21960 break; 21961 21962 case CDROMCDDA: 21963 SD_TRACE(SD_LOG_IOCTL, un, "CDROMCDDA\n"); 21964 if (!ISCD(un)) { 21965 err = ENOTTY; 21966 } else { 21967 err = sr_read_cdda(dev, (void *)arg, flag); 21968 } 21969 break; 21970 21971 case CDROMCDXA: 21972 SD_TRACE(SD_LOG_IOCTL, un, "CDROMCDXA\n"); 21973 if (!ISCD(un)) { 21974 err = ENOTTY; 21975 } else { 21976 err = sr_read_cdxa(dev, (caddr_t)arg, flag); 21977 } 21978 break; 21979 21980 case CDROMSUBCODE: 21981 SD_TRACE(SD_LOG_IOCTL, un, "CDROMSUBCODE\n"); 21982 if (!ISCD(un)) { 21983 err = ENOTTY; 21984 } else { 21985 err = sr_read_all_subcodes(dev, (caddr_t)arg, flag); 21986 } 21987 break; 21988 21989 21990 #ifdef SDDEBUG 21991 /* RESET/ABORTS testing ioctls */ 21992 case DKIOCRESET: { 21993 int reset_level; 21994 21995 if (ddi_copyin((void *)arg, &reset_level, sizeof (int), flag)) { 21996 err = EFAULT; 21997 } else { 21998 SD_INFO(SD_LOG_IOCTL, un, "sdioctl: DKIOCRESET: " 21999 "reset_level = 0x%lx\n", reset_level); 22000 if (scsi_reset(SD_ADDRESS(un), reset_level)) { 22001 err = 0; 22002 } else { 22003 err = EIO; 22004 } 22005 } 22006 break; 22007 } 22008 22009 case DKIOCABORT: 22010 SD_INFO(SD_LOG_IOCTL, un, "sdioctl: DKIOCABORT:\n"); 22011 if (scsi_abort(SD_ADDRESS(un), NULL)) { 22012 err = 0; 22013 } else { 22014 err = EIO; 22015 } 22016 break; 22017 #endif 22018 22019 #ifdef SD_FAULT_INJECTION 22020 /* SDIOC FaultInjection testing ioctls */ 22021 case SDIOCSTART: 22022 case SDIOCSTOP: 22023 case SDIOCINSERTPKT: 22024 case SDIOCINSERTXB: 22025 case SDIOCINSERTUN: 22026 case SDIOCINSERTARQ: 22027 case SDIOCPUSH: 22028 case SDIOCRETRIEVE: 22029 case SDIOCRUN: 22030 SD_INFO(SD_LOG_SDTEST, un, "sdioctl:" 22031 "SDIOC detected cmd:0x%X:\n", cmd); 22032 /* call error generator */ 22033 sd_faultinjection_ioctl(cmd, arg, un); 22034 err = 0; 22035 break; 22036 22037 #endif /* SD_FAULT_INJECTION */ 22038 22039 case DKIOCFLUSHWRITECACHE: 22040 { 22041 struct dk_callback *dkc = (struct dk_callback *)arg; 22042 22043 mutex_enter(SD_MUTEX(un)); 22044 if (!un->un_f_sync_cache_supported || 22045 !un->un_f_write_cache_enabled) { 22046 err = un->un_f_sync_cache_supported ? 22047 0 : ENOTSUP; 22048 mutex_exit(SD_MUTEX(un)); 22049 if ((flag & FKIOCTL) && dkc != NULL && 22050 dkc->dkc_callback != NULL) { 22051 (*dkc->dkc_callback)(dkc->dkc_cookie, 22052 err); 22053 /* 22054 * Did callback and reported error. 22055 * Since we did a callback, ioctl 22056 * should return 0. 
22057 */ 22058 err = 0; 22059 } 22060 break; 22061 } 22062 mutex_exit(SD_MUTEX(un)); 22063 22064 if ((flag & FKIOCTL) && dkc != NULL && 22065 dkc->dkc_callback != NULL) { 22066 /* async SYNC CACHE request */ 22067 err = sd_send_scsi_SYNCHRONIZE_CACHE(un, dkc); 22068 } else { 22069 /* synchronous SYNC CACHE request */ 22070 err = sd_send_scsi_SYNCHRONIZE_CACHE(un, NULL); 22071 } 22072 } 22073 break; 22074 22075 case DKIOCGETWCE: { 22076 22077 int wce; 22078 22079 if ((err = sd_get_write_cache_enabled(ssc, &wce)) != 0) { 22080 break; 22081 } 22082 22083 if (ddi_copyout(&wce, (void *)arg, sizeof (wce), flag)) { 22084 err = EFAULT; 22085 } 22086 break; 22087 } 22088 22089 case DKIOCSETWCE: { 22090 22091 int wce, sync_supported; 22092 22093 if (ddi_copyin((void *)arg, &wce, sizeof (wce), flag)) { 22094 err = EFAULT; 22095 break; 22096 } 22097 22098 /* 22099 * Synchronize multiple threads trying to enable 22100 * or disable the cache via the un_f_wcc_cv 22101 * condition variable. 22102 */ 22103 mutex_enter(SD_MUTEX(un)); 22104 22105 /* 22106 * Don't allow the cache to be enabled if the 22107 * config file has it disabled. 22108 */ 22109 if (un->un_f_opt_disable_cache && wce) { 22110 mutex_exit(SD_MUTEX(un)); 22111 err = EINVAL; 22112 break; 22113 } 22114 22115 /* 22116 * Wait for write cache change in progress 22117 * bit to be clear before proceeding. 22118 */ 22119 while (un->un_f_wcc_inprog) 22120 cv_wait(&un->un_wcc_cv, SD_MUTEX(un)); 22121 22122 un->un_f_wcc_inprog = 1; 22123 22124 if (un->un_f_write_cache_enabled && wce == 0) { 22125 /* 22126 * Disable the write cache. Don't clear 22127 * un_f_write_cache_enabled until after 22128 * the mode select and flush are complete. 22129 */ 22130 sync_supported = un->un_f_sync_cache_supported; 22131 22132 /* 22133 * If cache flush is suppressed, we assume that the 22134 * controller firmware will take care of managing the 22135 * write cache for us: no need to explicitly 22136 * disable it. 22137 */ 22138 if (!un->un_f_suppress_cache_flush) { 22139 mutex_exit(SD_MUTEX(un)); 22140 if ((err = sd_cache_control(ssc, 22141 SD_CACHE_NOCHANGE, 22142 SD_CACHE_DISABLE)) == 0 && 22143 sync_supported) { 22144 err = sd_send_scsi_SYNCHRONIZE_CACHE(un, 22145 NULL); 22146 } 22147 } else { 22148 mutex_exit(SD_MUTEX(un)); 22149 } 22150 22151 mutex_enter(SD_MUTEX(un)); 22152 if (err == 0) { 22153 un->un_f_write_cache_enabled = 0; 22154 } 22155 22156 } else if (!un->un_f_write_cache_enabled && wce != 0) { 22157 /* 22158 * Set un_f_write_cache_enabled first, so there is 22159 * no window where the cache is enabled, but the 22160 * bit says it isn't. 22161 */ 22162 un->un_f_write_cache_enabled = 1; 22163 22164 /* 22165 * If cache flush is suppressed, we assume that the 22166 * controller firmware will take care of managing the 22167 * write cache for us: no need to explicitly 22168 * enable it. 
22169 */ 22170 if (!un->un_f_suppress_cache_flush) { 22171 mutex_exit(SD_MUTEX(un)); 22172 err = sd_cache_control(ssc, SD_CACHE_NOCHANGE, 22173 SD_CACHE_ENABLE); 22174 } else { 22175 mutex_exit(SD_MUTEX(un)); 22176 } 22177 22178 mutex_enter(SD_MUTEX(un)); 22179 22180 if (err) { 22181 un->un_f_write_cache_enabled = 0; 22182 } 22183 } 22184 22185 un->un_f_wcc_inprog = 0; 22186 cv_broadcast(&un->un_wcc_cv); 22187 mutex_exit(SD_MUTEX(un)); 22188 break; 22189 } 22190 22191 default: 22192 err = ENOTTY; 22193 break; 22194 } 22195 mutex_enter(SD_MUTEX(un)); 22196 un->un_ncmds_in_driver--; 22197 ASSERT(un->un_ncmds_in_driver >= 0); 22198 mutex_exit(SD_MUTEX(un)); 22199 22200 22201 done_without_assess: 22202 sd_ssc_fini(ssc); 22203 22204 SD_TRACE(SD_LOG_IOCTL, un, "sdioctl: exit: %d\n", err); 22205 return (err); 22206 22207 done_with_assess: 22208 mutex_enter(SD_MUTEX(un)); 22209 un->un_ncmds_in_driver--; 22210 ASSERT(un->un_ncmds_in_driver >= 0); 22211 mutex_exit(SD_MUTEX(un)); 22212 22213 done_quick_assess: 22214 if (err != 0) 22215 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 22216 /* Uninitialize sd_ssc_t pointer */ 22217 sd_ssc_fini(ssc); 22218 22219 SD_TRACE(SD_LOG_IOCTL, un, "sdioctl: exit: %d\n", err); 22220 return (err); 22221 } 22222 22223 22224 /* 22225 * Function: sd_dkio_ctrl_info 22226 * 22227 * Description: This routine is the driver entry point for handling controller 22228 * information ioctl requests (DKIOCINFO). 22229 * 22230 * Arguments: dev - the device number 22231 * arg - pointer to user provided dk_cinfo structure 22232 * specifying the controller type and attributes. 22233 * flag - this argument is a pass through to ddi_copyxxx() 22234 * directly from the mode argument of ioctl(). 22235 * 22236 * Return Code: 0 22237 * EFAULT 22238 * ENXIO 22239 */ 22240 22241 static int 22242 sd_dkio_ctrl_info(dev_t dev, caddr_t arg, int flag) 22243 { 22244 struct sd_lun *un = NULL; 22245 struct dk_cinfo *info; 22246 dev_info_t *pdip; 22247 int lun, tgt; 22248 22249 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 22250 return (ENXIO); 22251 } 22252 22253 info = (struct dk_cinfo *) 22254 kmem_zalloc(sizeof (struct dk_cinfo), KM_SLEEP); 22255 22256 switch (un->un_ctype) { 22257 case CTYPE_CDROM: 22258 info->dki_ctype = DKC_CDROM; 22259 break; 22260 default: 22261 info->dki_ctype = DKC_SCSI_CCS; 22262 break; 22263 } 22264 pdip = ddi_get_parent(SD_DEVINFO(un)); 22265 info->dki_cnum = ddi_get_instance(pdip); 22266 if (strlen(ddi_get_name(pdip)) < DK_DEVLEN) { 22267 (void) strcpy(info->dki_cname, ddi_get_name(pdip)); 22268 } else { 22269 (void) strncpy(info->dki_cname, ddi_node_name(pdip), 22270 DK_DEVLEN - 1); 22271 } 22272 22273 lun = ddi_prop_get_int(DDI_DEV_T_ANY, SD_DEVINFO(un), 22274 DDI_PROP_DONTPASS, SCSI_ADDR_PROP_LUN, 0); 22275 tgt = ddi_prop_get_int(DDI_DEV_T_ANY, SD_DEVINFO(un), 22276 DDI_PROP_DONTPASS, SCSI_ADDR_PROP_TARGET, 0); 22277 22278 /* Unit Information */ 22279 info->dki_unit = ddi_get_instance(SD_DEVINFO(un)); 22280 info->dki_slave = ((tgt << 3) | lun); 22281 (void) strncpy(info->dki_dname, ddi_driver_name(SD_DEVINFO(un)), 22282 DK_DEVLEN - 1); 22283 info->dki_flags = DKI_FMTVOL; 22284 info->dki_partition = SDPART(dev); 22285 22286 /* Max Transfer size of this device in blocks */ 22287 info->dki_maxtransfer = un->un_max_xfer_size / un->un_sys_blocksize; 22288 info->dki_addr = 0; 22289 info->dki_space = 0; 22290 info->dki_prio = 0; 22291 info->dki_vec = 0; 22292 22293 if (ddi_copyout(info, arg, sizeof (struct dk_cinfo), flag) != 0) { 22294 kmem_free(info, sizeof 
(struct dk_cinfo)); 22295 return (EFAULT); 22296 } else { 22297 kmem_free(info, sizeof (struct dk_cinfo)); 22298 return (0); 22299 } 22300 } 22301 22302 22303 /* 22304 * Function: sd_get_media_info 22305 * 22306 * Description: This routine is the driver entry point for handling ioctl 22307 * requests for the media type or command set profile used by the 22308 * drive to operate on the media (DKIOCGMEDIAINFO). 22309 * 22310 * Arguments: dev - the device number 22311 * arg - pointer to user provided dk_minfo structure 22312 * specifying the media type, logical block size and 22313 * drive capacity. 22314 * flag - this argument is a pass through to ddi_copyxxx() 22315 * directly from the mode argument of ioctl(). 22316 * 22317 * Return Code: 0 22318 * EACCESS 22319 * EFAULT 22320 * ENXIO 22321 * EIO 22322 */ 22323 22324 static int 22325 sd_get_media_info(dev_t dev, caddr_t arg, int flag) 22326 { 22327 struct sd_lun *un = NULL; 22328 struct uscsi_cmd com; 22329 struct scsi_inquiry *sinq; 22330 struct dk_minfo media_info; 22331 u_longlong_t media_capacity; 22332 uint64_t capacity; 22333 uint_t lbasize; 22334 uchar_t *out_data; 22335 uchar_t *rqbuf; 22336 int rval = 0; 22337 int rtn; 22338 sd_ssc_t *ssc; 22339 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL || 22340 (un->un_state == SD_STATE_OFFLINE)) { 22341 return (ENXIO); 22342 } 22343 22344 SD_TRACE(SD_LOG_IOCTL_DKIO, un, "sd_get_media_info: entry\n"); 22345 22346 out_data = kmem_zalloc(SD_PROFILE_HEADER_LEN, KM_SLEEP); 22347 rqbuf = kmem_zalloc(SENSE_LENGTH, KM_SLEEP); 22348 22349 /* Issue a TUR to determine if the drive is ready with media present */ 22350 ssc = sd_ssc_init(un); 22351 rval = sd_send_scsi_TEST_UNIT_READY(ssc, SD_CHECK_FOR_MEDIA); 22352 if (rval == ENXIO) { 22353 goto done; 22354 } else if (rval != 0) { 22355 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 22356 } 22357 22358 /* Now get configuration data */ 22359 if (ISCD(un)) { 22360 media_info.dki_media_type = DK_CDROM; 22361 22362 /* Allow SCMD_GET_CONFIGURATION to MMC devices only */ 22363 if (un->un_f_mmc_cap == TRUE) { 22364 rtn = sd_send_scsi_GET_CONFIGURATION(ssc, &com, rqbuf, 22365 SENSE_LENGTH, out_data, SD_PROFILE_HEADER_LEN, 22366 SD_PATH_STANDARD); 22367 22368 if (rtn) { 22369 /* 22370 * We ignore all failures for CD and need to 22371 * put the assessment before processing code 22372 * to avoid missing assessment for FMA. 
22373 */ 22374 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 22375 /* 22376 * Failed for other than an illegal request 22377 * or command not supported 22378 */ 22379 if ((com.uscsi_status == STATUS_CHECK) && 22380 (com.uscsi_rqstatus == STATUS_GOOD)) { 22381 if ((rqbuf[2] != KEY_ILLEGAL_REQUEST) || 22382 (rqbuf[12] != 0x20)) { 22383 rval = EIO; 22384 goto no_assessment; 22385 } 22386 } 22387 } else { 22388 /* 22389 * The GET CONFIGURATION command succeeded 22390 * so set the media type according to the 22391 * returned data 22392 */ 22393 media_info.dki_media_type = out_data[6]; 22394 media_info.dki_media_type <<= 8; 22395 media_info.dki_media_type |= out_data[7]; 22396 } 22397 } 22398 } else { 22399 /* 22400 * The profile list is not available, so we attempt to identify 22401 * the media type based on the inquiry data 22402 */ 22403 sinq = un->un_sd->sd_inq; 22404 if ((sinq->inq_dtype == DTYPE_DIRECT) || 22405 (sinq->inq_dtype == DTYPE_OPTICAL)) { 22406 /* This is a direct access device or optical disk */ 22407 media_info.dki_media_type = DK_FIXED_DISK; 22408 22409 if ((bcmp(sinq->inq_vid, "IOMEGA", 6) == 0) || 22410 (bcmp(sinq->inq_vid, "iomega", 6) == 0)) { 22411 if ((bcmp(sinq->inq_pid, "ZIP", 3) == 0)) { 22412 media_info.dki_media_type = DK_ZIP; 22413 } else if ( 22414 (bcmp(sinq->inq_pid, "jaz", 3) == 0)) { 22415 media_info.dki_media_type = DK_JAZ; 22416 } 22417 } 22418 } else { 22419 /* 22420 * Not a CD, direct access or optical disk so return 22421 * unknown media 22422 */ 22423 media_info.dki_media_type = DK_UNKNOWN; 22424 } 22425 } 22426 22427 /* Now read the capacity so we can provide the lbasize and capacity */ 22428 rval = sd_send_scsi_READ_CAPACITY(ssc, &capacity, &lbasize, 22429 SD_PATH_DIRECT); 22430 switch (rval) { 22431 case 0: 22432 break; 22433 case EACCES: 22434 rval = EACCES; 22435 goto done; 22436 default: 22437 rval = EIO; 22438 goto done; 22439 } 22440 22441 /* 22442 * If lun is expanded dynamically, update the un structure. 22443 */ 22444 mutex_enter(SD_MUTEX(un)); 22445 if ((un->un_f_blockcount_is_valid == TRUE) && 22446 (un->un_f_tgt_blocksize_is_valid == TRUE) && 22447 (capacity > un->un_blockcount)) { 22448 sd_update_block_info(un, lbasize, capacity); 22449 } 22450 mutex_exit(SD_MUTEX(un)); 22451 22452 media_info.dki_lbsize = lbasize; 22453 media_capacity = capacity; 22454 22455 /* 22456 * sd_send_scsi_READ_CAPACITY() reports capacity in 22457 * un->un_sys_blocksize chunks. So we need to convert it into 22458 * cap.lbasize chunks. 22459 */ 22460 media_capacity *= un->un_sys_blocksize; 22461 media_capacity /= lbasize; 22462 media_info.dki_capacity = media_capacity; 22463 22464 if (ddi_copyout(&media_info, arg, sizeof (struct dk_minfo), flag)) { 22465 rval = EFAULT; 22466 /* Put goto. Anybody might add some code below in future */ 22467 goto no_assessment; 22468 } 22469 done: 22470 if (rval != 0) { 22471 if (rval == EIO) 22472 sd_ssc_assessment(ssc, SD_FMT_STATUS_CHECK); 22473 else 22474 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 22475 } 22476 no_assessment: 22477 sd_ssc_fini(ssc); 22478 kmem_free(out_data, SD_PROFILE_HEADER_LEN); 22479 kmem_free(rqbuf, SENSE_LENGTH); 22480 return (rval); 22481 } 22482 22483 22484 /* 22485 * Function: sd_check_media 22486 * 22487 * Description: This utility routine implements the functionality for the 22488 * DKIOCSTATE ioctl. This ioctl blocks the user thread until the 22489 * driver state changes from that specified by the user 22490 * (inserted or ejected). 
For example, if the user specifies 22491 * DKIO_EJECTED and the current media state is inserted this 22492 * routine will immediately return DKIO_INSERTED. However, if the 22493 * current media state is not inserted the user thread will be 22494 * blocked until the drive state changes. If DKIO_NONE is specified 22495 * the user thread will block until a drive state change occurs. 22496 * 22497 * Arguments: dev - the device number 22498 * state - user pointer to a dkio_state, updated with the current 22499 * drive state at return. 22500 * 22501 * Return Code: ENXIO 22502 * EIO 22503 * EAGAIN 22504 * EINTR 22505 */ 22506 22507 static int 22508 sd_check_media(dev_t dev, enum dkio_state state) 22509 { 22510 struct sd_lun *un = NULL; 22511 enum dkio_state prev_state; 22512 opaque_t token = NULL; 22513 int rval = 0; 22514 sd_ssc_t *ssc; 22515 22516 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 22517 return (ENXIO); 22518 } 22519 22520 SD_TRACE(SD_LOG_COMMON, un, "sd_check_media: entry\n"); 22521 22522 ssc = sd_ssc_init(un); 22523 22524 mutex_enter(SD_MUTEX(un)); 22525 22526 SD_TRACE(SD_LOG_COMMON, un, "sd_check_media: " 22527 "state=%x, mediastate=%x\n", state, un->un_mediastate); 22528 22529 prev_state = un->un_mediastate; 22530 22531 /* is there anything to do? */ 22532 if (state == un->un_mediastate || un->un_mediastate == DKIO_NONE) { 22533 /* 22534 * submit the request to the scsi_watch service; 22535 * scsi_media_watch_cb() does the real work 22536 */ 22537 mutex_exit(SD_MUTEX(un)); 22538 22539 /* 22540 * This change handles the case where a scsi watch request is 22541 * added to a device that is powered down. To accomplish this 22542 * we power up the device before adding the scsi watch request, 22543 * since the scsi watch sends a TUR directly to the device 22544 * which the device cannot handle if it is powered down. 22545 */ 22546 if (sd_pm_entry(un) != DDI_SUCCESS) { 22547 mutex_enter(SD_MUTEX(un)); 22548 goto done; 22549 } 22550 22551 token = scsi_watch_request_submit(SD_SCSI_DEVP(un), 22552 sd_check_media_time, SENSE_LENGTH, sd_media_watch_cb, 22553 (caddr_t)dev); 22554 22555 sd_pm_exit(un); 22556 22557 mutex_enter(SD_MUTEX(un)); 22558 if (token == NULL) { 22559 rval = EAGAIN; 22560 goto done; 22561 } 22562 22563 /* 22564 * This is a special case IOCTL that doesn't return 22565 * until the media state changes. Routine sdpower 22566 * knows about and handles this so don't count it 22567 * as an active cmd in the driver, which would 22568 * keep the device busy to the pm framework. 22569 * If the count isn't decremented the device can't 22570 * be powered down. 22571 */ 22572 un->un_ncmds_in_driver--; 22573 ASSERT(un->un_ncmds_in_driver >= 0); 22574 22575 /* 22576 * if a prior request had been made, this will be the same 22577 * token, as scsi_watch was designed that way. 
22578 */ 22579 un->un_swr_token = token; 22580 un->un_specified_mediastate = state; 22581 22582 /* 22583 * now wait for media change 22584 * we will not be signalled unless mediastate == state but it is 22585 * still better to test for this condition, since there is a 22586 * 2 sec cv_broadcast delay when mediastate == DKIO_INSERTED 22587 */ 22588 SD_TRACE(SD_LOG_COMMON, un, 22589 "sd_check_media: waiting for media state change\n"); 22590 while (un->un_mediastate == state) { 22591 if (cv_wait_sig(&un->un_state_cv, SD_MUTEX(un)) == 0) { 22592 SD_TRACE(SD_LOG_COMMON, un, 22593 "sd_check_media: waiting for media state " 22594 "was interrupted\n"); 22595 un->un_ncmds_in_driver++; 22596 rval = EINTR; 22597 goto done; 22598 } 22599 SD_TRACE(SD_LOG_COMMON, un, 22600 "sd_check_media: received signal, state=%x\n", 22601 un->un_mediastate); 22602 } 22603 /* 22604 * Inc the counter to indicate the device once again 22605 * has an active outstanding cmd. 22606 */ 22607 un->un_ncmds_in_driver++; 22608 } 22609 22610 /* invalidate geometry */ 22611 if (prev_state == DKIO_INSERTED && un->un_mediastate == DKIO_EJECTED) { 22612 sr_ejected(un); 22613 } 22614 22615 if (un->un_mediastate == DKIO_INSERTED && prev_state != DKIO_INSERTED) { 22616 uint64_t capacity; 22617 uint_t lbasize; 22618 22619 SD_TRACE(SD_LOG_COMMON, un, "sd_check_media: media inserted\n"); 22620 mutex_exit(SD_MUTEX(un)); 22621 /* 22622 * Since the following routines use SD_PATH_DIRECT, we must 22623 * call PM directly before the upcoming disk accesses. This 22624 * may cause the disk to be power/spin up. 22625 */ 22626 22627 if (sd_pm_entry(un) == DDI_SUCCESS) { 22628 rval = sd_send_scsi_READ_CAPACITY(ssc, 22629 &capacity, &lbasize, SD_PATH_DIRECT); 22630 if (rval != 0) { 22631 sd_pm_exit(un); 22632 if (rval == EIO) 22633 sd_ssc_assessment(ssc, 22634 SD_FMT_STATUS_CHECK); 22635 else 22636 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 22637 mutex_enter(SD_MUTEX(un)); 22638 goto done; 22639 } 22640 } else { 22641 rval = EIO; 22642 mutex_enter(SD_MUTEX(un)); 22643 goto done; 22644 } 22645 mutex_enter(SD_MUTEX(un)); 22646 22647 sd_update_block_info(un, lbasize, capacity); 22648 22649 /* 22650 * Check if the media in the device is writable or not 22651 */ 22652 if (ISCD(un)) { 22653 sd_check_for_writable_cd(ssc, SD_PATH_DIRECT); 22654 } 22655 22656 mutex_exit(SD_MUTEX(un)); 22657 cmlb_invalidate(un->un_cmlbhandle, (void *)SD_PATH_DIRECT); 22658 if ((cmlb_validate(un->un_cmlbhandle, 0, 22659 (void *)SD_PATH_DIRECT) == 0) && un->un_f_pkstats_enabled) { 22660 sd_set_pstats(un); 22661 SD_TRACE(SD_LOG_IO_PARTITION, un, 22662 "sd_check_media: un:0x%p pstats created and " 22663 "set\n", un); 22664 } 22665 22666 rval = sd_send_scsi_DOORLOCK(ssc, SD_REMOVAL_PREVENT, 22667 SD_PATH_DIRECT); 22668 22669 sd_pm_exit(un); 22670 22671 if (rval != 0) { 22672 if (rval == EIO) 22673 sd_ssc_assessment(ssc, SD_FMT_STATUS_CHECK); 22674 else 22675 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 22676 } 22677 22678 mutex_enter(SD_MUTEX(un)); 22679 } 22680 done: 22681 sd_ssc_fini(ssc); 22682 un->un_f_watcht_stopped = FALSE; 22683 /* 22684 * Use of this local token and the mutex ensures that we avoid 22685 * some race conditions associated with terminating the 22686 * scsi watch. 
22687 */ 22688 if (token) { 22689 un->un_swr_token = (opaque_t)NULL; 22690 mutex_exit(SD_MUTEX(un)); 22691 (void) scsi_watch_request_terminate(token, 22692 SCSI_WATCH_TERMINATE_WAIT); 22693 mutex_enter(SD_MUTEX(un)); 22694 } 22695 22696 /* 22697 * Update the capacity kstat value, if no media previously 22698 * (capacity kstat is 0) and a media has been inserted 22699 * (un_f_blockcount_is_valid == TRUE) 22700 */ 22701 if (un->un_errstats) { 22702 struct sd_errstats *stp = NULL; 22703 22704 stp = (struct sd_errstats *)un->un_errstats->ks_data; 22705 if ((stp->sd_capacity.value.ui64 == 0) && 22706 (un->un_f_blockcount_is_valid == TRUE)) { 22707 stp->sd_capacity.value.ui64 = 22708 (uint64_t)((uint64_t)un->un_blockcount * 22709 un->un_sys_blocksize); 22710 } 22711 } 22712 mutex_exit(SD_MUTEX(un)); 22713 SD_TRACE(SD_LOG_COMMON, un, "sd_check_media: done\n"); 22714 return (rval); 22715 } 22716 22717 22718 /* 22719 * Function: sd_delayed_cv_broadcast 22720 * 22721 * Description: Delayed cv_broadcast to allow for target to recover from media 22722 * insertion. 22723 * 22724 * Arguments: arg - driver soft state (unit) structure 22725 */ 22726 22727 static void 22728 sd_delayed_cv_broadcast(void *arg) 22729 { 22730 struct sd_lun *un = arg; 22731 22732 SD_TRACE(SD_LOG_COMMON, un, "sd_delayed_cv_broadcast\n"); 22733 22734 mutex_enter(SD_MUTEX(un)); 22735 un->un_dcvb_timeid = NULL; 22736 cv_broadcast(&un->un_state_cv); 22737 mutex_exit(SD_MUTEX(un)); 22738 } 22739 22740 22741 /* 22742 * Function: sd_media_watch_cb 22743 * 22744 * Description: Callback routine used for support of the DKIOCSTATE ioctl. This 22745 * routine processes the TUR sense data and updates the driver 22746 * state if a transition has occurred. The user thread 22747 * (sd_check_media) is then signalled. 
22748 * 22749 * Arguments: arg - the device 'dev_t' is used for context to discriminate 22750 * among multiple watches that share this callback function 22751 * resultp - scsi watch facility result packet containing scsi 22752 * packet, status byte and sense data 22753 * 22754 * Return Code: 0 for success, -1 for failure 22755 */ 22756 22757 static int 22758 sd_media_watch_cb(caddr_t arg, struct scsi_watch_result *resultp) 22759 { 22760 struct sd_lun *un; 22761 struct scsi_status *statusp = resultp->statusp; 22762 uint8_t *sensep = (uint8_t *)resultp->sensep; 22763 enum dkio_state state = DKIO_NONE; 22764 dev_t dev = (dev_t)arg; 22765 uchar_t actual_sense_length; 22766 uint8_t skey, asc, ascq; 22767 22768 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 22769 return (-1); 22770 } 22771 actual_sense_length = resultp->actual_sense_length; 22772 22773 mutex_enter(SD_MUTEX(un)); 22774 SD_TRACE(SD_LOG_COMMON, un, 22775 "sd_media_watch_cb: status=%x, sensep=%p, len=%x\n", 22776 *((char *)statusp), (void *)sensep, actual_sense_length); 22777 22778 if (resultp->pkt->pkt_reason == CMD_DEV_GONE) { 22779 un->un_mediastate = DKIO_DEV_GONE; 22780 cv_broadcast(&un->un_state_cv); 22781 mutex_exit(SD_MUTEX(un)); 22782 22783 return (0); 22784 } 22785 22786 /* 22787 * If there was a check condition then sensep points to valid sense data 22788 * If status was not a check condition but a reservation or busy status 22789 * then the new state is DKIO_NONE 22790 */ 22791 if (sensep != NULL) { 22792 skey = scsi_sense_key(sensep); 22793 asc = scsi_sense_asc(sensep); 22794 ascq = scsi_sense_ascq(sensep); 22795 22796 SD_INFO(SD_LOG_COMMON, un, 22797 "sd_media_watch_cb: sense KEY=%x, ASC=%x, ASCQ=%x\n", 22798 skey, asc, ascq); 22799 /* This routine only uses up to 13 bytes of sense data. */ 22800 if (actual_sense_length >= 13) { 22801 if (skey == KEY_UNIT_ATTENTION) { 22802 if (asc == 0x28) { 22803 state = DKIO_INSERTED; 22804 } 22805 } else if (skey == KEY_NOT_READY) { 22806 /* 22807 * Sense data of 02/06/00 means that the 22808 * drive could not read the media (No 22809 * reference position found). In this case 22810 * to prevent a hang on the DKIOCSTATE IOCTL 22811 * we set the media state to DKIO_INSERTED. 22812 */ 22813 if (asc == 0x06 && ascq == 0x00) 22814 state = DKIO_INSERTED; 22815 22816 /* 22817 * if 02/04/02 means that the host 22818 * should send start command. Explicitly 22819 * leave the media state as is 22820 * (inserted) as the media is inserted 22821 * and host has stopped device for PM 22822 * reasons. Upon next true read/write 22823 * to this media will bring the 22824 * device to the right state good for 22825 * media access. 22826 */ 22827 if (asc == 0x3a) { 22828 state = DKIO_EJECTED; 22829 } else { 22830 /* 22831 * If the drive is busy with an 22832 * operation or long write, keep the 22833 * media in an inserted state. 22834 */ 22835 22836 if ((asc == 0x04) && 22837 ((ascq == 0x02) || 22838 (ascq == 0x07) || 22839 (ascq == 0x08))) { 22840 state = DKIO_INSERTED; 22841 } 22842 } 22843 } else if (skey == KEY_NO_SENSE) { 22844 if ((asc == 0x00) && (ascq == 0x00)) { 22845 /* 22846 * Sense Data 00/00/00 does not provide 22847 * any information about the state of 22848 * the media. Ignore it. 
22849 */ 22850 mutex_exit(SD_MUTEX(un)); 22851 return (0); 22852 } 22853 } 22854 } 22855 } else if ((*((char *)statusp) == STATUS_GOOD) && 22856 (resultp->pkt->pkt_reason == CMD_CMPLT)) { 22857 state = DKIO_INSERTED; 22858 } 22859 22860 SD_TRACE(SD_LOG_COMMON, un, 22861 "sd_media_watch_cb: state=%x, specified=%x\n", 22862 state, un->un_specified_mediastate); 22863 22864 /* 22865 * now signal the waiting thread if this is *not* the specified state; 22866 * delay the signal if the state is DKIO_INSERTED to allow the target 22867 * to recover 22868 */ 22869 if (state != un->un_specified_mediastate) { 22870 un->un_mediastate = state; 22871 if (state == DKIO_INSERTED) { 22872 /* 22873 * delay the signal to give the drive a chance 22874 * to do what it apparently needs to do 22875 */ 22876 SD_TRACE(SD_LOG_COMMON, un, 22877 "sd_media_watch_cb: delayed cv_broadcast\n"); 22878 if (un->un_dcvb_timeid == NULL) { 22879 un->un_dcvb_timeid = 22880 timeout(sd_delayed_cv_broadcast, un, 22881 drv_usectohz((clock_t)MEDIA_ACCESS_DELAY)); 22882 } 22883 } else { 22884 SD_TRACE(SD_LOG_COMMON, un, 22885 "sd_media_watch_cb: immediate cv_broadcast\n"); 22886 cv_broadcast(&un->un_state_cv); 22887 } 22888 } 22889 mutex_exit(SD_MUTEX(un)); 22890 return (0); 22891 } 22892 22893 22894 /* 22895 * Function: sd_dkio_get_temp 22896 * 22897 * Description: This routine is the driver entry point for handling ioctl 22898 * requests to get the disk temperature. 22899 * 22900 * Arguments: dev - the device number 22901 * arg - pointer to user provided dk_temperature structure. 22902 * flag - this argument is a pass through to ddi_copyxxx() 22903 * directly from the mode argument of ioctl(). 22904 * 22905 * Return Code: 0 22906 * EFAULT 22907 * ENXIO 22908 * EAGAIN 22909 */ 22910 22911 static int 22912 sd_dkio_get_temp(dev_t dev, caddr_t arg, int flag) 22913 { 22914 struct sd_lun *un = NULL; 22915 struct dk_temperature *dktemp = NULL; 22916 uchar_t *temperature_page; 22917 int rval = 0; 22918 int path_flag = SD_PATH_STANDARD; 22919 sd_ssc_t *ssc; 22920 22921 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 22922 return (ENXIO); 22923 } 22924 22925 ssc = sd_ssc_init(un); 22926 dktemp = kmem_zalloc(sizeof (struct dk_temperature), KM_SLEEP); 22927 22928 /* copyin the disk temp argument to get the user flags */ 22929 if (ddi_copyin((void *)arg, dktemp, 22930 sizeof (struct dk_temperature), flag) != 0) { 22931 rval = EFAULT; 22932 goto done; 22933 } 22934 22935 /* Initialize the temperature to invalid. */ 22936 dktemp->dkt_cur_temp = (short)DKT_INVALID_TEMP; 22937 dktemp->dkt_ref_temp = (short)DKT_INVALID_TEMP; 22938 22939 /* 22940 * Note: Investigate removing the "bypass pm" semantic. 22941 * Can we just bypass PM always? 22942 */ 22943 if (dktemp->dkt_flags & DKT_BYPASS_PM) { 22944 path_flag = SD_PATH_DIRECT; 22945 ASSERT(!mutex_owned(&un->un_pm_mutex)); 22946 mutex_enter(&un->un_pm_mutex); 22947 if (SD_DEVICE_IS_IN_LOW_POWER(un)) { 22948 /* 22949 * If DKT_BYPASS_PM is set, and the drive happens to be 22950 * in low power mode, we can not wake it up, Need to 22951 * return EAGAIN. 22952 */ 22953 mutex_exit(&un->un_pm_mutex); 22954 rval = EAGAIN; 22955 goto done; 22956 } else { 22957 /* 22958 * Indicate to PM the device is busy. This is required 22959 * to avoid a race - i.e. the ioctl is issuing a 22960 * command and the pm framework brings down the device 22961 * to low power mode (possible power cut-off on some 22962 * platforms). 
22963 */ 22964 mutex_exit(&un->un_pm_mutex); 22965 if (sd_pm_entry(un) != DDI_SUCCESS) { 22966 rval = EAGAIN; 22967 goto done; 22968 } 22969 } 22970 } 22971 22972 temperature_page = kmem_zalloc(TEMPERATURE_PAGE_SIZE, KM_SLEEP); 22973 22974 rval = sd_send_scsi_LOG_SENSE(ssc, temperature_page, 22975 TEMPERATURE_PAGE_SIZE, TEMPERATURE_PAGE, 1, 0, path_flag); 22976 if (rval != 0) 22977 goto done2; 22978 22979 /* 22980 * For the current temperature verify that the parameter length is 0x02 22981 * and the parameter code is 0x00 22982 */ 22983 if ((temperature_page[7] == 0x02) && (temperature_page[4] == 0x00) && 22984 (temperature_page[5] == 0x00)) { 22985 if (temperature_page[9] == 0xFF) { 22986 dktemp->dkt_cur_temp = (short)DKT_INVALID_TEMP; 22987 } else { 22988 dktemp->dkt_cur_temp = (short)(temperature_page[9]); 22989 } 22990 } 22991 22992 /* 22993 * For the reference temperature verify that the parameter 22994 * length is 0x02 and the parameter code is 0x01 22995 */ 22996 if ((temperature_page[13] == 0x02) && (temperature_page[10] == 0x00) && 22997 (temperature_page[11] == 0x01)) { 22998 if (temperature_page[15] == 0xFF) { 22999 dktemp->dkt_ref_temp = (short)DKT_INVALID_TEMP; 23000 } else { 23001 dktemp->dkt_ref_temp = (short)(temperature_page[15]); 23002 } 23003 } 23004 23005 /* Do the copyout regardless of the temperature commands status. */ 23006 if (ddi_copyout(dktemp, (void *)arg, sizeof (struct dk_temperature), 23007 flag) != 0) { 23008 rval = EFAULT; 23009 goto done1; 23010 } 23011 23012 done2: 23013 if (rval != 0) { 23014 if (rval == EIO) 23015 sd_ssc_assessment(ssc, SD_FMT_STATUS_CHECK); 23016 else 23017 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 23018 } 23019 done1: 23020 if (path_flag == SD_PATH_DIRECT) { 23021 sd_pm_exit(un); 23022 } 23023 23024 kmem_free(temperature_page, TEMPERATURE_PAGE_SIZE); 23025 done: 23026 sd_ssc_fini(ssc); 23027 if (dktemp != NULL) { 23028 kmem_free(dktemp, sizeof (struct dk_temperature)); 23029 } 23030 23031 return (rval); 23032 } 23033 23034 23035 /* 23036 * Function: sd_log_page_supported 23037 * 23038 * Description: This routine uses sd_send_scsi_LOG_SENSE to find the list of 23039 * supported log pages. 23040 * 23041 * Arguments: ssc - ssc contains pointer to driver soft state (unit) 23042 * structure for this target. 23043 * log_page - 23044 * 23045 * Return Code: -1 - on error (log sense is optional and may not be supported). 23046 * 0 - log page not found. 23047 * 1 - log page found. 23048 */ 23049 23050 static int 23051 sd_log_page_supported(sd_ssc_t *ssc, int log_page) 23052 { 23053 uchar_t *log_page_data; 23054 int i; 23055 int match = 0; 23056 int log_size; 23057 int status = 0; 23058 struct sd_lun *un; 23059 23060 ASSERT(ssc != NULL); 23061 un = ssc->ssc_un; 23062 ASSERT(un != NULL); 23063 23064 log_page_data = kmem_zalloc(0xFF, KM_SLEEP); 23065 23066 status = sd_send_scsi_LOG_SENSE(ssc, log_page_data, 0xFF, 0, 0x01, 0, 23067 SD_PATH_DIRECT); 23068 23069 if (status != 0) { 23070 if (status == EIO) { 23071 /* 23072 * Some disks do not support log sense, we 23073 * should ignore this kind of error(sense key is 23074 * 0x5 - illegal request). 
23075 */ 23076 uint8_t *sensep; 23077 int senlen; 23078 23079 sensep = (uint8_t *)ssc->ssc_uscsi_cmd->uscsi_rqbuf; 23080 senlen = (int)(ssc->ssc_uscsi_cmd->uscsi_rqlen - 23081 ssc->ssc_uscsi_cmd->uscsi_rqresid); 23082 23083 if (senlen > 0 && 23084 scsi_sense_key(sensep) == KEY_ILLEGAL_REQUEST) { 23085 sd_ssc_assessment(ssc, 23086 SD_FMT_IGNORE_COMPROMISE); 23087 } else { 23088 sd_ssc_assessment(ssc, SD_FMT_STATUS_CHECK); 23089 } 23090 } else { 23091 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 23092 } 23093 23094 SD_ERROR(SD_LOG_COMMON, un, 23095 "sd_log_page_supported: failed log page retrieval\n"); 23096 kmem_free(log_page_data, 0xFF); 23097 return (-1); 23098 } 23099 23100 log_size = log_page_data[3]; 23101 23102 /* 23103 * The list of supported log pages start from the fourth byte. Check 23104 * until we run out of log pages or a match is found. 23105 */ 23106 for (i = 4; (i < (log_size + 4)) && !match; i++) { 23107 if (log_page_data[i] == log_page) { 23108 match++; 23109 } 23110 } 23111 kmem_free(log_page_data, 0xFF); 23112 return (match); 23113 } 23114 23115 23116 /* 23117 * Function: sd_mhdioc_failfast 23118 * 23119 * Description: This routine is the driver entry point for handling ioctl 23120 * requests to enable/disable the multihost failfast option. 23121 * (MHIOCENFAILFAST) 23122 * 23123 * Arguments: dev - the device number 23124 * arg - user specified probing interval. 23125 * flag - this argument is a pass through to ddi_copyxxx() 23126 * directly from the mode argument of ioctl(). 23127 * 23128 * Return Code: 0 23129 * EFAULT 23130 * ENXIO 23131 */ 23132 23133 static int 23134 sd_mhdioc_failfast(dev_t dev, caddr_t arg, int flag) 23135 { 23136 struct sd_lun *un = NULL; 23137 int mh_time; 23138 int rval = 0; 23139 23140 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 23141 return (ENXIO); 23142 } 23143 23144 if (ddi_copyin((void *)arg, &mh_time, sizeof (int), flag)) 23145 return (EFAULT); 23146 23147 if (mh_time) { 23148 mutex_enter(SD_MUTEX(un)); 23149 un->un_resvd_status |= SD_FAILFAST; 23150 mutex_exit(SD_MUTEX(un)); 23151 /* 23152 * If mh_time is INT_MAX, then this ioctl is being used for 23153 * SCSI-3 PGR purposes, and we don't need to spawn watch thread. 23154 */ 23155 if (mh_time != INT_MAX) { 23156 rval = sd_check_mhd(dev, mh_time); 23157 } 23158 } else { 23159 (void) sd_check_mhd(dev, 0); 23160 mutex_enter(SD_MUTEX(un)); 23161 un->un_resvd_status &= ~SD_FAILFAST; 23162 mutex_exit(SD_MUTEX(un)); 23163 } 23164 return (rval); 23165 } 23166 23167 23168 /* 23169 * Function: sd_mhdioc_takeown 23170 * 23171 * Description: This routine is the driver entry point for handling ioctl 23172 * requests to forcefully acquire exclusive access rights to the 23173 * multihost disk (MHIOCTKOWN). 23174 * 23175 * Arguments: dev - the device number 23176 * arg - user provided structure specifying the delay 23177 * parameters in milliseconds 23178 * flag - this argument is a pass through to ddi_copyxxx() 23179 * directly from the mode argument of ioctl(). 
23180 * 23181 * Return Code: 0 23182 * EFAULT 23183 * ENXIO 23184 */ 23185 23186 static int 23187 sd_mhdioc_takeown(dev_t dev, caddr_t arg, int flag) 23188 { 23189 struct sd_lun *un = NULL; 23190 struct mhioctkown *tkown = NULL; 23191 int rval = 0; 23192 23193 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 23194 return (ENXIO); 23195 } 23196 23197 if (arg != NULL) { 23198 tkown = (struct mhioctkown *) 23199 kmem_zalloc(sizeof (struct mhioctkown), KM_SLEEP); 23200 rval = ddi_copyin(arg, tkown, sizeof (struct mhioctkown), flag); 23201 if (rval != 0) { 23202 rval = EFAULT; 23203 goto error; 23204 } 23205 } 23206 23207 rval = sd_take_ownership(dev, tkown); 23208 mutex_enter(SD_MUTEX(un)); 23209 if (rval == 0) { 23210 un->un_resvd_status |= SD_RESERVE; 23211 if (tkown != NULL && tkown->reinstate_resv_delay != 0) { 23212 sd_reinstate_resv_delay = 23213 tkown->reinstate_resv_delay * 1000; 23214 } else { 23215 sd_reinstate_resv_delay = SD_REINSTATE_RESV_DELAY; 23216 } 23217 /* 23218 * Give the scsi_watch routine interval set by 23219 * the MHIOCENFAILFAST ioctl precedence here. 23220 */ 23221 if ((un->un_resvd_status & SD_FAILFAST) == 0) { 23222 mutex_exit(SD_MUTEX(un)); 23223 (void) sd_check_mhd(dev, sd_reinstate_resv_delay/1000); 23224 SD_TRACE(SD_LOG_IOCTL_MHD, un, 23225 "sd_mhdioc_takeown : %d\n", 23226 sd_reinstate_resv_delay); 23227 } else { 23228 mutex_exit(SD_MUTEX(un)); 23229 } 23230 (void) scsi_reset_notify(SD_ADDRESS(un), SCSI_RESET_NOTIFY, 23231 sd_mhd_reset_notify_cb, (caddr_t)un); 23232 } else { 23233 un->un_resvd_status &= ~SD_RESERVE; 23234 mutex_exit(SD_MUTEX(un)); 23235 } 23236 23237 error: 23238 if (tkown != NULL) { 23239 kmem_free(tkown, sizeof (struct mhioctkown)); 23240 } 23241 return (rval); 23242 } 23243 23244 23245 /* 23246 * Function: sd_mhdioc_release 23247 * 23248 * Description: This routine is the driver entry point for handling ioctl 23249 * requests to release exclusive access rights to the multihost 23250 * disk (MHIOCRELEASE). 23251 * 23252 * Arguments: dev - the device number 23253 * 23254 * Return Code: 0 23255 * ENXIO 23256 */ 23257 23258 static int 23259 sd_mhdioc_release(dev_t dev) 23260 { 23261 struct sd_lun *un = NULL; 23262 timeout_id_t resvd_timeid_save; 23263 int resvd_status_save; 23264 int rval = 0; 23265 23266 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 23267 return (ENXIO); 23268 } 23269 23270 mutex_enter(SD_MUTEX(un)); 23271 resvd_status_save = un->un_resvd_status; 23272 un->un_resvd_status &= 23273 ~(SD_RESERVE | SD_LOST_RESERVE | SD_WANT_RESERVE); 23274 if (un->un_resvd_timeid) { 23275 resvd_timeid_save = un->un_resvd_timeid; 23276 un->un_resvd_timeid = NULL; 23277 mutex_exit(SD_MUTEX(un)); 23278 (void) untimeout(resvd_timeid_save); 23279 } else { 23280 mutex_exit(SD_MUTEX(un)); 23281 } 23282 23283 /* 23284 * destroy any pending timeout thread that may be attempting to 23285 * reinstate reservation on this device. 
23286 */ 23287 sd_rmv_resv_reclaim_req(dev); 23288 23289 if ((rval = sd_reserve_release(dev, SD_RELEASE)) == 0) { 23290 mutex_enter(SD_MUTEX(un)); 23291 if ((un->un_mhd_token) && 23292 ((un->un_resvd_status & SD_FAILFAST) == 0)) { 23293 mutex_exit(SD_MUTEX(un)); 23294 (void) sd_check_mhd(dev, 0); 23295 } else { 23296 mutex_exit(SD_MUTEX(un)); 23297 } 23298 (void) scsi_reset_notify(SD_ADDRESS(un), SCSI_RESET_CANCEL, 23299 sd_mhd_reset_notify_cb, (caddr_t)un); 23300 } else { 23301 /* 23302 * sd_mhd_watch_cb will restart the resvd recover timeout thread 23303 */ 23304 mutex_enter(SD_MUTEX(un)); 23305 un->un_resvd_status = resvd_status_save; 23306 mutex_exit(SD_MUTEX(un)); 23307 } 23308 return (rval); 23309 } 23310 23311 23312 /* 23313 * Function: sd_mhdioc_register_devid 23314 * 23315 * Description: This routine is the driver entry point for handling ioctl 23316 * requests to register the device id (MHIOCREREGISTERDEVID). 23317 * 23318 * Note: The implementation for this ioctl has been updated to 23319 * be consistent with the original PSARC case (1999/357) 23320 * (4375899, 4241671, 4220005) 23321 * 23322 * Arguments: dev - the device number 23323 * 23324 * Return Code: 0 23325 * ENXIO 23326 */ 23327 23328 static int 23329 sd_mhdioc_register_devid(dev_t dev) 23330 { 23331 struct sd_lun *un = NULL; 23332 int rval = 0; 23333 sd_ssc_t *ssc; 23334 23335 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 23336 return (ENXIO); 23337 } 23338 23339 ASSERT(!mutex_owned(SD_MUTEX(un))); 23340 23341 mutex_enter(SD_MUTEX(un)); 23342 23343 /* If a devid already exists, de-register it */ 23344 if (un->un_devid != NULL) { 23345 ddi_devid_unregister(SD_DEVINFO(un)); 23346 /* 23347 * After unregister devid, needs to free devid memory 23348 */ 23349 ddi_devid_free(un->un_devid); 23350 un->un_devid = NULL; 23351 } 23352 23353 /* Check for reservation conflict */ 23354 mutex_exit(SD_MUTEX(un)); 23355 ssc = sd_ssc_init(un); 23356 rval = sd_send_scsi_TEST_UNIT_READY(ssc, 0); 23357 mutex_enter(SD_MUTEX(un)); 23358 23359 switch (rval) { 23360 case 0: 23361 sd_register_devid(ssc, SD_DEVINFO(un), SD_TARGET_IS_UNRESERVED); 23362 break; 23363 case EACCES: 23364 break; 23365 default: 23366 rval = EIO; 23367 } 23368 23369 mutex_exit(SD_MUTEX(un)); 23370 if (rval != 0) { 23371 if (rval == EIO) 23372 sd_ssc_assessment(ssc, SD_FMT_STATUS_CHECK); 23373 else 23374 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 23375 } 23376 sd_ssc_fini(ssc); 23377 return (rval); 23378 } 23379 23380 23381 /* 23382 * Function: sd_mhdioc_inkeys 23383 * 23384 * Description: This routine is the driver entry point for handling ioctl 23385 * requests to issue the SCSI-3 Persistent In Read Keys command 23386 * to the device (MHIOCGRP_INKEYS). 23387 * 23388 * Arguments: dev - the device number 23389 * arg - user provided in_keys structure 23390 * flag - this argument is a pass through to ddi_copyxxx() 23391 * directly from the mode argument of ioctl(). 
23392 * 23393 * Return Code: code returned by sd_persistent_reservation_in_read_keys() 23394 * ENXIO 23395 * EFAULT 23396 */ 23397 23398 static int 23399 sd_mhdioc_inkeys(dev_t dev, caddr_t arg, int flag) 23400 { 23401 struct sd_lun *un; 23402 mhioc_inkeys_t inkeys; 23403 int rval = 0; 23404 23405 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 23406 return (ENXIO); 23407 } 23408 23409 #ifdef _MULTI_DATAMODEL 23410 switch (ddi_model_convert_from(flag & FMODELS)) { 23411 case DDI_MODEL_ILP32: { 23412 struct mhioc_inkeys32 inkeys32; 23413 23414 if (ddi_copyin(arg, &inkeys32, 23415 sizeof (struct mhioc_inkeys32), flag) != 0) { 23416 return (EFAULT); 23417 } 23418 inkeys.li = (mhioc_key_list_t *)(uintptr_t)inkeys32.li; 23419 if ((rval = sd_persistent_reservation_in_read_keys(un, 23420 &inkeys, flag)) != 0) { 23421 return (rval); 23422 } 23423 inkeys32.generation = inkeys.generation; 23424 if (ddi_copyout(&inkeys32, arg, sizeof (struct mhioc_inkeys32), 23425 flag) != 0) { 23426 return (EFAULT); 23427 } 23428 break; 23429 } 23430 case DDI_MODEL_NONE: 23431 if (ddi_copyin(arg, &inkeys, sizeof (mhioc_inkeys_t), 23432 flag) != 0) { 23433 return (EFAULT); 23434 } 23435 if ((rval = sd_persistent_reservation_in_read_keys(un, 23436 &inkeys, flag)) != 0) { 23437 return (rval); 23438 } 23439 if (ddi_copyout(&inkeys, arg, sizeof (mhioc_inkeys_t), 23440 flag) != 0) { 23441 return (EFAULT); 23442 } 23443 break; 23444 } 23445 23446 #else /* ! _MULTI_DATAMODEL */ 23447 23448 if (ddi_copyin(arg, &inkeys, sizeof (mhioc_inkeys_t), flag) != 0) { 23449 return (EFAULT); 23450 } 23451 rval = sd_persistent_reservation_in_read_keys(un, &inkeys, flag); 23452 if (rval != 0) { 23453 return (rval); 23454 } 23455 if (ddi_copyout(&inkeys, arg, sizeof (mhioc_inkeys_t), flag) != 0) { 23456 return (EFAULT); 23457 } 23458 23459 #endif /* _MULTI_DATAMODEL */ 23460 23461 return (rval); 23462 } 23463 23464 23465 /* 23466 * Function: sd_mhdioc_inresv 23467 * 23468 * Description: This routine is the driver entry point for handling ioctl 23469 * requests to issue the SCSI-3 Persistent In Read Reservations 23470 * command to the device (MHIOCGRP_INKEYS). 23471 * 23472 * Arguments: dev - the device number 23473 * arg - user provided in_resv structure 23474 * flag - this argument is a pass through to ddi_copyxxx() 23475 * directly from the mode argument of ioctl(). 
23476 * 23477 * Return Code: code returned by sd_persistent_reservation_in_read_resv() 23478 * ENXIO 23479 * EFAULT 23480 */ 23481 23482 static int 23483 sd_mhdioc_inresv(dev_t dev, caddr_t arg, int flag) 23484 { 23485 struct sd_lun *un; 23486 mhioc_inresvs_t inresvs; 23487 int rval = 0; 23488 23489 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 23490 return (ENXIO); 23491 } 23492 23493 #ifdef _MULTI_DATAMODEL 23494 23495 switch (ddi_model_convert_from(flag & FMODELS)) { 23496 case DDI_MODEL_ILP32: { 23497 struct mhioc_inresvs32 inresvs32; 23498 23499 if (ddi_copyin(arg, &inresvs32, 23500 sizeof (struct mhioc_inresvs32), flag) != 0) { 23501 return (EFAULT); 23502 } 23503 inresvs.li = (mhioc_resv_desc_list_t *)(uintptr_t)inresvs32.li; 23504 if ((rval = sd_persistent_reservation_in_read_resv(un, 23505 &inresvs, flag)) != 0) { 23506 return (rval); 23507 } 23508 inresvs32.generation = inresvs.generation; 23509 if (ddi_copyout(&inresvs32, arg, 23510 sizeof (struct mhioc_inresvs32), flag) != 0) { 23511 return (EFAULT); 23512 } 23513 break; 23514 } 23515 case DDI_MODEL_NONE: 23516 if (ddi_copyin(arg, &inresvs, 23517 sizeof (mhioc_inresvs_t), flag) != 0) { 23518 return (EFAULT); 23519 } 23520 if ((rval = sd_persistent_reservation_in_read_resv(un, 23521 &inresvs, flag)) != 0) { 23522 return (rval); 23523 } 23524 if (ddi_copyout(&inresvs, arg, 23525 sizeof (mhioc_inresvs_t), flag) != 0) { 23526 return (EFAULT); 23527 } 23528 break; 23529 } 23530 23531 #else /* ! _MULTI_DATAMODEL */ 23532 23533 if (ddi_copyin(arg, &inresvs, sizeof (mhioc_inresvs_t), flag) != 0) { 23534 return (EFAULT); 23535 } 23536 rval = sd_persistent_reservation_in_read_resv(un, &inresvs, flag); 23537 if (rval != 0) { 23538 return (rval); 23539 } 23540 if (ddi_copyout(&inresvs, arg, sizeof (mhioc_inresvs_t), flag)) { 23541 return (EFAULT); 23542 } 23543 23544 #endif /* ! _MULTI_DATAMODEL */ 23545 23546 return (rval); 23547 } 23548 23549 23550 /* 23551 * The following routines support the clustering functionality described below 23552 * and implement lost reservation reclaim functionality. 23553 * 23554 * Clustering 23555 * ---------- 23556 * The clustering code uses two different, independent forms of SCSI 23557 * reservation. Traditional SCSI-2 Reserve/Release and the newer SCSI-3 23558 * Persistent Group Reservations. For any particular disk, it will use either 23559 * SCSI-2 or SCSI-3 PGR but never both at the same time for the same disk. 23560 * 23561 * SCSI-2 23562 * The cluster software takes ownership of a multi-hosted disk by issuing the 23563 * MHIOCTKOWN ioctl to the disk driver. It releases ownership by issuing the 23564 * MHIOCRELEASE ioctl. Closely related is the MHIOCENFAILFAST ioctl -- a 23565 * cluster, just after taking ownership of the disk with the MHIOCTKOWN ioctl 23566 * then issues the MHIOCENFAILFAST ioctl. This ioctl "enables failfast" in the 23567 * driver. The meaning of failfast is that if the driver (on this host) ever 23568 * encounters the scsi error return code RESERVATION_CONFLICT from the device, 23569 * it should immediately panic the host. The motivation for this ioctl is that 23570 * if this host does encounter reservation conflict, the underlying cause is 23571 * that some other host of the cluster has decided that this host is no longer 23572 * in the cluster and has seized control of the disks for itself. Since this 23573 * host is no longer in the cluster, it ought to panic itself. 
The 23574 * MHIOCENFAILFAST ioctl does two things: 23575 * (a) it sets a flag that will cause any returned RESERVATION_CONFLICT 23576 * error to panic the host 23577 * (b) it sets up a periodic timer to test whether this host still has 23578 * "access" (in that no other host has reserved the device): if the 23579 * periodic timer gets RESERVATION_CONFLICT, the host is panicked. The 23580 * purpose of that periodic timer is to handle scenarios where the host is 23581 * otherwise temporarily quiescent, temporarily doing no real i/o. 23582 * The MHIOCTKOWN ioctl will "break" a reservation that is held by another host, 23583 * by issuing a SCSI Bus Device Reset. It will then issue a SCSI Reserve for 23584 * the device itself. 23585 * 23586 * SCSI-3 PGR 23587 * A direct semantic implementation of the SCSI-3 Persistent Reservation 23588 * facility is supported through the shared multihost disk ioctls 23589 * (MHIOCGRP_INKEYS, MHIOCGRP_INRESV, MHIOCGRP_REGISTER, MHIOCGRP_RESERVE, 23590 * MHIOCGRP_PREEMPTANDABORT) 23591 * 23592 * Reservation Reclaim: 23593 * -------------------- 23594 * To support the lost reservation reclaim operations this driver creates a 23595 * single thread to handle reinstating reservations on all devices that have 23596 * lost reservations sd_resv_reclaim_requests are logged for all devices that 23597 * have LOST RESERVATIONS when the scsi watch facility callsback sd_mhd_watch_cb 23598 * and the reservation reclaim thread loops through the requests to regain the 23599 * lost reservations. 23600 */ 23601 23602 /* 23603 * Function: sd_check_mhd() 23604 * 23605 * Description: This function sets up and submits a scsi watch request or 23606 * terminates an existing watch request. This routine is used in 23607 * support of reservation reclaim. 23608 * 23609 * Arguments: dev - the device 'dev_t' is used for context to discriminate 23610 * among multiple watches that share the callback function 23611 * interval - the number of microseconds specifying the watch 23612 * interval for issuing TEST UNIT READY commands. If 23613 * set to 0 the watch should be terminated. If the 23614 * interval is set to 0 and if the device is required 23615 * to hold reservation while disabling failfast, the 23616 * watch is restarted with an interval of 23617 * reinstate_resv_delay. 23618 * 23619 * Return Code: 0 - Successful submit/terminate of scsi watch request 23620 * ENXIO - Indicates an invalid device was specified 23621 * EAGAIN - Unable to submit the scsi watch request 23622 */ 23623 23624 static int 23625 sd_check_mhd(dev_t dev, int interval) 23626 { 23627 struct sd_lun *un; 23628 opaque_t token; 23629 23630 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 23631 return (ENXIO); 23632 } 23633 23634 /* is this a watch termination request? */ 23635 if (interval == 0) { 23636 mutex_enter(SD_MUTEX(un)); 23637 /* if there is an existing watch task then terminate it */ 23638 if (un->un_mhd_token) { 23639 token = un->un_mhd_token; 23640 un->un_mhd_token = NULL; 23641 mutex_exit(SD_MUTEX(un)); 23642 (void) scsi_watch_request_terminate(token, 23643 SCSI_WATCH_TERMINATE_ALL_WAIT); 23644 mutex_enter(SD_MUTEX(un)); 23645 } else { 23646 mutex_exit(SD_MUTEX(un)); 23647 /* 23648 * Note: If we return here we don't check for the 23649 * failfast case. This is the original legacy 23650 * implementation but perhaps we should be checking 23651 * the failfast case. 
23652 */ 23653 return (0); 23654 } 23655 /* 23656 * If the device is required to hold reservation while 23657 * disabling failfast, we need to restart the scsi_watch 23658 * routine with an interval of reinstate_resv_delay. 23659 */ 23660 if (un->un_resvd_status & SD_RESERVE) { 23661 interval = sd_reinstate_resv_delay/1000; 23662 } else { 23663 /* no failfast so bail */ 23664 mutex_exit(SD_MUTEX(un)); 23665 return (0); 23666 } 23667 mutex_exit(SD_MUTEX(un)); 23668 } 23669 23670 /* 23671 * adjust minimum time interval to 1 second, 23672 * and convert from msecs to usecs 23673 */ 23674 if (interval > 0 && interval < 1000) { 23675 interval = 1000; 23676 } 23677 interval *= 1000; 23678 23679 /* 23680 * submit the request to the scsi_watch service 23681 */ 23682 token = scsi_watch_request_submit(SD_SCSI_DEVP(un), interval, 23683 SENSE_LENGTH, sd_mhd_watch_cb, (caddr_t)dev); 23684 if (token == NULL) { 23685 return (EAGAIN); 23686 } 23687 23688 /* 23689 * save token for termination later on 23690 */ 23691 mutex_enter(SD_MUTEX(un)); 23692 un->un_mhd_token = token; 23693 mutex_exit(SD_MUTEX(un)); 23694 return (0); 23695 } 23696 23697 23698 /* 23699 * Function: sd_mhd_watch_cb() 23700 * 23701 * Description: This function is the call back function used by the scsi watch 23702 * facility. The scsi watch facility sends the "Test Unit Ready" 23703 * and processes the status. If applicable (i.e. a "Unit Attention" 23704 * status and automatic "Request Sense" not used) the scsi watch 23705 * facility will send a "Request Sense" and retrieve the sense data 23706 * to be passed to this callback function. In either case the 23707 * automatic "Request Sense" or the facility submitting one, this 23708 * callback is passed the status and sense data. 23709 * 23710 * Arguments: arg - the device 'dev_t' is used for context to discriminate 23711 * among multiple watches that share this callback function 23712 * resultp - scsi watch facility result packet containing scsi 23713 * packet, status byte and sense data 23714 * 23715 * Return Code: 0 - continue the watch task 23716 * non-zero - terminate the watch task 23717 */ 23718 23719 static int 23720 sd_mhd_watch_cb(caddr_t arg, struct scsi_watch_result *resultp) 23721 { 23722 struct sd_lun *un; 23723 struct scsi_status *statusp; 23724 uint8_t *sensep; 23725 struct scsi_pkt *pkt; 23726 uchar_t actual_sense_length; 23727 dev_t dev = (dev_t)arg; 23728 23729 ASSERT(resultp != NULL); 23730 statusp = resultp->statusp; 23731 sensep = (uint8_t *)resultp->sensep; 23732 pkt = resultp->pkt; 23733 actual_sense_length = resultp->actual_sense_length; 23734 23735 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 23736 return (ENXIO); 23737 } 23738 23739 SD_TRACE(SD_LOG_IOCTL_MHD, un, 23740 "sd_mhd_watch_cb: reason '%s', status '%s'\n", 23741 scsi_rname(pkt->pkt_reason), sd_sname(*((unsigned char *)statusp))); 23742 23743 /* Begin processing of the status and/or sense data */ 23744 if (pkt->pkt_reason != CMD_CMPLT) { 23745 /* Handle the incomplete packet */ 23746 sd_mhd_watch_incomplete(un, pkt); 23747 return (0); 23748 } else if (*((unsigned char *)statusp) != STATUS_GOOD) { 23749 if (*((unsigned char *)statusp) 23750 == STATUS_RESERVATION_CONFLICT) { 23751 /* 23752 * Handle a reservation conflict by panicking if 23753 * configured for failfast or by logging the conflict 23754 * and updating the reservation status 23755 */ 23756 mutex_enter(SD_MUTEX(un)); 23757 if ((un->un_resvd_status & SD_FAILFAST) && 23758 (sd_failfast_enable)) { 23759 
sd_panic_for_res_conflict(un); 23760 /*NOTREACHED*/ 23761 } 23762 SD_INFO(SD_LOG_IOCTL_MHD, un, 23763 "sd_mhd_watch_cb: Reservation Conflict\n"); 23764 un->un_resvd_status |= SD_RESERVATION_CONFLICT; 23765 mutex_exit(SD_MUTEX(un)); 23766 } 23767 } 23768 23769 if (sensep != NULL) { 23770 if (actual_sense_length >= (SENSE_LENGTH - 2)) { 23771 mutex_enter(SD_MUTEX(un)); 23772 if ((scsi_sense_asc(sensep) == 23773 SD_SCSI_RESET_SENSE_CODE) && 23774 (un->un_resvd_status & SD_RESERVE)) { 23775 /* 23776 * The additional sense code indicates a power 23777 * on or bus device reset has occurred; update 23778 * the reservation status. 23779 */ 23780 un->un_resvd_status |= 23781 (SD_LOST_RESERVE | SD_WANT_RESERVE); 23782 SD_INFO(SD_LOG_IOCTL_MHD, un, 23783 "sd_mhd_watch_cb: Lost Reservation\n"); 23784 } 23785 } else { 23786 return (0); 23787 } 23788 } else { 23789 mutex_enter(SD_MUTEX(un)); 23790 } 23791 23792 if ((un->un_resvd_status & SD_RESERVE) && 23793 (un->un_resvd_status & SD_LOST_RESERVE)) { 23794 if (un->un_resvd_status & SD_WANT_RESERVE) { 23795 /* 23796 * A reset occurred in between the last probe and this 23797 * one so if a timeout is pending cancel it. 23798 */ 23799 if (un->un_resvd_timeid) { 23800 timeout_id_t temp_id = un->un_resvd_timeid; 23801 un->un_resvd_timeid = NULL; 23802 mutex_exit(SD_MUTEX(un)); 23803 (void) untimeout(temp_id); 23804 mutex_enter(SD_MUTEX(un)); 23805 } 23806 un->un_resvd_status &= ~SD_WANT_RESERVE; 23807 } 23808 if (un->un_resvd_timeid == 0) { 23809 /* Schedule a timeout to handle the lost reservation */ 23810 un->un_resvd_timeid = timeout(sd_mhd_resvd_recover, 23811 (void *)dev, 23812 drv_usectohz(sd_reinstate_resv_delay)); 23813 } 23814 } 23815 mutex_exit(SD_MUTEX(un)); 23816 return (0); 23817 } 23818 23819 23820 /* 23821 * Function: sd_mhd_watch_incomplete() 23822 * 23823 * Description: This function is used to find out why a scsi pkt sent by the 23824 * scsi watch facility was not completed. Under some scenarios this 23825 * routine will return. Otherwise it will send a bus reset to see 23826 * if the drive is still online. 23827 * 23828 * Arguments: un - driver soft state (unit) structure 23829 * pkt - incomplete scsi pkt 23830 */ 23831 23832 static void 23833 sd_mhd_watch_incomplete(struct sd_lun *un, struct scsi_pkt *pkt) 23834 { 23835 int be_chatty; 23836 int perr; 23837 23838 ASSERT(pkt != NULL); 23839 ASSERT(un != NULL); 23840 be_chatty = (!(pkt->pkt_flags & FLAG_SILENT)); 23841 perr = (pkt->pkt_statistics & STAT_PERR); 23842 23843 mutex_enter(SD_MUTEX(un)); 23844 if (un->un_state == SD_STATE_DUMPING) { 23845 mutex_exit(SD_MUTEX(un)); 23846 return; 23847 } 23848 23849 switch (pkt->pkt_reason) { 23850 case CMD_UNX_BUS_FREE: 23851 /* 23852 * If we had a parity error that caused the target to drop BSY*, 23853 * don't be chatty about it. 23854 */ 23855 if (perr && be_chatty) { 23856 be_chatty = 0; 23857 } 23858 break; 23859 case CMD_TAG_REJECT: 23860 /* 23861 * The SCSI-2 spec states that a tag reject will be sent by the 23862 * target if tagged queuing is not supported. A tag reject may 23863 * also be sent during certain initialization periods or to 23864 * control internal resources. For the latter case the target 23865 * may also return Queue Full. 23866 * 23867 * If this driver receives a tag reject from a target that is 23868 * going through an init period or controlling internal 23869 * resources tagged queuing will be disabled. 
This is a less 23870 * than optimal behavior, but the driver is unable to determine 23871 * the target state and assumes tagged queuing is not supported. 23872 */ 23873 pkt->pkt_flags = 0; 23874 un->un_tagflags = 0; 23875 23876 if (un->un_f_opt_queueing == TRUE) { 23877 un->un_throttle = min(un->un_throttle, 3); 23878 } else { 23879 un->un_throttle = 1; 23880 } 23881 mutex_exit(SD_MUTEX(un)); 23882 (void) scsi_ifsetcap(SD_ADDRESS(un), "tagged-qing", 0, 1); 23883 mutex_enter(SD_MUTEX(un)); 23884 break; 23885 case CMD_INCOMPLETE: 23886 /* 23887 * The transport stopped in an abnormal state; fall through and 23888 * reset the target and/or bus, unless selection did not complete 23889 * (indicated by STATE_GOT_BUS), in which case we don't want to 23890 * go through a target/bus reset. 23891 */ 23892 if (pkt->pkt_state == STATE_GOT_BUS) { 23893 break; 23894 } 23895 /*FALLTHROUGH*/ 23896 23897 case CMD_TIMEOUT: 23898 default: 23899 /* 23900 * The lun may still be running the command, so a lun reset 23901 * should be attempted. If the lun reset fails or cannot be 23902 * issued, then try a target reset. Lastly, try a bus reset. 23903 */ 23904 if ((pkt->pkt_statistics & 23905 (STAT_BUS_RESET|STAT_DEV_RESET|STAT_ABORTED)) == 0) { 23906 int reset_retval = 0; 23907 mutex_exit(SD_MUTEX(un)); 23908 if (un->un_f_allow_bus_device_reset == TRUE) { 23909 if (un->un_f_lun_reset_enabled == TRUE) { 23910 reset_retval = 23911 scsi_reset(SD_ADDRESS(un), 23912 RESET_LUN); 23913 } 23914 if (reset_retval == 0) { 23915 reset_retval = 23916 scsi_reset(SD_ADDRESS(un), 23917 RESET_TARGET); 23918 } 23919 } 23920 if (reset_retval == 0) { 23921 (void) scsi_reset(SD_ADDRESS(un), RESET_ALL); 23922 } 23923 mutex_enter(SD_MUTEX(un)); 23924 } 23925 break; 23926 } 23927 23928 /* A device/bus reset has occurred; update the reservation status. */ 23929 if ((pkt->pkt_reason == CMD_RESET) || (pkt->pkt_statistics & 23930 (STAT_BUS_RESET | STAT_DEV_RESET))) { 23931 if ((un->un_resvd_status & SD_RESERVE) == SD_RESERVE) { 23932 un->un_resvd_status |= 23933 (SD_LOST_RESERVE | SD_WANT_RESERVE); 23934 SD_INFO(SD_LOG_IOCTL_MHD, un, 23935 "sd_mhd_watch_incomplete: Lost Reservation\n"); 23936 } 23937 } 23938 23939 /* 23940 * The disk has been turned off; update the device state. 23941 * 23942 * Note: Should we be offlining the disk here? 23943 */ 23944 if (pkt->pkt_state == STATE_GOT_BUS) { 23945 SD_INFO(SD_LOG_IOCTL_MHD, un, "sd_mhd_watch_incomplete: " 23946 "Disk not responding to selection\n"); 23947 if (un->un_state != SD_STATE_OFFLINE) { 23948 New_state(un, SD_STATE_OFFLINE); 23949 } 23950 } else if (be_chatty) { 23951 /* 23952 * Suppress messages if they are all the same pkt_reason; 23953 * with TQ, many (up to 256) are returned with the same 23954 * pkt_reason. 23955 */ 23956 if (pkt->pkt_reason != un->un_last_pkt_reason) { 23957 SD_ERROR(SD_LOG_IOCTL_MHD, un, 23958 "sd_mhd_watch_incomplete: " 23959 "SCSI transport failed: reason '%s'\n", 23960 scsi_rname(pkt->pkt_reason)); 23961 } 23962 } 23963 un->un_last_pkt_reason = pkt->pkt_reason; 23964 mutex_exit(SD_MUTEX(un)); 23965 } 23966 23967 23968 /* 23969 * Function: sd_sname() 23970 * 23971 * Description: This is a simple little routine to return a string containing 23972 * a printable description of the command status byte, for use in 23973 * logging. 23974 * 23975 * Arguments: status - the command status byte 23976 * 23977 * Return Code: char * - string containing the status description.
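 *
 *		For illustration, a hypothetical caller (the variables
 *		here are assumed for the sketch; only sd_sname() itself
 *		is real):
 *
 *			uchar_t st = *((uchar_t *)statusp);
 *			SD_INFO(SD_LOG_IOCTL_MHD, un,
 *			    "status '%s'\n", sd_sname(st));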
23978 */ 23979 23980 static char * 23981 sd_sname(uchar_t status) 23982 { 23983 switch (status & STATUS_MASK) { 23984 case STATUS_GOOD: 23985 return ("good status"); 23986 case STATUS_CHECK: 23987 return ("check condition"); 23988 case STATUS_MET: 23989 return ("condition met"); 23990 case STATUS_BUSY: 23991 return ("busy"); 23992 case STATUS_INTERMEDIATE: 23993 return ("intermediate"); 23994 case STATUS_INTERMEDIATE_MET: 23995 return ("intermediate - condition met"); 23996 case STATUS_RESERVATION_CONFLICT: 23997 return ("reservation_conflict"); 23998 case STATUS_TERMINATED: 23999 return ("command terminated"); 24000 case STATUS_QFULL: 24001 return ("queue full"); 24002 default: 24003 return ("<unknown status>"); 24004 } 24005 } 24006 24007 24008 /* 24009 * Function: sd_mhd_resvd_recover() 24010 * 24011 * Description: This function adds a reservation entry to the 24012 * sd_resv_reclaim_request list and signals the reservation 24013 * reclaim thread that there is work pending. If the reservation 24014 * reclaim thread has not been previously created this function 24015 * will kick it off. 24016 * 24017 * Arguments: arg - the device 'dev_t' is used for context to discriminate 24018 * among multiple watches that share this callback function 24019 * 24020 * Context: This routine is called by timeout() and is run in interrupt 24021 * context. It must not sleep or call other functions which may 24022 * sleep. 24023 */ 24024 24025 static void 24026 sd_mhd_resvd_recover(void *arg) 24027 { 24028 dev_t dev = (dev_t)arg; 24029 struct sd_lun *un; 24030 struct sd_thr_request *sd_treq = NULL; 24031 struct sd_thr_request *sd_cur = NULL; 24032 struct sd_thr_request *sd_prev = NULL; 24033 int already_there = 0; 24034 24035 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 24036 return; 24037 } 24038 24039 mutex_enter(SD_MUTEX(un)); 24040 un->un_resvd_timeid = NULL; 24041 if (un->un_resvd_status & SD_WANT_RESERVE) { 24042 /* 24043 * There was a reset so don't issue the reserve, allow the 24044 * sd_mhd_watch_cb callback function to notice this and 24045 * reschedule the timeout for reservation. 24046 */ 24047 mutex_exit(SD_MUTEX(un)); 24048 return; 24049 } 24050 mutex_exit(SD_MUTEX(un)); 24051 24052 /* 24053 * Add this device to the sd_resv_reclaim_request list and the 24054 * sd_resv_reclaim_thread should take care of the rest. 24055 * 24056 * Note: We can't sleep in this context so if the memory allocation 24057 * fails allow the sd_mhd_watch_cb callback function to notice this and 24058 * reschedule the timeout for reservation. 
(4378460) 24059 */ 24060 sd_treq = (struct sd_thr_request *) 24061 kmem_zalloc(sizeof (struct sd_thr_request), KM_NOSLEEP); 24062 if (sd_treq == NULL) { 24063 return; 24064 } 24065 24066 sd_treq->sd_thr_req_next = NULL; 24067 sd_treq->dev = dev; 24068 mutex_enter(&sd_tr.srq_resv_reclaim_mutex); 24069 if (sd_tr.srq_thr_req_head == NULL) { 24070 sd_tr.srq_thr_req_head = sd_treq; 24071 } else { 24072 sd_cur = sd_prev = sd_tr.srq_thr_req_head; 24073 for (; sd_cur != NULL; sd_cur = sd_cur->sd_thr_req_next) { 24074 if (sd_cur->dev == dev) { 24075 /* 24076 * already in Queue so don't log 24077 * another request for the device 24078 */ 24079 already_there = 1; 24080 break; 24081 } 24082 sd_prev = sd_cur; 24083 } 24084 if (!already_there) { 24085 SD_INFO(SD_LOG_IOCTL_MHD, un, "sd_mhd_resvd_recover: " 24086 "logging request for %lx\n", dev); 24087 sd_prev->sd_thr_req_next = sd_treq; 24088 } else { 24089 kmem_free(sd_treq, sizeof (struct sd_thr_request)); 24090 } 24091 } 24092 24093 /* 24094 * Create a kernel thread to do the reservation reclaim and free up this 24095 * thread. We cannot block this thread while we go away to do the 24096 * reservation reclaim 24097 */ 24098 if (sd_tr.srq_resv_reclaim_thread == NULL) 24099 sd_tr.srq_resv_reclaim_thread = thread_create(NULL, 0, 24100 sd_resv_reclaim_thread, NULL, 24101 0, &p0, TS_RUN, v.v_maxsyspri - 2); 24102 24103 /* Tell the reservation reclaim thread that it has work to do */ 24104 cv_signal(&sd_tr.srq_resv_reclaim_cv); 24105 mutex_exit(&sd_tr.srq_resv_reclaim_mutex); 24106 } 24107 24108 /* 24109 * Function: sd_resv_reclaim_thread() 24110 * 24111 * Description: This function implements the reservation reclaim operations 24112 * 24113 * Arguments: arg - the device 'dev_t' is used for context to discriminate 24114 * among multiple watches that share this callback function 24115 */ 24116 24117 static void 24118 sd_resv_reclaim_thread() 24119 { 24120 struct sd_lun *un; 24121 struct sd_thr_request *sd_mhreq; 24122 24123 /* Wait for work */ 24124 mutex_enter(&sd_tr.srq_resv_reclaim_mutex); 24125 if (sd_tr.srq_thr_req_head == NULL) { 24126 cv_wait(&sd_tr.srq_resv_reclaim_cv, 24127 &sd_tr.srq_resv_reclaim_mutex); 24128 } 24129 24130 /* Loop while we have work */ 24131 while ((sd_tr.srq_thr_cur_req = sd_tr.srq_thr_req_head) != NULL) { 24132 un = ddi_get_soft_state(sd_state, 24133 SDUNIT(sd_tr.srq_thr_cur_req->dev)); 24134 if (un == NULL) { 24135 /* 24136 * softstate structure is NULL so just 24137 * dequeue the request and continue 24138 */ 24139 sd_tr.srq_thr_req_head = 24140 sd_tr.srq_thr_cur_req->sd_thr_req_next; 24141 kmem_free(sd_tr.srq_thr_cur_req, 24142 sizeof (struct sd_thr_request)); 24143 continue; 24144 } 24145 24146 /* dequeue the request */ 24147 sd_mhreq = sd_tr.srq_thr_cur_req; 24148 sd_tr.srq_thr_req_head = 24149 sd_tr.srq_thr_cur_req->sd_thr_req_next; 24150 mutex_exit(&sd_tr.srq_resv_reclaim_mutex); 24151 24152 /* 24153 * Reclaim reservation only if SD_RESERVE is still set. There 24154 * may have been a call to MHIOCRELEASE before we got here. 24155 */ 24156 mutex_enter(SD_MUTEX(un)); 24157 if ((un->un_resvd_status & SD_RESERVE) == SD_RESERVE) { 24158 /* 24159 * Note: The SD_LOST_RESERVE flag is cleared before 24160 * reclaiming the reservation. 
If this is done after the 24161 * call to sd_reserve_release a reservation loss in the 24162 * window between pkt completion of reserve cmd and 24163 * mutex_enter below may not be recognized 24164 */ 24165 un->un_resvd_status &= ~SD_LOST_RESERVE; 24166 mutex_exit(SD_MUTEX(un)); 24167 24168 if (sd_reserve_release(sd_mhreq->dev, 24169 SD_RESERVE) == 0) { 24170 mutex_enter(SD_MUTEX(un)); 24171 un->un_resvd_status |= SD_RESERVE; 24172 mutex_exit(SD_MUTEX(un)); 24173 SD_INFO(SD_LOG_IOCTL_MHD, un, 24174 "sd_resv_reclaim_thread: " 24175 "Reservation Recovered\n"); 24176 } else { 24177 mutex_enter(SD_MUTEX(un)); 24178 un->un_resvd_status |= SD_LOST_RESERVE; 24179 mutex_exit(SD_MUTEX(un)); 24180 SD_INFO(SD_LOG_IOCTL_MHD, un, 24181 "sd_resv_reclaim_thread: Failed " 24182 "Reservation Recovery\n"); 24183 } 24184 } else { 24185 mutex_exit(SD_MUTEX(un)); 24186 } 24187 mutex_enter(&sd_tr.srq_resv_reclaim_mutex); 24188 ASSERT(sd_mhreq == sd_tr.srq_thr_cur_req); 24189 kmem_free(sd_mhreq, sizeof (struct sd_thr_request)); 24190 sd_mhreq = sd_tr.srq_thr_cur_req = NULL; 24191 /* 24192 * wakeup the destroy thread if anyone is waiting on 24193 * us to complete. 24194 */ 24195 cv_signal(&sd_tr.srq_inprocess_cv); 24196 SD_TRACE(SD_LOG_IOCTL_MHD, un, 24197 "sd_resv_reclaim_thread: cv_signalling current request \n"); 24198 } 24199 24200 /* 24201 * cleanup the sd_tr structure now that this thread will not exist 24202 */ 24203 ASSERT(sd_tr.srq_thr_req_head == NULL); 24204 ASSERT(sd_tr.srq_thr_cur_req == NULL); 24205 sd_tr.srq_resv_reclaim_thread = NULL; 24206 mutex_exit(&sd_tr.srq_resv_reclaim_mutex); 24207 thread_exit(); 24208 } 24209 24210 24211 /* 24212 * Function: sd_rmv_resv_reclaim_req() 24213 * 24214 * Description: This function removes any pending reservation reclaim requests 24215 * for the specified device. 24216 * 24217 * Arguments: dev - the device 'dev_t' 24218 */ 24219 24220 static void 24221 sd_rmv_resv_reclaim_req(dev_t dev) 24222 { 24223 struct sd_thr_request *sd_mhreq; 24224 struct sd_thr_request *sd_prev; 24225 24226 /* Remove a reservation reclaim request from the list */ 24227 mutex_enter(&sd_tr.srq_resv_reclaim_mutex); 24228 if (sd_tr.srq_thr_cur_req && sd_tr.srq_thr_cur_req->dev == dev) { 24229 /* 24230 * We are attempting to reinstate reservation for 24231 * this device. We wait for sd_reserve_release() 24232 * to return before we return. 24233 */ 24234 cv_wait(&sd_tr.srq_inprocess_cv, 24235 &sd_tr.srq_resv_reclaim_mutex); 24236 } else { 24237 sd_prev = sd_mhreq = sd_tr.srq_thr_req_head; 24238 if (sd_mhreq && sd_mhreq->dev == dev) { 24239 sd_tr.srq_thr_req_head = sd_mhreq->sd_thr_req_next; 24240 kmem_free(sd_mhreq, sizeof (struct sd_thr_request)); 24241 mutex_exit(&sd_tr.srq_resv_reclaim_mutex); 24242 return; 24243 } 24244 for (; sd_mhreq != NULL; sd_mhreq = sd_mhreq->sd_thr_req_next) { 24245 if (sd_mhreq && sd_mhreq->dev == dev) { 24246 break; 24247 } 24248 sd_prev = sd_mhreq; 24249 } 24250 if (sd_mhreq != NULL) { 24251 sd_prev->sd_thr_req_next = sd_mhreq->sd_thr_req_next; 24252 kmem_free(sd_mhreq, sizeof (struct sd_thr_request)); 24253 } 24254 } 24255 mutex_exit(&sd_tr.srq_resv_reclaim_mutex); 24256 } 24257 24258 24259 /* 24260 * Function: sd_mhd_reset_notify_cb() 24261 * 24262 * Description: This is a call back function for scsi_reset_notify. This 24263 * function updates the softstate reserved status and logs the 24264 * reset. The driver scsi watch facility callback function 24265 * (sd_mhd_watch_cb) and reservation reclaim thread functionality 24266 * will reclaim the reservation. 
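 *
 *		For illustration, the registration with the transport
 *		looks roughly like the following scsi_reset_notify(9F)
 *		usage (a sketch, not the exact call site in this
 *		driver):
 *
 *			(void) scsi_reset_notify(SD_ADDRESS(un),
 *			    SCSI_RESET_NOTIFY, sd_mhd_reset_notify_cb,
 *			    (caddr_t)un);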
24267 * 24268 * Arguments: arg - driver soft state (unit) structure 24269 */ 24270 24271 static void 24272 sd_mhd_reset_notify_cb(caddr_t arg) 24273 { 24274 struct sd_lun *un = (struct sd_lun *)arg; 24275 24276 mutex_enter(SD_MUTEX(un)); 24277 if ((un->un_resvd_status & SD_RESERVE) == SD_RESERVE) { 24278 un->un_resvd_status |= (SD_LOST_RESERVE | SD_WANT_RESERVE); 24279 SD_INFO(SD_LOG_IOCTL_MHD, un, 24280 "sd_mhd_reset_notify_cb: Lost Reservation\n"); 24281 } 24282 mutex_exit(SD_MUTEX(un)); 24283 } 24284 24285 24286 /* 24287 * Function: sd_take_ownership() 24288 * 24289 * Description: This routine implements an algorithm to achieve a stable 24290 * reservation on disks that don't implement priority reserve, 24291 * and makes sure that re-reservation attempts by other hosts fail. 24292 * The algorithm consists of a loop that keeps issuing RESERVE 24293 * for some period of time (min_ownership_delay, default 6 seconds). 24294 * During that loop, it looks to see if there has been a bus device 24295 * reset or bus reset (both of which cause an existing reservation 24296 * to be lost). If the reservation is lost, RESERVE is reissued 24297 * until a period of min_ownership_delay with no resets has gone by, 24298 * or until max_ownership_delay has expired. This loop ensures that 24299 * the host really did manage to reserve the device, in spite of 24300 * resets. The looping for min_ownership_delay (default six 24301 * seconds) is important to early generation clustering products, 24302 * Solstice HA 1.x and Sun Cluster 2.x. Those products use an 24303 * MHIOCENFAILFAST periodic timer of two seconds. By having 24304 * MHIOCTKOWN issue Reserves in a loop for six seconds, and having 24305 * MHIOCENFAILFAST poll every two seconds, the idea is that by the 24306 * time the MHIOCTKOWN ioctl returns, the other host (if any) will 24307 * have already noticed, via the MHIOCENFAILFAST polling, that it 24308 * no longer "owns" the disk and will have panicked itself. Thus, 24309 * the host issuing the MHIOCTKOWN is assured (with timing 24310 * dependencies) that by the time it actually starts to use the 24311 * disk for real work, the old owner is no longer accessing it. 24312 * 24313 * min_ownership_delay is the minimum amount of time for which the 24314 * disk must be reserved continuously, devoid of resets, before the 24315 * MHIOCTKOWN ioctl will return success. 24316 * 24317 * max_ownership_delay indicates the amount of time by which the 24318 * take ownership should succeed or time out with an error. 24319 * 24320 * Arguments: dev - the device 'dev_t' 24321 * *p - struct containing timing info. 24322 * 24323 * Return Code: 0 for success or error code 24324 */ 24325 24326 static int 24327 sd_take_ownership(dev_t dev, struct mhioctkown *p) 24328 { 24329 struct sd_lun *un; 24330 int rval; 24331 int err; 24332 int reservation_count = 0; 24333 int min_ownership_delay = 6000000; /* in usec */ 24334 int max_ownership_delay = 30000000; /* in usec */ 24335 clock_t start_time; /* starting time of this algorithm */ 24336 clock_t end_time; /* time limit for giving up */ 24337 clock_t ownership_time; /* time limit for stable ownership */ 24338 clock_t current_time; 24339 clock_t previous_current_time; 24340 24341 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 24342 return (ENXIO); 24343 } 24344 24345 /* 24346 * Attempt a device reservation. A priority reservation is requested.
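 *
 * To illustrate the loop this first reservation kicks off, consider a
 * hypothetical timeline using the default 6s/30s delays:
 *
 *	t = 0s	priority RESERVE succeeds; 500 msec polling begins
 *	t = 3s	a bus reset is observed; SD_LOST_RESERVE is set and
 *		the stable-ownership window restarts
 *	t = 9s	six reset-free seconds (and at least four clean polls)
 *		have elapsed, so the routine returns 0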
24347 */ 24348 if ((rval = sd_reserve_release(dev, SD_PRIORITY_RESERVE)) 24349 != SD_SUCCESS) { 24350 SD_ERROR(SD_LOG_IOCTL_MHD, un, 24351 "sd_take_ownership: return(1)=%d\n", rval); 24352 return (rval); 24353 } 24354 24355 /* Update the softstate reserved status to indicate the reservation */ 24356 mutex_enter(SD_MUTEX(un)); 24357 un->un_resvd_status |= SD_RESERVE; 24358 un->un_resvd_status &= 24359 ~(SD_LOST_RESERVE | SD_WANT_RESERVE | SD_RESERVATION_CONFLICT); 24360 mutex_exit(SD_MUTEX(un)); 24361 24362 if (p != NULL) { 24363 if (p->min_ownership_delay != 0) { 24364 min_ownership_delay = p->min_ownership_delay * 1000; 24365 } 24366 if (p->max_ownership_delay != 0) { 24367 max_ownership_delay = p->max_ownership_delay * 1000; 24368 } 24369 } 24370 SD_INFO(SD_LOG_IOCTL_MHD, un, 24371 "sd_take_ownership: min, max delays: %d, %d\n", 24372 min_ownership_delay, max_ownership_delay); 24373 24374 start_time = ddi_get_lbolt(); 24375 current_time = start_time; 24376 ownership_time = current_time + drv_usectohz(min_ownership_delay); 24377 end_time = start_time + drv_usectohz(max_ownership_delay); 24378 24379 while (current_time - end_time < 0) { 24380 delay(drv_usectohz(500000)); 24381 24382 if ((err = sd_reserve_release(dev, SD_RESERVE)) != 0) { 24383 if ((sd_reserve_release(dev, SD_RESERVE)) != 0) { 24384 mutex_enter(SD_MUTEX(un)); 24385 rval = (un->un_resvd_status & 24386 SD_RESERVATION_CONFLICT) ? EACCES : EIO; 24387 mutex_exit(SD_MUTEX(un)); 24388 break; 24389 } 24390 } 24391 previous_current_time = current_time; 24392 current_time = ddi_get_lbolt(); 24393 mutex_enter(SD_MUTEX(un)); 24394 if (err || (un->un_resvd_status & SD_LOST_RESERVE)) { 24395 ownership_time = ddi_get_lbolt() + 24396 drv_usectohz(min_ownership_delay); 24397 reservation_count = 0; 24398 } else { 24399 reservation_count++; 24400 } 24401 un->un_resvd_status |= SD_RESERVE; 24402 un->un_resvd_status &= ~(SD_LOST_RESERVE | SD_WANT_RESERVE); 24403 mutex_exit(SD_MUTEX(un)); 24404 24405 SD_INFO(SD_LOG_IOCTL_MHD, un, 24406 "sd_take_ownership: ticks for loop iteration=%ld, " 24407 "reservation=%s\n", (current_time - previous_current_time), 24408 reservation_count ? 
"ok" : "reclaimed"); 24409 24410 if (current_time - ownership_time >= 0 && 24411 reservation_count >= 4) { 24412 rval = 0; /* Achieved a stable ownership */ 24413 break; 24414 } 24415 if (current_time - end_time >= 0) { 24416 rval = EACCES; /* No ownership in max possible time */ 24417 break; 24418 } 24419 } 24420 SD_TRACE(SD_LOG_IOCTL_MHD, un, 24421 "sd_take_ownership: return(2)=%d\n", rval); 24422 return (rval); 24423 } 24424 24425 24426 /* 24427 * Function: sd_reserve_release() 24428 * 24429 * Description: This function builds and sends scsi RESERVE, RELEASE, and 24430 * PRIORITY RESERVE commands based on a user specified command type 24431 * 24432 * Arguments: dev - the device 'dev_t' 24433 * cmd - user specified command type; one of SD_PRIORITY_RESERVE, 24434 * SD_RESERVE, SD_RELEASE 24435 * 24436 * Return Code: 0 or Error Code 24437 */ 24438 24439 static int 24440 sd_reserve_release(dev_t dev, int cmd) 24441 { 24442 struct uscsi_cmd *com = NULL; 24443 struct sd_lun *un = NULL; 24444 char cdb[CDB_GROUP0]; 24445 int rval; 24446 24447 ASSERT((cmd == SD_RELEASE) || (cmd == SD_RESERVE) || 24448 (cmd == SD_PRIORITY_RESERVE)); 24449 24450 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 24451 return (ENXIO); 24452 } 24453 24454 /* instantiate and initialize the command and cdb */ 24455 com = kmem_zalloc(sizeof (*com), KM_SLEEP); 24456 bzero(cdb, CDB_GROUP0); 24457 com->uscsi_flags = USCSI_SILENT; 24458 com->uscsi_timeout = un->un_reserve_release_time; 24459 com->uscsi_cdblen = CDB_GROUP0; 24460 com->uscsi_cdb = cdb; 24461 if (cmd == SD_RELEASE) { 24462 cdb[0] = SCMD_RELEASE; 24463 } else { 24464 cdb[0] = SCMD_RESERVE; 24465 } 24466 24467 /* Send the command. */ 24468 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_SYSSPACE, 24469 SD_PATH_STANDARD); 24470 24471 /* 24472 * "break" a reservation that is held by another host, by issuing a 24473 * reset if priority reserve is desired, and we could not get the 24474 * device. 24475 */ 24476 if ((cmd == SD_PRIORITY_RESERVE) && 24477 (rval != 0) && (com->uscsi_status == STATUS_RESERVATION_CONFLICT)) { 24478 /* 24479 * First try to reset the LUN. If we cannot, then try a target 24480 * reset, followed by a bus reset if the target reset fails. 24481 */ 24482 int reset_retval = 0; 24483 if (un->un_f_lun_reset_enabled == TRUE) { 24484 reset_retval = scsi_reset(SD_ADDRESS(un), RESET_LUN); 24485 } 24486 if (reset_retval == 0) { 24487 /* The LUN reset either failed or was not issued */ 24488 reset_retval = scsi_reset(SD_ADDRESS(un), RESET_TARGET); 24489 } 24490 if ((reset_retval == 0) && 24491 (scsi_reset(SD_ADDRESS(un), RESET_ALL) == 0)) { 24492 rval = EIO; 24493 kmem_free(com, sizeof (*com)); 24494 return (rval); 24495 } 24496 24497 bzero(com, sizeof (struct uscsi_cmd)); 24498 com->uscsi_flags = USCSI_SILENT; 24499 com->uscsi_cdb = cdb; 24500 com->uscsi_cdblen = CDB_GROUP0; 24501 com->uscsi_timeout = 5; 24502 24503 /* 24504 * Reissue the last reserve command, this time without request 24505 * sense. Assume that it is just a regular reserve command. 24506 */ 24507 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_SYSSPACE, 24508 SD_PATH_STANDARD); 24509 } 24510 24511 /* Return an error if still getting a reservation conflict. 
*/ 24512 if ((rval != 0) && (com->uscsi_status == STATUS_RESERVATION_CONFLICT)) { 24513 rval = EACCES; 24514 } 24515 24516 kmem_free(com, sizeof (*com)); 24517 return (rval); 24518 } 24519 24520 24521 #define SD_NDUMP_RETRIES 12 24522 /* 24523 * System Crash Dump routine 24524 */ 24525 24526 static int 24527 sddump(dev_t dev, caddr_t addr, daddr_t blkno, int nblk) 24528 { 24529 int instance; 24530 int partition; 24531 int i; 24532 int err; 24533 struct sd_lun *un; 24534 struct scsi_pkt *wr_pktp; 24535 struct buf *wr_bp; 24536 struct buf wr_buf; 24537 daddr_t tgt_byte_offset; /* rmw - byte offset for target */ 24538 daddr_t tgt_blkno; /* rmw - blkno for target */ 24539 size_t tgt_byte_count; /* rmw - # of bytes to xfer */ 24540 size_t tgt_nblk; /* rmw - # of tgt blks to xfer */ 24541 size_t io_start_offset; 24542 int doing_rmw = FALSE; 24543 int rval; 24544 ssize_t dma_resid; 24545 daddr_t oblkno; 24546 diskaddr_t nblks = 0; 24547 diskaddr_t start_block; 24548 24549 instance = SDUNIT(dev); 24550 if (((un = ddi_get_soft_state(sd_state, instance)) == NULL) || 24551 !SD_IS_VALID_LABEL(un) || ISCD(un)) { 24552 return (ENXIO); 24553 } 24554 24555 _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*un)) 24556 24557 SD_TRACE(SD_LOG_DUMP, un, "sddump: entry\n"); 24558 24559 partition = SDPART(dev); 24560 SD_INFO(SD_LOG_DUMP, un, "sddump: partition = %d\n", partition); 24561 24562 /* Validate the blocks to dump against the partition size. */ 24563 24564 (void) cmlb_partinfo(un->un_cmlbhandle, partition, 24565 &nblks, &start_block, NULL, NULL, (void *)SD_PATH_DIRECT); 24566 24567 if ((blkno + nblk) > nblks) { 24568 SD_TRACE(SD_LOG_DUMP, un, 24569 "sddump: dump range larger than partition: " 24570 "blkno = 0x%x, nblk = 0x%x, dkl_nblk = 0x%x\n", 24571 blkno, nblk, nblks); 24572 return (EINVAL); 24573 } 24574 24575 mutex_enter(&un->un_pm_mutex); 24576 if (SD_DEVICE_IS_IN_LOW_POWER(un)) { 24577 struct scsi_pkt *start_pktp; 24578 24579 mutex_exit(&un->un_pm_mutex); 24580 24581 /* 24582 * use the pm framework to power on the HBA first 24583 */ 24584 (void) pm_raise_power(SD_DEVINFO(un), 0, SD_SPINDLE_ON); 24585 24586 /* 24587 * Dump no longer uses sdpower to power on a device; the 24588 * power-on is done in-line here so it can be done in polled mode. 24589 */ 24590 24591 SD_INFO(SD_LOG_DUMP, un, "sddump: starting device\n"); 24592 24593 start_pktp = scsi_init_pkt(SD_ADDRESS(un), NULL, NULL, 24594 CDB_GROUP0, un->un_status_len, 0, 0, NULL_FUNC, NULL); 24595 24596 if (start_pktp == NULL) { 24597 /* We were not given a SCSI packet, fail. */ 24598 return (EIO); 24599 } 24600 bzero(start_pktp->pkt_cdbp, CDB_GROUP0); 24601 start_pktp->pkt_cdbp[0] = SCMD_START_STOP; 24602 start_pktp->pkt_cdbp[4] = SD_TARGET_START; 24603 start_pktp->pkt_flags = FLAG_NOINTR; 24604 24605 mutex_enter(SD_MUTEX(un)); 24606 SD_FILL_SCSI1_LUN(un, start_pktp); 24607 mutex_exit(SD_MUTEX(un)); 24608 /* 24609 * Scsi_poll returns 0 (success) if the command completes and 24610 * the status block is STATUS_GOOD. 24611 */ 24612 if (sd_scsi_poll(un, start_pktp) != 0) { 24613 scsi_destroy_pkt(start_pktp); 24614 return (EIO); 24615 } 24616 scsi_destroy_pkt(start_pktp); 24617 (void) sd_ddi_pm_resume(un); 24618 } else { 24619 mutex_exit(&un->un_pm_mutex); 24620 } 24621 24622 mutex_enter(SD_MUTEX(un)); 24623 un->un_throttle = 0; 24624 24625 /* 24626 * The first time through, reset the specific target device. 24627 * However, when cpr calls sddump we know that sd is in a 24628 * good state, so no bus reset is required. 24629 * Clear sense data via Request Sense cmd.
24630 * In sddump we don't care about allow_bus_device_reset anymore. 24631 */ 24632 24633 if ((un->un_state != SD_STATE_SUSPENDED) && 24634 (un->un_state != SD_STATE_DUMPING)) { 24635 24636 New_state(un, SD_STATE_DUMPING); 24637 24638 if (un->un_f_is_fibre == FALSE) { 24639 mutex_exit(SD_MUTEX(un)); 24640 /* 24641 * Attempt a bus reset for parallel scsi. 24642 * 24643 * Note: A bus reset is required because on some host 24644 * systems (i.e. E420R) a bus device reset is 24645 * insufficient to reset the state of the target. 24646 * 24647 * Note: Don't issue the reset for fibre-channel, 24648 * because this tends to hang the bus (loop) for 24649 * too long while everyone is logging out and in 24650 * and the deadman timer for dumping will fire 24651 * before the dump is complete. 24652 */ 24653 if (scsi_reset(SD_ADDRESS(un), RESET_ALL) == 0) { 24654 mutex_enter(SD_MUTEX(un)); 24655 Restore_state(un); 24656 mutex_exit(SD_MUTEX(un)); 24657 return (EIO); 24658 } 24659 24660 /* Delay to give the device some recovery time. */ 24661 drv_usecwait(10000); 24662 24663 if (sd_send_polled_RQS(un) == SD_FAILURE) { 24664 SD_INFO(SD_LOG_DUMP, un, 24665 "sddump: sd_send_polled_RQS failed\n"); 24666 } 24667 mutex_enter(SD_MUTEX(un)); 24668 } 24669 } 24670 24671 /* 24672 * Convert the partition-relative block number to a 24673 * disk physical block number. 24674 */ 24675 blkno += start_block; 24676 24677 SD_INFO(SD_LOG_DUMP, un, "sddump: disk blkno = 0x%x\n", blkno); 24678 24679 24680 /* 24681 * Check if the device has a non-512 block size. 24682 */ 24683 wr_bp = NULL; 24684 if (NOT_DEVBSIZE(un)) { 24685 tgt_byte_offset = blkno * un->un_sys_blocksize; 24686 tgt_byte_count = nblk * un->un_sys_blocksize; 24687 if ((tgt_byte_offset % un->un_tgt_blocksize) || 24688 (tgt_byte_count % un->un_tgt_blocksize)) { 24689 doing_rmw = TRUE; 24690 /* 24691 * Calculate the block number and the number of blocks 24692 * in terms of the media block size. 24693 */ 24694 tgt_blkno = tgt_byte_offset / un->un_tgt_blocksize; 24695 tgt_nblk = 24696 ((tgt_byte_offset + tgt_byte_count + 24697 (un->un_tgt_blocksize - 1)) / 24698 un->un_tgt_blocksize) - tgt_blkno; 24699 24700 /* 24701 * Invoke the routine that is going to do the read part 24702 * of the read-modify-write. 24703 * Note that this routine returns a pointer to 24704 * a valid bp in wr_bp. 24705 */ 24706 err = sddump_do_read_of_rmw(un, tgt_blkno, tgt_nblk, 24707 &wr_bp); 24708 if (err) { 24709 mutex_exit(SD_MUTEX(un)); 24710 return (err); 24711 } 24712 /* 24713 * The offset is calculated as: 24714 * (original block # * system block size) - 24715 * (new block # * target block size) 24716 */ 24717 io_start_offset = 24718 ((uint64_t)(blkno * un->un_sys_blocksize)) - 24719 ((uint64_t)(tgt_blkno * un->un_tgt_blocksize)); 24720 24721 ASSERT((io_start_offset >= 0) && 24722 (io_start_offset < un->un_tgt_blocksize)); 24723 /* 24724 * Do the modify portion of the read-modify-write.
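 * For example (hypothetical sizes): with un_sys_blocksize = 512,
 * un_tgt_blocksize = 2048 and a dump starting at system block 21,
 * the read above fetched target block 5 (= 10752 / 2048), and
 * io_start_offset = 21 * 512 - 5 * 2048 = 512, so the dump data is
 * copied in starting 512 bytes into the block just read back.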
24725 */ 24726 bcopy(addr, &wr_bp->b_un.b_addr[io_start_offset], 24727 (size_t)nblk * un->un_sys_blocksize); 24728 } else { 24729 doing_rmw = FALSE; 24730 tgt_blkno = tgt_byte_offset / un->un_tgt_blocksize; 24731 tgt_nblk = tgt_byte_count / un->un_tgt_blocksize; 24732 } 24733 24734 /* Convert blkno and nblk to target blocks */ 24735 blkno = tgt_blkno; 24736 nblk = tgt_nblk; 24737 } else { 24738 wr_bp = &wr_buf; 24739 bzero(wr_bp, sizeof (struct buf)); 24740 wr_bp->b_flags = B_BUSY; 24741 wr_bp->b_un.b_addr = addr; 24742 wr_bp->b_bcount = nblk << DEV_BSHIFT; 24743 wr_bp->b_resid = 0; 24744 } 24745 24746 mutex_exit(SD_MUTEX(un)); 24747 24748 /* 24749 * Obtain a SCSI packet for the write command. 24750 * It should be safe to call the allocator here without 24751 * worrying about being locked for DVMA mapping because 24752 * the address we're passed is already a DVMA mapping 24753 * 24754 * We are also not going to worry about semaphore ownership 24755 * in the dump buffer. Dumping is single threaded at present. 24756 */ 24757 24758 wr_pktp = NULL; 24759 24760 dma_resid = wr_bp->b_bcount; 24761 oblkno = blkno; 24762 24763 while (dma_resid != 0) { 24764 24765 for (i = 0; i < SD_NDUMP_RETRIES; i++) { 24766 wr_bp->b_flags &= ~B_ERROR; 24767 24768 if (un->un_partial_dma_supported == 1) { 24769 blkno = oblkno + 24770 ((wr_bp->b_bcount - dma_resid) / 24771 un->un_tgt_blocksize); 24772 nblk = dma_resid / un->un_tgt_blocksize; 24773 24774 if (wr_pktp) { 24775 /* 24776 * Partial DMA transfers after initial transfer 24777 */ 24778 rval = sd_setup_next_rw_pkt(un, wr_pktp, wr_bp, 24779 blkno, nblk); 24780 } else { 24781 /* Initial transfer */ 24782 rval = sd_setup_rw_pkt(un, &wr_pktp, wr_bp, 24783 un->un_pkt_flags, NULL_FUNC, NULL, 24784 blkno, nblk); 24785 } 24786 } else { 24787 rval = sd_setup_rw_pkt(un, &wr_pktp, wr_bp, 24788 0, NULL_FUNC, NULL, blkno, nblk); 24789 } 24790 24791 if (rval == 0) { 24792 /* We were given a SCSI packet, continue. 
*/ 24793 break; 24794 } 24795 24796 if (i == 0) { 24797 if (wr_bp->b_flags & B_ERROR) { 24798 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 24799 "no resources for dumping; " 24800 "error code: 0x%x, retrying", 24801 geterror(wr_bp)); 24802 } else { 24803 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 24804 "no resources for dumping; retrying"); 24805 } 24806 } else if (i != (SD_NDUMP_RETRIES - 1)) { 24807 if (wr_bp->b_flags & B_ERROR) { 24808 scsi_log(SD_DEVINFO(un), sd_label, CE_CONT, 24809 "no resources for dumping; error code: " 24810 "0x%x, retrying\n", geterror(wr_bp)); 24811 } 24812 } else { 24813 if (wr_bp->b_flags & B_ERROR) { 24814 scsi_log(SD_DEVINFO(un), sd_label, CE_CONT, 24815 "no resources for dumping; " 24816 "error code: 0x%x, retries failed, " 24817 "giving up.\n", geterror(wr_bp)); 24818 } else { 24819 scsi_log(SD_DEVINFO(un), sd_label, CE_CONT, 24820 "no resources for dumping; " 24821 "retries failed, giving up.\n"); 24822 } 24823 mutex_enter(SD_MUTEX(un)); 24824 Restore_state(un); 24825 if (NOT_DEVBSIZE(un) && (doing_rmw == TRUE)) { 24826 mutex_exit(SD_MUTEX(un)); 24827 scsi_free_consistent_buf(wr_bp); 24828 } else { 24829 mutex_exit(SD_MUTEX(un)); 24830 } 24831 return (EIO); 24832 } 24833 drv_usecwait(10000); 24834 } 24835 24836 if (un->un_partial_dma_supported == 1) { 24837 /* 24838 * save the resid from PARTIAL_DMA 24839 */ 24840 dma_resid = wr_pktp->pkt_resid; 24841 if (dma_resid != 0) 24842 nblk -= SD_BYTES2TGTBLOCKS(un, dma_resid); 24843 wr_pktp->pkt_resid = 0; 24844 } else { 24845 dma_resid = 0; 24846 } 24847 24848 /* SunBug 1222170 */ 24849 wr_pktp->pkt_flags = FLAG_NOINTR; 24850 24851 err = EIO; 24852 for (i = 0; i < SD_NDUMP_RETRIES; i++) { 24853 24854 /* 24855 * Scsi_poll returns 0 (success) if the command completes and 24856 * the status block is STATUS_GOOD. We should only check 24857 * errors if this condition is not true. Even then we should 24858 * send our own request sense packet only if we have a check 24859 * condition and auto request sense has not been performed by 24860 * the hba. 24861 */ 24862 SD_TRACE(SD_LOG_DUMP, un, "sddump: sending write\n"); 24863 24864 if ((sd_scsi_poll(un, wr_pktp) == 0) && 24865 (wr_pktp->pkt_resid == 0)) { 24866 err = SD_SUCCESS; 24867 break; 24868 } 24869 24870 /* 24871 * Check CMD_DEV_GONE 1st, give up if device is gone. 
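 * (CMD_DEV_GONE means the target has dropped off the transport
 * entirely, so neither further retries nor resets can rescue the
 * dump write.)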
24872 */ 24873 if (wr_pktp->pkt_reason == CMD_DEV_GONE) { 24874 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 24875 "Error while dumping state...Device is gone\n"); 24876 break; 24877 } 24878 24879 if (SD_GET_PKT_STATUS(wr_pktp) == STATUS_CHECK) { 24880 SD_INFO(SD_LOG_DUMP, un, 24881 "sddump: write failed with CHECK, try # %d\n", i); 24882 if (((wr_pktp->pkt_state & STATE_ARQ_DONE) == 0)) { 24883 (void) sd_send_polled_RQS(un); 24884 } 24885 24886 continue; 24887 } 24888 24889 if (SD_GET_PKT_STATUS(wr_pktp) == STATUS_BUSY) { 24890 int reset_retval = 0; 24891 24892 SD_INFO(SD_LOG_DUMP, un, 24893 "sddump: write failed with BUSY, try # %d\n", i); 24894 24895 if (un->un_f_lun_reset_enabled == TRUE) { 24896 reset_retval = scsi_reset(SD_ADDRESS(un), 24897 RESET_LUN); 24898 } 24899 if (reset_retval == 0) { 24900 (void) scsi_reset(SD_ADDRESS(un), RESET_TARGET); 24901 } 24902 (void) sd_send_polled_RQS(un); 24903 24904 } else { 24905 SD_INFO(SD_LOG_DUMP, un, 24906 "sddump: write failed with 0x%x, try # %d\n", 24907 SD_GET_PKT_STATUS(wr_pktp), i); 24908 mutex_enter(SD_MUTEX(un)); 24909 sd_reset_target(un, wr_pktp); 24910 mutex_exit(SD_MUTEX(un)); 24911 } 24912 24913 /* 24914 * If we are not getting anywhere with lun/target resets, 24915 * let's reset the bus. 24916 */ 24917 if (i == SD_NDUMP_RETRIES/2) { 24918 (void) scsi_reset(SD_ADDRESS(un), RESET_ALL); 24919 (void) sd_send_polled_RQS(un); 24920 } 24921 } 24922 } 24923 24924 scsi_destroy_pkt(wr_pktp); 24925 mutex_enter(SD_MUTEX(un)); 24926 if ((NOT_DEVBSIZE(un)) && (doing_rmw == TRUE)) { 24927 mutex_exit(SD_MUTEX(un)); 24928 scsi_free_consistent_buf(wr_bp); 24929 } else { 24930 mutex_exit(SD_MUTEX(un)); 24931 } 24932 SD_TRACE(SD_LOG_DUMP, un, "sddump: exit: err = %d\n", err); 24933 return (err); 24934 } 24935 24936 /* 24937 * Function: sd_scsi_poll() 24938 * 24939 * Description: This is a wrapper for the scsi_poll call. 24940 * 24941 * Arguments: sd_lun - The unit structure 24942 * scsi_pkt - The scsi packet being sent to the device. 24943 * 24944 * Return Code: 0 - Command completed successfully with good status 24945 * -1 - Command failed. This could indicate a check condition 24946 * or other status value requiring recovery action. 24947 * 24948 * NOTE: This code is only called off sddump(). 24949 */ 24950 24951 static int 24952 sd_scsi_poll(struct sd_lun *un, struct scsi_pkt *pktp) 24953 { 24954 int status; 24955 24956 ASSERT(un != NULL); 24957 ASSERT(!mutex_owned(SD_MUTEX(un))); 24958 ASSERT(pktp != NULL); 24959 24960 status = SD_SUCCESS; 24961 24962 if (scsi_ifgetcap(&pktp->pkt_address, "tagged-qing", 1) == 1) { 24963 pktp->pkt_flags |= un->un_tagflags; 24964 pktp->pkt_flags &= ~FLAG_NODISCON; 24965 } 24966 24967 status = sd_ddi_scsi_poll(pktp); 24968 /* 24969 * Scsi_poll returns 0 (success) if the command completes and the 24970 * status block is STATUS_GOOD. We should only check errors if this 24971 * condition is not true. Even then we should send our own request 24972 * sense packet only if we have a check condition and auto 24973 * request sense has not been performed by the hba. 24974 * Don't get RQS data if pkt_reason is CMD_DEV_GONE. 24975 */ 24976 if ((status != SD_SUCCESS) && 24977 (SD_GET_PKT_STATUS(pktp) == STATUS_CHECK) && 24978 (pktp->pkt_state & STATE_ARQ_DONE) == 0 && 24979 (pktp->pkt_reason != CMD_DEV_GONE)) 24980 (void) sd_send_polled_RQS(un); 24981 24982 return (status); 24983 } 24984 24985 /* 24986 * Function: sd_send_polled_RQS() 24987 * 24988 * Description: This sends the request sense command to a device. 
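 *
 *		The pre-allocated request sense packet (un_rqs_pktp) is
 *		reused here; in outline its group-0 CDB looks like the
 *		following (a sketch; the packet is built elsewhere in
 *		this driver):
 *
 *			cdb[0] = SCMD_REQUEST_SENSE	(0x03)
 *			cdb[4] = SENSE_LENGTH		(allocation length)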
24989 * 24990 * Arguments: sd_lun - The unit structure 24991 * 24992 * Return Code: 0 - Command completed successfully with good status 24993 * -1 - Command failed. 24994 * 24995 */ 24996 24997 static int 24998 sd_send_polled_RQS(struct sd_lun *un) 24999 { 25000 int ret_val; 25001 struct scsi_pkt *rqs_pktp; 25002 struct buf *rqs_bp; 25003 25004 ASSERT(un != NULL); 25005 ASSERT(!mutex_owned(SD_MUTEX(un))); 25006 25007 ret_val = SD_SUCCESS; 25008 25009 rqs_pktp = un->un_rqs_pktp; 25010 rqs_bp = un->un_rqs_bp; 25011 25012 mutex_enter(SD_MUTEX(un)); 25013 25014 if (un->un_sense_isbusy) { 25015 ret_val = SD_FAILURE; 25016 mutex_exit(SD_MUTEX(un)); 25017 return (ret_val); 25018 } 25019 25020 /* 25021 * If the request sense buffer (and packet) is not in use, 25022 * set un_sense_isbusy and send our packet. 25023 */ 25024 un->un_sense_isbusy = 1; 25025 rqs_pktp->pkt_resid = 0; 25026 rqs_pktp->pkt_reason = 0; 25027 rqs_pktp->pkt_flags |= FLAG_NOINTR; 25028 bzero(rqs_bp->b_un.b_addr, SENSE_LENGTH); 25029 25030 mutex_exit(SD_MUTEX(un)); 25031 25032 SD_INFO(SD_LOG_COMMON, un, "sd_send_polled_RQS: req sense buf at" 25033 " 0x%p\n", rqs_bp->b_un.b_addr); 25034 25035 /* 25036 * Can't send this through sd_scsi_poll; we would wrap ourselves 25037 * around the axle, since it has a call back into us! 25038 */ 25039 if ((ret_val = sd_ddi_scsi_poll(rqs_pktp)) != 0) { 25040 SD_INFO(SD_LOG_COMMON, un, 25041 "sd_send_polled_RQS: RQS failed\n"); 25042 } 25043 25044 SD_DUMP_MEMORY(un, SD_LOG_COMMON, "sd_send_polled_RQS:", 25045 (uchar_t *)rqs_bp->b_un.b_addr, SENSE_LENGTH, SD_LOG_HEX); 25046 25047 mutex_enter(SD_MUTEX(un)); 25048 un->un_sense_isbusy = 0; 25049 mutex_exit(SD_MUTEX(un)); 25050 25051 return (ret_val); 25052 } 25053 25054 /* 25055 * Defines needed for localized version of the scsi_poll routine. 25056 */ 25057 #define CSEC 10000 /* usecs */ 25058 #define SEC_TO_CSEC (1000000/CSEC) 25059 25060 /* 25061 * Function: sd_ddi_scsi_poll() 25062 * 25063 * Description: Localized version of the scsi_poll routine. The purpose is to 25064 * send a scsi_pkt to a device as a polled command. This version 25065 * is to ensure more robust handling of transport errors. 25066 * Specifically, this routine cures the not-ready-to-ready 25067 * transition during power up and reset of Sonoma devices. This can 25068 * take up to 45 seconds for power-on and 20 seconds for reset of a 25069 * Sonoma LUN. 25070 * 25071 * Arguments: scsi_pkt - The scsi_pkt being sent to a device 25072 * 25073 * Return Code: 0 - Command completed successfully with good status 25074 * -1 - Command failed. 25075 * 25076 * NOTE: This code is almost identical to scsi_poll; however, before 6668774 can 25077 * be fixed (removing this code), we need to determine how to handle the 25078 * KEY_UNIT_ATTENTION condition below in conditions not as limited as sddump(). 25079 * 25080 * NOTE: This code is only called from sddump(). 25081 */ 25082 static int 25083 sd_ddi_scsi_poll(struct scsi_pkt *pkt) 25084 { 25085 int rval = -1; 25086 int savef; 25087 long savet; 25088 void (*savec)(); 25089 int timeout; 25090 int busy_count; 25091 int poll_delay; 25092 int rc; 25093 uint8_t *sensep; 25094 struct scsi_arq_status *arqstat; 25095 extern int do_polled_io; 25096 25097 ASSERT(pkt->pkt_scbp); 25098 25099 /* 25100 * save old flags.
25101 */ 25102 savef = pkt->pkt_flags; 25103 savec = pkt->pkt_comp; 25104 savet = pkt->pkt_time; 25105 25106 pkt->pkt_flags |= FLAG_NOINTR; 25107 25108 /* 25109 * XXX there is nothing in the SCSA spec that states that we should not 25110 * do a callback for polled cmds; however, removing this will break sd 25111 * and probably other target drivers 25112 */ 25113 pkt->pkt_comp = NULL; 25114 25115 /* 25116 * we don't like a polled command without timeout. 25117 * 60 seconds seems long enough. 25118 */ 25119 if (pkt->pkt_time == 0) 25120 pkt->pkt_time = SCSI_POLL_TIMEOUT; 25121 25122 /* 25123 * Send polled cmd. 25124 * 25125 * We do some error recovery for various errors. Tran_busy, 25126 * queue full, and non-dispatched commands are retried every 10 msec. 25127 * as they are typically transient failures. Busy status and Not 25128 * Ready are retried every second as this status takes a while to 25129 * change. 25130 */ 25131 timeout = pkt->pkt_time * SEC_TO_CSEC; 25132 25133 for (busy_count = 0; busy_count < timeout; busy_count++) { 25134 /* 25135 * Initialize pkt status variables. 25136 */ 25137 *pkt->pkt_scbp = pkt->pkt_reason = pkt->pkt_state = 0; 25138 25139 if ((rc = scsi_transport(pkt)) != TRAN_ACCEPT) { 25140 if (rc != TRAN_BUSY) { 25141 /* Transport failed - give up. */ 25142 break; 25143 } else { 25144 /* Transport busy - try again. */ 25145 poll_delay = 1 * CSEC; /* 10 msec. */ 25146 } 25147 } else { 25148 /* 25149 * Transport accepted - check pkt status. 25150 */ 25151 rc = (*pkt->pkt_scbp) & STATUS_MASK; 25152 if ((pkt->pkt_reason == CMD_CMPLT) && 25153 (rc == STATUS_CHECK) && 25154 (pkt->pkt_state & STATE_ARQ_DONE)) { 25155 arqstat = 25156 (struct scsi_arq_status *)(pkt->pkt_scbp); 25157 sensep = (uint8_t *)&arqstat->sts_sensedata; 25158 } else { 25159 sensep = NULL; 25160 } 25161 25162 if ((pkt->pkt_reason == CMD_CMPLT) && 25163 (rc == STATUS_GOOD)) { 25164 /* No error - we're done */ 25165 rval = 0; 25166 break; 25167 25168 } else if (pkt->pkt_reason == CMD_DEV_GONE) { 25169 /* Lost connection - give up */ 25170 break; 25171 25172 } else if ((pkt->pkt_reason == CMD_INCOMPLETE) && 25173 (pkt->pkt_state == 0)) { 25174 /* Pkt not dispatched - try again. */ 25175 poll_delay = 1 * CSEC; /* 10 msec. */ 25176 25177 } else if ((pkt->pkt_reason == CMD_CMPLT) && 25178 (rc == STATUS_QFULL)) { 25179 /* Queue full - try again. */ 25180 poll_delay = 1 * CSEC; /* 10 msec. */ 25181 25182 } else if ((pkt->pkt_reason == CMD_CMPLT) && 25183 (rc == STATUS_BUSY)) { 25184 /* Busy - try again. */ 25185 poll_delay = 100 * CSEC; /* 1 sec. */ 25186 busy_count += (SEC_TO_CSEC - 1); 25187 25188 } else if ((sensep != NULL) && 25189 (scsi_sense_key(sensep) == KEY_UNIT_ATTENTION)) { 25190 /* 25191 * Unit Attention - try again. 25192 * Pretend it took 1 sec. 25193 * NOTE: 'continue' avoids poll_delay 25194 */ 25195 busy_count += (SEC_TO_CSEC - 1); 25196 continue; 25197 25198 } else if ((sensep != NULL) && 25199 (scsi_sense_key(sensep) == KEY_NOT_READY) && 25200 (scsi_sense_asc(sensep) == 0x04) && 25201 (scsi_sense_ascq(sensep) == 0x01)) { 25202 /* 25203 * Not ready -> ready - try again. 25204 * 04h/01h: LUN IS IN PROCESS OF BECOMING READY 25205 * ...same as STATUS_BUSY 25206 */ 25207 poll_delay = 100 * CSEC; /* 1 sec. */ 25208 busy_count += (SEC_TO_CSEC - 1); 25209 25210 } else { 25211 /* BAD status - give up. 
*/ 25212 break; 25213 } 25214 } 25215 25216 if (((curthread->t_flag & T_INTR_THREAD) == 0) && 25217 !do_polled_io) { 25218 delay(drv_usectohz(poll_delay)); 25219 } else { 25220 /* we busy wait during cpr_dump or interrupt threads */ 25221 drv_usecwait(poll_delay); 25222 } 25223 } 25224 25225 pkt->pkt_flags = savef; 25226 pkt->pkt_comp = savec; 25227 pkt->pkt_time = savet; 25228 25229 /* return on error */ 25230 if (rval) 25231 return (rval); 25232 25233 /* 25234 * This is not a performance critical code path. 25235 * 25236 * As an accommodation for scsi_poll callers, to avoid ddi_dma_sync() 25237 * issues associated with looking at DMA memory prior to 25238 * scsi_pkt_destroy(), we scsi_sync_pkt() prior to return. 25239 */ 25240 scsi_sync_pkt(pkt); 25241 return (0); 25242 } 25243 25244 25245 25246 /* 25247 * Function: sd_persistent_reservation_in_read_keys 25248 * 25249 * Description: This routine is the driver entry point for handling CD-ROM 25250 * multi-host persistent reservation requests (MHIOCGRP_INKEYS) 25251 * by sending the SCSI-3 PRIN commands to the device. 25252 * Processes the read keys command response by copying the 25253 * reservation key information into the user provided buffer. 25254 * Support for the 32/64 bit _MULTI_DATAMODEL is implemented. 25255 * 25256 * Arguments: un - Pointer to soft state struct for the target. 25257 * usrp - user provided pointer to multihost Persistent In Read 25258 * Keys structure (mhioc_inkeys_t) 25259 * flag - this argument is a pass through to ddi_copyxxx() 25260 * directly from the mode argument of ioctl(). 25261 * 25262 * Return Code: 0 - Success 25263 * EACCES 25264 * ENOTSUP 25265 * errno return code from sd_send_scsi_cmd() 25266 * 25267 * Context: Can sleep. Does not return until command is completed. 25268 */ 25269 25270 static int 25271 sd_persistent_reservation_in_read_keys(struct sd_lun *un, 25272 mhioc_inkeys_t *usrp, int flag) 25273 { 25274 #ifdef _MULTI_DATAMODEL 25275 struct mhioc_key_list32 li32; 25276 #endif 25277 sd_prin_readkeys_t *in; 25278 mhioc_inkeys_t *ptr; 25279 mhioc_key_list_t li; 25280 uchar_t *data_bufp; 25281 int data_len; 25282 int rval = 0; 25283 size_t copysz; 25284 sd_ssc_t *ssc; 25285 25286 if ((ptr = (mhioc_inkeys_t *)usrp) == NULL) { 25287 return (EINVAL); 25288 } 25289 bzero(&li, sizeof (mhioc_key_list_t)); 25290 25291 ssc = sd_ssc_init(un); 25292 25293 /* 25294 * Get the listsize from user 25295 */ 25296 #ifdef _MULTI_DATAMODEL 25297 25298 switch (ddi_model_convert_from(flag & FMODELS)) { 25299 case DDI_MODEL_ILP32: 25300 copysz = sizeof (struct mhioc_key_list32); 25301 if (ddi_copyin(ptr->li, &li32, copysz, flag)) { 25302 SD_ERROR(SD_LOG_IOCTL_MHD, un, 25303 "sd_persistent_reservation_in_read_keys: " 25304 "failed ddi_copyin: mhioc_key_list32_t\n"); 25305 rval = EFAULT; 25306 goto done; 25307 } 25308 li.listsize = li32.listsize; 25309 li.list = (mhioc_resv_key_t *)(uintptr_t)li32.list; 25310 break; 25311 25312 case DDI_MODEL_NONE: 25313 copysz = sizeof (mhioc_key_list_t); 25314 if (ddi_copyin(ptr->li, &li, copysz, flag)) { 25315 SD_ERROR(SD_LOG_IOCTL_MHD, un, 25316 "sd_persistent_reservation_in_read_keys: " 25317 "failed ddi_copyin: mhioc_key_list_t\n"); 25318 rval = EFAULT; 25319 goto done; 25320 } 25321 break; 25322 } 25323 25324 #else /* ! 
_MULTI_DATAMODEL */ 25325 copysz = sizeof (mhioc_key_list_t); 25326 if (ddi_copyin(ptr->li, &li, copysz, flag)) { 25327 SD_ERROR(SD_LOG_IOCTL_MHD, un, 25328 "sd_persistent_reservation_in_read_keys: " 25329 "failed ddi_copyin: mhioc_key_list_t\n"); 25330 rval = EFAULT; 25331 goto done; 25332 } 25333 #endif 25334 25335 data_len = li.listsize * MHIOC_RESV_KEY_SIZE; 25336 data_len += (sizeof (sd_prin_readkeys_t) - sizeof (caddr_t)); 25337 data_bufp = kmem_zalloc(data_len, KM_SLEEP); 25338 25339 rval = sd_send_scsi_PERSISTENT_RESERVE_IN(ssc, SD_READ_KEYS, 25340 data_len, data_bufp); 25341 if (rval != 0) { 25342 if (rval == EIO) 25343 sd_ssc_assessment(ssc, SD_FMT_IGNORE_COMPROMISE); 25344 else 25345 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 25346 goto done; 25347 } 25348 in = (sd_prin_readkeys_t *)data_bufp; 25349 ptr->generation = BE_32(in->generation); 25350 li.listlen = BE_32(in->len) / MHIOC_RESV_KEY_SIZE; 25351 25352 /* 25353 * Return the min(listsize, listlen) keys 25354 */ 25355 #ifdef _MULTI_DATAMODEL 25356 25357 switch (ddi_model_convert_from(flag & FMODELS)) { 25358 case DDI_MODEL_ILP32: 25359 li32.listlen = li.listlen; 25360 if (ddi_copyout(&li32, ptr->li, copysz, flag)) { 25361 SD_ERROR(SD_LOG_IOCTL_MHD, un, 25362 "sd_persistent_reservation_in_read_keys: " 25363 "failed ddi_copyout: mhioc_key_list32_t\n"); 25364 rval = EFAULT; 25365 goto done; 25366 } 25367 break; 25368 25369 case DDI_MODEL_NONE: 25370 if (ddi_copyout(&li, ptr->li, copysz, flag)) { 25371 SD_ERROR(SD_LOG_IOCTL_MHD, un, 25372 "sd_persistent_reservation_in_read_keys: " 25373 "failed ddi_copyout: mhioc_key_list_t\n"); 25374 rval = EFAULT; 25375 goto done; 25376 } 25377 break; 25378 } 25379 25380 #else /* ! _MULTI_DATAMODEL */ 25381 25382 if (ddi_copyout(&li, ptr->li, copysz, flag)) { 25383 SD_ERROR(SD_LOG_IOCTL_MHD, un, 25384 "sd_persistent_reservation_in_read_keys: " 25385 "failed ddi_copyout: mhioc_key_list_t\n"); 25386 rval = EFAULT; 25387 goto done; 25388 } 25389 25390 #endif /* _MULTI_DATAMODEL */ 25391 25392 copysz = min(li.listlen * MHIOC_RESV_KEY_SIZE, 25393 li.listsize * MHIOC_RESV_KEY_SIZE); 25394 if (ddi_copyout(&in->keylist, li.list, copysz, flag)) { 25395 SD_ERROR(SD_LOG_IOCTL_MHD, un, 25396 "sd_persistent_reservation_in_read_keys: " 25397 "failed ddi_copyout: keylist\n"); 25398 rval = EFAULT; 25399 } 25400 done: 25401 sd_ssc_fini(ssc); 25402 kmem_free(data_bufp, data_len); 25403 return (rval); 25404 } 25405 25406 25407 /* 25408 * Function: sd_persistent_reservation_in_read_resv 25409 * 25410 * Description: This routine is the driver entry point for handling CD-ROM 25411 * multi-host persistent reservation requests (MHIOCGRP_INRESV) 25412 * by sending the SCSI-3 PRIN commands to the device. 25413 * Process the read persistent reservations command response by 25414 * copying the reservation information into the user provided 25415 * buffer. Support for the 32/64 bit _MULTI_DATAMODEL is implemented. 25416 * 25417 * Arguments: un - Pointer to soft state struct for the target. 25418 * usrp - user provided pointer to multihost Persistent In Read 25419 * Reservations structure (mhioc_inresvs_t) 25420 * flag - this argument is a pass through to ddi_copyxxx() 25421 * directly from the mode argument of ioctl(). 25422 * 25423 * Return Code: 0 - Success 25424 * EACCES 25425 * ENOTSUP 25426 * errno return code from sd_send_scsi_cmd() 25427 * 25428 * Context: Can sleep. Does not return until command is completed.
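 *
 * The PRIN READ RESERVATIONS payload parsed here has, in outline (a
 * sketch of the SCSI-3 layout, with field names as used by
 * sd_prin_readresv_t):
 *
 *	bytes 0-3	generation count (big-endian, hence BE_32())
 *	bytes 4-7	additional length, a multiple of
 *			SCSI3_RESV_DESC_LEN
 *	bytes 8-	one reservation descriptor per reservation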
25429 */ 25430 25431 static int 25432 sd_persistent_reservation_in_read_resv(struct sd_lun *un, 25433 mhioc_inresvs_t *usrp, int flag) 25434 { 25435 #ifdef _MULTI_DATAMODEL 25436 struct mhioc_resv_desc_list32 resvlist32; 25437 #endif 25438 sd_prin_readresv_t *in; 25439 mhioc_inresvs_t *ptr; 25440 sd_readresv_desc_t *readresv_ptr; 25441 mhioc_resv_desc_list_t resvlist; 25442 mhioc_resv_desc_t resvdesc; 25443 uchar_t *data_bufp = NULL; 25444 int data_len; 25445 int rval = 0; 25446 int i; 25447 size_t copysz; 25448 mhioc_resv_desc_t *bufp; 25449 sd_ssc_t *ssc; 25450 25451 if ((ptr = usrp) == NULL) { 25452 return (EINVAL); 25453 } 25454 25455 ssc = sd_ssc_init(un); 25456 25457 /* 25458 * Get the listsize from user 25459 */ 25460 #ifdef _MULTI_DATAMODEL 25461 switch (ddi_model_convert_from(flag & FMODELS)) { 25462 case DDI_MODEL_ILP32: 25463 copysz = sizeof (struct mhioc_resv_desc_list32); 25464 if (ddi_copyin(ptr->li, &resvlist32, copysz, flag)) { 25465 SD_ERROR(SD_LOG_IOCTL_MHD, un, 25466 "sd_persistent_reservation_in_read_resv: " 25467 "failed ddi_copyin: mhioc_resv_desc_list_t\n"); 25468 rval = EFAULT; 25469 goto done; 25470 } 25471 resvlist.listsize = resvlist32.listsize; 25472 resvlist.list = (mhioc_resv_desc_t *)(uintptr_t)resvlist32.list; 25473 break; 25474 25475 case DDI_MODEL_NONE: 25476 copysz = sizeof (mhioc_resv_desc_list_t); 25477 if (ddi_copyin(ptr->li, &resvlist, copysz, flag)) { 25478 SD_ERROR(SD_LOG_IOCTL_MHD, un, 25479 "sd_persistent_reservation_in_read_resv: " 25480 "failed ddi_copyin: mhioc_resv_desc_list_t\n"); 25481 rval = EFAULT; 25482 goto done; 25483 } 25484 break; 25485 } 25486 #else /* ! _MULTI_DATAMODEL */ 25487 copysz = sizeof (mhioc_resv_desc_list_t); 25488 if (ddi_copyin(ptr->li, &resvlist, copysz, flag)) { 25489 SD_ERROR(SD_LOG_IOCTL_MHD, un, 25490 "sd_persistent_reservation_in_read_resv: " 25491 "failed ddi_copyin: mhioc_resv_desc_list_t\n"); 25492 rval = EFAULT; 25493 goto done; 25494 } 25495 #endif /* ! _MULTI_DATAMODEL */ 25496 25497 data_len = resvlist.listsize * SCSI3_RESV_DESC_LEN; 25498 data_len += (sizeof (sd_prin_readresv_t) - sizeof (caddr_t)); 25499 data_bufp = kmem_zalloc(data_len, KM_SLEEP); 25500 25501 rval = sd_send_scsi_PERSISTENT_RESERVE_IN(ssc, SD_READ_RESV, 25502 data_len, data_bufp); 25503 if (rval != 0) { 25504 if (rval == EIO) 25505 sd_ssc_assessment(ssc, SD_FMT_IGNORE_COMPROMISE); 25506 else 25507 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 25508 goto done; 25509 } 25510 in = (sd_prin_readresv_t *)data_bufp; 25511 ptr->generation = BE_32(in->generation); 25512 resvlist.listlen = BE_32(in->len) / SCSI3_RESV_DESC_LEN; 25513 25514 /* 25515 * Return the min(listsize, listlen) keys 25516 */ 25517 #ifdef _MULTI_DATAMODEL 25518 25519 switch (ddi_model_convert_from(flag & FMODELS)) { 25520 case DDI_MODEL_ILP32: 25521 resvlist32.listlen = resvlist.listlen; 25522 if (ddi_copyout(&resvlist32, ptr->li, copysz, flag)) { 25523 SD_ERROR(SD_LOG_IOCTL_MHD, un, 25524 "sd_persistent_reservation_in_read_resv: " 25525 "failed ddi_copyout: mhioc_resv_desc_list_t\n"); 25526 rval = EFAULT; 25527 goto done; 25528 } 25529 break; 25530 25531 case DDI_MODEL_NONE: 25532 if (ddi_copyout(&resvlist, ptr->li, copysz, flag)) { 25533 SD_ERROR(SD_LOG_IOCTL_MHD, un, 25534 "sd_persistent_reservation_in_read_resv: " 25535 "failed ddi_copyout: mhioc_resv_desc_list_t\n"); 25536 rval = EFAULT; 25537 goto done; 25538 } 25539 break; 25540 } 25541 25542 #else /* !
_MULTI_DATAMODEL */ 25543 25544 if (ddi_copyout(&resvlist, ptr->li, copysz, flag)) { 25545 SD_ERROR(SD_LOG_IOCTL_MHD, un, 25546 "sd_persistent_reservation_in_read_resv: " 25547 "failed ddi_copyout: mhioc_resv_desc_list_t\n"); 25548 rval = EFAULT; 25549 goto done; 25550 } 25551 25552 #endif /* ! _MULTI_DATAMODEL */ 25553 25554 readresv_ptr = (sd_readresv_desc_t *)&in->readresv_desc; 25555 bufp = resvlist.list; 25556 copysz = sizeof (mhioc_resv_desc_t); 25557 for (i = 0; i < min(resvlist.listlen, resvlist.listsize); 25558 i++, readresv_ptr++, bufp++) { 25559 25560 bcopy(&readresv_ptr->resvkey, &resvdesc.key, 25561 MHIOC_RESV_KEY_SIZE); 25562 resvdesc.type = readresv_ptr->type; 25563 resvdesc.scope = readresv_ptr->scope; 25564 resvdesc.scope_specific_addr = 25565 BE_32(readresv_ptr->scope_specific_addr); 25566 25567 if (ddi_copyout(&resvdesc, bufp, copysz, flag)) { 25568 SD_ERROR(SD_LOG_IOCTL_MHD, un, 25569 "sd_persistent_reservation_in_read_resv: " 25570 "failed ddi_copyout: resvlist\n"); 25571 rval = EFAULT; 25572 goto done; 25573 } 25574 } 25575 done: 25576 sd_ssc_fini(ssc); 25577 /* only if data_bufp is allocated, we need to free it */ 25578 if (data_bufp) { 25579 kmem_free(data_bufp, data_len); 25580 } 25581 return (rval); 25582 } 25583 25584 25585 /* 25586 * Function: sr_change_blkmode() 25587 * 25588 * Description: This routine is the driver entry point for handling CD-ROM 25589 * block mode ioctl requests. Support for returning and changing 25590 * the current block size in use by the device is implemented. The 25591 * LBA size is changed via a MODE SELECT Block Descriptor. 25592 * 25593 * This routine issues a mode sense with an allocation length of 25594 * 12 bytes for the mode page header and a single block descriptor. 25595 * 25596 * Arguments: dev - the device 'dev_t' 25597 * cmd - the request type; one of CDROMGBLKMODE (get) or 25598 * CDROMSBLKMODE (set) 25599 * data - current block size or requested block size 25600 * flag - this argument is a pass through to ddi_copyxxx() directly 25601 * from the mode argument of ioctl(). 25602 * 25603 * Return Code: the code returned by sd_send_scsi_cmd() 25604 * EINVAL if invalid arguments are provided 25605 * EFAULT if ddi_copyxxx() fails 25606 * ENXIO if fail ddi_get_soft_state 25607 * EIO if invalid mode sense block descriptor length 25608 * 25609 */ 25610 25611 static int 25612 sr_change_blkmode(dev_t dev, int cmd, intptr_t data, int flag) 25613 { 25614 struct sd_lun *un = NULL; 25615 struct mode_header *sense_mhp, *select_mhp; 25616 struct block_descriptor *sense_desc, *select_desc; 25617 int current_bsize; 25618 int rval = EINVAL; 25619 uchar_t *sense = NULL; 25620 uchar_t *select = NULL; 25621 sd_ssc_t *ssc; 25622 25623 ASSERT((cmd == CDROMGBLKMODE) || (cmd == CDROMSBLKMODE)); 25624 25625 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 25626 return (ENXIO); 25627 } 25628 25629 /* 25630 * The block length is changed via the Mode Select block descriptor, the 25631 * "Read/Write Error Recovery" mode page (0x1) contents are not actually 25632 * required as part of this routine. Therefore the mode sense allocation 25633 * length is specified to be the length of a mode page header and a 25634 * block descriptor. 
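 *
 * Concretely (illustrative arithmetic, assuming the usual group-0
 * sizes): BUFLEN_CHG_BLK_MODE = MODE_HEADER_LENGTH (4) +
 * MODE_BLK_DESC_LENGTH (8) = 12 bytes, and the current LBA size is
 * reassembled below from the descriptor bytes as
 * (blksize_hi << 16) | (blksize_mid << 8) | blksize_lo.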
25635 */ 25636 sense = kmem_zalloc(BUFLEN_CHG_BLK_MODE, KM_SLEEP); 25637 25638 ssc = sd_ssc_init(un); 25639 rval = sd_send_scsi_MODE_SENSE(ssc, CDB_GROUP0, sense, 25640 BUFLEN_CHG_BLK_MODE, MODEPAGE_ERR_RECOV, SD_PATH_STANDARD); 25641 sd_ssc_fini(ssc); 25642 if (rval != 0) { 25643 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 25644 "sr_change_blkmode: Mode Sense Failed\n"); 25645 kmem_free(sense, BUFLEN_CHG_BLK_MODE); 25646 return (rval); 25647 } 25648 25649 /* Check the block descriptor len to handle only 1 block descriptor */ 25650 sense_mhp = (struct mode_header *)sense; 25651 if ((sense_mhp->bdesc_length == 0) || 25652 (sense_mhp->bdesc_length > MODE_BLK_DESC_LENGTH)) { 25653 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 25654 "sr_change_blkmode: Mode Sense returned invalid block" 25655 " descriptor length\n"); 25656 kmem_free(sense, BUFLEN_CHG_BLK_MODE); 25657 return (EIO); 25658 } 25659 sense_desc = (struct block_descriptor *)(sense + MODE_HEADER_LENGTH); 25660 current_bsize = ((sense_desc->blksize_hi << 16) | 25661 (sense_desc->blksize_mid << 8) | sense_desc->blksize_lo); 25662 25663 /* Process command */ 25664 switch (cmd) { 25665 case CDROMGBLKMODE: 25666 /* Return the block size obtained during the mode sense */ 25667 if (ddi_copyout(&current_bsize, (void *)data, 25668 sizeof (int), flag) != 0) 25669 rval = EFAULT; 25670 break; 25671 case CDROMSBLKMODE: 25672 /* Validate the requested block size */ 25673 switch (data) { 25674 case CDROM_BLK_512: 25675 case CDROM_BLK_1024: 25676 case CDROM_BLK_2048: 25677 case CDROM_BLK_2056: 25678 case CDROM_BLK_2336: 25679 case CDROM_BLK_2340: 25680 case CDROM_BLK_2352: 25681 case CDROM_BLK_2368: 25682 case CDROM_BLK_2448: 25683 case CDROM_BLK_2646: 25684 case CDROM_BLK_2647: 25685 break; 25686 default: 25687 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 25688 "sr_change_blkmode: " 25689 "Block Size '%ld' Not Supported\n", data); 25690 kmem_free(sense, BUFLEN_CHG_BLK_MODE); 25691 return (EINVAL); 25692 } 25693 25694 /* 25695 * The current block size matches the requested block size so 25696 * there is no need to send the mode select to change the size 25697 */ 25698 if (current_bsize == data) { 25699 break; 25700 } 25701 25702 /* Build the select data for the requested block size */ 25703 select = kmem_zalloc(BUFLEN_CHG_BLK_MODE, KM_SLEEP); 25704 select_mhp = (struct mode_header *)select; 25705 select_desc = 25706 (struct block_descriptor *)(select + MODE_HEADER_LENGTH); 25707 /* 25708 * The LBA size is changed via the block descriptor, so the 25709 * descriptor is built according to the user data 25710 */ 25711 select_mhp->bdesc_length = MODE_BLK_DESC_LENGTH; 25712 select_desc->blksize_hi = (char)(((data) & 0x00ff0000) >> 16); 25713 select_desc->blksize_mid = (char)(((data) & 0x0000ff00) >> 8); 25714 select_desc->blksize_lo = (char)((data) & 0x000000ff); 25715 25716 /* Send the mode select for the requested block size */ 25717 ssc = sd_ssc_init(un); 25718 rval = sd_send_scsi_MODE_SELECT(ssc, CDB_GROUP0, 25719 select, BUFLEN_CHG_BLK_MODE, SD_DONTSAVE_PAGE, 25720 SD_PATH_STANDARD); 25721 sd_ssc_fini(ssc); 25722 if (rval != 0) { 25723 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 25724 "sr_change_blkmode: Mode Select Failed\n"); 25725 /* 25726 * The mode select failed for the requested block size, 25727 * so reset the data for the original block size and 25728 * send it to the target. The error is indicated by the 25729 * return value for the failed mode select.
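 * (For illustration: a 2048-byte size travels in these three
 * fields as blksize_hi = 0x00, blksize_mid = 0x08 and
 * blksize_lo = 0x00, since 0x000800 == 2048.)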
25730 */ 25731 select_desc->blksize_hi = sense_desc->blksize_hi; 25732 select_desc->blksize_mid = sense_desc->blksize_mid; 25733 select_desc->blksize_lo = sense_desc->blksize_lo; 25734 ssc = sd_ssc_init(un); 25735 (void) sd_send_scsi_MODE_SELECT(ssc, CDB_GROUP0, 25736 select, BUFLEN_CHG_BLK_MODE, SD_DONTSAVE_PAGE, 25737 SD_PATH_STANDARD); 25738 sd_ssc_fini(ssc); 25739 } else { 25740 ASSERT(!mutex_owned(SD_MUTEX(un))); 25741 mutex_enter(SD_MUTEX(un)); 25742 sd_update_block_info(un, (uint32_t)data, 0); 25743 mutex_exit(SD_MUTEX(un)); 25744 } 25745 break; 25746 default: 25747 /* should not reach here, but check anyway */ 25748 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 25749 "sr_change_blkmode: Command '%x' Not Supported\n", cmd); 25750 rval = EINVAL; 25751 break; 25752 } 25753 25754 if (select) { 25755 kmem_free(select, BUFLEN_CHG_BLK_MODE); 25756 } 25757 if (sense) { 25758 kmem_free(sense, BUFLEN_CHG_BLK_MODE); 25759 } 25760 return (rval); 25761 } 25762 25763 25764 /* 25765 * Note: The following sr_change_speed() and sr_atapi_change_speed() routines 25766 * implement driver support for getting and setting the CD speed. The command 25767 * set used will be based on the device type. If the device has not been 25768 * identified as MMC the Toshiba vendor specific mode page will be used. If 25769 * the device is MMC but does not support the Real Time Streaming feature 25770 * the SET CD SPEED command will be used to set speed and mode page 0x2A will 25771 * be used to read the speed. 25772 */ 25773 25774 /* 25775 * Function: sr_change_speed() 25776 * 25777 * Description: This routine is the driver entry point for handling CD-ROM 25778 * drive speed ioctl requests for devices supporting the Toshiba 25779 * vendor specific drive speed mode page. Support for returning 25780 * and changing the current drive speed in use by the device is 25781 * implemented. 25782 * 25783 * Arguments: dev - the device 'dev_t' 25784 * cmd - the request type; one of CDROMGDRVSPEED (get) or 25785 * CDROMSDRVSPEED (set) 25786 * data - current drive speed or requested drive speed 25787 * flag - this argument is a pass through to ddi_copyxxx() directly 25788 * from the mode argument of ioctl(). 25789 * 25790 * Return Code: the code returned by sd_send_scsi_cmd() 25791 * EINVAL if invalid arguments are provided 25792 * EFAULT if ddi_copyxxx() fails 25793 * ENXIO if fail ddi_get_soft_state 25794 * EIO if invalid mode sense block descriptor length 25795 */ 25796 25797 static int 25798 sr_change_speed(dev_t dev, int cmd, intptr_t data, int flag) 25799 { 25800 struct sd_lun *un = NULL; 25801 struct mode_header *sense_mhp, *select_mhp; 25802 struct mode_speed *sense_page, *select_page; 25803 int current_speed; 25804 int rval = EINVAL; 25805 int bd_len; 25806 uchar_t *sense = NULL; 25807 uchar_t *select = NULL; 25808 sd_ssc_t *ssc; 25809 25810 ASSERT((cmd == CDROMGDRVSPEED) || (cmd == CDROMSDRVSPEED)); 25811 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 25812 return (ENXIO); 25813 } 25814 25815 /* 25816 * Note: The drive speed is being modified here according to a Toshiba 25817 * vendor specific mode page (0x31). 
	 */
	sense = kmem_zalloc(BUFLEN_MODE_CDROM_SPEED, KM_SLEEP);

	ssc = sd_ssc_init(un);
	rval = sd_send_scsi_MODE_SENSE(ssc, CDB_GROUP0, sense,
	    BUFLEN_MODE_CDROM_SPEED, CDROM_MODE_SPEED,
	    SD_PATH_STANDARD);
	sd_ssc_fini(ssc);
	if (rval != 0) {
		scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
		    "sr_change_speed: Mode Sense Failed\n");
		kmem_free(sense, BUFLEN_MODE_CDROM_SPEED);
		return (rval);
	}
	sense_mhp = (struct mode_header *)sense;

	/* Check the block descriptor len to handle only 1 block descriptor */
	bd_len = sense_mhp->bdesc_length;
	if (bd_len > MODE_BLK_DESC_LENGTH) {
		scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
		    "sr_change_speed: Mode Sense returned invalid block "
		    "descriptor length\n");
		kmem_free(sense, BUFLEN_MODE_CDROM_SPEED);
		return (EIO);
	}

	sense_page = (struct mode_speed *)
	    (sense + MODE_HEADER_LENGTH + sense_mhp->bdesc_length);
	current_speed = sense_page->speed;

	/* Process command */
	switch (cmd) {
	case CDROMGDRVSPEED:
		/* Return the drive speed obtained during the mode sense */
		if (current_speed == 0x2) {
			current_speed = CDROM_TWELVE_SPEED;
		}
		if (ddi_copyout(&current_speed, (void *)data,
		    sizeof (int), flag) != 0) {
			rval = EFAULT;
		}
		break;
	case CDROMSDRVSPEED:
		/* Validate the requested drive speed */
		switch ((uchar_t)data) {
		case CDROM_TWELVE_SPEED:
			data = 0x2;
			/*FALLTHROUGH*/
		case CDROM_NORMAL_SPEED:
		case CDROM_DOUBLE_SPEED:
		case CDROM_QUAD_SPEED:
		case CDROM_MAXIMUM_SPEED:
			break;
		default:
			scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
			    "sr_change_speed: "
			    "Drive Speed '%d' Not Supported\n", (uchar_t)data);
			kmem_free(sense, BUFLEN_MODE_CDROM_SPEED);
			return (EINVAL);
		}

		/*
		 * The current drive speed matches the requested drive speed so
		 * there is no need to send the mode select to change the speed
		 */
		if (current_speed == data) {
			break;
		}

		/* Build the select data for the requested drive speed */
		select = kmem_zalloc(BUFLEN_MODE_CDROM_SPEED, KM_SLEEP);
		select_mhp = (struct mode_header *)select;
		select_mhp->bdesc_length = 0;
		select_page =
		    (struct mode_speed *)(select + MODE_HEADER_LENGTH);
		select_page->mode_page.code = CDROM_MODE_SPEED;
		select_page->mode_page.length = 2;
		select_page->speed = (uchar_t)data;

		/* Send the mode select for the requested drive speed */
		ssc = sd_ssc_init(un);
		rval = sd_send_scsi_MODE_SELECT(ssc, CDB_GROUP0, select,
		    MODEPAGE_CDROM_SPEED_LEN + MODE_HEADER_LENGTH,
		    SD_DONTSAVE_PAGE, SD_PATH_STANDARD);
		sd_ssc_fini(ssc);
		if (rval != 0) {
			/*
			 * The mode select failed for the requested drive speed,
			 * so reset the data for the original drive speed and
			 * send it to the target. The error is indicated by the
			 * return value for the failed mode select.
			 */
			scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
			    "sr_change_speed: Mode Select Failed\n");
			select_page->speed = sense_page->speed;
			ssc = sd_ssc_init(un);
			(void) sd_send_scsi_MODE_SELECT(ssc, CDB_GROUP0, select,
			    MODEPAGE_CDROM_SPEED_LEN + MODE_HEADER_LENGTH,
			    SD_DONTSAVE_PAGE, SD_PATH_STANDARD);
			sd_ssc_fini(ssc);
		}
		break;
	default:
		/* should not reach here, but check anyway */
		scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
		    "sr_change_speed: Command '%x' Not Supported\n", cmd);
		rval = EINVAL;
		break;
	}

	if (select) {
		kmem_free(select, BUFLEN_MODE_CDROM_SPEED);
	}
	if (sense) {
		kmem_free(sense, BUFLEN_MODE_CDROM_SPEED);
	}

	return (rval);
}


/*
 * Function: sr_atapi_change_speed()
 *
 * Description: This routine is the driver entry point for handling CD-ROM
 *		drive speed ioctl requests for MMC devices that do not support
 *		the Real Time Streaming feature (0x107).
 *
 *		Note: This routine will use the SET SPEED command which may not
 *		be supported by all devices.
 *
 * Arguments: dev - the device 'dev_t'
 *		cmd - the request type; one of CDROMGDRVSPEED (get) or
 *		CDROMSDRVSPEED (set)
 *		data - current drive speed or requested drive speed
 *		flag - this argument is a pass through to ddi_copyxxx() directly
 *		from the mode argument of ioctl().
 *
 * Return Code: the code returned by sd_send_scsi_cmd()
 *		EINVAL if invalid arguments are provided
 *		EFAULT if ddi_copyxxx() fails
 *		ENXIO if ddi_get_soft_state fails
 *		EIO if invalid mode sense block descriptor length
 */

static int
sr_atapi_change_speed(dev_t dev, int cmd, intptr_t data, int flag)
{
	struct sd_lun			*un;
	struct uscsi_cmd		*com = NULL;
	struct mode_header_grp2		*sense_mhp;
	uchar_t				*sense_page;
	uchar_t				*sense = NULL;
	char				cdb[CDB_GROUP5];
	int				bd_len;
	int				current_speed = 0;
	int				max_speed = 0;
	int				rval;
	sd_ssc_t			*ssc;

	ASSERT((cmd == CDROMGDRVSPEED) || (cmd == CDROMSDRVSPEED));

	if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) {
		return (ENXIO);
	}

	sense = kmem_zalloc(BUFLEN_MODE_CDROM_CAP, KM_SLEEP);

	ssc = sd_ssc_init(un);
	rval = sd_send_scsi_MODE_SENSE(ssc, CDB_GROUP1, sense,
	    BUFLEN_MODE_CDROM_CAP, MODEPAGE_CDROM_CAP,
	    SD_PATH_STANDARD);
	sd_ssc_fini(ssc);
	if (rval != 0) {
		scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
		    "sr_atapi_change_speed: Mode Sense Failed\n");
		kmem_free(sense, BUFLEN_MODE_CDROM_CAP);
		return (rval);
	}

	/* Check the block descriptor len to handle only 1 block descriptor */
	sense_mhp = (struct mode_header_grp2 *)sense;
	bd_len = (sense_mhp->bdesc_length_hi << 8) | sense_mhp->bdesc_length_lo;
	if (bd_len > MODE_BLK_DESC_LENGTH) {
		scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
		    "sr_atapi_change_speed: Mode Sense returned invalid "
		    "block descriptor length\n");
		kmem_free(sense, BUFLEN_MODE_CDROM_CAP);
		return (EIO);
	}

	/* Calculate the current and maximum drive speeds */
	sense_page = (uchar_t *)(sense + MODE_HEADER_LENGTH_GRP2 + bd_len);
	current_speed = (sense_page[14] << 8) | sense_page[15];
	max_speed = (sense_page[8] << 8) | sense_page[9];

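	/*
	 * Illustrative decode (not in the original source): assuming the
	 * usual SD_SPEED_1X of 176 KB/sec, a returned byte pair of
	 * 0x05 0x80 in sense_page[14]/sense_page[15] yields
	 * (0x05 << 8) | 0x80 = 1408 KB/sec, which the CDROMGDRVSPEED
	 * case below reports as 1408 / SD_SPEED_1X = 8, i.e. 8X.
	 */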
	/* Process the command */
	switch (cmd) {
	case CDROMGDRVSPEED:
		current_speed /= SD_SPEED_1X;
		if (ddi_copyout(&current_speed, (void *)data,
		    sizeof (int), flag) != 0)
			rval = EFAULT;
		break;
	case CDROMSDRVSPEED:
		/* Convert the speed code to KB/sec */
		switch ((uchar_t)data) {
		case CDROM_NORMAL_SPEED:
			current_speed = SD_SPEED_1X;
			break;
		case CDROM_DOUBLE_SPEED:
			current_speed = 2 * SD_SPEED_1X;
			break;
		case CDROM_QUAD_SPEED:
			current_speed = 4 * SD_SPEED_1X;
			break;
		case CDROM_TWELVE_SPEED:
			current_speed = 12 * SD_SPEED_1X;
			break;
		case CDROM_MAXIMUM_SPEED:
			current_speed = 0xffff;
			break;
		default:
			scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
			    "sr_atapi_change_speed: invalid drive speed %d\n",
			    (uchar_t)data);
			kmem_free(sense, BUFLEN_MODE_CDROM_CAP);
			return (EINVAL);
		}

		/* Check the request against the drive's max speed. */
		if (current_speed != 0xffff) {
			if (current_speed > max_speed) {
				kmem_free(sense, BUFLEN_MODE_CDROM_CAP);
				return (EINVAL);
			}
		}

		/*
		 * Build and send the SET SPEED command
		 *
		 * Note: The SET SPEED (0xBB) command used in this routine is
		 * obsolete per the SCSI MMC spec but still supported in the
		 * MT FUJI vendor spec. Most equipment is adhering to MT FUJI,
		 * therefore the command is still implemented in this routine.
		 */
		bzero(cdb, sizeof (cdb));
		cdb[0] = (char)SCMD_SET_CDROM_SPEED;
		cdb[2] = (uchar_t)(current_speed >> 8);
		cdb[3] = (uchar_t)current_speed;
		com = kmem_zalloc(sizeof (*com), KM_SLEEP);
		com->uscsi_cdb = (caddr_t)cdb;
		com->uscsi_cdblen = CDB_GROUP5;
		com->uscsi_bufaddr = NULL;
		com->uscsi_buflen = 0;
		com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT;
		rval = sd_send_scsi_cmd(dev, com, FKIOCTL, 0, SD_PATH_STANDARD);
		break;
	default:
		scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
		    "sr_atapi_change_speed: Command '%x' Not Supported\n", cmd);
		rval = EINVAL;
	}

	if (sense) {
		kmem_free(sense, BUFLEN_MODE_CDROM_CAP);
	}
	if (com) {
		kmem_free(com, sizeof (*com));
	}
	return (rval);
}


/*
 * Function: sr_pause_resume()
 *
 * Description: This routine is the driver entry point for handling CD-ROM
 *		pause/resume ioctl requests. This only affects the audio play
 *		operation.
 *
 * Arguments: dev - the device 'dev_t'
 *		cmd - the request type; one of CDROMPAUSE or CDROMRESUME, used
 *		for setting the resume bit of the cdb.
26104 * 26105 * Return Code: the code returned by sd_send_scsi_cmd() 26106 * EINVAL if invalid mode specified 26107 * 26108 */ 26109 26110 static int 26111 sr_pause_resume(dev_t dev, int cmd) 26112 { 26113 struct sd_lun *un; 26114 struct uscsi_cmd *com; 26115 char cdb[CDB_GROUP1]; 26116 int rval; 26117 26118 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 26119 return (ENXIO); 26120 } 26121 26122 com = kmem_zalloc(sizeof (*com), KM_SLEEP); 26123 bzero(cdb, CDB_GROUP1); 26124 cdb[0] = SCMD_PAUSE_RESUME; 26125 switch (cmd) { 26126 case CDROMRESUME: 26127 cdb[8] = 1; 26128 break; 26129 case CDROMPAUSE: 26130 cdb[8] = 0; 26131 break; 26132 default: 26133 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, "sr_pause_resume:" 26134 " Command '%x' Not Supported\n", cmd); 26135 rval = EINVAL; 26136 goto done; 26137 } 26138 26139 com->uscsi_cdb = cdb; 26140 com->uscsi_cdblen = CDB_GROUP1; 26141 com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT; 26142 26143 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_SYSSPACE, 26144 SD_PATH_STANDARD); 26145 26146 done: 26147 kmem_free(com, sizeof (*com)); 26148 return (rval); 26149 } 26150 26151 26152 /* 26153 * Function: sr_play_msf() 26154 * 26155 * Description: This routine is the driver entry point for handling CD-ROM 26156 * ioctl requests to output the audio signals at the specified 26157 * starting address and continue the audio play until the specified 26158 * ending address (CDROMPLAYMSF) The address is in Minute Second 26159 * Frame (MSF) format. 26160 * 26161 * Arguments: dev - the device 'dev_t' 26162 * data - pointer to user provided audio msf structure, 26163 * specifying start/end addresses. 26164 * flag - this argument is a pass through to ddi_copyxxx() 26165 * directly from the mode argument of ioctl(). 
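 *
 *		Worked example (illustrative): a start address of 2 min,
 *		30 sec, frame 15 is placed in cdb[3..5] as 0x02/0x1E/0x0F,
 *		or as 0x02/0x30/0x15 when un_f_cfg_playmsf_bcd calls for
 *		BCD encoding via BYTE_TO_BCD().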
26166 * 26167 * Return Code: the code returned by sd_send_scsi_cmd() 26168 * EFAULT if ddi_copyxxx() fails 26169 * ENXIO if fail ddi_get_soft_state 26170 * EINVAL if data pointer is NULL 26171 */ 26172 26173 static int 26174 sr_play_msf(dev_t dev, caddr_t data, int flag) 26175 { 26176 struct sd_lun *un; 26177 struct uscsi_cmd *com; 26178 struct cdrom_msf msf_struct; 26179 struct cdrom_msf *msf = &msf_struct; 26180 char cdb[CDB_GROUP1]; 26181 int rval; 26182 26183 if (data == NULL) { 26184 return (EINVAL); 26185 } 26186 26187 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 26188 return (ENXIO); 26189 } 26190 26191 if (ddi_copyin(data, msf, sizeof (struct cdrom_msf), flag)) { 26192 return (EFAULT); 26193 } 26194 26195 com = kmem_zalloc(sizeof (*com), KM_SLEEP); 26196 bzero(cdb, CDB_GROUP1); 26197 cdb[0] = SCMD_PLAYAUDIO_MSF; 26198 if (un->un_f_cfg_playmsf_bcd == TRUE) { 26199 cdb[3] = BYTE_TO_BCD(msf->cdmsf_min0); 26200 cdb[4] = BYTE_TO_BCD(msf->cdmsf_sec0); 26201 cdb[5] = BYTE_TO_BCD(msf->cdmsf_frame0); 26202 cdb[6] = BYTE_TO_BCD(msf->cdmsf_min1); 26203 cdb[7] = BYTE_TO_BCD(msf->cdmsf_sec1); 26204 cdb[8] = BYTE_TO_BCD(msf->cdmsf_frame1); 26205 } else { 26206 cdb[3] = msf->cdmsf_min0; 26207 cdb[4] = msf->cdmsf_sec0; 26208 cdb[5] = msf->cdmsf_frame0; 26209 cdb[6] = msf->cdmsf_min1; 26210 cdb[7] = msf->cdmsf_sec1; 26211 cdb[8] = msf->cdmsf_frame1; 26212 } 26213 com->uscsi_cdb = cdb; 26214 com->uscsi_cdblen = CDB_GROUP1; 26215 com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT; 26216 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_SYSSPACE, 26217 SD_PATH_STANDARD); 26218 kmem_free(com, sizeof (*com)); 26219 return (rval); 26220 } 26221 26222 26223 /* 26224 * Function: sr_play_trkind() 26225 * 26226 * Description: This routine is the driver entry point for handling CD-ROM 26227 * ioctl requests to output the audio signals at the specified 26228 * starting address and continue the audio play until the specified 26229 * ending address (CDROMPLAYTRKIND). The address is in Track Index 26230 * format. 26231 * 26232 * Arguments: dev - the device 'dev_t' 26233 * data - pointer to user provided audio track/index structure, 26234 * specifying start/end addresses. 26235 * flag - this argument is a pass through to ddi_copyxxx() 26236 * directly from the mode argument of ioctl(). 
26237 * 26238 * Return Code: the code returned by sd_send_scsi_cmd() 26239 * EFAULT if ddi_copyxxx() fails 26240 * ENXIO if fail ddi_get_soft_state 26241 * EINVAL if data pointer is NULL 26242 */ 26243 26244 static int 26245 sr_play_trkind(dev_t dev, caddr_t data, int flag) 26246 { 26247 struct cdrom_ti ti_struct; 26248 struct cdrom_ti *ti = &ti_struct; 26249 struct uscsi_cmd *com = NULL; 26250 char cdb[CDB_GROUP1]; 26251 int rval; 26252 26253 if (data == NULL) { 26254 return (EINVAL); 26255 } 26256 26257 if (ddi_copyin(data, ti, sizeof (struct cdrom_ti), flag)) { 26258 return (EFAULT); 26259 } 26260 26261 com = kmem_zalloc(sizeof (*com), KM_SLEEP); 26262 bzero(cdb, CDB_GROUP1); 26263 cdb[0] = SCMD_PLAYAUDIO_TI; 26264 cdb[4] = ti->cdti_trk0; 26265 cdb[5] = ti->cdti_ind0; 26266 cdb[7] = ti->cdti_trk1; 26267 cdb[8] = ti->cdti_ind1; 26268 com->uscsi_cdb = cdb; 26269 com->uscsi_cdblen = CDB_GROUP1; 26270 com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT; 26271 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_SYSSPACE, 26272 SD_PATH_STANDARD); 26273 kmem_free(com, sizeof (*com)); 26274 return (rval); 26275 } 26276 26277 26278 /* 26279 * Function: sr_read_all_subcodes() 26280 * 26281 * Description: This routine is the driver entry point for handling CD-ROM 26282 * ioctl requests to return raw subcode data while the target is 26283 * playing audio (CDROMSUBCODE). 26284 * 26285 * Arguments: dev - the device 'dev_t' 26286 * data - pointer to user provided cdrom subcode structure, 26287 * specifying the transfer length and address. 26288 * flag - this argument is a pass through to ddi_copyxxx() 26289 * directly from the mode argument of ioctl(). 26290 * 26291 * Return Code: the code returned by sd_send_scsi_cmd() 26292 * EFAULT if ddi_copyxxx() fails 26293 * ENXIO if fail ddi_get_soft_state 26294 * EINVAL if data pointer is NULL 26295 */ 26296 26297 static int 26298 sr_read_all_subcodes(dev_t dev, caddr_t data, int flag) 26299 { 26300 struct sd_lun *un = NULL; 26301 struct uscsi_cmd *com = NULL; 26302 struct cdrom_subcode *subcode = NULL; 26303 int rval; 26304 size_t buflen; 26305 char cdb[CDB_GROUP5]; 26306 26307 #ifdef _MULTI_DATAMODEL 26308 /* To support ILP32 applications in an LP64 world */ 26309 struct cdrom_subcode32 cdrom_subcode32; 26310 struct cdrom_subcode32 *cdsc32 = &cdrom_subcode32; 26311 #endif 26312 if (data == NULL) { 26313 return (EINVAL); 26314 } 26315 26316 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 26317 return (ENXIO); 26318 } 26319 26320 subcode = kmem_zalloc(sizeof (struct cdrom_subcode), KM_SLEEP); 26321 26322 #ifdef _MULTI_DATAMODEL 26323 switch (ddi_model_convert_from(flag & FMODELS)) { 26324 case DDI_MODEL_ILP32: 26325 if (ddi_copyin(data, cdsc32, sizeof (*cdsc32), flag)) { 26326 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 26327 "sr_read_all_subcodes: ddi_copyin Failed\n"); 26328 kmem_free(subcode, sizeof (struct cdrom_subcode)); 26329 return (EFAULT); 26330 } 26331 /* Convert the ILP32 uscsi data from the application to LP64 */ 26332 cdrom_subcode32tocdrom_subcode(cdsc32, subcode); 26333 break; 26334 case DDI_MODEL_NONE: 26335 if (ddi_copyin(data, subcode, 26336 sizeof (struct cdrom_subcode), flag)) { 26337 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 26338 "sr_read_all_subcodes: ddi_copyin Failed\n"); 26339 kmem_free(subcode, sizeof (struct cdrom_subcode)); 26340 return (EFAULT); 26341 } 26342 break; 26343 } 26344 #else /* ! 
_MULTI_DATAMODEL */
	if (ddi_copyin(data, subcode, sizeof (struct cdrom_subcode), flag)) {
		scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
		    "sr_read_all_subcodes: ddi_copyin Failed\n");
		kmem_free(subcode, sizeof (struct cdrom_subcode));
		return (EFAULT);
	}
#endif /* _MULTI_DATAMODEL */

	/*
	 * Since MMC-2 expects max 3 bytes for length, check if the
	 * length input is greater than 3 bytes
	 */
	if ((subcode->cdsc_length & 0xFF000000) != 0) {
		scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
		    "sr_read_all_subcodes: "
		    "cdrom transfer length too large: %d (limit %d)\n",
		    subcode->cdsc_length, 0xFFFFFF);
		kmem_free(subcode, sizeof (struct cdrom_subcode));
		return (EINVAL);
	}

	buflen = CDROM_BLK_SUBCODE * subcode->cdsc_length;
	com = kmem_zalloc(sizeof (*com), KM_SLEEP);
	bzero(cdb, CDB_GROUP5);

	if (un->un_f_mmc_cap == TRUE) {
		cdb[0] = (char)SCMD_READ_CD;
		cdb[2] = (char)0xff;
		cdb[3] = (char)0xff;
		cdb[4] = (char)0xff;
		cdb[5] = (char)0xff;
		cdb[6] = (((subcode->cdsc_length) & 0x00ff0000) >> 16);
		cdb[7] = (((subcode->cdsc_length) & 0x0000ff00) >> 8);
		cdb[8] = ((subcode->cdsc_length) & 0x000000ff);
		cdb[10] = 1;
	} else {
		/*
		 * Note: A vendor specific command (0xDF) is being used here to
		 * request a read of all subcodes.
		 */
		cdb[0] = (char)SCMD_READ_ALL_SUBCODES;
		cdb[6] = (((subcode->cdsc_length) & 0xff000000) >> 24);
		cdb[7] = (((subcode->cdsc_length) & 0x00ff0000) >> 16);
		cdb[8] = (((subcode->cdsc_length) & 0x0000ff00) >> 8);
		cdb[9] = ((subcode->cdsc_length) & 0x000000ff);
	}
	com->uscsi_cdb = cdb;
	com->uscsi_cdblen = CDB_GROUP5;
	com->uscsi_bufaddr = (caddr_t)subcode->cdsc_addr;
	com->uscsi_buflen = buflen;
	com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT|USCSI_READ;
	rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_USERSPACE,
	    SD_PATH_STANDARD);
	kmem_free(subcode, sizeof (struct cdrom_subcode));
	kmem_free(com, sizeof (*com));
	return (rval);
}


/*
 * Function: sr_read_subchannel()
 *
 * Description: This routine is the driver entry point for handling CD-ROM
 *		ioctl requests to return the Q sub-channel data of the CD
 *		current position block. (CDROMSUBCHNL) The data includes the
 *		track number, index number, absolute CD-ROM address (LBA or MSF
 *		format per the user), track relative CD-ROM address (LBA or MSF
 *		format per the user), control data and audio status.
 *
 * Arguments: dev - the device 'dev_t'
 *		data - pointer to user provided cdrom sub-channel structure
 *		flag - this argument is a pass through to ddi_copyxxx()
 *		directly from the mode argument of ioctl().
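 *
 *		Worked example (illustrative): a returned buffer[5] of 0x14
 *		is split by the masks below into cdsc_adr = 0x10 (ADR 1,
 *		the Q channel carries current position data) and
 *		cdsc_ctrl = 0x04 (a data track).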
26418 * 26419 * Return Code: the code returned by sd_send_scsi_cmd() 26420 * EFAULT if ddi_copyxxx() fails 26421 * ENXIO if fail ddi_get_soft_state 26422 * EINVAL if data pointer is NULL 26423 */ 26424 26425 static int 26426 sr_read_subchannel(dev_t dev, caddr_t data, int flag) 26427 { 26428 struct sd_lun *un; 26429 struct uscsi_cmd *com; 26430 struct cdrom_subchnl subchanel; 26431 struct cdrom_subchnl *subchnl = &subchanel; 26432 char cdb[CDB_GROUP1]; 26433 caddr_t buffer; 26434 int rval; 26435 26436 if (data == NULL) { 26437 return (EINVAL); 26438 } 26439 26440 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL || 26441 (un->un_state == SD_STATE_OFFLINE)) { 26442 return (ENXIO); 26443 } 26444 26445 if (ddi_copyin(data, subchnl, sizeof (struct cdrom_subchnl), flag)) { 26446 return (EFAULT); 26447 } 26448 26449 buffer = kmem_zalloc((size_t)16, KM_SLEEP); 26450 bzero(cdb, CDB_GROUP1); 26451 cdb[0] = SCMD_READ_SUBCHANNEL; 26452 /* Set the MSF bit based on the user requested address format */ 26453 cdb[1] = (subchnl->cdsc_format & CDROM_LBA) ? 0 : 0x02; 26454 /* 26455 * Set the Q bit in byte 2 to indicate that Q sub-channel data be 26456 * returned 26457 */ 26458 cdb[2] = 0x40; 26459 /* 26460 * Set byte 3 to specify the return data format. A value of 0x01 26461 * indicates that the CD-ROM current position should be returned. 26462 */ 26463 cdb[3] = 0x01; 26464 cdb[8] = 0x10; 26465 com = kmem_zalloc(sizeof (*com), KM_SLEEP); 26466 com->uscsi_cdb = cdb; 26467 com->uscsi_cdblen = CDB_GROUP1; 26468 com->uscsi_bufaddr = buffer; 26469 com->uscsi_buflen = 16; 26470 com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT|USCSI_READ; 26471 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_SYSSPACE, 26472 SD_PATH_STANDARD); 26473 if (rval != 0) { 26474 kmem_free(buffer, 16); 26475 kmem_free(com, sizeof (*com)); 26476 return (rval); 26477 } 26478 26479 /* Process the returned Q sub-channel data */ 26480 subchnl->cdsc_audiostatus = buffer[1]; 26481 subchnl->cdsc_adr = (buffer[5] & 0xF0); 26482 subchnl->cdsc_ctrl = (buffer[5] & 0x0F); 26483 subchnl->cdsc_trk = buffer[6]; 26484 subchnl->cdsc_ind = buffer[7]; 26485 if (subchnl->cdsc_format & CDROM_LBA) { 26486 subchnl->cdsc_absaddr.lba = 26487 ((uchar_t)buffer[8] << 24) + ((uchar_t)buffer[9] << 16) + 26488 ((uchar_t)buffer[10] << 8) + ((uchar_t)buffer[11]); 26489 subchnl->cdsc_reladdr.lba = 26490 ((uchar_t)buffer[12] << 24) + ((uchar_t)buffer[13] << 16) + 26491 ((uchar_t)buffer[14] << 8) + ((uchar_t)buffer[15]); 26492 } else if (un->un_f_cfg_readsub_bcd == TRUE) { 26493 subchnl->cdsc_absaddr.msf.minute = BCD_TO_BYTE(buffer[9]); 26494 subchnl->cdsc_absaddr.msf.second = BCD_TO_BYTE(buffer[10]); 26495 subchnl->cdsc_absaddr.msf.frame = BCD_TO_BYTE(buffer[11]); 26496 subchnl->cdsc_reladdr.msf.minute = BCD_TO_BYTE(buffer[13]); 26497 subchnl->cdsc_reladdr.msf.second = BCD_TO_BYTE(buffer[14]); 26498 subchnl->cdsc_reladdr.msf.frame = BCD_TO_BYTE(buffer[15]); 26499 } else { 26500 subchnl->cdsc_absaddr.msf.minute = buffer[9]; 26501 subchnl->cdsc_absaddr.msf.second = buffer[10]; 26502 subchnl->cdsc_absaddr.msf.frame = buffer[11]; 26503 subchnl->cdsc_reladdr.msf.minute = buffer[13]; 26504 subchnl->cdsc_reladdr.msf.second = buffer[14]; 26505 subchnl->cdsc_reladdr.msf.frame = buffer[15]; 26506 } 26507 kmem_free(buffer, 16); 26508 kmem_free(com, sizeof (*com)); 26509 if (ddi_copyout(subchnl, data, sizeof (struct cdrom_subchnl), flag) 26510 != 0) { 26511 return (EFAULT); 26512 } 26513 return (rval); 26514 } 26515 26516 26517 /* 26518 * Function: sr_read_tocentry() 26519 * 
 * Description: This routine is the driver entry point for handling CD-ROM
 *		ioctl requests to read from the Table of Contents (TOC)
 *		(CDROMREADTOCENTRY). This routine provides the ADR and CTRL
 *		fields, the starting address (LBA or MSF format per the user)
 *		and the data mode if the user specified track is a data track.
 *
 *		Note: The READ HEADER (0x44) command used in this routine is
 *		obsolete per the SCSI MMC spec but still supported in the
 *		MT FUJI vendor spec. Most equipment is adhering to MT FUJI,
 *		therefore the command is still implemented in this routine.
 *
 * Arguments: dev - the device 'dev_t'
 *		data - pointer to user provided toc entry structure,
 *		specifying the track # and the address format
 *		(LBA or MSF).
 *		flag - this argument is a pass through to ddi_copyxxx()
 *		directly from the mode argument of ioctl().
 *
 * Return Code: the code returned by sd_send_scsi_cmd()
 *		EFAULT if ddi_copyxxx() fails
 *		ENXIO if ddi_get_soft_state fails
 *		EINVAL if data pointer is NULL
 */

static int
sr_read_tocentry(dev_t dev, caddr_t data, int flag)
{
	struct sd_lun		*un = NULL;
	struct uscsi_cmd	*com;
	struct cdrom_tocentry	toc_entry;
	struct cdrom_tocentry	*entry = &toc_entry;
	caddr_t			buffer;
	int			rval;
	char			cdb[CDB_GROUP1];

	if (data == NULL) {
		return (EINVAL);
	}

	if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL ||
	    (un->un_state == SD_STATE_OFFLINE)) {
		return (ENXIO);
	}

	if (ddi_copyin(data, entry, sizeof (struct cdrom_tocentry), flag)) {
		return (EFAULT);
	}

	/* Validate the requested track and address format */
	if (!(entry->cdte_format & (CDROM_LBA | CDROM_MSF))) {
		return (EINVAL);
	}

	if (entry->cdte_track == 0) {
		return (EINVAL);
	}

	buffer = kmem_zalloc((size_t)12, KM_SLEEP);
	com = kmem_zalloc(sizeof (*com), KM_SLEEP);
	bzero(cdb, CDB_GROUP1);

	cdb[0] = SCMD_READ_TOC;
	/* Set the MSF bit based on the user requested address format */
	cdb[1] = ((entry->cdte_format & CDROM_LBA) ? 0 : 2);
	if (un->un_f_cfg_read_toc_trk_bcd == TRUE) {
		cdb[6] = BYTE_TO_BCD(entry->cdte_track);
	} else {
		cdb[6] = entry->cdte_track;
	}

	/*
	 * Bytes 7 & 8 are the 12 byte allocation length for a single entry.
	 * (4 byte TOC response header + 8 byte track descriptor)
	 */
	cdb[8] = 12;
	com->uscsi_cdb = cdb;
	com->uscsi_cdblen = CDB_GROUP1;
	com->uscsi_bufaddr = buffer;
	com->uscsi_buflen = 0x0C;
	com->uscsi_flags = (USCSI_DIAGNOSE | USCSI_SILENT | USCSI_READ);
	rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_SYSSPACE,
	    SD_PATH_STANDARD);
	if (rval != 0) {
		kmem_free(buffer, 12);
		kmem_free(com, sizeof (*com));
		return (rval);
	}

	/* Process the toc entry */
	entry->cdte_adr = (buffer[5] & 0xF0) >> 4;
	entry->cdte_ctrl = (buffer[5] & 0x0F);
	if (entry->cdte_format & CDROM_LBA) {
		entry->cdte_addr.lba =
		    ((uchar_t)buffer[8] << 24) + ((uchar_t)buffer[9] << 16) +
		    ((uchar_t)buffer[10] << 8) + ((uchar_t)buffer[11]);
	} else if (un->un_f_cfg_read_toc_addr_bcd == TRUE) {
		entry->cdte_addr.msf.minute = BCD_TO_BYTE(buffer[9]);
		entry->cdte_addr.msf.second = BCD_TO_BYTE(buffer[10]);
		entry->cdte_addr.msf.frame = BCD_TO_BYTE(buffer[11]);
		/*
		 * Send a READ TOC command using the LBA address format to get
		 * the LBA for the track requested so it can be used in the
		 * READ HEADER request
		 *
		 * Note: The MSF bit of the READ HEADER command specifies the
		 * output format. The block address specified in that command
		 * must be in LBA format.
		 */
		cdb[1] = 0;
		rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_SYSSPACE,
		    SD_PATH_STANDARD);
		if (rval != 0) {
			kmem_free(buffer, 12);
			kmem_free(com, sizeof (*com));
			return (rval);
		}
	} else {
		entry->cdte_addr.msf.minute = buffer[9];
		entry->cdte_addr.msf.second = buffer[10];
		entry->cdte_addr.msf.frame = buffer[11];
		/*
		 * Send a READ TOC command using the LBA address format to get
		 * the LBA for the track requested so it can be used in the
		 * READ HEADER request
		 *
		 * Note: The MSF bit of the READ HEADER command specifies the
		 * output format. The block address specified in that command
		 * must be in LBA format.
		 */
		cdb[1] = 0;
		rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_SYSSPACE,
		    SD_PATH_STANDARD);
		if (rval != 0) {
			kmem_free(buffer, 12);
			kmem_free(com, sizeof (*com));
			return (rval);
		}
	}

	/*
	 * Build and send the READ HEADER command to determine the data mode of
	 * the user specified track.
	 */
	if ((entry->cdte_ctrl & CDROM_DATA_TRACK) &&
	    (entry->cdte_track != CDROM_LEADOUT)) {
		bzero(cdb, CDB_GROUP1);
		cdb[0] = SCMD_READ_HEADER;
		cdb[2] = buffer[8];
		cdb[3] = buffer[9];
		cdb[4] = buffer[10];
		cdb[5] = buffer[11];
		cdb[8] = 0x08;
		com->uscsi_buflen = 0x08;
		rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_SYSSPACE,
		    SD_PATH_STANDARD);
		if (rval == 0) {
			entry->cdte_datamode = buffer[0];
		} else {
			/*
			 * The READ HEADER command failed. Since it is
			 * obsoleted in one spec, it's better to return
			 * -1 for an invalid track so that we can still
			 * receive the rest of the TOC data.
			 */
			entry->cdte_datamode = (uchar_t)-1;
		}
	} else {
		entry->cdte_datamode = (uchar_t)-1;
	}

	kmem_free(buffer, 12);
	kmem_free(com, sizeof (*com));
	if (ddi_copyout(entry, data, sizeof (struct cdrom_tocentry), flag) != 0)
		return (EFAULT);

	return (rval);
}


/*
 * Function: sr_read_tochdr()
 *
 * Description: This routine is the driver entry point for handling CD-ROM
 *		ioctl requests to read the Table of Contents (TOC) header
 *		(CDROMREADTOCHDR). The TOC header consists of the disk starting
 *		and ending track numbers.
 *
 * Arguments: dev - the device 'dev_t'
 *		data - pointer to user provided toc header structure,
 *		specifying the starting and ending track numbers.
 *		flag - this argument is a pass through to ddi_copyxxx()
 *		directly from the mode argument of ioctl().
 *
 * Return Code: the code returned by sd_send_scsi_cmd()
 *		EFAULT if ddi_copyxxx() fails
 *		ENXIO if ddi_get_soft_state fails
 *		EINVAL if data pointer is NULL
 */

static int
sr_read_tochdr(dev_t dev, caddr_t data, int flag)
{
	struct sd_lun		*un;
	struct uscsi_cmd	*com;
	struct cdrom_tochdr	toc_header;
	struct cdrom_tochdr	*hdr = &toc_header;
	char			cdb[CDB_GROUP1];
	int			rval;
	caddr_t			buffer;

	if (data == NULL) {
		return (EINVAL);
	}

	if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL ||
	    (un->un_state == SD_STATE_OFFLINE)) {
		return (ENXIO);
	}

	buffer = kmem_zalloc(4, KM_SLEEP);
	bzero(cdb, CDB_GROUP1);
	cdb[0] = SCMD_READ_TOC;
	/*
	 * Specifying a track number of 0x00 in the READ TOC command indicates
	 * that the TOC header should be returned
	 */
	cdb[6] = 0x00;
	/*
	 * Bytes 7 & 8 are the 4 byte allocation length for TOC header.
	 * (2 byte data len + 1 byte starting track # + 1 byte ending track #)
	 */
	cdb[8] = 0x04;
	com = kmem_zalloc(sizeof (*com), KM_SLEEP);
	com->uscsi_cdb = cdb;
	com->uscsi_cdblen = CDB_GROUP1;
	com->uscsi_bufaddr = buffer;
	com->uscsi_buflen = 0x04;
	com->uscsi_timeout = 300;
	com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT|USCSI_READ;

	rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_SYSSPACE,
	    SD_PATH_STANDARD);
	if (un->un_f_cfg_read_toc_trk_bcd == TRUE) {
		hdr->cdth_trk0 = BCD_TO_BYTE(buffer[2]);
		hdr->cdth_trk1 = BCD_TO_BYTE(buffer[3]);
	} else {
		hdr->cdth_trk0 = buffer[2];
		hdr->cdth_trk1 = buffer[3];
	}
	kmem_free(buffer, 4);
	kmem_free(com, sizeof (*com));
	if (ddi_copyout(hdr, data, sizeof (struct cdrom_tochdr), flag) != 0) {
		return (EFAULT);
	}
	return (rval);
}


/*
 * Note: The following sr_read_mode1(), sr_read_cd_mode2(), sr_read_mode2(),
 * sr_read_cdda() and sr_read_cdxa() routines implement driver support for
 * handling CDROMREAD ioctl requests for mode 1 user data, mode 2 user data,
 * digital audio and extended architecture digital audio. These modes are
 * defined in the IEC908 (Red Book), ISO10149 (Yellow Book), and the SCSI3
 * MMC specs.
26785 * 26786 * In addition to support for the various data formats these routines also 26787 * include support for devices that implement only the direct access READ 26788 * commands (0x08, 0x28), devices that implement the READ_CD commands 26789 * (0xBE, 0xD4), and devices that implement the vendor unique READ CDDA and 26790 * READ CDXA commands (0xD8, 0xDB) 26791 */ 26792 26793 /* 26794 * Function: sr_read_mode1() 26795 * 26796 * Description: This routine is the driver entry point for handling CD-ROM 26797 * ioctl read mode1 requests (CDROMREADMODE1). 26798 * 26799 * Arguments: dev - the device 'dev_t' 26800 * data - pointer to user provided cd read structure specifying 26801 * the lba buffer address and length. 26802 * flag - this argument is a pass through to ddi_copyxxx() 26803 * directly from the mode argument of ioctl(). 26804 * 26805 * Return Code: the code returned by sd_send_scsi_cmd() 26806 * EFAULT if ddi_copyxxx() fails 26807 * ENXIO if fail ddi_get_soft_state 26808 * EINVAL if data pointer is NULL 26809 */ 26810 26811 static int 26812 sr_read_mode1(dev_t dev, caddr_t data, int flag) 26813 { 26814 struct sd_lun *un; 26815 struct cdrom_read mode1_struct; 26816 struct cdrom_read *mode1 = &mode1_struct; 26817 int rval; 26818 sd_ssc_t *ssc; 26819 26820 #ifdef _MULTI_DATAMODEL 26821 /* To support ILP32 applications in an LP64 world */ 26822 struct cdrom_read32 cdrom_read32; 26823 struct cdrom_read32 *cdrd32 = &cdrom_read32; 26824 #endif /* _MULTI_DATAMODEL */ 26825 26826 if (data == NULL) { 26827 return (EINVAL); 26828 } 26829 26830 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL || 26831 (un->un_state == SD_STATE_OFFLINE)) { 26832 return (ENXIO); 26833 } 26834 26835 SD_TRACE(SD_LOG_ATTACH_DETACH, un, 26836 "sd_read_mode1: entry: un:0x%p\n", un); 26837 26838 #ifdef _MULTI_DATAMODEL 26839 switch (ddi_model_convert_from(flag & FMODELS)) { 26840 case DDI_MODEL_ILP32: 26841 if (ddi_copyin(data, cdrd32, sizeof (*cdrd32), flag) != 0) { 26842 return (EFAULT); 26843 } 26844 /* Convert the ILP32 uscsi data from the application to LP64 */ 26845 cdrom_read32tocdrom_read(cdrd32, mode1); 26846 break; 26847 case DDI_MODEL_NONE: 26848 if (ddi_copyin(data, mode1, sizeof (struct cdrom_read), flag)) { 26849 return (EFAULT); 26850 } 26851 } 26852 #else /* ! _MULTI_DATAMODEL */ 26853 if (ddi_copyin(data, mode1, sizeof (struct cdrom_read), flag)) { 26854 return (EFAULT); 26855 } 26856 #endif /* _MULTI_DATAMODEL */ 26857 26858 ssc = sd_ssc_init(un); 26859 rval = sd_send_scsi_READ(ssc, mode1->cdread_bufaddr, 26860 mode1->cdread_buflen, mode1->cdread_lba, SD_PATH_STANDARD); 26861 sd_ssc_fini(ssc); 26862 26863 SD_TRACE(SD_LOG_ATTACH_DETACH, un, 26864 "sd_read_mode1: exit: un:0x%p\n", un); 26865 26866 return (rval); 26867 } 26868 26869 26870 /* 26871 * Function: sr_read_cd_mode2() 26872 * 26873 * Description: This routine is the driver entry point for handling CD-ROM 26874 * ioctl read mode2 requests (CDROMREADMODE2) for devices that 26875 * support the READ CD (0xBE) command or the 1st generation 26876 * READ CD (0xD4) command. 26877 * 26878 * Arguments: dev - the device 'dev_t' 26879 * data - pointer to user provided cd read structure specifying 26880 * the lba buffer address and length. 26881 * flag - this argument is a pass through to ddi_copyxxx() 26882 * directly from the mode argument of ioctl(). 
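 *
 *		Worked example (illustrative): a cdread_buflen of 23360
 *		bytes gives nblocks = 23360 / 2336 = 10, which is stored
 *		big-endian in cdb[6..8] as 0x00 0x00 0x0A.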
 *
 * Return Code: the code returned by sd_send_scsi_cmd()
 *		EFAULT if ddi_copyxxx() fails
 *		ENXIO if ddi_get_soft_state fails
 *		EINVAL if data pointer is NULL
 */

static int
sr_read_cd_mode2(dev_t dev, caddr_t data, int flag)
{
	struct sd_lun		*un;
	struct uscsi_cmd	*com;
	struct cdrom_read	mode2_struct;
	struct cdrom_read	*mode2 = &mode2_struct;
	uchar_t			cdb[CDB_GROUP5];
	int			nblocks;
	int			rval;
#ifdef _MULTI_DATAMODEL
	/* To support ILP32 applications in an LP64 world */
	struct cdrom_read32	cdrom_read32;
	struct cdrom_read32	*cdrd32 = &cdrom_read32;
#endif /* _MULTI_DATAMODEL */

	if (data == NULL) {
		return (EINVAL);
	}

	if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL ||
	    (un->un_state == SD_STATE_OFFLINE)) {
		return (ENXIO);
	}

#ifdef _MULTI_DATAMODEL
	switch (ddi_model_convert_from(flag & FMODELS)) {
	case DDI_MODEL_ILP32:
		if (ddi_copyin(data, cdrd32, sizeof (*cdrd32), flag) != 0) {
			return (EFAULT);
		}
		/* Convert the ILP32 uscsi data from the application to LP64 */
		cdrom_read32tocdrom_read(cdrd32, mode2);
		break;
	case DDI_MODEL_NONE:
		if (ddi_copyin(data, mode2, sizeof (*mode2), flag) != 0) {
			return (EFAULT);
		}
		break;
	}

#else /* ! _MULTI_DATAMODEL */
	if (ddi_copyin(data, mode2, sizeof (*mode2), flag) != 0) {
		return (EFAULT);
	}
#endif /* _MULTI_DATAMODEL */

	bzero(cdb, sizeof (cdb));
	if (un->un_f_cfg_read_cd_xd4 == TRUE) {
		/* Read command supported by 1st generation atapi drives */
		cdb[0] = SCMD_READ_CDD4;
	} else {
		/* Universal CD Access Command */
		cdb[0] = SCMD_READ_CD;
	}

	/*
	 * Set expected sector type to: 2336-byte, Mode 2 Yellow Book
	 */
	cdb[1] = CDROM_SECTOR_TYPE_MODE2;

	/* set the start address */
	cdb[2] = (uchar_t)((mode2->cdread_lba >> 24) & 0xFF);
	cdb[3] = (uchar_t)((mode2->cdread_lba >> 16) & 0xFF);
	cdb[4] = (uchar_t)((mode2->cdread_lba >> 8) & 0xFF);
	cdb[5] = (uchar_t)(mode2->cdread_lba & 0xFF);

	/* set the transfer length */
	nblocks = mode2->cdread_buflen / 2336;
	cdb[6] = (uchar_t)(nblocks >> 16);
	cdb[7] = (uchar_t)(nblocks >> 8);
	cdb[8] = (uchar_t)nblocks;

	/* set the filter bits */
	cdb[9] = CDROM_READ_CD_USERDATA;

	com = kmem_zalloc(sizeof (*com), KM_SLEEP);
	com->uscsi_cdb = (caddr_t)cdb;
	com->uscsi_cdblen = sizeof (cdb);
	com->uscsi_bufaddr = mode2->cdread_bufaddr;
	com->uscsi_buflen = mode2->cdread_buflen;
	com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT|USCSI_READ;

	rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_USERSPACE,
	    SD_PATH_STANDARD);
	kmem_free(com, sizeof (*com));
	return (rval);
}


/*
 * Function: sr_read_mode2()
 *
 * Description: This routine is the driver entry point for handling CD-ROM
 *		ioctl read mode2 requests (CDROMREADMODE2) for devices that
 *		do not support the READ CD (0xBE) command.
 *
 * Arguments: dev - the device 'dev_t'
 *		data - pointer to user provided cd read structure specifying
 *		the lba buffer address and length.
 *		flag - this argument is a pass through to ddi_copyxxx()
 *		directly from the mode argument of ioctl().
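 *
 *		Worked example (illustrative): since this path first puts
 *		the target into 2336-byte sector mode, the user's
 *		512-byte-unit address is converted to 2 KB units with
 *		cdread_lba >>= 2, so an incoming LBA of 100 becomes
 *		block 25 in the GROUP0 READ that is issued.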
26992 * 26993 * Return Code: the code returned by sd_send_scsi_cmd() 26994 * EFAULT if ddi_copyxxx() fails 26995 * ENXIO if fail ddi_get_soft_state 26996 * EINVAL if data pointer is NULL 26997 * EIO if fail to reset block size 26998 * EAGAIN if commands are in progress in the driver 26999 */ 27000 27001 static int 27002 sr_read_mode2(dev_t dev, caddr_t data, int flag) 27003 { 27004 struct sd_lun *un; 27005 struct cdrom_read mode2_struct; 27006 struct cdrom_read *mode2 = &mode2_struct; 27007 int rval; 27008 uint32_t restore_blksize; 27009 struct uscsi_cmd *com; 27010 uchar_t cdb[CDB_GROUP0]; 27011 int nblocks; 27012 27013 #ifdef _MULTI_DATAMODEL 27014 /* To support ILP32 applications in an LP64 world */ 27015 struct cdrom_read32 cdrom_read32; 27016 struct cdrom_read32 *cdrd32 = &cdrom_read32; 27017 #endif /* _MULTI_DATAMODEL */ 27018 27019 if (data == NULL) { 27020 return (EINVAL); 27021 } 27022 27023 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL || 27024 (un->un_state == SD_STATE_OFFLINE)) { 27025 return (ENXIO); 27026 } 27027 27028 /* 27029 * Because this routine will update the device and driver block size 27030 * being used we want to make sure there are no commands in progress. 27031 * If commands are in progress the user will have to try again. 27032 * 27033 * We check for 1 instead of 0 because we increment un_ncmds_in_driver 27034 * in sdioctl to protect commands from sdioctl through to the top of 27035 * sd_uscsi_strategy. See sdioctl for details. 27036 */ 27037 mutex_enter(SD_MUTEX(un)); 27038 if (un->un_ncmds_in_driver != 1) { 27039 mutex_exit(SD_MUTEX(un)); 27040 return (EAGAIN); 27041 } 27042 mutex_exit(SD_MUTEX(un)); 27043 27044 SD_TRACE(SD_LOG_ATTACH_DETACH, un, 27045 "sd_read_mode2: entry: un:0x%p\n", un); 27046 27047 #ifdef _MULTI_DATAMODEL 27048 switch (ddi_model_convert_from(flag & FMODELS)) { 27049 case DDI_MODEL_ILP32: 27050 if (ddi_copyin(data, cdrd32, sizeof (*cdrd32), flag) != 0) { 27051 return (EFAULT); 27052 } 27053 /* Convert the ILP32 uscsi data from the application to LP64 */ 27054 cdrom_read32tocdrom_read(cdrd32, mode2); 27055 break; 27056 case DDI_MODEL_NONE: 27057 if (ddi_copyin(data, mode2, sizeof (*mode2), flag) != 0) { 27058 return (EFAULT); 27059 } 27060 break; 27061 } 27062 #else /* ! 
_MULTI_DATAMODEL */ 27063 if (ddi_copyin(data, mode2, sizeof (*mode2), flag)) { 27064 return (EFAULT); 27065 } 27066 #endif /* _MULTI_DATAMODEL */ 27067 27068 /* Store the current target block size for restoration later */ 27069 restore_blksize = un->un_tgt_blocksize; 27070 27071 /* Change the device and soft state target block size to 2336 */ 27072 if (sr_sector_mode(dev, SD_MODE2_BLKSIZE) != 0) { 27073 rval = EIO; 27074 goto done; 27075 } 27076 27077 27078 bzero(cdb, sizeof (cdb)); 27079 27080 /* set READ operation */ 27081 cdb[0] = SCMD_READ; 27082 27083 /* adjust lba for 2kbyte blocks from 512 byte blocks */ 27084 mode2->cdread_lba >>= 2; 27085 27086 /* set the start address */ 27087 cdb[1] = (uchar_t)((mode2->cdread_lba >> 16) & 0X1F); 27088 cdb[2] = (uchar_t)((mode2->cdread_lba >> 8) & 0xFF); 27089 cdb[3] = (uchar_t)(mode2->cdread_lba & 0xFF); 27090 27091 /* set the transfer length */ 27092 nblocks = mode2->cdread_buflen / 2336; 27093 cdb[4] = (uchar_t)nblocks & 0xFF; 27094 27095 /* build command */ 27096 com = kmem_zalloc(sizeof (*com), KM_SLEEP); 27097 com->uscsi_cdb = (caddr_t)cdb; 27098 com->uscsi_cdblen = sizeof (cdb); 27099 com->uscsi_bufaddr = mode2->cdread_bufaddr; 27100 com->uscsi_buflen = mode2->cdread_buflen; 27101 com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT|USCSI_READ; 27102 27103 /* 27104 * Issue SCSI command with user space address for read buffer. 27105 * 27106 * This sends the command through main channel in the driver. 27107 * 27108 * Since this is accessed via an IOCTL call, we go through the 27109 * standard path, so that if the device was powered down, then 27110 * it would be 'awakened' to handle the command. 27111 */ 27112 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_USERSPACE, 27113 SD_PATH_STANDARD); 27114 27115 kmem_free(com, sizeof (*com)); 27116 27117 /* Restore the device and soft state target block size */ 27118 if (sr_sector_mode(dev, restore_blksize) != 0) { 27119 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 27120 "can't do switch back to mode 1\n"); 27121 /* 27122 * If sd_send_scsi_READ succeeded we still need to report 27123 * an error because we failed to reset the block size 27124 */ 27125 if (rval == 0) { 27126 rval = EIO; 27127 } 27128 } 27129 27130 done: 27131 SD_TRACE(SD_LOG_ATTACH_DETACH, un, 27132 "sd_read_mode2: exit: un:0x%p\n", un); 27133 27134 return (rval); 27135 } 27136 27137 27138 /* 27139 * Function: sr_sector_mode() 27140 * 27141 * Description: This utility function is used by sr_read_mode2 to set the target 27142 * block size based on the user specified size. This is a legacy 27143 * implementation based upon a vendor specific mode page 27144 * 27145 * Arguments: dev - the device 'dev_t' 27146 * data - flag indicating if block size is being set to 2336 or 27147 * 512. 
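 *
 *		Worked example (illustrative): for a blksize of 2336
 *		(0x0920) the code below stores 0x09 in select[10] and 0x20
 *		in select[11], and sets the low bit of select[14] to flag
 *		the 2336-byte mode.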
27148 * 27149 * Return Code: the code returned by sd_send_scsi_cmd() 27150 * EFAULT if ddi_copyxxx() fails 27151 * ENXIO if fail ddi_get_soft_state 27152 * EINVAL if data pointer is NULL 27153 */ 27154 27155 static int 27156 sr_sector_mode(dev_t dev, uint32_t blksize) 27157 { 27158 struct sd_lun *un; 27159 uchar_t *sense; 27160 uchar_t *select; 27161 int rval; 27162 sd_ssc_t *ssc; 27163 27164 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL || 27165 (un->un_state == SD_STATE_OFFLINE)) { 27166 return (ENXIO); 27167 } 27168 27169 sense = kmem_zalloc(20, KM_SLEEP); 27170 27171 /* Note: This is a vendor specific mode page (0x81) */ 27172 ssc = sd_ssc_init(un); 27173 rval = sd_send_scsi_MODE_SENSE(ssc, CDB_GROUP0, sense, 20, 0x81, 27174 SD_PATH_STANDARD); 27175 sd_ssc_fini(ssc); 27176 if (rval != 0) { 27177 SD_ERROR(SD_LOG_IOCTL_RMMEDIA, un, 27178 "sr_sector_mode: Mode Sense failed\n"); 27179 kmem_free(sense, 20); 27180 return (rval); 27181 } 27182 select = kmem_zalloc(20, KM_SLEEP); 27183 select[3] = 0x08; 27184 select[10] = ((blksize >> 8) & 0xff); 27185 select[11] = (blksize & 0xff); 27186 select[12] = 0x01; 27187 select[13] = 0x06; 27188 select[14] = sense[14]; 27189 select[15] = sense[15]; 27190 if (blksize == SD_MODE2_BLKSIZE) { 27191 select[14] |= 0x01; 27192 } 27193 27194 ssc = sd_ssc_init(un); 27195 rval = sd_send_scsi_MODE_SELECT(ssc, CDB_GROUP0, select, 20, 27196 SD_DONTSAVE_PAGE, SD_PATH_STANDARD); 27197 sd_ssc_fini(ssc); 27198 if (rval != 0) { 27199 SD_ERROR(SD_LOG_IOCTL_RMMEDIA, un, 27200 "sr_sector_mode: Mode Select failed\n"); 27201 } else { 27202 /* 27203 * Only update the softstate block size if we successfully 27204 * changed the device block mode. 27205 */ 27206 mutex_enter(SD_MUTEX(un)); 27207 sd_update_block_info(un, blksize, 0); 27208 mutex_exit(SD_MUTEX(un)); 27209 } 27210 kmem_free(sense, 20); 27211 kmem_free(select, 20); 27212 return (rval); 27213 } 27214 27215 27216 /* 27217 * Function: sr_read_cdda() 27218 * 27219 * Description: This routine is the driver entry point for handling CD-ROM 27220 * ioctl requests to return CD-DA or subcode data. (CDROMCDDA) If 27221 * the target supports CDDA these requests are handled via a vendor 27222 * specific command (0xD8) If the target does not support CDDA 27223 * these requests are handled via the READ CD command (0xBE). 27224 * 27225 * Arguments: dev - the device 'dev_t' 27226 * data - pointer to user provided CD-DA structure specifying 27227 * the track starting address, transfer length, and 27228 * subcode options. 27229 * flag - this argument is a pass through to ddi_copyxxx() 27230 * directly from the mode argument of ioctl(). 
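 *
 *		Worked example (illustrative): a 10-sector CDROM_DA_SUBQ
 *		request transfers 10 * CDROM_BLK_2368 = 23680 bytes, each
 *		2352-byte audio sector being returned with its 16-byte Q
 *		subcode appended.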
27231 * 27232 * Return Code: the code returned by sd_send_scsi_cmd() 27233 * EFAULT if ddi_copyxxx() fails 27234 * ENXIO if fail ddi_get_soft_state 27235 * EINVAL if invalid arguments are provided 27236 * ENOTTY 27237 */ 27238 27239 static int 27240 sr_read_cdda(dev_t dev, caddr_t data, int flag) 27241 { 27242 struct sd_lun *un; 27243 struct uscsi_cmd *com; 27244 struct cdrom_cdda *cdda; 27245 int rval; 27246 size_t buflen; 27247 char cdb[CDB_GROUP5]; 27248 27249 #ifdef _MULTI_DATAMODEL 27250 /* To support ILP32 applications in an LP64 world */ 27251 struct cdrom_cdda32 cdrom_cdda32; 27252 struct cdrom_cdda32 *cdda32 = &cdrom_cdda32; 27253 #endif /* _MULTI_DATAMODEL */ 27254 27255 if (data == NULL) { 27256 return (EINVAL); 27257 } 27258 27259 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 27260 return (ENXIO); 27261 } 27262 27263 cdda = kmem_zalloc(sizeof (struct cdrom_cdda), KM_SLEEP); 27264 27265 #ifdef _MULTI_DATAMODEL 27266 switch (ddi_model_convert_from(flag & FMODELS)) { 27267 case DDI_MODEL_ILP32: 27268 if (ddi_copyin(data, cdda32, sizeof (*cdda32), flag)) { 27269 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 27270 "sr_read_cdda: ddi_copyin Failed\n"); 27271 kmem_free(cdda, sizeof (struct cdrom_cdda)); 27272 return (EFAULT); 27273 } 27274 /* Convert the ILP32 uscsi data from the application to LP64 */ 27275 cdrom_cdda32tocdrom_cdda(cdda32, cdda); 27276 break; 27277 case DDI_MODEL_NONE: 27278 if (ddi_copyin(data, cdda, sizeof (struct cdrom_cdda), flag)) { 27279 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 27280 "sr_read_cdda: ddi_copyin Failed\n"); 27281 kmem_free(cdda, sizeof (struct cdrom_cdda)); 27282 return (EFAULT); 27283 } 27284 break; 27285 } 27286 #else /* ! _MULTI_DATAMODEL */ 27287 if (ddi_copyin(data, cdda, sizeof (struct cdrom_cdda), flag)) { 27288 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 27289 "sr_read_cdda: ddi_copyin Failed\n"); 27290 kmem_free(cdda, sizeof (struct cdrom_cdda)); 27291 return (EFAULT); 27292 } 27293 #endif /* _MULTI_DATAMODEL */ 27294 27295 /* 27296 * Since MMC-2 expects max 3 bytes for length, check if the 27297 * length input is greater than 3 bytes 27298 */ 27299 if ((cdda->cdda_length & 0xFF000000) != 0) { 27300 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, "sr_read_cdda: " 27301 "cdrom transfer length too large: %d (limit %d)\n", 27302 cdda->cdda_length, 0xFFFFFF); 27303 kmem_free(cdda, sizeof (struct cdrom_cdda)); 27304 return (EINVAL); 27305 } 27306 27307 switch (cdda->cdda_subcode) { 27308 case CDROM_DA_NO_SUBCODE: 27309 buflen = CDROM_BLK_2352 * cdda->cdda_length; 27310 break; 27311 case CDROM_DA_SUBQ: 27312 buflen = CDROM_BLK_2368 * cdda->cdda_length; 27313 break; 27314 case CDROM_DA_ALL_SUBCODE: 27315 buflen = CDROM_BLK_2448 * cdda->cdda_length; 27316 break; 27317 case CDROM_DA_SUBCODE_ONLY: 27318 buflen = CDROM_BLK_SUBCODE * cdda->cdda_length; 27319 break; 27320 default: 27321 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 27322 "sr_read_cdda: Subcode '0x%x' Not Supported\n", 27323 cdda->cdda_subcode); 27324 kmem_free(cdda, sizeof (struct cdrom_cdda)); 27325 return (EINVAL); 27326 } 27327 27328 /* Build and send the command */ 27329 com = kmem_zalloc(sizeof (*com), KM_SLEEP); 27330 bzero(cdb, CDB_GROUP5); 27331 27332 if (un->un_f_cfg_cdda == TRUE) { 27333 cdb[0] = (char)SCMD_READ_CD; 27334 cdb[1] = 0x04; 27335 cdb[2] = (((cdda->cdda_addr) & 0xff000000) >> 24); 27336 cdb[3] = (((cdda->cdda_addr) & 0x00ff0000) >> 16); 27337 cdb[4] = (((cdda->cdda_addr) & 0x0000ff00) >> 8); 27338 cdb[5] = ((cdda->cdda_addr) & 0x000000ff); 27339 
cdb[6] = (((cdda->cdda_length) & 0x00ff0000) >> 16); 27340 cdb[7] = (((cdda->cdda_length) & 0x0000ff00) >> 8); 27341 cdb[8] = ((cdda->cdda_length) & 0x000000ff); 27342 cdb[9] = 0x10; 27343 switch (cdda->cdda_subcode) { 27344 case CDROM_DA_NO_SUBCODE : 27345 cdb[10] = 0x0; 27346 break; 27347 case CDROM_DA_SUBQ : 27348 cdb[10] = 0x2; 27349 break; 27350 case CDROM_DA_ALL_SUBCODE : 27351 cdb[10] = 0x1; 27352 break; 27353 case CDROM_DA_SUBCODE_ONLY : 27354 /* FALLTHROUGH */ 27355 default : 27356 kmem_free(cdda, sizeof (struct cdrom_cdda)); 27357 kmem_free(com, sizeof (*com)); 27358 return (ENOTTY); 27359 } 27360 } else { 27361 cdb[0] = (char)SCMD_READ_CDDA; 27362 cdb[2] = (((cdda->cdda_addr) & 0xff000000) >> 24); 27363 cdb[3] = (((cdda->cdda_addr) & 0x00ff0000) >> 16); 27364 cdb[4] = (((cdda->cdda_addr) & 0x0000ff00) >> 8); 27365 cdb[5] = ((cdda->cdda_addr) & 0x000000ff); 27366 cdb[6] = (((cdda->cdda_length) & 0xff000000) >> 24); 27367 cdb[7] = (((cdda->cdda_length) & 0x00ff0000) >> 16); 27368 cdb[8] = (((cdda->cdda_length) & 0x0000ff00) >> 8); 27369 cdb[9] = ((cdda->cdda_length) & 0x000000ff); 27370 cdb[10] = cdda->cdda_subcode; 27371 } 27372 27373 com->uscsi_cdb = cdb; 27374 com->uscsi_cdblen = CDB_GROUP5; 27375 com->uscsi_bufaddr = (caddr_t)cdda->cdda_data; 27376 com->uscsi_buflen = buflen; 27377 com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT|USCSI_READ; 27378 27379 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_USERSPACE, 27380 SD_PATH_STANDARD); 27381 27382 kmem_free(cdda, sizeof (struct cdrom_cdda)); 27383 kmem_free(com, sizeof (*com)); 27384 return (rval); 27385 } 27386 27387 27388 /* 27389 * Function: sr_read_cdxa() 27390 * 27391 * Description: This routine is the driver entry point for handling CD-ROM 27392 * ioctl requests to return CD-XA (Extended Architecture) data. 27393 * (CDROMCDXA). 27394 * 27395 * Arguments: dev - the device 'dev_t' 27396 * data - pointer to user provided CD-XA structure specifying 27397 * the data starting address, transfer length, and format 27398 * flag - this argument is a pass through to ddi_copyxxx() 27399 * directly from the mode argument of ioctl(). 27400 * 27401 * Return Code: the code returned by sd_send_scsi_cmd() 27402 * EFAULT if ddi_copyxxx() fails 27403 * ENXIO if fail ddi_get_soft_state 27404 * EINVAL if data pointer is NULL 27405 */ 27406 27407 static int 27408 sr_read_cdxa(dev_t dev, caddr_t data, int flag) 27409 { 27410 struct sd_lun *un; 27411 struct uscsi_cmd *com; 27412 struct cdrom_cdxa *cdxa; 27413 int rval; 27414 size_t buflen; 27415 char cdb[CDB_GROUP5]; 27416 uchar_t read_flags; 27417 27418 #ifdef _MULTI_DATAMODEL 27419 /* To support ILP32 applications in an LP64 world */ 27420 struct cdrom_cdxa32 cdrom_cdxa32; 27421 struct cdrom_cdxa32 *cdxa32 = &cdrom_cdxa32; 27422 #endif /* _MULTI_DATAMODEL */ 27423 27424 if (data == NULL) { 27425 return (EINVAL); 27426 } 27427 27428 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 27429 return (ENXIO); 27430 } 27431 27432 cdxa = kmem_zalloc(sizeof (struct cdrom_cdxa), KM_SLEEP); 27433 27434 #ifdef _MULTI_DATAMODEL 27435 switch (ddi_model_convert_from(flag & FMODELS)) { 27436 case DDI_MODEL_ILP32: 27437 if (ddi_copyin(data, cdxa32, sizeof (*cdxa32), flag)) { 27438 kmem_free(cdxa, sizeof (struct cdrom_cdxa)); 27439 return (EFAULT); 27440 } 27441 /* 27442 * Convert the ILP32 uscsi data from the 27443 * application to LP64 for internal use. 
27444 */ 27445 cdrom_cdxa32tocdrom_cdxa(cdxa32, cdxa); 27446 break; 27447 case DDI_MODEL_NONE: 27448 if (ddi_copyin(data, cdxa, sizeof (struct cdrom_cdxa), flag)) { 27449 kmem_free(cdxa, sizeof (struct cdrom_cdxa)); 27450 return (EFAULT); 27451 } 27452 break; 27453 } 27454 #else /* ! _MULTI_DATAMODEL */ 27455 if (ddi_copyin(data, cdxa, sizeof (struct cdrom_cdxa), flag)) { 27456 kmem_free(cdxa, sizeof (struct cdrom_cdxa)); 27457 return (EFAULT); 27458 } 27459 #endif /* _MULTI_DATAMODEL */ 27460 27461 /* 27462 * Since MMC-2 expects max 3 bytes for length, check if the 27463 * length input is greater than 3 bytes 27464 */ 27465 if ((cdxa->cdxa_length & 0xFF000000) != 0) { 27466 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, "sr_read_cdxa: " 27467 "cdrom transfer length too large: %d (limit %d)\n", 27468 cdxa->cdxa_length, 0xFFFFFF); 27469 kmem_free(cdxa, sizeof (struct cdrom_cdxa)); 27470 return (EINVAL); 27471 } 27472 27473 switch (cdxa->cdxa_format) { 27474 case CDROM_XA_DATA: 27475 buflen = CDROM_BLK_2048 * cdxa->cdxa_length; 27476 read_flags = 0x10; 27477 break; 27478 case CDROM_XA_SECTOR_DATA: 27479 buflen = CDROM_BLK_2352 * cdxa->cdxa_length; 27480 read_flags = 0xf8; 27481 break; 27482 case CDROM_XA_DATA_W_ERROR: 27483 buflen = CDROM_BLK_2646 * cdxa->cdxa_length; 27484 read_flags = 0xfc; 27485 break; 27486 default: 27487 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 27488 "sr_read_cdxa: Format '0x%x' Not Supported\n", 27489 cdxa->cdxa_format); 27490 kmem_free(cdxa, sizeof (struct cdrom_cdxa)); 27491 return (EINVAL); 27492 } 27493 27494 com = kmem_zalloc(sizeof (*com), KM_SLEEP); 27495 bzero(cdb, CDB_GROUP5); 27496 if (un->un_f_mmc_cap == TRUE) { 27497 cdb[0] = (char)SCMD_READ_CD; 27498 cdb[2] = (((cdxa->cdxa_addr) & 0xff000000) >> 24); 27499 cdb[3] = (((cdxa->cdxa_addr) & 0x00ff0000) >> 16); 27500 cdb[4] = (((cdxa->cdxa_addr) & 0x0000ff00) >> 8); 27501 cdb[5] = ((cdxa->cdxa_addr) & 0x000000ff); 27502 cdb[6] = (((cdxa->cdxa_length) & 0x00ff0000) >> 16); 27503 cdb[7] = (((cdxa->cdxa_length) & 0x0000ff00) >> 8); 27504 cdb[8] = ((cdxa->cdxa_length) & 0x000000ff); 27505 cdb[9] = (char)read_flags; 27506 } else { 27507 /* 27508 * Note: A vendor specific command (0xDB) is being used here to 27509 * request a read of all subcodes.
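	 * Unlike the MMC READ CD used above, this vendor form carries a
	 * full 32-bit transfer length in CDB bytes 6-9.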
27510 */ 27511 cdb[0] = (char)SCMD_READ_CDXA; 27512 cdb[2] = (((cdxa->cdxa_addr) & 0xff000000) >> 24); 27513 cdb[3] = (((cdxa->cdxa_addr) & 0x00ff0000) >> 16); 27514 cdb[4] = (((cdxa->cdxa_addr) & 0x0000ff00) >> 8); 27515 cdb[5] = ((cdxa->cdxa_addr) & 0x000000ff); 27516 cdb[6] = (((cdxa->cdxa_length) & 0xff000000) >> 24); 27517 cdb[7] = (((cdxa->cdxa_length) & 0x00ff0000) >> 16); 27518 cdb[8] = (((cdxa->cdxa_length) & 0x0000ff00) >> 8); 27519 cdb[9] = ((cdxa->cdxa_length) & 0x000000ff); 27520 cdb[10] = cdxa->cdxa_format; 27521 } 27522 com->uscsi_cdb = cdb; 27523 com->uscsi_cdblen = CDB_GROUP5; 27524 com->uscsi_bufaddr = (caddr_t)cdxa->cdxa_data; 27525 com->uscsi_buflen = buflen; 27526 com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT|USCSI_READ; 27527 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_USERSPACE, 27528 SD_PATH_STANDARD); 27529 kmem_free(cdxa, sizeof (struct cdrom_cdxa)); 27530 kmem_free(com, sizeof (*com)); 27531 return (rval); 27532 } 27533 27534 27535 /* 27536 * Function: sr_eject() 27537 * 27538 * Description: This routine is the driver entry point for handling CD-ROM 27539 * eject ioctl requests (FDEJECT, DKIOCEJECT, CDROMEJECT) 27540 * 27541 * Arguments: dev - the device 'dev_t' 27542 * 27543 * Return Code: the code returned by sd_send_scsi_cmd() 27544 */ 27545 27546 static int 27547 sr_eject(dev_t dev) 27548 { 27549 struct sd_lun *un; 27550 int rval; 27551 sd_ssc_t *ssc; 27552 27553 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL || 27554 (un->un_state == SD_STATE_OFFLINE)) { 27555 return (ENXIO); 27556 } 27557 27558 /* 27559 * To prevent race conditions with the eject 27560 * command, keep track of an eject command as 27561 * it progresses. If we are already handling 27562 * an eject command in the driver for the given 27563 * unit and another request to eject is received 27564 * immediately return EAGAIN so we don't lose 27565 * the command if the current eject command fails. 27566 */ 27567 mutex_enter(SD_MUTEX(un)); 27568 if (un->un_f_ejecting == TRUE) { 27569 mutex_exit(SD_MUTEX(un)); 27570 return (EAGAIN); 27571 } 27572 un->un_f_ejecting = TRUE; 27573 mutex_exit(SD_MUTEX(un)); 27574 27575 ssc = sd_ssc_init(un); 27576 rval = sd_send_scsi_DOORLOCK(ssc, SD_REMOVAL_ALLOW, 27577 SD_PATH_STANDARD); 27578 sd_ssc_fini(ssc); 27579 27580 if (rval != 0) { 27581 mutex_enter(SD_MUTEX(un)); 27582 un->un_f_ejecting = FALSE; 27583 mutex_exit(SD_MUTEX(un)); 27584 return (rval); 27585 } 27586 27587 ssc = sd_ssc_init(un); 27588 rval = sd_send_scsi_START_STOP_UNIT(ssc, SD_TARGET_EJECT, 27589 SD_PATH_STANDARD); 27590 sd_ssc_fini(ssc); 27591 27592 if (rval == 0) { 27593 mutex_enter(SD_MUTEX(un)); 27594 sr_ejected(un); 27595 un->un_mediastate = DKIO_EJECTED; 27596 un->un_f_ejecting = FALSE; 27597 cv_broadcast(&un->un_state_cv); 27598 mutex_exit(SD_MUTEX(un)); 27599 } else { 27600 mutex_enter(SD_MUTEX(un)); 27601 un->un_f_ejecting = FALSE; 27602 mutex_exit(SD_MUTEX(un)); 27603 } 27604 return (rval); 27605 } 27606 27607 27608 /* 27609 * Function: sr_ejected() 27610 * 27611 * Description: This routine updates the soft state structure to invalidate the 27612 * geometry information after the media has been ejected or a 27613 * media eject has been detected. 
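 *		The caller must hold SD_MUTEX(un); the mutex is dropped
 *		and reacquired around the call to cmlb_invalidate().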
27614 * 27615 * Arguments: un - driver soft state (unit) structure 27616 */ 27617 27618 static void 27619 sr_ejected(struct sd_lun *un) 27620 { 27621 struct sd_errstats *stp; 27622 27623 ASSERT(un != NULL); 27624 ASSERT(mutex_owned(SD_MUTEX(un))); 27625 27626 un->un_f_blockcount_is_valid = FALSE; 27627 un->un_f_tgt_blocksize_is_valid = FALSE; 27628 mutex_exit(SD_MUTEX(un)); 27629 cmlb_invalidate(un->un_cmlbhandle, (void *)SD_PATH_DIRECT_PRIORITY); 27630 mutex_enter(SD_MUTEX(un)); 27631 27632 if (un->un_errstats != NULL) { 27633 stp = (struct sd_errstats *)un->un_errstats->ks_data; 27634 stp->sd_capacity.value.ui64 = 0; 27635 } 27636 } 27637 27638 27639 /* 27640 * Function: sr_check_wp() 27641 * 27642 * Description: This routine checks the write protection of a removable 27643 * media disk and hotpluggable devices via the write protect bit of 27644 * the Mode Page Header device specific field. Some devices choke 27645 * on unsupported mode pages. To work around this issue, this 27646 * routine has been implemented to use the 0x3f mode page (request 27647 * all pages) for all device types. 27648 * 27649 * Arguments: dev - the device 'dev_t' 27650 * 27651 * Return Code: int indicating if the device is write protected (1) or not (0) 27652 * 27653 * Context: Kernel thread. 27654 * 27655 */ 27656 27657 static int 27658 sr_check_wp(dev_t dev) 27659 { 27660 struct sd_lun *un; 27661 uchar_t device_specific; 27662 uchar_t *sense; 27663 int hdrlen; 27664 int rval = FALSE; 27665 int status; 27666 sd_ssc_t *ssc; 27667 27668 /* 27669 * Note: The return codes for this routine should be reworked to 27670 * properly handle the case of a NULL softstate. 27671 */ 27672 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 27673 return (FALSE); 27674 } 27675 27676 if (un->un_f_cfg_is_atapi == TRUE) { 27677 /* 27678 * The mode page contents are not required; set the allocation 27679 * length for the mode page header only 27680 */ 27681 hdrlen = MODE_HEADER_LENGTH_GRP2; 27682 sense = kmem_zalloc(hdrlen, KM_SLEEP); 27683 ssc = sd_ssc_init(un); 27684 status = sd_send_scsi_MODE_SENSE(ssc, CDB_GROUP1, sense, hdrlen, 27685 MODEPAGE_ALLPAGES, SD_PATH_STANDARD); 27686 sd_ssc_fini(ssc); 27687 if (status != 0) 27688 goto err_exit; 27689 device_specific = 27690 ((struct mode_header_grp2 *)sense)->device_specific; 27691 } else { 27692 hdrlen = MODE_HEADER_LENGTH; 27693 sense = kmem_zalloc(hdrlen, KM_SLEEP); 27694 ssc = sd_ssc_init(un); 27695 status = sd_send_scsi_MODE_SENSE(ssc, CDB_GROUP0, sense, hdrlen, 27696 MODEPAGE_ALLPAGES, SD_PATH_STANDARD); 27697 sd_ssc_fini(ssc); 27698 if (status != 0) 27699 goto err_exit; 27700 device_specific = 27701 ((struct mode_header *)sense)->device_specific; 27702 } 27703 27704 27705 /* 27706 * The write protect bit is checked in the device specific field; 27707 * a failed mode sense has already branched to err_exit, since not 27708 * all disks understand this query (those are treated as writable). 27709 */ 27710 if (device_specific & WRITE_PROTECT) { 27711 rval = TRUE; 27712 } 27713 27714 err_exit: 27715 kmem_free(sense, hdrlen); 27716 return (rval); 27717 } 27718 27719 /* 27720 * Function: sr_volume_ctrl() 27721 * 27722 * Description: This routine is the driver entry point for handling CD-ROM 27723 * audio output volume ioctl requests. (CDROMVOLCTRL) 27724 * 27725 * Arguments: dev - the device 'dev_t' 27726 * data - pointer to user audio volume control structure 27727 * flag - this argument is a pass through to ddi_copyxxx() 27728 * directly from the mode argument of ioctl().
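 *		Note: ATAPI/MMC devices are programmed with the group 1
 *		(10-byte) MODE SENSE/SELECT forms and grp2 mode headers;
 *		other devices use the group 0 (6-byte) forms.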
27729 * 27730 * Return Code: the code returned by sd_send_scsi_cmd() 27731 * EFAULT if ddi_copyxxx() fails 27732 * ENXIO if fail ddi_get_soft_state 27733 * EINVAL if data pointer is NULL 27734 * 27735 */ 27736 27737 static int 27738 sr_volume_ctrl(dev_t dev, caddr_t data, int flag) 27739 { 27740 struct sd_lun *un; 27741 struct cdrom_volctrl volume; 27742 struct cdrom_volctrl *vol = &volume; 27743 uchar_t *sense_page; 27744 uchar_t *select_page; 27745 uchar_t *sense; 27746 uchar_t *select; 27747 int sense_buflen; 27748 int select_buflen; 27749 int rval; 27750 sd_ssc_t *ssc; 27751 27752 if (data == NULL) { 27753 return (EINVAL); 27754 } 27755 27756 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL || 27757 (un->un_state == SD_STATE_OFFLINE)) { 27758 return (ENXIO); 27759 } 27760 27761 if (ddi_copyin(data, vol, sizeof (struct cdrom_volctrl), flag)) { 27762 return (EFAULT); 27763 } 27764 27765 if ((un->un_f_cfg_is_atapi == TRUE) || (un->un_f_mmc_cap == TRUE)) { 27766 struct mode_header_grp2 *sense_mhp; 27767 struct mode_header_grp2 *select_mhp; 27768 int bd_len; 27769 27770 sense_buflen = MODE_PARAM_LENGTH_GRP2 + MODEPAGE_AUDIO_CTRL_LEN; 27771 select_buflen = MODE_HEADER_LENGTH_GRP2 + 27772 MODEPAGE_AUDIO_CTRL_LEN; 27773 sense = kmem_zalloc(sense_buflen, KM_SLEEP); 27774 select = kmem_zalloc(select_buflen, KM_SLEEP); 27775 ssc = sd_ssc_init(un); 27776 rval = sd_send_scsi_MODE_SENSE(ssc, CDB_GROUP1, sense, 27777 sense_buflen, MODEPAGE_AUDIO_CTRL, 27778 SD_PATH_STANDARD); 27779 sd_ssc_fini(ssc); 27780 27781 if (rval != 0) { 27782 SD_ERROR(SD_LOG_IOCTL_RMMEDIA, un, 27783 "sr_volume_ctrl: Mode Sense Failed\n"); 27784 kmem_free(sense, sense_buflen); 27785 kmem_free(select, select_buflen); 27786 return (rval); 27787 } 27788 sense_mhp = (struct mode_header_grp2 *)sense; 27789 select_mhp = (struct mode_header_grp2 *)select; 27790 bd_len = (sense_mhp->bdesc_length_hi << 8) | 27791 sense_mhp->bdesc_length_lo; 27792 if (bd_len > MODE_BLK_DESC_LENGTH) { 27793 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 27794 "sr_volume_ctrl: Mode Sense returned invalid " 27795 "block descriptor length\n"); 27796 kmem_free(sense, sense_buflen); 27797 kmem_free(select, select_buflen); 27798 return (EIO); 27799 } 27800 sense_page = (uchar_t *) 27801 (sense + MODE_HEADER_LENGTH_GRP2 + bd_len); 27802 select_page = (uchar_t *)(select + MODE_HEADER_LENGTH_GRP2); 27803 select_mhp->length_msb = 0; 27804 select_mhp->length_lsb = 0; 27805 select_mhp->bdesc_length_hi = 0; 27806 select_mhp->bdesc_length_lo = 0; 27807 } else { 27808 struct mode_header *sense_mhp, *select_mhp; 27809 27810 sense_buflen = MODE_PARAM_LENGTH + MODEPAGE_AUDIO_CTRL_LEN; 27811 select_buflen = MODE_HEADER_LENGTH + MODEPAGE_AUDIO_CTRL_LEN; 27812 sense = kmem_zalloc(sense_buflen, KM_SLEEP); 27813 select = kmem_zalloc(select_buflen, KM_SLEEP); 27814 ssc = sd_ssc_init(un); 27815 rval = sd_send_scsi_MODE_SENSE(ssc, CDB_GROUP0, sense, 27816 sense_buflen, MODEPAGE_AUDIO_CTRL, 27817 SD_PATH_STANDARD); 27818 sd_ssc_fini(ssc); 27819 27820 if (rval != 0) { 27821 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 27822 "sr_volume_ctrl: Mode Sense Failed\n"); 27823 kmem_free(sense, sense_buflen); 27824 kmem_free(select, select_buflen); 27825 return (rval); 27826 } 27827 sense_mhp = (struct mode_header *)sense; 27828 select_mhp = (struct mode_header *)select; 27829 if (sense_mhp->bdesc_length > MODE_BLK_DESC_LENGTH) { 27830 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 27831 "sr_volume_ctrl: Mode Sense returned invalid " 27832 "block descriptor length\n"); 27833 
kmem_free(sense, sense_buflen); 27834 kmem_free(select, select_buflen); 27835 return (EIO); 27836 } 27837 sense_page = (uchar_t *) 27838 (sense + MODE_HEADER_LENGTH + sense_mhp->bdesc_length); 27839 select_page = (uchar_t *)(select + MODE_HEADER_LENGTH); 27840 select_mhp->length = 0; 27841 select_mhp->bdesc_length = 0; 27842 } 27843 /* 27844 * Note: An audio control data structure could be created and overlayed 27845 * on the following in place of the array indexing method implemented. 27846 */ 27847 27848 /* Build the select data for the user volume data */ 27849 select_page[0] = MODEPAGE_AUDIO_CTRL; 27850 select_page[1] = 0xE; 27851 /* Set the immediate bit */ 27852 select_page[2] = 0x04; 27853 /* Zero out reserved fields */ 27854 select_page[3] = 0x00; 27855 select_page[4] = 0x00; 27856 /* Return sense data for fields not to be modified */ 27857 select_page[5] = sense_page[5]; 27858 select_page[6] = sense_page[6]; 27859 select_page[7] = sense_page[7]; 27860 /* Set the user specified volume levels for channel 0 and 1 */ 27861 select_page[8] = 0x01; 27862 select_page[9] = vol->channel0; 27863 select_page[10] = 0x02; 27864 select_page[11] = vol->channel1; 27865 /* Channel 2 and 3 are currently unsupported so return the sense data */ 27866 select_page[12] = sense_page[12]; 27867 select_page[13] = sense_page[13]; 27868 select_page[14] = sense_page[14]; 27869 select_page[15] = sense_page[15]; 27870 27871 ssc = sd_ssc_init(un); 27872 if ((un->un_f_cfg_is_atapi == TRUE) || (un->un_f_mmc_cap == TRUE)) { 27873 rval = sd_send_scsi_MODE_SELECT(ssc, CDB_GROUP1, select, 27874 select_buflen, SD_DONTSAVE_PAGE, SD_PATH_STANDARD); 27875 } else { 27876 rval = sd_send_scsi_MODE_SELECT(ssc, CDB_GROUP0, select, 27877 select_buflen, SD_DONTSAVE_PAGE, SD_PATH_STANDARD); 27878 } 27879 sd_ssc_fini(ssc); 27880 27881 kmem_free(sense, sense_buflen); 27882 kmem_free(select, select_buflen); 27883 return (rval); 27884 } 27885 27886 27887 /* 27888 * Function: sr_read_sony_session_offset() 27889 * 27890 * Description: This routine is the driver entry point for handling CD-ROM 27891 * ioctl requests for session offset information. (CDROMREADOFFSET) 27892 * The address of the first track in the last session of a 27893 * multi-session CD-ROM is returned 27894 * 27895 * Note: This routine uses a vendor specific key value in the 27896 * command control field without implementing any vendor check here 27897 * or in the ioctl routine. 27898 * 27899 * Arguments: dev - the device 'dev_t' 27900 * data - pointer to an int to hold the requested address 27901 * flag - this argument is a pass through to ddi_copyxxx() 27902 * directly from the mode argument of ioctl(). 27903 * 27904 * Return Code: the code returned by sd_send_scsi_cmd() 27905 * EFAULT if ddi_copyxxx() fails 27906 * ENXIO if fail ddi_get_soft_state 27907 * EINVAL if data pointer is NULL 27908 */ 27909 27910 static int 27911 sr_read_sony_session_offset(dev_t dev, caddr_t data, int flag) 27912 { 27913 struct sd_lun *un; 27914 struct uscsi_cmd *com; 27915 caddr_t buffer; 27916 char cdb[CDB_GROUP1]; 27917 int session_offset = 0; 27918 int rval; 27919 27920 if (data == NULL) { 27921 return (EINVAL); 27922 } 27923 27924 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL || 27925 (un->un_state == SD_STATE_OFFLINE)) { 27926 return (ENXIO); 27927 } 27928 27929 buffer = kmem_zalloc((size_t)SONY_SESSION_OFFSET_LEN, KM_SLEEP); 27930 bzero(cdb, CDB_GROUP1); 27931 cdb[0] = SCMD_READ_TOC; 27932 /* 27933 * Bytes 7 & 8 are the 12 byte allocation length for a single entry. 
27934 * (4 byte TOC response header + 8 byte response data) 27935 */ 27936 cdb[8] = SONY_SESSION_OFFSET_LEN; 27937 /* Byte 9 is the control byte. A vendor specific value is used */ 27938 cdb[9] = SONY_SESSION_OFFSET_KEY; 27939 com = kmem_zalloc(sizeof (*com), KM_SLEEP); 27940 com->uscsi_cdb = cdb; 27941 com->uscsi_cdblen = CDB_GROUP1; 27942 com->uscsi_bufaddr = buffer; 27943 com->uscsi_buflen = SONY_SESSION_OFFSET_LEN; 27944 com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT|USCSI_READ; 27945 27946 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_SYSSPACE, 27947 SD_PATH_STANDARD); 27948 if (rval != 0) { 27949 kmem_free(buffer, SONY_SESSION_OFFSET_LEN); 27950 kmem_free(com, sizeof (*com)); 27951 return (rval); 27952 } 27953 if (buffer[1] == SONY_SESSION_OFFSET_VALID) { 27954 session_offset = 27955 ((uchar_t)buffer[8] << 24) + ((uchar_t)buffer[9] << 16) + 27956 ((uchar_t)buffer[10] << 8) + ((uchar_t)buffer[11]); 27957 /* 27958 * The offset is returned in current lbasize blocks. Convert to 27959 * 2k blocks to return to the user 27960 */ 27961 if (un->un_tgt_blocksize == CDROM_BLK_512) { 27962 session_offset >>= 2; 27963 } else if (un->un_tgt_blocksize == CDROM_BLK_1024) { 27964 session_offset >>= 1; 27965 } 27966 } 27967 27968 if (ddi_copyout(&session_offset, data, sizeof (int), flag) != 0) { 27969 rval = EFAULT; 27970 } 27971 27972 kmem_free(buffer, SONY_SESSION_OFFSET_LEN); 27973 kmem_free(com, sizeof (*com)); 27974 return (rval); 27975 } 27976 27977 27978 /* 27979 * Function: sd_wm_cache_constructor() 27980 * 27981 * Description: Cache Constructor for the wmap cache for the read/modify/write 27982 * devices. 27983 * 27984 * Arguments: wm - A pointer to the sd_w_map to be initialized. 27985 * un - sd_lun structure for the device. 27986 * flag - the km flags passed to constructor 27987 * 27988 * Return Code: 0 on success. 27989 * -1 on failure. 27990 */ 27991 27992 /*ARGSUSED*/ 27993 static int 27994 sd_wm_cache_constructor(void *wm, void *un, int flags) 27995 { 27996 bzero(wm, sizeof (struct sd_w_map)); 27997 cv_init(&((struct sd_w_map *)wm)->wm_avail, NULL, CV_DRIVER, NULL); 27998 return (0); 27999 } 28000 28001 28002 /* 28003 * Function: sd_wm_cache_destructor() 28004 * 28005 * Description: Cache destructor for the wmap cache for the read/modify/write 28006 * devices. 28007 * 28008 * Arguments: wm - A pointer to the sd_w_map to be destroyed. 28009 * un - sd_lun structure for the device. 28010 */ 28011 /*ARGSUSED*/ 28012 static void 28013 sd_wm_cache_destructor(void *wm, void *un) 28014 { 28015 cv_destroy(&((struct sd_w_map *)wm)->wm_avail); 28016 } 28017 28018 28019 /* 28020 * Function: sd_range_lock() 28021 * 28022 * Description: Lock the range of blocks specified as parameter to ensure 28023 * that a read-modify-write is atomic and no other I/O writes 28024 * to the same location. The range is specified in terms 28025 * of start and end blocks. Block numbers are the actual 28026 * media block numbers and not system block numbers. 28027 * 28028 * Arguments: un - sd_lun structure for the device. 28029 * startb - The starting block number 28030 * endb - The end block number 28031 * typ - type of i/o - simple/read_modify_write 28032 * 28033 * Return Code: wm - pointer to the wmap structure. 28034 * 28035 * Context: This routine can sleep.
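 *
 *		A sketch of a caller (illustrative only, not a verbatim
 *		excerpt from the driver):
 *
 *		    wm = sd_range_lock(un, start, end, SD_WTYPE_RMW);
 *		    ... read the enclosing blocks, merge in the new
 *		    data, and write the result back ...
 *		    sd_range_unlock(un, wm);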
28036 */ 28037 28038 static struct sd_w_map * 28039 sd_range_lock(struct sd_lun *un, daddr_t startb, daddr_t endb, ushort_t typ) 28040 { 28041 struct sd_w_map *wmp = NULL; 28042 struct sd_w_map *sl_wmp = NULL; 28043 struct sd_w_map *tmp_wmp; 28044 wm_state state = SD_WM_CHK_LIST; 28045 28046 28047 ASSERT(un != NULL); 28048 ASSERT(!mutex_owned(SD_MUTEX(un))); 28049 28050 mutex_enter(SD_MUTEX(un)); 28051 28052 while (state != SD_WM_DONE) { 28053 28054 switch (state) { 28055 case SD_WM_CHK_LIST: 28056 /* 28057 * This is the starting state. Check the wmap list 28058 * to see if the range is currently available. 28059 */ 28060 if (!(typ & SD_WTYPE_RMW) && !(un->un_rmw_count)) { 28061 /* 28062 * If this is a simple write and no rmw 28063 * i/o is pending then try to lock the 28064 * range as the range should be available. 28065 */ 28066 state = SD_WM_LOCK_RANGE; 28067 } else { 28068 tmp_wmp = sd_get_range(un, startb, endb); 28069 if (tmp_wmp != NULL) { 28070 if ((wmp != NULL) && ONLIST(un, wmp)) { 28071 /* 28072 * Should not keep onlist wmps 28073 * while waiting; this macro 28074 * will also do wmp = NULL. 28075 */ 28076 FREE_ONLIST_WMAP(un, wmp); 28077 } 28078 /* 28079 * sl_wmp is the wmap on which the wait 28080 * is done. Since tmp_wmp points 28081 * to the in-use wmap, set sl_wmp to 28082 * tmp_wmp and change the state to sleep. 28083 */ 28084 sl_wmp = tmp_wmp; 28085 state = SD_WM_WAIT_MAP; 28086 } else { 28087 state = SD_WM_LOCK_RANGE; 28088 } 28089 28090 } 28091 break; 28092 28093 case SD_WM_LOCK_RANGE: 28094 ASSERT(un->un_wm_cache); 28095 /* 28096 * The range needs to be locked; try to get a wmap. 28097 * First attempt it with KM_NOSLEEP, to avoid sleeping 28098 * if possible, as we will have to release the sd mutex 28099 * if we have to sleep. 28100 */ 28101 if (wmp == NULL) 28102 wmp = kmem_cache_alloc(un->un_wm_cache, 28103 KM_NOSLEEP); 28104 if (wmp == NULL) { 28105 mutex_exit(SD_MUTEX(un)); 28106 _NOTE(DATA_READABLE_WITHOUT_LOCK 28107 (sd_lun::un_wm_cache)) 28108 wmp = kmem_cache_alloc(un->un_wm_cache, 28109 KM_SLEEP); 28110 mutex_enter(SD_MUTEX(un)); 28111 /* 28112 * We released the mutex, so recheck and go to 28113 * the check list state. 28114 */ 28115 state = SD_WM_CHK_LIST; 28116 } else { 28117 /* 28118 * We exit out of the state machine since we 28119 * have the wmap. Do the housekeeping first: 28120 * place the wmap on the wmap list if it is not 28121 * on it already and then set the state to done. 28122 */ 28123 wmp->wm_start = startb; 28124 wmp->wm_end = endb; 28125 wmp->wm_flags = typ | SD_WM_BUSY; 28126 if (typ & SD_WTYPE_RMW) { 28127 un->un_rmw_count++; 28128 } 28129 /* 28130 * If not already on the list then link 28131 */ 28132 if (!ONLIST(un, wmp)) { 28133 wmp->wm_next = un->un_wm; 28134 wmp->wm_prev = NULL; 28135 if (wmp->wm_next) 28136 wmp->wm_next->wm_prev = wmp; 28137 un->un_wm = wmp; 28138 } 28139 state = SD_WM_DONE; 28140 } 28141 break; 28142 28143 case SD_WM_WAIT_MAP: 28144 ASSERT(sl_wmp->wm_flags & SD_WM_BUSY); 28145 /* 28146 * Wait is done on sl_wmp, which is set in the 28147 * check_list state. 28148 */ 28149 sl_wmp->wm_wanted_count++; 28150 cv_wait(&sl_wmp->wm_avail, SD_MUTEX(un)); 28151 sl_wmp->wm_wanted_count--; 28152 /* 28153 * We can reuse the memory from the completed sl_wmp 28154 * lock range for our new lock, but only if no one is 28155 * waiting for it.
28156 */ 28157 ASSERT(!(sl_wmp->wm_flags & SD_WM_BUSY)); 28158 if (sl_wmp->wm_wanted_count == 0) { 28159 if (wmp != NULL) 28160 CHK_N_FREEWMP(un, wmp); 28161 wmp = sl_wmp; 28162 } 28163 sl_wmp = NULL; 28164 /* 28165 * After waking up, need to recheck for availability of 28166 * range. 28167 */ 28168 state = SD_WM_CHK_LIST; 28169 break; 28170 28171 default: 28172 panic("sd_range_lock: " 28173 "Unknown state %d in sd_range_lock", state); 28174 /*NOTREACHED*/ 28175 } /* switch(state) */ 28176 28177 } /* while(state != SD_WM_DONE) */ 28178 28179 mutex_exit(SD_MUTEX(un)); 28180 28181 ASSERT(wmp != NULL); 28182 28183 return (wmp); 28184 } 28185 28186 28187 /* 28188 * Function: sd_get_range() 28189 * 28190 * Description: Find whether there is any I/O overlapping with this one. 28191 * Returns the write map of the 1st such I/O, NULL otherwise. 28192 * 28193 * Arguments: un - sd_lun structure for the device. 28194 * startb - The starting block number 28195 * endb - The end block number 28196 * 28197 * Return Code: wm - pointer to the wmap structure. 28198 */ 28199 28200 static struct sd_w_map * 28201 sd_get_range(struct sd_lun *un, daddr_t startb, daddr_t endb) 28202 { 28203 struct sd_w_map *wmp; 28204 28205 ASSERT(un != NULL); 28206 28207 for (wmp = un->un_wm; wmp != NULL; wmp = wmp->wm_next) { 28208 if (!(wmp->wm_flags & SD_WM_BUSY)) { 28209 continue; 28210 } 28211 if ((startb >= wmp->wm_start) && (startb <= wmp->wm_end)) { 28212 break; 28213 } 28214 if ((endb >= wmp->wm_start) && (endb <= wmp->wm_end)) { 28215 break; 28216 } 28217 } 28218 28219 return (wmp); 28220 } 28221 28222 28223 /* 28224 * Function: sd_free_inlist_wmap() 28225 * 28226 * Description: Unlink and free a write map struct. 28227 * 28228 * Arguments: un - sd_lun structure for the device. 28229 * wmp - sd_w_map which needs to be unlinked. 28230 */ 28231 28232 static void 28233 sd_free_inlist_wmap(struct sd_lun *un, struct sd_w_map *wmp) 28234 { 28235 ASSERT(un != NULL); 28236 28237 if (un->un_wm == wmp) { 28238 un->un_wm = wmp->wm_next; 28239 } else { 28240 wmp->wm_prev->wm_next = wmp->wm_next; 28241 } 28242 28243 if (wmp->wm_next) { 28244 wmp->wm_next->wm_prev = wmp->wm_prev; 28245 } 28246 28247 wmp->wm_next = wmp->wm_prev = NULL; 28248 28249 kmem_cache_free(un->un_wm_cache, wmp); 28250 } 28251 28252 28253 /* 28254 * Function: sd_range_unlock() 28255 * 28256 * Description: Unlock the range locked by wm. 28257 * Free write map if nobody else is waiting on it. 28258 * 28259 * Arguments: un - sd_lun structure for the device. 28260 * wm - sd_w_map which needs to be unlinked. 28261 */ 28262 28263 static void 28264 sd_range_unlock(struct sd_lun *un, struct sd_w_map *wm) 28265 { 28266 ASSERT(un != NULL); 28267 ASSERT(wm != NULL); 28268 ASSERT(!mutex_owned(SD_MUTEX(un))); 28269 28270 mutex_enter(SD_MUTEX(un)); 28271 28272 if (wm->wm_flags & SD_WTYPE_RMW) { 28273 un->un_rmw_count--; 28274 } 28275 28276 if (wm->wm_wanted_count) { 28277 wm->wm_flags = 0; 28278 /* 28279 * Broadcast that the wmap is available now. 28280 */ 28281 cv_broadcast(&wm->wm_avail); 28282 } else { 28283 /* 28284 * If no one is waiting on the map, it should be freed. 28285 */ 28286 sd_free_inlist_wmap(un, wm); 28287 } 28288 28289 mutex_exit(SD_MUTEX(un)); 28290 } 28291 28292 28293 /* 28294 * Function: sd_read_modify_write_task 28295 * 28296 * Description: Called from a taskq thread to initiate the write phase of 28297 * a read-modify-write request. This is used for targets where 28298 * un->un_sys_blocksize != un->un_tgt_blocksize.
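 *		(For example, a 512-byte system I/O issued to a device
 *		with 2k media sectors, such as a CD-ROM.)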
28299 * 28300 * Arguments: arg - a pointer to the buf(9S) struct for the write command. 28301 * 28302 * Context: Called under taskq thread context. 28303 */ 28304 28305 static void 28306 sd_read_modify_write_task(void *arg) 28307 { 28308 struct sd_mapblocksize_info *bsp; 28309 struct buf *bp; 28310 struct sd_xbuf *xp; 28311 struct sd_lun *un; 28312 28313 bp = arg; /* The bp is given in arg */ 28314 ASSERT(bp != NULL); 28315 28316 /* Get the pointer to the layer-private data struct */ 28317 xp = SD_GET_XBUF(bp); 28318 ASSERT(xp != NULL); 28319 bsp = xp->xb_private; 28320 ASSERT(bsp != NULL); 28321 28322 un = SD_GET_UN(bp); 28323 ASSERT(un != NULL); 28324 ASSERT(!mutex_owned(SD_MUTEX(un))); 28325 28326 SD_TRACE(SD_LOG_IO_RMMEDIA, un, 28327 "sd_read_modify_write_task: entry: buf:0x%p\n", bp); 28328 28329 /* 28330 * This is the write phase of a read-modify-write request, called 28331 * under the context of a taskq thread in response to the completion 28332 * of the read portion of the rmw request completing under interrupt 28333 * context. The write request must be sent from here down the iostart 28334 * chain as if it were being sent from sd_mapblocksize_iostart(), so 28335 * we use the layer index saved in the layer-private data area. 28336 */ 28337 SD_NEXT_IOSTART(bsp->mbs_layer_index, un, bp); 28338 28339 SD_TRACE(SD_LOG_IO_RMMEDIA, un, 28340 "sd_read_modify_write_task: exit: buf:0x%p\n", bp); 28341 } 28342 28343 28344 /* 28345 * Function: sddump_do_read_of_rmw() 28346 * 28347 * Description: This routine will be called from sddump. If sddump is called 28348 * with an I/O which is not aligned on a device blocksize boundary, 28349 * then the write has to be converted to a read-modify-write. 28350 * Do the read part here in order to keep sddump simple. 28351 * Note that the sd_mutex is held across the call to this 28352 * routine. 28353 * 28354 * Arguments: un - sd_lun 28355 * blkno - block number in terms of media block size. 28356 * nblk - number of blocks. 28357 * bpp - pointer to pointer to the buf structure. On return 28358 * from this function, *bpp points to the valid buffer 28359 * to which the write has to be done. 28360 * 28361 * Return Code: 0 for success or errno-type return code 28362 */ 28363 28364 static int 28365 sddump_do_read_of_rmw(struct sd_lun *un, uint64_t blkno, uint64_t nblk, 28366 struct buf **bpp) 28367 { 28368 int err; 28369 int i; 28370 int rval; 28371 struct buf *bp; 28372 struct scsi_pkt *pkt = NULL; 28373 uint32_t target_blocksize; 28374 28375 ASSERT(un != NULL); 28376 ASSERT(mutex_owned(SD_MUTEX(un))); 28377 28378 target_blocksize = un->un_tgt_blocksize; 28379 28380 mutex_exit(SD_MUTEX(un)); 28381 28382 bp = scsi_alloc_consistent_buf(SD_ADDRESS(un), (struct buf *)NULL, 28383 (size_t)(nblk * target_blocksize), B_READ, NULL_FUNC, NULL); 28384 if (bp == NULL) { 28385 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 28386 "no resources for dumping; giving up"); 28387 err = ENOMEM; 28388 goto done; 28389 } 28390 28391 rval = sd_setup_rw_pkt(un, &pkt, bp, 0, NULL_FUNC, NULL, 28392 blkno, nblk); 28393 if (rval != 0) { 28394 scsi_free_consistent_buf(bp); 28395 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 28396 "no resources for dumping; giving up"); 28397 err = ENOMEM; 28398 goto done; 28399 } 28400 28401 pkt->pkt_flags |= FLAG_NOINTR; 28402 28403 err = EIO; 28404 for (i = 0; i < SD_NDUMP_RETRIES; i++) { 28405 28406 /* 28407 * Scsi_poll returns 0 (success) if the command completes and 28408 * the status block is STATUS_GOOD.
We should only check 28409 * errors if this condition is not true. Even then we should 28410 * send our own request sense packet only if we have a check 28411 * condition and auto request sense has not been performed by 28412 * the hba. 28413 */ 28414 SD_TRACE(SD_LOG_DUMP, un, "sddump: sending read\n"); 28415 28416 if ((sd_scsi_poll(un, pkt) == 0) && (pkt->pkt_resid == 0)) { 28417 err = 0; 28418 break; 28419 } 28420 28421 /* 28422 * Check CMD_DEV_GONE 1st, give up if device is gone, 28423 * no need to read RQS data. 28424 */ 28425 if (pkt->pkt_reason == CMD_DEV_GONE) { 28426 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 28427 "Error while dumping state with rmw..." 28428 "Device is gone\n"); 28429 break; 28430 } 28431 28432 if (SD_GET_PKT_STATUS(pkt) == STATUS_CHECK) { 28433 SD_INFO(SD_LOG_DUMP, un, 28434 "sddump: read failed with CHECK, try # %d\n", i); 28435 if (((pkt->pkt_state & STATE_ARQ_DONE) == 0)) { 28436 (void) sd_send_polled_RQS(un); 28437 } 28438 28439 continue; 28440 } 28441 28442 if (SD_GET_PKT_STATUS(pkt) == STATUS_BUSY) { 28443 int reset_retval = 0; 28444 28445 SD_INFO(SD_LOG_DUMP, un, 28446 "sddump: read failed with BUSY, try # %d\n", i); 28447 28448 if (un->un_f_lun_reset_enabled == TRUE) { 28449 reset_retval = scsi_reset(SD_ADDRESS(un), 28450 RESET_LUN); 28451 } 28452 if (reset_retval == 0) { 28453 (void) scsi_reset(SD_ADDRESS(un), RESET_TARGET); 28454 } 28455 (void) sd_send_polled_RQS(un); 28456 28457 } else { 28458 SD_INFO(SD_LOG_DUMP, un, 28459 "sddump: read failed with 0x%x, try # %d\n", 28460 SD_GET_PKT_STATUS(pkt), i); 28461 mutex_enter(SD_MUTEX(un)); 28462 sd_reset_target(un, pkt); 28463 mutex_exit(SD_MUTEX(un)); 28464 } 28465 28466 /* 28467 * If we are not getting anywhere with lun/target resets, 28468 * let's reset the bus. 28469 */ 28470 if (i > SD_NDUMP_RETRIES/2) { 28471 (void) scsi_reset(SD_ADDRESS(un), RESET_ALL); 28472 (void) sd_send_polled_RQS(un); 28473 } 28474 28475 } 28476 scsi_destroy_pkt(pkt); 28477 28478 if (err != 0) { 28479 scsi_free_consistent_buf(bp); 28480 *bpp = NULL; 28481 } else { 28482 *bpp = bp; 28483 } 28484 28485 done: 28486 mutex_enter(SD_MUTEX(un)); 28487 return (err); 28488 } 28489 28490 28491 /* 28492 * Function: sd_failfast_flushq 28493 * 28494 * Description: Take all bp's on the wait queue that have B_FAILFAST set 28495 * in b_flags and move them onto the failfast queue, then kick 28496 * off a thread to return all bp's on the failfast queue to 28497 * their owners with an error set. 28498 * 28499 * Arguments: un - pointer to the soft state struct for the instance. 28500 * 28501 * Context: may execute in interrupt context. 28502 */ 28503 28504 static void 28505 sd_failfast_flushq(struct sd_lun *un) 28506 { 28507 struct buf *bp; 28508 struct buf *next_waitq_bp; 28509 struct buf *prev_waitq_bp = NULL; 28510 28511 ASSERT(un != NULL); 28512 ASSERT(mutex_owned(SD_MUTEX(un))); 28513 ASSERT(un->un_failfast_state == SD_FAILFAST_ACTIVE); 28514 ASSERT(un->un_failfast_bp == NULL); 28515 28516 SD_TRACE(SD_LOG_IO_FAILFAST, un, 28517 "sd_failfast_flushq: entry: un:0x%p\n", un); 28518 28519 /* 28520 * Check if we should flush all bufs when entering failfast state, or 28521 * just those with B_FAILFAST set. 28522 */ 28523 if (sd_failfast_flushctl & SD_FAILFAST_FLUSH_ALL_BUFS) { 28524 /* 28525 * Move *all* bp's on the wait queue to the failfast flush 28526 * queue, including those that do NOT have B_FAILFAST set. 
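 * Both queues are singly linked through av_forw, so the whole
 * wait queue can be spliced onto the tail of the failfast queue.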
28527 */ 28528 if (un->un_failfast_headp == NULL) { 28529 ASSERT(un->un_failfast_tailp == NULL); 28530 un->un_failfast_headp = un->un_waitq_headp; 28531 } else { 28532 ASSERT(un->un_failfast_tailp != NULL); 28533 un->un_failfast_tailp->av_forw = un->un_waitq_headp; 28534 } 28535 28536 un->un_failfast_tailp = un->un_waitq_tailp; 28537 28538 /* update kstat for each bp moved out of the waitq */ 28539 for (bp = un->un_waitq_headp; bp != NULL; bp = bp->av_forw) { 28540 SD_UPDATE_KSTATS(un, kstat_waitq_exit, bp); 28541 } 28542 28543 /* empty the waitq */ 28544 un->un_waitq_headp = un->un_waitq_tailp = NULL; 28545 28546 } else { 28547 /* 28548 * Go thru the wait queue, pick off all entries with 28549 * B_FAILFAST set, and move these onto the failfast queue. 28550 */ 28551 for (bp = un->un_waitq_headp; bp != NULL; bp = next_waitq_bp) { 28552 /* 28553 * Save the pointer to the next bp on the wait queue, 28554 * so we get to it on the next iteration of this loop. 28555 */ 28556 next_waitq_bp = bp->av_forw; 28557 28558 /* 28559 * If this bp from the wait queue does NOT have 28560 * B_FAILFAST set, just move on to the next element 28561 * in the wait queue. Note, this is the only place 28562 * where it is correct to set prev_waitq_bp. 28563 */ 28564 if ((bp->b_flags & B_FAILFAST) == 0) { 28565 prev_waitq_bp = bp; 28566 continue; 28567 } 28568 28569 /* 28570 * Remove the bp from the wait queue. 28571 */ 28572 if (bp == un->un_waitq_headp) { 28573 /* The bp is the first element of the waitq. */ 28574 un->un_waitq_headp = next_waitq_bp; 28575 if (un->un_waitq_headp == NULL) { 28576 /* The wait queue is now empty */ 28577 un->un_waitq_tailp = NULL; 28578 } 28579 } else { 28580 /* 28581 * The bp is either somewhere in the middle 28582 * or at the end of the wait queue. 28583 */ 28584 ASSERT(un->un_waitq_headp != NULL); 28585 ASSERT(prev_waitq_bp != NULL); 28586 ASSERT((prev_waitq_bp->b_flags & B_FAILFAST) 28587 == 0); 28588 if (bp == un->un_waitq_tailp) { 28589 /* bp is the last entry on the waitq. */ 28590 ASSERT(next_waitq_bp == NULL); 28591 un->un_waitq_tailp = prev_waitq_bp; 28592 } 28593 prev_waitq_bp->av_forw = next_waitq_bp; 28594 } 28595 bp->av_forw = NULL; 28596 28597 /* 28598 * update kstat since the bp is moved out of 28599 * the waitq 28600 */ 28601 SD_UPDATE_KSTATS(un, kstat_waitq_exit, bp); 28602 28603 /* 28604 * Now put the bp onto the failfast queue. 28605 */ 28606 if (un->un_failfast_headp == NULL) { 28607 /* failfast queue is currently empty */ 28608 ASSERT(un->un_failfast_tailp == NULL); 28609 un->un_failfast_headp = 28610 un->un_failfast_tailp = bp; 28611 } else { 28612 /* Add the bp to the end of the failfast q */ 28613 ASSERT(un->un_failfast_tailp != NULL); 28614 ASSERT(un->un_failfast_tailp->b_flags & 28615 B_FAILFAST); 28616 un->un_failfast_tailp->av_forw = bp; 28617 un->un_failfast_tailp = bp; 28618 } 28619 } 28620 } 28621 28622 /* 28623 * Now return all bp's on the failfast queue to their owners. 28624 */ 28625 while ((bp = un->un_failfast_headp) != NULL) { 28626 28627 un->un_failfast_headp = bp->av_forw; 28628 if (un->un_failfast_headp == NULL) { 28629 un->un_failfast_tailp = NULL; 28630 } 28631 28632 /* 28633 * We want to return the bp with a failure error code, but 28634 * we do not want a call to sd_start_cmds() to occur here, 28635 * so use sd_return_failed_command_no_restart() instead of 28636 * sd_return_failed_command(). 28637 */ 28638 sd_return_failed_command_no_restart(un, bp, EIO); 28639 } 28640 28641 /* Flush the xbuf queues if required. 
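 * ddi_xbuf_flushq() applies sd_failfast_flushq_callback() below to
 * each queued buffer to decide which ones to flush.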
*/ 28642 if (sd_failfast_flushctl & SD_FAILFAST_FLUSH_ALL_QUEUES) { 28643 ddi_xbuf_flushq(un->un_xbuf_attr, sd_failfast_flushq_callback); 28644 } 28645 28646 SD_TRACE(SD_LOG_IO_FAILFAST, un, 28647 "sd_failfast_flushq: exit: un:0x%p\n", un); 28648 } 28649 28650 28651 /* 28652 * Function: sd_failfast_flushq_callback 28653 * 28654 * Description: Return TRUE if the given bp meets the criteria for failfast 28655 * flushing. Used with ddi_xbuf_flushq(9F). 28656 * 28657 * Arguments: bp - ptr to buf struct to be examined. 28658 * 28659 * Context: Any 28660 */ 28661 28662 static int 28663 sd_failfast_flushq_callback(struct buf *bp) 28664 { 28665 /* 28666 * Return TRUE if (1) we want to flush ALL bufs when the failfast 28667 * state is entered; OR (2) the given bp has B_FAILFAST set. 28668 */ 28669 return (((sd_failfast_flushctl & SD_FAILFAST_FLUSH_ALL_BUFS) || 28670 (bp->b_flags & B_FAILFAST)) ? TRUE : FALSE); 28671 } 28672 28673 28674 28675 /* 28676 * Function: sd_setup_next_xfer 28677 * 28678 * Description: Prepare next I/O operation using DMA_PARTIAL 28679 * 28680 */ 28681 28682 static int 28683 sd_setup_next_xfer(struct sd_lun *un, struct buf *bp, 28684 struct scsi_pkt *pkt, struct sd_xbuf *xp) 28685 { 28686 ssize_t num_blks_not_xfered; 28687 daddr_t strt_blk_num; 28688 ssize_t bytes_not_xfered; 28689 int rval; 28690 28691 ASSERT(pkt->pkt_resid == 0); 28692 28693 /* 28694 * Calculate next block number and amount to be transferred. 28695 * 28696 * How much data has NOT been transferred to the HBA yet. 28697 */ 28698 bytes_not_xfered = xp->xb_dma_resid; 28699 28700 /* 28701 * Figure how many blocks have NOT been transferred to the HBA yet. 28702 */ 28703 num_blks_not_xfered = SD_BYTES2TGTBLOCKS(un, bytes_not_xfered); 28704 28705 /* 28706 * Set the starting block number to the end of what WAS transferred. 28707 */ 28708 strt_blk_num = xp->xb_blkno + 28709 SD_BYTES2TGTBLOCKS(un, bp->b_bcount - bytes_not_xfered); 28710 28711 /* 28712 * Move pkt to the next portion of the xfer. sd_setup_next_rw_pkt 28713 * will call scsi_init_pkt with NULL_FUNC so we do not have to release 28714 * the disk mutex here. 28715 */ 28716 rval = sd_setup_next_rw_pkt(un, pkt, bp, 28717 strt_blk_num, num_blks_not_xfered); 28718 28719 if (rval == 0) { 28720 28721 /* 28722 * Success. 28723 * 28724 * Adjust things if there are still more blocks to be 28725 * transferred. 28726 */ 28727 xp->xb_dma_resid = pkt->pkt_resid; 28728 pkt->pkt_resid = 0; 28729 28730 return (1); 28731 } 28732 28733 /* 28734 * There's really only one possible error return value from 28735 * sd_setup_next_rw_pkt, which occurs when scsi_init_pkt 28736 * returns NULL. 28737 */ 28738 ASSERT(rval == SD_PKT_ALLOC_FAILURE); 28739 28740 bp->b_resid = bp->b_bcount; 28741 bp->b_flags |= B_ERROR; 28742 28743 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 28744 "Error setting up next portion of DMA transfer\n"); 28745 28746 return (0); 28747 } 28748 28749 /* 28750 * Function: sd_panic_for_res_conflict 28751 * 28752 * Description: Call panic with a string formatted with "Reservation Conflict" 28753 * and a human readable identifier indicating the SD instance 28754 * that experienced the reservation conflict. 28755 * 28756 * Arguments: un - pointer to the soft state struct for the instance. 28757 * 28758 * Context: may execute in interrupt context.
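 *
 *		Panicking (rather than retrying) is intended for
 *		configurations where a node that has lost its SCSI
 *		reservation must not continue writing to a shared disk.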
*/ 28760 28761 #define SD_RESV_CONFLICT_FMT_LEN 40 28762 void 28763 sd_panic_for_res_conflict(struct sd_lun *un) 28764 { 28765 char panic_str[SD_RESV_CONFLICT_FMT_LEN+MAXPATHLEN]; 28766 char path_str[MAXPATHLEN]; 28767 28768 (void) snprintf(panic_str, sizeof (panic_str), 28769 "Reservation Conflict\nDisk: %s", 28770 ddi_pathname(SD_DEVINFO(un), path_str)); 28771 28772 panic(panic_str); 28773 } 28774 28775 /* 28776 * Note: The following sd_faultinjection_ioctl() routines implement 28777 * driver support for handling fault injection for error analysis 28778 * by causing faults in multiple layers of the driver. 28779 * 28780 */ 28781 28782 #ifdef SD_FAULT_INJECTION 28783 static uint_t sd_fault_injection_on = 0; 28784 28785 /* 28786 * Function: sd_faultinjection_ioctl() 28787 * 28788 * Description: This routine is the driver entry point for handling 28789 * faultinjection ioctls to inject errors into the 28790 * layer model 28791 * 28792 * Arguments: cmd - the ioctl cmd received 28793 * arg - the arguments from the user, also used for returns 28794 */ 28795 28796 static void 28797 sd_faultinjection_ioctl(int cmd, intptr_t arg, struct sd_lun *un) { 28798 28799 uint_t i = 0; 28800 uint_t rval; 28801 28802 SD_TRACE(SD_LOG_IOERR, un, "sd_faultinjection_ioctl: entry\n"); 28803 28804 mutex_enter(SD_MUTEX(un)); 28805 28806 switch (cmd) { 28807 case SDIOCRUN: 28808 /* Allow pushed faults to be injected */ 28809 SD_INFO(SD_LOG_SDTEST, un, 28810 "sd_faultinjection_ioctl: Injecting Fault Run\n"); 28811 28812 sd_fault_injection_on = 1; 28813 28814 SD_INFO(SD_LOG_IOERR, un, 28815 "sd_faultinjection_ioctl: run finished\n"); 28816 break; 28817 28818 case SDIOCSTART: 28819 /* Start Injection Session */ 28820 SD_INFO(SD_LOG_SDTEST, un, 28821 "sd_faultinjection_ioctl: Injecting Fault Start\n"); 28822 28823 sd_fault_injection_on = 0; 28824 un->sd_injection_mask = 0xFFFFFFFF; 28825 for (i = 0; i < SD_FI_MAX_ERROR; i++) { 28826 un->sd_fi_fifo_pkt[i] = NULL; 28827 un->sd_fi_fifo_xb[i] = NULL; 28828 un->sd_fi_fifo_un[i] = NULL; 28829 un->sd_fi_fifo_arq[i] = NULL; 28830 } 28831 un->sd_fi_fifo_start = 0; 28832 un->sd_fi_fifo_end = 0; 28833 28834 mutex_enter(&(un->un_fi_mutex)); 28835 un->sd_fi_log[0] = '\0'; 28836 un->sd_fi_buf_len = 0; 28837 mutex_exit(&(un->un_fi_mutex)); 28838 28839 SD_INFO(SD_LOG_IOERR, un, 28840 "sd_faultinjection_ioctl: start finished\n"); 28841 break; 28842 28843 case SDIOCSTOP: 28844 /* Stop Injection Session */ 28845 SD_INFO(SD_LOG_SDTEST, un, 28846 "sd_faultinjection_ioctl: Injecting Fault Stop\n"); 28847 sd_fault_injection_on = 0; 28848 un->sd_injection_mask = 0x0; 28849 28850 /* Empty stray or unused structs from fifo */ 28851 for (i = 0; i < SD_FI_MAX_ERROR; i++) { 28852 if (un->sd_fi_fifo_pkt[i] != NULL) { 28853 kmem_free(un->sd_fi_fifo_pkt[i], 28854 sizeof (struct sd_fi_pkt)); 28855 } 28856 if (un->sd_fi_fifo_xb[i] != NULL) { 28857 kmem_free(un->sd_fi_fifo_xb[i], 28858 sizeof (struct sd_fi_xb)); 28859 } 28860 if (un->sd_fi_fifo_un[i] != NULL) { 28861 kmem_free(un->sd_fi_fifo_un[i], 28862 sizeof (struct sd_fi_un)); 28863 } 28864 if (un->sd_fi_fifo_arq[i] != NULL) { 28865 kmem_free(un->sd_fi_fifo_arq[i], 28866 sizeof (struct sd_fi_arq)); 28867 } 28868 un->sd_fi_fifo_pkt[i] = NULL; 28869 un->sd_fi_fifo_un[i] = NULL; 28870 un->sd_fi_fifo_xb[i] = NULL; 28871 un->sd_fi_fifo_arq[i] = NULL; 28872 } 28873 un->sd_fi_fifo_start = 0; 28874 un->sd_fi_fifo_end = 0; 28875 28876 SD_INFO(SD_LOG_IOERR, un, 28877 "sd_faultinjection_ioctl: stop finished\n"); 28878 break; 28879 28880 case SDIOCINSERTPKT: 28881 /*
Store a packet struct to be pushed onto fifo */ 28882 SD_INFO(SD_LOG_SDTEST, un, 28883 "sd_faultinjection_ioctl: Injecting Fault Insert Pkt\n"); 28884 28885 i = un->sd_fi_fifo_end % SD_FI_MAX_ERROR; 28886 28887 sd_fault_injection_on = 0; 28888 28889 /* No more than SD_FI_MAX_ERROR allowed in Queue */ 28890 if (un->sd_fi_fifo_pkt[i] != NULL) { 28891 kmem_free(un->sd_fi_fifo_pkt[i], 28892 sizeof (struct sd_fi_pkt)); 28893 } 28894 if (arg != NULL) { 28895 un->sd_fi_fifo_pkt[i] = 28896 kmem_alloc(sizeof (struct sd_fi_pkt), KM_NOSLEEP); 28897 if (un->sd_fi_fifo_pkt[i] == NULL) { 28898 /* Alloc failed, don't store anything */ 28899 break; 28900 } 28901 rval = ddi_copyin((void *)arg, un->sd_fi_fifo_pkt[i], 28902 sizeof (struct sd_fi_pkt), 0); 28903 if (rval == -1) { 28904 kmem_free(un->sd_fi_fifo_pkt[i], 28905 sizeof (struct sd_fi_pkt)); 28906 un->sd_fi_fifo_pkt[i] = NULL; 28907 } 28908 } else { 28909 SD_INFO(SD_LOG_IOERR, un, 28910 "sd_faultinjection_ioctl: pkt null\n"); 28911 } 28912 break; 28913 28914 case SDIOCINSERTXB: 28915 /* Store an xb struct to be pushed onto fifo */ 28916 SD_INFO(SD_LOG_SDTEST, un, 28917 "sd_faultinjection_ioctl: Injecting Fault Insert XB\n"); 28918 28919 i = un->sd_fi_fifo_end % SD_FI_MAX_ERROR; 28920 28921 sd_fault_injection_on = 0; 28922 28923 if (un->sd_fi_fifo_xb[i] != NULL) { 28924 kmem_free(un->sd_fi_fifo_xb[i], 28925 sizeof (struct sd_fi_xb)); 28926 un->sd_fi_fifo_xb[i] = NULL; 28927 } 28928 if (arg != NULL) { 28929 un->sd_fi_fifo_xb[i] = 28930 kmem_alloc(sizeof (struct sd_fi_xb), KM_NOSLEEP); 28931 if (un->sd_fi_fifo_xb[i] == NULL) { 28932 /* Alloc failed, don't store anything */ 28933 break; 28934 } 28935 rval = ddi_copyin((void *)arg, un->sd_fi_fifo_xb[i], 28936 sizeof (struct sd_fi_xb), 0); 28937 28938 if (rval == -1) { 28939 kmem_free(un->sd_fi_fifo_xb[i], 28940 sizeof (struct sd_fi_xb)); 28941 un->sd_fi_fifo_xb[i] = NULL; 28942 } 28943 } else { 28944 SD_INFO(SD_LOG_IOERR, un, 28945 "sd_faultinjection_ioctl: xb null\n"); 28946 } 28947 break; 28948 28949 case SDIOCINSERTUN: 28950 /* Store a un struct to be pushed onto fifo */ 28951 SD_INFO(SD_LOG_SDTEST, un, 28952 "sd_faultinjection_ioctl: Injecting Fault Insert UN\n"); 28953 28954 i = un->sd_fi_fifo_end % SD_FI_MAX_ERROR; 28955 28956 sd_fault_injection_on = 0; 28957 28958 if (un->sd_fi_fifo_un[i] != NULL) { 28959 kmem_free(un->sd_fi_fifo_un[i], 28960 sizeof (struct sd_fi_un)); 28961 un->sd_fi_fifo_un[i] = NULL; 28962 } 28963 if (arg != NULL) { 28964 un->sd_fi_fifo_un[i] = 28965 kmem_alloc(sizeof (struct sd_fi_un), KM_NOSLEEP); 28966 if (un->sd_fi_fifo_un[i] == NULL) { 28967 /* Alloc failed, don't store anything */ 28968 break; 28969 } 28970 rval = ddi_copyin((void *)arg, un->sd_fi_fifo_un[i], 28971 sizeof (struct sd_fi_un), 0); 28972 if (rval == -1) { 28973 kmem_free(un->sd_fi_fifo_un[i], 28974 sizeof (struct sd_fi_un)); 28975 un->sd_fi_fifo_un[i] = NULL; 28976 } 28977 28978 } else { 28979 SD_INFO(SD_LOG_IOERR, un, 28980 "sd_faultinjection_ioctl: un null\n"); 28981 } 28982 28983 break; 28984 28985 case SDIOCINSERTARQ: 28986 /* Store an arq struct to be pushed onto fifo */ 28987 SD_INFO(SD_LOG_SDTEST, un, 28988 "sd_faultinjection_ioctl: Injecting Fault Insert ARQ\n"); 28989 i = un->sd_fi_fifo_end % SD_FI_MAX_ERROR; 28990 28991 sd_fault_injection_on = 0; 28992 28993 if (un->sd_fi_fifo_arq[i] != NULL) { 28994 kmem_free(un->sd_fi_fifo_arq[i], 28995 sizeof (struct sd_fi_arq)); 28996 un->sd_fi_fifo_arq[i] = NULL; 28997 } 28998 if (arg != NULL) { 28999 un->sd_fi_fifo_arq[i] = 29000 kmem_alloc(sizeof (struct
sd_fi_arq), KM_NOSLEEP); 29001 if (un->sd_fi_fifo_arq[i] == NULL) { 29002 /* Alloc failed, don't store anything */ 29003 break; 29004 } 29005 rval = ddi_copyin((void *)arg, un->sd_fi_fifo_arq[i], 29006 sizeof (struct sd_fi_arq), 0); 29007 if (rval == -1) { 29008 kmem_free(un->sd_fi_fifo_arq[i], 29009 sizeof (struct sd_fi_arq)); 29010 un->sd_fi_fifo_arq[i] = NULL; 29011 } 29012 29013 } else { 29014 SD_INFO(SD_LOG_IOERR, un, 29015 "sd_faultinjection_ioctl: arq null\n"); 29016 } 29017 29018 break; 29019 29020 case SDIOCPUSH: 29021 /* Push stored xb, pkt, un, and arq onto fifo */ 29022 sd_fault_injection_on = 0; 29023 29024 if (arg != NULL) { 29025 rval = ddi_copyin((void *)arg, &i, sizeof (uint_t), 0); 29026 if (rval != -1 && 29027 un->sd_fi_fifo_end + i < SD_FI_MAX_ERROR) { 29028 un->sd_fi_fifo_end += i; 29029 } 29030 } else { 29031 SD_INFO(SD_LOG_IOERR, un, 29032 "sd_faultinjection_ioctl: push arg null\n"); 29033 if (un->sd_fi_fifo_end + i < SD_FI_MAX_ERROR) { 29034 un->sd_fi_fifo_end++; 29035 } 29036 } 29037 SD_INFO(SD_LOG_IOERR, un, 29038 "sd_faultinjection_ioctl: push to end=%d\n", 29039 un->sd_fi_fifo_end); 29040 break; 29041 29042 case SDIOCRETRIEVE: 29043 /* Return buffer of log from Injection session */ 29044 SD_INFO(SD_LOG_SDTEST, un, 29045 "sd_faultinjection_ioctl: Injecting Fault Retrieve"); 29046 29047 sd_fault_injection_on = 0; 29048 29049 mutex_enter(&(un->un_fi_mutex)); 29050 rval = ddi_copyout(un->sd_fi_log, (void *)arg, 29051 un->sd_fi_buf_len+1, 0); 29052 mutex_exit(&(un->un_fi_mutex)); 29053 29054 if (rval == -1) { 29055 /* 29056 * arg is possibly invalid; setting 29057 * it to NULL for return 29058 */ 29059 arg = NULL; 29060 } 29061 break; 29062 } 29063 29064 mutex_exit(SD_MUTEX(un)); 29065 SD_TRACE(SD_LOG_IOERR, un, "sd_faultinjection_ioctl:" 29066 " exit\n"); 29067 } 29068 29069 29070 /* 29071 * Function: sd_injection_log() 29072 * 29073 * Description: This routine adds buf to the already existing injection log 29074 * for retrieval via faultinjection_ioctl for use in fault 29075 * detection and recovery 29076 * 29077 * Arguments: buf - the string to add to the log 29078 */ 29079 29080 static void 29081 sd_injection_log(char *buf, struct sd_lun *un) 29082 { 29083 uint_t len; 29084 29085 ASSERT(un != NULL); 29086 ASSERT(buf != NULL); 29087 29088 mutex_enter(&(un->un_fi_mutex)); 29089 29090 len = min(strlen(buf), 255); 29091 /* Add logged value to Injection log to be returned later */ 29092 if (len + un->sd_fi_buf_len < SD_FI_MAX_BUF) { 29093 uint_t offset = strlen((char *)un->sd_fi_log); 29094 char *destp = (char *)un->sd_fi_log + offset; 29095 int i; 29096 for (i = 0; i < len; i++) { 29097 *destp++ = *buf++; 29098 } 29099 un->sd_fi_buf_len += len; 29100 un->sd_fi_log[un->sd_fi_buf_len] = '\0'; 29101 } 29102 29103 mutex_exit(&(un->un_fi_mutex)); 29104 } 29105 29106 29107 /* 29108 * Function: sd_faultinjection() 29109 * 29110 * Description: This routine takes the pkt and changes its 29111 * content based on error injection scenario.
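 *		Entries queued by sd_faultinjection_ioctl() are consumed
 *		here one at a time, each applied to a completing command.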
29112 * 29113 * Arguments: pktp - packet to be changed 29114 */ 29115 29116 static void 29117 sd_faultinjection(struct scsi_pkt *pktp) 29118 { 29119 uint_t i; 29120 struct sd_fi_pkt *fi_pkt; 29121 struct sd_fi_xb *fi_xb; 29122 struct sd_fi_un *fi_un; 29123 struct sd_fi_arq *fi_arq; 29124 struct buf *bp; 29125 struct sd_xbuf *xb; 29126 struct sd_lun *un; 29127 29128 ASSERT(pktp != NULL); 29129 29130 /* pull bp xb and un from pktp */ 29131 bp = (struct buf *)pktp->pkt_private; 29132 xb = SD_GET_XBUF(bp); 29133 un = SD_GET_UN(bp); 29134 29135 ASSERT(un != NULL); 29136 29137 mutex_enter(SD_MUTEX(un)); 29138 29139 SD_TRACE(SD_LOG_SDTEST, un, 29140 "sd_faultinjection: entry Injection from sdintr\n"); 29141 29142 /* if injection is off return */ 29143 if (sd_fault_injection_on == 0 || 29144 un->sd_fi_fifo_start == un->sd_fi_fifo_end) { 29145 mutex_exit(SD_MUTEX(un)); 29146 return; 29147 } 29148 29149 SD_INFO(SD_LOG_SDTEST, un, 29150 "sd_faultinjection: is working for copying\n"); 29151 29152 /* take next set off fifo */ 29153 i = un->sd_fi_fifo_start % SD_FI_MAX_ERROR; 29154 29155 fi_pkt = un->sd_fi_fifo_pkt[i]; 29156 fi_xb = un->sd_fi_fifo_xb[i]; 29157 fi_un = un->sd_fi_fifo_un[i]; 29158 fi_arq = un->sd_fi_fifo_arq[i]; 29159 29160 29161 /* set variables accordingly */ 29162 /* set pkt if it was on fifo */ 29163 if (fi_pkt != NULL) { 29164 SD_CONDSET(pktp, pkt, pkt_flags, "pkt_flags"); 29165 SD_CONDSET(*pktp, pkt, pkt_scbp, "pkt_scbp"); 29166 if (fi_pkt->pkt_cdbp != 0xff) 29167 SD_CONDSET(*pktp, pkt, pkt_cdbp, "pkt_cdbp"); 29168 SD_CONDSET(pktp, pkt, pkt_state, "pkt_state"); 29169 SD_CONDSET(pktp, pkt, pkt_statistics, "pkt_statistics"); 29170 SD_CONDSET(pktp, pkt, pkt_reason, "pkt_reason"); 29171 29172 } 29173 /* set xb if it was on fifo */ 29174 if (fi_xb != NULL) { 29175 SD_CONDSET(xb, xb, xb_blkno, "xb_blkno"); 29176 SD_CONDSET(xb, xb, xb_dma_resid, "xb_dma_resid"); 29177 if (fi_xb->xb_retry_count != 0) 29178 SD_CONDSET(xb, xb, xb_retry_count, "xb_retry_count"); 29179 SD_CONDSET(xb, xb, xb_victim_retry_count, 29180 "xb_victim_retry_count"); 29181 SD_CONDSET(xb, xb, xb_sense_status, "xb_sense_status"); 29182 SD_CONDSET(xb, xb, xb_sense_state, "xb_sense_state"); 29183 SD_CONDSET(xb, xb, xb_sense_resid, "xb_sense_resid"); 29184 29185 /* copy in block data from sense */ 29186 /* 29187 * if (fi_xb->xb_sense_data[0] != -1) { 29188 * bcopy(fi_xb->xb_sense_data, xb->xb_sense_data, 29189 * SENSE_LENGTH); 29190 * } 29191 */ 29192 bcopy(fi_xb->xb_sense_data, xb->xb_sense_data, SENSE_LENGTH); 29193 29194 /* copy in extended sense codes */ 29195 SD_CONDSET(((struct scsi_extended_sense *)xb->xb_sense_data), 29196 xb, es_code, "es_code"); 29197 SD_CONDSET(((struct scsi_extended_sense *)xb->xb_sense_data), 29198 xb, es_key, "es_key"); 29199 SD_CONDSET(((struct scsi_extended_sense *)xb->xb_sense_data), 29200 xb, es_add_code, "es_add_code"); 29201 SD_CONDSET(((struct scsi_extended_sense *)xb->xb_sense_data), 29202 xb, es_qual_code, "es_qual_code"); 29203 struct scsi_extended_sense *esp; 29204 esp = (struct scsi_extended_sense *)xb->xb_sense_data; 29205 esp->es_class = CLASS_EXTENDED_SENSE; 29206 } 29207 29208 /* set un if it was on fifo */ 29209 if (fi_un != NULL) { 29210 SD_CONDSET(un->un_sd->sd_inq, un, inq_rmb, "inq_rmb"); 29211 SD_CONDSET(un, un, un_ctype, "un_ctype"); 29212 SD_CONDSET(un, un, un_reset_retry_count, 29213 "un_reset_retry_count"); 29214 SD_CONDSET(un, un, un_reservation_type, "un_reservation_type"); 29215 SD_CONDSET(un, un, un_resvd_status, "un_resvd_status"); 29216 SD_CONDSET(un, un, 
un_f_arq_enabled, "un_f_arq_enabled"); 29217 SD_CONDSET(un, un, un_f_allow_bus_device_reset, 29218 "un_f_allow_bus_device_reset"); 29219 SD_CONDSET(un, un, un_f_opt_queueing, "un_f_opt_queueing"); 29220 29221 } 29222 29223 /* copy in auto request sense if it was on fifo */ 29224 if (fi_arq != NULL) { 29225 bcopy(fi_arq, pktp->pkt_scbp, sizeof (struct sd_fi_arq)); 29226 } 29227 29228 /* free structs */ 29229 if (un->sd_fi_fifo_pkt[i] != NULL) { 29230 kmem_free(un->sd_fi_fifo_pkt[i], sizeof (struct sd_fi_pkt)); 29231 } 29232 if (un->sd_fi_fifo_xb[i] != NULL) { 29233 kmem_free(un->sd_fi_fifo_xb[i], sizeof (struct sd_fi_xb)); 29234 } 29235 if (un->sd_fi_fifo_un[i] != NULL) { 29236 kmem_free(un->sd_fi_fifo_un[i], sizeof (struct sd_fi_un)); 29237 } 29238 if (un->sd_fi_fifo_arq[i] != NULL) { 29239 kmem_free(un->sd_fi_fifo_arq[i], sizeof (struct sd_fi_arq)); 29240 } 29241 29242 /* 29243 * kmem_free does not gurantee to set to NULL 29244 * since we uses these to determine if we set 29245 * values or not lets confirm they are always 29246 * NULL after free 29247 */ 29248 un->sd_fi_fifo_pkt[i] = NULL; 29249 un->sd_fi_fifo_un[i] = NULL; 29250 un->sd_fi_fifo_xb[i] = NULL; 29251 un->sd_fi_fifo_arq[i] = NULL; 29252 29253 un->sd_fi_fifo_start++; 29254 29255 mutex_exit(SD_MUTEX(un)); 29256 29257 SD_INFO(SD_LOG_SDTEST, un, "sd_faultinjection: exit\n"); 29258 } 29259 29260 #endif /* SD_FAULT_INJECTION */ 29261 29262 /* 29263 * This routine is invoked in sd_unit_attach(). Before calling it, the 29264 * properties in conf file should be processed already, and "hotpluggable" 29265 * property was processed also. 29266 * 29267 * The sd driver distinguishes 3 different type of devices: removable media, 29268 * non-removable media, and hotpluggable. Below the differences are defined: 29269 * 29270 * 1. Device ID 29271 * 29272 * The device ID of a device is used to identify this device. Refer to 29273 * ddi_devid_register(9F). 29274 * 29275 * For a non-removable media disk device which can provide 0x80 or 0x83 29276 * VPD page (refer to INQUIRY command of SCSI SPC specification), a unique 29277 * device ID is created to identify this device. For other non-removable 29278 * media devices, a default device ID is created only if this device has 29279 * at least 2 alter cylinders. Otherwise, this device has no devid. 29280 * 29281 * ------------------------------------------------------- 29282 * removable media hotpluggable | Can Have Device ID 29283 * ------------------------------------------------------- 29284 * false false | Yes 29285 * false true | Yes 29286 * true x | No 29287 * ------------------------------------------------------ 29288 * 29289 * 29290 * 2. SCSI group 4 commands 29291 * 29292 * In SCSI specs, only some commands in group 4 command set can use 29293 * 8-byte addresses that can be used to access >2TB storage spaces. 29294 * Other commands have no such capability. Without supporting group4, 29295 * it is impossible to make full use of storage spaces of a disk with 29296 * capacity larger than 2TB. 29297 * 29298 * ----------------------------------------------- 29299 * removable media hotpluggable LP64 | Group 29300 * ----------------------------------------------- 29301 * false false false | 1 29302 * false false true | 4 29303 * false true false | 1 29304 * false true true | 4 29305 * true x x | 5 29306 * ----------------------------------------------- 29307 * 29308 * 29309 * 3. 
 * 3. Check for VTOC Label
 *
 *     If a direct-access disk has no EFI label, sd will check if it has a
 *     valid VTOC label. Now, sd also does that check for removable media
 *     and hotpluggable devices.
 *
 *     --------------------------------------------------------------
 *     Direct-Access   removable media   hotpluggable  |  Check Label
 *     --------------------------------------------------------------
 *         false            false           false      |      No
 *         false            false           true       |      No
 *         false            true            false      |      Yes
 *         false            true            true       |      Yes
 *         true              x                x        |      Yes
 *     --------------------------------------------------------------
 *
 *
 * 4. Building default VTOC label
 *
 *     As section 3 says, sd checks whether some kinds of devices have a
 *     VTOC label. If those devices have no valid VTOC label, sd(7d) will
 *     attempt to create a default VTOC for them. Currently sd creates a
 *     default VTOC label for all devices on the x86 platform (VTOC_16),
 *     but only for removable media devices on SPARC (VTOC_8).
 *
 *     -----------------------------------------------------------
 *     removable media   hotpluggable   platform  |  Default Label
 *     -----------------------------------------------------------
 *         false            false       sparc     |      No
 *         false            false       x86       |      Yes
 *         false            true        x86       |      Yes
 *         false            true        sparc     |      Yes
 *         true               x           x       |      Yes
 *     -----------------------------------------------------------
 *
 *
 * 5. Supported blocksizes of target devices
 *
 *     Sd supports non-512-byte blocksizes for removable media devices
 *     only. For other devices, only a 512-byte blocksize is supported.
 *     This may be changed in the near future because some RAID devices
 *     require a non-512-byte blocksize.
 *
 *     -----------------------------------------------------------
 *     removable media   hotpluggable  |  non-512-byte blocksize
 *     -----------------------------------------------------------
 *         false             false     |         No
 *         false             true      |         No
 *         true                x       |         Yes
 *     -----------------------------------------------------------
 *
 *
 * 6. Automatic mount & unmount
 *
 *     The sd(7d) driver provides the DKIOCREMOVABLE ioctl. This ioctl is
 *     used to query whether a device is a removable media device. It
 *     returns 1 for removable media devices, and 0 for others (see the
 *     userland sketch following this comment block).
 *
 *     The automatic mounting subsystem should distinguish between the
 *     types of devices and apply automounting policies to each.
 *
 *
 * 7. fdisk partition management
 *
 *     Fdisk is the traditional partitioning method on the x86 platform.
 *     The sd(7d) driver supports fdisk partitions only on the x86
 *     platform. On the SPARC platform, sd doesn't support fdisk partitions
 *     at all. Note: pcfs(7fs) can recognize fdisk partitions on both the
 *     x86 and SPARC platforms.
 *
 *     -----------------------------------------------------------
 *     platform   removable media   USB/1394  |  fdisk supported
 *     -----------------------------------------------------------
 *     x86              X               X     |      true
 *     -----------------------------------------------------------
 *     sparc            X               X     |      false
 *     -----------------------------------------------------------
 *
 *
 * 8. MBOOT/MBR
 *
 *     Although sd(7d) doesn't support fdisk on the SPARC platform, it does
 *     support the reading/writing of the mboot for removable media devices
 *     on the SPARC platform.
 *
 *     -----------------------------------------------------------
 *     platform   removable media   USB/1394  |  mboot supported
 *     -----------------------------------------------------------
 *     x86              X               X     |      true
 *     -----------------------------------------------------------
 *     sparc           false           false  |      false
 *     sparc           false           true   |      true
 *     sparc           true            false  |      true
 *     sparc           true            true   |      true
 *     -----------------------------------------------------------
 *
 *
 * 9. error handling during opening device
 *
 *     If an attempt to open a disk device fails, an errno is returned. For
 *     some kinds of errors, different errnos are returned depending on
 *     whether the device is a removable media device. This brings USB/1394
 *     hard disks in line with expected hard disk behavior. It is not
 *     expected that this breaks any application.
 *
 *     ------------------------------------------------------
 *     removable media   hotpluggable  |  errno
 *     ------------------------------------------------------
 *         false             false     |  EIO
 *         false             true      |  EIO
 *         true                x       |  ENXIO
 *     ------------------------------------------------------
 *
 *
 * 10. ioctls: DKIOCEJECT, CDROMEJECT
 *
 *     These IOCTLs are applicable only to removable media devices.
 *
 *     -----------------------------------------------------------
 *     removable media   hotpluggable  |  DKIOCEJECT, CDROMEJECT
 *     -----------------------------------------------------------
 *         false             false     |        No
 *         false             true      |        No
 *         true                x       |        Yes
 *     -----------------------------------------------------------
 *
 *
 * 11. Kstats for partitions
 *
 *     sd creates partition kstats for non-removable media devices. USB and
 *     Firewire hard disks now have partition kstats.
 *
 *     ------------------------------------------------------
 *     removable media   hotpluggable  |  kstat
 *     ------------------------------------------------------
 *         false             false     |   Yes
 *         false             true      |   Yes
 *         true                x       |   No
 *     ------------------------------------------------------
 *
 *
 * 12. Removable media & hotpluggable properties
 *
 *     The sd driver creates a "removable-media" property for removable
 *     media devices. Parent nexus drivers create a "hotpluggable" property
 *     if they support hotplugging.
 *
 *     ---------------------------------------------------------------------
 *     removable media   hotpluggable  |  "removable-media"  "hotpluggable"
 *     ---------------------------------------------------------------------
 *         false             false     |        No                No
 *         false             true      |        No                Yes
 *         true              false     |        Yes               No
 *         true              true      |        Yes               Yes
 *     ---------------------------------------------------------------------
 *
 *
 * 13. Power Management
 *
 *     sd only power manages removable media devices or devices that
 *     support LOG_SENSE or have a "pm-capable" property (PSARC/2002/250).
 *
 *     A parent nexus that supports hotplugging can also set "pm-capable"
 *     if the disk can be power managed.
 *
 *     ------------------------------------------------------------
 *     removable media   hotpluggable   pm-capable  |  power manage
 *     ------------------------------------------------------------
 *         false             false        false     |      No
 *         false             false        true      |      Yes
 *         false             true         false     |      No
 *         false             true         true      |      Yes
 *         true                x            x       |      Yes
 *     ------------------------------------------------------------
 *
 *     USB and firewire hard disks can now be power managed independently
 *     of the framebuffer.
 *
 *
 * 14. Support for USB disks with capacity larger than 1TB
 *
 *     Currently, sd doesn't permit a fixed disk device with a capacity
 *     larger than 1TB to be used in a 32-bit operating system environment.
 *     However, sd doesn't apply that restriction to removable media
 *     devices. Instead, it assumes that removable media devices cannot
 *     have a capacity larger than 1TB. Therefore, using those devices on a
 *     32-bit system is partially supported, which can cause some
 *     unexpected results.
 *
 *     ---------------------------------------------------------------------
 *     removable media   USB/1394  |  Capacity > 1TB  |  Used in 32-bit env
 *     ---------------------------------------------------------------------
 *         false           false   |       true       |        No
 *         false           true    |       true       |        No
 *         true            false   |       true       |        Yes
 *         true            true    |       true       |        Yes
 *     ---------------------------------------------------------------------
 *
 *
 * 15. Check write-protection at open time
 *
 *     When a removable media device is being opened for writing without
 *     the NDELAY flag, sd will check if this device is writable. An
 *     attempt to open a write-protected device without the NDELAY flag
 *     will fail.
 *
 *     ------------------------------------------------------------
 *     removable media   USB/1394  |  WP Check
 *     ------------------------------------------------------------
 *         false           false   |     No
 *         false           true    |     No
 *         true            false   |     Yes
 *         true            true    |     Yes
 *     ------------------------------------------------------------
 *
 *
 * 16. syslog when corrupted VTOC is encountered
 *
 *     Currently, if an invalid VTOC is encountered, sd only prints a
 *     syslog message for fixed SCSI disks.
 *
 *     ------------------------------------------------------------
 *     removable media   USB/1394  |  print syslog
 *     ------------------------------------------------------------
 *         false           false   |     Yes
 *         false           true    |     No
 *         true            false   |     No
 *         true            true    |     No
 *     ------------------------------------------------------------
 */
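
/*
 * Illustrative sketch (not part of the driver): the DKIOCREMOVABLE ioctl
 * described in section 6 above can be used from userland as follows to
 * decide whether removable-media automounting policies apply; the helper
 * name and device path are placeholders:
 *
 *	#include <fcntl.h>
 *	#include <unistd.h>
 *	#include <sys/ioctl.h>
 *	#include <sys/dkio.h>
 *
 *	int
 *	is_removable(const char *rdsk_path)
 *	{
 *		int removable = 0;
 *		int fd = open(rdsk_path, O_RDONLY | O_NDELAY);
 *
 *		if (fd < 0)
 *			return (-1);
 *		if (ioctl(fd, DKIOCREMOVABLE, &removable) < 0)
 *			removable = -1;
 *		(void) close(fd);
 *		return (removable);	1 = removable, 0 = fixed
 *	}
 *
 * O_NDELAY is used so the query succeeds even with no media in the drive,
 * consistent with the open semantics described in section 15.
 */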
static void
sd_set_unit_attributes(struct sd_lun *un, dev_info_t *devi)
{
	int	pm_capable_prop;

	ASSERT(un->un_sd);
	ASSERT(un->un_sd->sd_inq);

	/*
	 * Enable SYNC CACHE support for all devices.
	 */
	un->un_f_sync_cache_supported = TRUE;

	/*
	 * Set the sync cache required flag to false.
	 * This would ensure that there is no SYNC CACHE
	 * sent when there are no writes.
	 */
	un->un_f_sync_cache_required = FALSE;

	if (un->un_sd->sd_inq->inq_rmb) {
		/*
		 * The media of this device is removable, and for this kind
		 * of device it is possible to change the medium after
		 * opening it. Thus we should support this operation.
		 */
		un->un_f_has_removable_media = TRUE;

		/*
		 * support non-512-byte blocksizes of removable media devices
		 */
		un->un_f_non_devbsize_supported = TRUE;

		/*
		 * Assume that all removable media devices support DOOR_LOCK.
		 */
		un->un_f_doorlock_supported = TRUE;

		/*
		 * A removable media device can be opened with the NDELAY
		 * flag when there is no media in the drive, in which case
		 * we don't care whether the device is writable. Without
		 * the NDELAY flag, we need to check if the media is
		 * write-protected.
		 */
		un->un_f_chk_wp_open = TRUE;

		/*
		 * Need to start a SCSI watch thread to monitor the media
		 * state; when media is being inserted or ejected, notify
		 * syseventd.
		 */
		un->un_f_monitor_media_state = TRUE;

		/*
		 * Some devices don't support the START_STOP_UNIT command.
		 * Therefore, we'd better check if a device supports it
		 * before sending it.
		 */
		un->un_f_check_start_stop = TRUE;

		/*
		 * support the eject media ioctls:
		 * FDEJECT, DKIOCEJECT, CDROMEJECT
		 */
		un->un_f_eject_media_supported = TRUE;

		/*
		 * Because many removable-media devices don't support
		 * LOG_SENSE, we cannot use that command to check whether
		 * a removable media device supports power management.
		 * We assume that they support power management via the
		 * START_STOP_UNIT command and can be spun up and down
		 * without limitations.
		 */
		un->un_f_pm_supported = TRUE;

		/*
		 * Need to create a zero-length (Boolean) property
		 * "removable-media" for the removable media devices.
		 * Note that the return value of the property is not being
		 * checked, since if we are unable to create the property
		 * we do not want the attach to fail altogether. This is
		 * consistent with other property creation in attach.
		 */
		(void) ddi_prop_create(DDI_DEV_T_NONE, devi,
		    DDI_PROP_CANSLEEP, "removable-media", NULL, 0);

	} else {
		/*
		 * create a device ID for the device
		 */
		un->un_f_devid_supported = TRUE;

		/*
		 * Spin up non-removable-media devices once they are attached.
		 */
		un->un_f_attach_spinup = TRUE;

		/*
		 * According to the SCSI specification, sense data has two
		 * kinds of format: fixed format and descriptor format. At
		 * present, we don't support descriptor format sense data
		 * for removable media.
		 */
		if (SD_INQUIRY(un)->inq_dtype == DTYPE_DIRECT) {
			un->un_f_descr_format_supported = TRUE;
		}

		/*
		 * kstats are created only for non-removable media devices.
		 *
		 * Set this in sd.conf to 0 in order to disable kstats. The
		 * default is 1, so they are enabled by default.
		 */
		un->un_f_pkstats_enabled = (ddi_prop_get_int(DDI_DEV_T_ANY,
		    SD_DEVINFO(un), DDI_PROP_DONTPASS,
		    "enable-partition-kstats", 1));

		/*
		 * Check if the HBA has set the "pm-capable" property.
		 * If "pm-capable" exists and is non-zero then we can
		 * power manage the device without checking the start/stop
		 * cycle count log sense page.
		 *
		 * If "pm-capable" exists and is SD_PM_CAPABLE_FALSE (0)
		 * then we should not power manage the device.
		 *
		 * If "pm-capable" doesn't exist then pm_capable_prop will
		 * be set to SD_PM_CAPABLE_UNDEFINED (-1).
		 * In this case, sd will check the start/stop cycle count
		 * log sense page and power manage the device if the cycle
		 * count limit has not been exceeded. (See the sketch
		 * following this function.)
		 */
		pm_capable_prop = ddi_prop_get_int(DDI_DEV_T_ANY, devi,
		    DDI_PROP_DONTPASS, "pm-capable",
		    SD_PM_CAPABLE_UNDEFINED);
		if (pm_capable_prop == SD_PM_CAPABLE_UNDEFINED) {
			un->un_f_log_sense_supported = TRUE;
		} else {
			/*
			 * The pm-capable property exists.
			 *
			 * Convert "TRUE" values for pm_capable_prop to
			 * SD_PM_CAPABLE_TRUE (1) to make it easier to check
			 * later. "TRUE" values are any values except
			 * SD_PM_CAPABLE_FALSE (0) and
			 * SD_PM_CAPABLE_UNDEFINED (-1).
			 */
			if (pm_capable_prop == SD_PM_CAPABLE_FALSE) {
				un->un_f_log_sense_supported = FALSE;
			} else {
				un->un_f_pm_supported = TRUE;
			}

			SD_INFO(SD_LOG_ATTACH_DETACH, un,
			    "sd_unit_attach: un:0x%p pm-capable "
			    "property set to %d.\n", un,
			    un->un_f_pm_supported);
		}
	}

	if (un->un_f_is_hotpluggable) {

		/*
		 * Have to watch hotpluggable devices as well, since
		 * that's the only way for userland applications to
		 * detect hot removal while the device is busy/mounted.
		 */
		un->un_f_monitor_media_state = TRUE;

		un->un_f_check_start_stop = TRUE;

	}
}
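
/*
 * Illustrative sketch (not part of the driver): a parent nexus that knows
 * a disk can be power managed could publish the "pm-capable" property
 * consumed above on the child's dev_info node before sd attaches. The
 * helper name below is hypothetical; ddi_prop_update_int(9F) is the
 * standard interface:
 *
 *	#include <sys/ddi.h>
 *	#include <sys/sunddi.h>
 *
 *	static void
 *	hba_mark_pm_capable(dev_info_t *child_dip)
 *	{
 *		(void) ddi_prop_update_int(DDI_DEV_T_NONE, child_dip,
 *		    "pm-capable", 1);
 *	}
 *
 * With the property nonzero, sd power manages the device without the
 * start/stop cycle counter LOG SENSE check; with it set to 0 the device
 * is not power managed; with it absent, the LOG SENSE based check above
 * applies.
 */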

/*
 * sd_tg_rdwr:
 *	Provides rdwr access for cmlb via sd_tgops. start_block is
 *	in units of the system block size; reqlength is in bytes.
 */
static int
sd_tg_rdwr(dev_info_t *devi, uchar_t cmd, void *bufaddr,
    diskaddr_t start_block, size_t reqlength, void *tg_cookie)
{
	struct sd_lun *un;
	int path_flag = (int)(uintptr_t)tg_cookie;
	char *dkl = NULL;
	diskaddr_t real_addr = start_block;
	diskaddr_t first_byte, end_block;

	size_t	buffer_size = reqlength;
	int rval = 0;
	diskaddr_t	cap;
	uint32_t	lbasize;
	sd_ssc_t	*ssc;

	un = ddi_get_soft_state(sd_state, ddi_get_instance(devi));
	if (un == NULL)
		return (ENXIO);

	if (cmd != TG_READ && cmd != TG_WRITE)
		return (EINVAL);

	ssc = sd_ssc_init(un);
	mutex_enter(SD_MUTEX(un));
	if (un->un_f_tgt_blocksize_is_valid == FALSE) {
		mutex_exit(SD_MUTEX(un));
		rval = sd_send_scsi_READ_CAPACITY(ssc, (uint64_t *)&cap,
		    &lbasize, path_flag);
		if (rval != 0)
			goto done1;
		mutex_enter(SD_MUTEX(un));
		sd_update_block_info(un, lbasize, cap);
		if ((un->un_f_tgt_blocksize_is_valid == FALSE)) {
			mutex_exit(SD_MUTEX(un));
			rval = EIO;
			goto done;
		}
	}

	if (NOT_DEVBSIZE(un)) {
		/*
		 * sys_blocksize != tgt_blocksize: re-adjust the block
		 * number and save the byte offset to the beginning of
		 * the dk_label. (A worked example follows this function.)
		 */
		first_byte = SD_SYSBLOCKS2BYTES(un, start_block);
		real_addr = first_byte / un->un_tgt_blocksize;

		end_block = (first_byte + reqlength +
		    un->un_tgt_blocksize - 1) / un->un_tgt_blocksize;

		/*
		 * round up the buffer size to a multiple of the target
		 * block size
		 */
		buffer_size = (end_block - real_addr) * un->un_tgt_blocksize;

		SD_TRACE(SD_LOG_IO_PARTITION, un,
		    "sd_tg_rdwr: label_addr: 0x%x allocation size: 0x%x\n",
		    real_addr, buffer_size);

		if (((first_byte % un->un_tgt_blocksize) != 0) ||
		    (reqlength % un->un_tgt_blocksize) != 0)
			/* the request is not aligned */
			dkl = kmem_zalloc(buffer_size, KM_SLEEP);
	}

	/*
	 * The MMC standard allows READ CAPACITY to be
	 * inaccurate by a bounded amount (in the interest of
	 * response latency). As a result, failed READs are
	 * commonplace (due to the reading of metadata and not
	 * data). Depending on the per-Vendor/drive Sense data,
	 * the failed READ can cause many (unnecessary) retries.
	 */

	if (ISCD(un) && (cmd == TG_READ) &&
	    (un->un_f_blockcount_is_valid == TRUE) &&
	    ((start_block == (un->un_blockcount - 1)) ||
	    (start_block == (un->un_blockcount - 2)))) {
		path_flag = SD_PATH_DIRECT_PRIORITY;
	}

	mutex_exit(SD_MUTEX(un));
	if (cmd == TG_READ) {
		rval = sd_send_scsi_READ(ssc, (dkl != NULL) ? dkl : bufaddr,
		    buffer_size, real_addr, path_flag);
		if (dkl != NULL)
			bcopy(dkl + SD_TGTBYTEOFFSET(un, start_block,
			    real_addr), bufaddr, reqlength);
	} else {
		if (dkl) {
			rval = sd_send_scsi_READ(ssc, dkl, buffer_size,
			    real_addr, path_flag);
			if (rval) {
				goto done1;
			}
			bcopy(bufaddr, dkl + SD_TGTBYTEOFFSET(un, start_block,
			    real_addr), reqlength);
		}
		rval = sd_send_scsi_WRITE(ssc, (dkl != NULL) ? dkl : bufaddr,
		    buffer_size, real_addr, path_flag);
	}

done1:
	if (dkl != NULL)
		kmem_free(dkl, buffer_size);

	if (rval != 0) {
		if (rval == EIO)
			sd_ssc_assessment(ssc, SD_FMT_STATUS_CHECK);
		else
			sd_ssc_assessment(ssc, SD_FMT_IGNORE);
	}
done:
	sd_ssc_fini(ssc);
	return (rval);
}
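
/*
 * Worked example of the NOT_DEVBSIZE() conversion above (illustrative
 * numbers only): with a 512-byte system blocksize and a 2048-byte target
 * blocksize (a typical CD), a read of start_block = 3, reqlength = 512
 * gives
 *
 *	first_byte  = 3 * 512                    = 1536
 *	real_addr   = 1536 / 2048                = 0	(target block)
 *	end_block   = (1536 + 512 + 2047) / 2048 = 1
 *	buffer_size = (1 - 0) * 2048             = 2048
 *
 * Since first_byte is not target-block aligned, the transfer goes through
 * the bounce buffer dkl, and the caller's 512 bytes are copied from byte
 * offset 1536 within it (SD_TGTBYTEOFFSET(un, 3, 0)).
 */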

static int
sd_tg_getinfo(dev_info_t *devi, int cmd, void *arg, void *tg_cookie)
{
	struct sd_lun *un;
	diskaddr_t	cap;
	uint32_t	lbasize;
	int		path_flag = (int)(uintptr_t)tg_cookie;
	int		ret = 0;

	un = ddi_get_soft_state(sd_state, ddi_get_instance(devi));
	if (un == NULL)
		return (ENXIO);

	switch (cmd) {
	case TG_GETPHYGEOM:
	case TG_GETVIRTGEOM:
	case TG_GETCAPACITY:
	case TG_GETBLOCKSIZE:
		mutex_enter(SD_MUTEX(un));

		if ((un->un_f_blockcount_is_valid == TRUE) &&
		    (un->un_f_tgt_blocksize_is_valid == TRUE)) {
			cap = un->un_blockcount;
			lbasize = un->un_tgt_blocksize;
			mutex_exit(SD_MUTEX(un));
		} else {
			sd_ssc_t	*ssc;
			mutex_exit(SD_MUTEX(un));
			ssc = sd_ssc_init(un);
			ret = sd_send_scsi_READ_CAPACITY(ssc, (uint64_t *)&cap,
			    &lbasize, path_flag);
			if (ret != 0) {
				if (ret == EIO)
					sd_ssc_assessment(ssc,
					    SD_FMT_STATUS_CHECK);
				else
					sd_ssc_assessment(ssc,
					    SD_FMT_IGNORE);
				sd_ssc_fini(ssc);
				return (ret);
			}
			sd_ssc_fini(ssc);
			mutex_enter(SD_MUTEX(un));
			sd_update_block_info(un, lbasize, cap);
			if ((un->un_f_blockcount_is_valid == FALSE) ||
			    (un->un_f_tgt_blocksize_is_valid == FALSE)) {
				mutex_exit(SD_MUTEX(un));
				return (EIO);
			}
			mutex_exit(SD_MUTEX(un));
		}

		if (cmd == TG_GETCAPACITY) {
			*(diskaddr_t *)arg = cap;
			return (0);
		}

		if (cmd == TG_GETBLOCKSIZE) {
			*(uint32_t *)arg = lbasize;
			return (0);
		}

		if (cmd == TG_GETPHYGEOM)
			ret = sd_get_physical_geometry(un, (cmlb_geom_t *)arg,
			    cap, lbasize, path_flag);
		else
			/* TG_GETVIRTGEOM */
			ret = sd_get_virtual_geometry(un,
			    (cmlb_geom_t *)arg, cap, lbasize);

		return (ret);

	case TG_GETATTR:
		mutex_enter(SD_MUTEX(un));
		((tg_attribute_t *)arg)->media_is_writable =
		    un->un_f_mmc_writable_media;
		mutex_exit(SD_MUTEX(un));
		return (0);
	default:
		return (ENOTTY);
	}
}

/*
 * Function: sd_ssc_ereport_post
 *
 * Description: Called when the sd driver needs to post an ereport.
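 *
 *		Summary of the ereport classes posted (derived from the
 *		cases in this function):
 *
 *		  cmd.disk.recovered	- drv_assess == SD_FM_DRV_RECOVERY
 *		  cmd.disk.dev.uderr	- undecodable pkt-reason, status,
 *					  sense or data
 *		  cmd.disk.tran		- pkt-reason != CMD_CMPLT or a
 *					  transport abort (devid omitted)
 *		  cmd.disk.dev.rqs.merr	- completed, sense-key 0x3
 *		  cmd.disk.dev.rqs.derr	- completed, any other sense key
 *		  cmd.disk.dev.serr	- bad status byte, no sense data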
 *
 * Context: Kernel thread or interrupt context.
 */
static void
sd_ssc_ereport_post(sd_ssc_t *ssc, enum sd_driver_assessment drv_assess)
{
	int uscsi_path_instance = 0;
	uchar_t	uscsi_pkt_reason;
	uint32_t uscsi_pkt_state;
	uint32_t uscsi_pkt_statistics;
	uint64_t uscsi_ena;
	uchar_t op_code;
	uint8_t *sensep;
	union scsi_cdb *cdbp;
	uint_t cdblen = 0;
	uint_t senlen = 0;
	struct sd_lun *un;
	dev_info_t *dip;
	char *devid;
	int ssc_invalid_flags = SSC_FLAGS_INVALID_PKT_REASON |
	    SSC_FLAGS_INVALID_STATUS |
	    SSC_FLAGS_INVALID_SENSE |
	    SSC_FLAGS_INVALID_DATA;
	char assessment[16];

	ASSERT(ssc != NULL);
	ASSERT(ssc->ssc_uscsi_cmd != NULL);
	ASSERT(ssc->ssc_uscsi_info != NULL);

	un = ssc->ssc_un;
	ASSERT(un != NULL);

	dip = un->un_sd->sd_dev;

	/*
	 * Get the devid:
	 *	devid will only be passed to non-transport error reports.
	 */
	devid = DEVI(dip)->devi_devid_str;

	/*
	 * If we are syncing or dumping, the command will not be executed,
	 * so we bypass this situation.
	 */
	if (ddi_in_panic() || (un->un_state == SD_STATE_SUSPENDED) ||
	    (un->un_state == SD_STATE_DUMPING))
		return;

	uscsi_pkt_reason = ssc->ssc_uscsi_info->ui_pkt_reason;
	uscsi_path_instance = ssc->ssc_uscsi_cmd->uscsi_path_instance;
	uscsi_pkt_state = ssc->ssc_uscsi_info->ui_pkt_state;
	uscsi_pkt_statistics = ssc->ssc_uscsi_info->ui_pkt_statistics;
	uscsi_ena = ssc->ssc_uscsi_info->ui_ena;

	sensep = (uint8_t *)ssc->ssc_uscsi_cmd->uscsi_rqbuf;
	cdbp = (union scsi_cdb *)ssc->ssc_uscsi_cmd->uscsi_cdb;

	/* In rare cases, e.g. DOORLOCK, the cdb could be NULL. */
	if (cdbp == NULL) {
		scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
		    "sd_ssc_ereport_post meet empty cdb\n");
		return;
	}

	op_code = cdbp->scc_cmd;

	cdblen = (int)ssc->ssc_uscsi_cmd->uscsi_cdblen;
	senlen = (int)(ssc->ssc_uscsi_cmd->uscsi_rqlen -
	    ssc->ssc_uscsi_cmd->uscsi_rqresid);

	if (senlen > 0)
		ASSERT(sensep != NULL);

	/*
	 * Initialize the assessment string from drv_assess.
	 * SD_FM_DRV_FATAL will be mapped to "fail" or "fatal" depending
	 * on the sense-key returned.
	 */
	switch (drv_assess) {
	case SD_FM_DRV_RECOVERY:
		(void) sprintf(assessment, "%s", "recovered");
		break;
	case SD_FM_DRV_RETRY:
		(void) sprintf(assessment, "%s", "retry");
		break;
	case SD_FM_DRV_NOTICE:
		(void) sprintf(assessment, "%s", "info");
		break;
	case SD_FM_DRV_FATAL:
	default:
		(void) sprintf(assessment, "%s", "unknown");
	}
	/*
	 * If drv_assess == SD_FM_DRV_RECOVERY, this should be a recovered
	 * command, and we will post ereport.io.scsi.cmd.disk.recovered.
	 * driver-assessment will always be "recovered" here.
	 */
	if (drv_assess == SD_FM_DRV_RECOVERY) {
		scsi_fm_ereport_post(un->un_sd, uscsi_path_instance,
		    "cmd.disk.recovered", uscsi_ena, devid, DDI_NOSLEEP,
		    FM_VERSION, DATA_TYPE_UINT8, FM_EREPORT_VERS0,
		    "driver-assessment", DATA_TYPE_STRING, assessment,
		    "op-code", DATA_TYPE_UINT8, op_code,
		    "cdb", DATA_TYPE_UINT8_ARRAY,
		    cdblen, ssc->ssc_uscsi_cmd->uscsi_cdb,
		    "pkt-reason", DATA_TYPE_UINT8, uscsi_pkt_reason,
		    "pkt-state", DATA_TYPE_UINT32, uscsi_pkt_state,
		    "pkt-stats", DATA_TYPE_UINT32, uscsi_pkt_statistics,
		    NULL);
		return;
	}

	/*
	 * If there is unexpected/undecodable data, we should post
	 * ereport.io.scsi.cmd.disk.dev.uderr.
	 * driver-assessment will be set based on the parameter drv_assess.
	 * SSC_FLAGS_INVALID_SENSE - invalid sense data sent back.
	 * SSC_FLAGS_INVALID_PKT_REASON - invalid pkt-reason encountered.
	 * SSC_FLAGS_INVALID_STATUS - invalid stat-code encountered.
	 * SSC_FLAGS_INVALID_DATA - invalid data sent back.
	 */
	if (ssc->ssc_flags & ssc_invalid_flags) {
		if (ssc->ssc_flags & SSC_FLAGS_INVALID_SENSE) {
			scsi_fm_ereport_post(un->un_sd, uscsi_path_instance,
			    "cmd.disk.dev.uderr", uscsi_ena, devid,
			    DDI_NOSLEEP,
			    FM_VERSION, DATA_TYPE_UINT8, FM_EREPORT_VERS0,
			    "driver-assessment", DATA_TYPE_STRING,
			    drv_assess == SD_FM_DRV_FATAL ?
			    "fail" : assessment,
			    "op-code", DATA_TYPE_UINT8, op_code,
			    "cdb", DATA_TYPE_UINT8_ARRAY,
			    cdblen, ssc->ssc_uscsi_cmd->uscsi_cdb,
			    "pkt-reason", DATA_TYPE_UINT8, uscsi_pkt_reason,
			    "pkt-state", DATA_TYPE_UINT32, uscsi_pkt_state,
			    "pkt-stats", DATA_TYPE_UINT32,
			    uscsi_pkt_statistics,
			    "stat-code", DATA_TYPE_UINT8,
			    ssc->ssc_uscsi_cmd->uscsi_status,
			    "un-decode-info", DATA_TYPE_STRING,
			    ssc->ssc_info,
			    "un-decode-value", DATA_TYPE_UINT8_ARRAY,
			    senlen, sensep,
			    NULL);
		} else {
			/*
			 * For other types of invalid data, the
			 * un-decode-value field would be empty because the
			 * undecodable content could be seen from the upper
			 * level payload or inside un-decode-info.
			 */
			scsi_fm_ereport_post(un->un_sd, uscsi_path_instance,
			    "cmd.disk.dev.uderr", uscsi_ena, devid,
			    DDI_NOSLEEP,
			    FM_VERSION, DATA_TYPE_UINT8, FM_EREPORT_VERS0,
			    "driver-assessment", DATA_TYPE_STRING,
			    drv_assess == SD_FM_DRV_FATAL ?
			    "fail" : assessment,
			    "op-code", DATA_TYPE_UINT8, op_code,
			    "cdb", DATA_TYPE_UINT8_ARRAY,
			    cdblen, ssc->ssc_uscsi_cmd->uscsi_cdb,
			    "pkt-reason", DATA_TYPE_UINT8, uscsi_pkt_reason,
			    "pkt-state", DATA_TYPE_UINT32, uscsi_pkt_state,
			    "pkt-stats", DATA_TYPE_UINT32,
			    uscsi_pkt_statistics,
			    "stat-code", DATA_TYPE_UINT8,
			    ssc->ssc_uscsi_cmd->uscsi_status,
			    "un-decode-info", DATA_TYPE_STRING,
			    ssc->ssc_info,
			    "un-decode-value", DATA_TYPE_UINT8_ARRAY,
			    0, NULL,
			    NULL);
		}
		ssc->ssc_flags &= ~ssc_invalid_flags;
		return;
	}

	if (uscsi_pkt_reason != CMD_CMPLT ||
	    (ssc->ssc_flags & SSC_FLAGS_TRAN_ABORT)) {
		/*
		 * pkt-reason != CMD_CMPLT, or SSC_FLAGS_TRAN_ABORT was
		 * set inside sd_start_cmds due to errors (bad packet or
		 * fatal transport error). We take it as a transport
		 * error, so we post ereport.io.scsi.cmd.disk.tran.
		 * driver-assessment will be set based on drv_assess.
		 * We will set devid to NULL because it is a transport
		 * error.
		 */
		if (ssc->ssc_flags & SSC_FLAGS_TRAN_ABORT)
			ssc->ssc_flags &= ~SSC_FLAGS_TRAN_ABORT;

		scsi_fm_ereport_post(un->un_sd, uscsi_path_instance,
		    "cmd.disk.tran", uscsi_ena, NULL, DDI_NOSLEEP, FM_VERSION,
		    DATA_TYPE_UINT8, FM_EREPORT_VERS0,
		    "driver-assessment", DATA_TYPE_STRING,
		    drv_assess == SD_FM_DRV_FATAL ? "fail" : assessment,
		    "op-code", DATA_TYPE_UINT8, op_code,
		    "cdb", DATA_TYPE_UINT8_ARRAY,
		    cdblen, ssc->ssc_uscsi_cmd->uscsi_cdb,
		    "pkt-reason", DATA_TYPE_UINT8, uscsi_pkt_reason,
		    "pkt-state", DATA_TYPE_UINT8, uscsi_pkt_state,
		    "pkt-stats", DATA_TYPE_UINT32, uscsi_pkt_statistics,
		    NULL);
	} else {
		/*
		 * If we got here, we have a completed command, and we need
		 * to further investigate the sense data to see what kind
		 * of ereport we should post.
		 * Post ereport.io.scsi.cmd.disk.dev.rqs.merr
		 * if sense-key == 0x3.
		 * Post ereport.io.scsi.cmd.disk.dev.rqs.derr otherwise.
		 * driver-assessment will be set based on the parameter
		 * drv_assess.
		 */
		if (senlen > 0) {
			/*
			 * Here we have sense data available.
			 */
			uint8_t sense_key;
			sense_key = scsi_sense_key(sensep);
			if (sense_key == 0x3) {
				/*
				 * sense-key == 0x3 (medium error);
				 * driver-assessment should be "fatal" if
				 * drv_assess is SD_FM_DRV_FATAL.
				 */
				scsi_fm_ereport_post(un->un_sd,
				    uscsi_path_instance,
				    "cmd.disk.dev.rqs.merr",
				    uscsi_ena, devid, DDI_NOSLEEP, FM_VERSION,
				    DATA_TYPE_UINT8, FM_EREPORT_VERS0,
				    "driver-assessment",
				    DATA_TYPE_STRING,
				    drv_assess == SD_FM_DRV_FATAL ?
				    "fatal" : assessment,
				    "op-code",
				    DATA_TYPE_UINT8, op_code,
				    "cdb",
				    DATA_TYPE_UINT8_ARRAY, cdblen,
				    ssc->ssc_uscsi_cmd->uscsi_cdb,
				    "pkt-reason",
				    DATA_TYPE_UINT8, uscsi_pkt_reason,
				    "pkt-state",
				    DATA_TYPE_UINT8, uscsi_pkt_state,
				    "pkt-stats",
				    DATA_TYPE_UINT32,
				    uscsi_pkt_statistics,
				    "stat-code",
				    DATA_TYPE_UINT8,
				    ssc->ssc_uscsi_cmd->uscsi_status,
				    "key",
				    DATA_TYPE_UINT8,
				    scsi_sense_key(sensep),
				    "asc",
				    DATA_TYPE_UINT8,
				    scsi_sense_asc(sensep),
				    "ascq",
				    DATA_TYPE_UINT8,
				    scsi_sense_ascq(sensep),
				    "sense-data",
				    DATA_TYPE_UINT8_ARRAY,
				    senlen, sensep,
				    "lba",
				    DATA_TYPE_UINT64,
				    ssc->ssc_uscsi_info->ui_lba,
				    NULL);
			} else {
				/*
				 * If sense-key == 0x4 (hardware
				 * error), driver-assessment should
				 * be "fatal" if drv_assess is
				 * SD_FM_DRV_FATAL.
				 */
				scsi_fm_ereport_post(un->un_sd,
				    uscsi_path_instance,
				    "cmd.disk.dev.rqs.derr",
				    uscsi_ena, devid, DDI_NOSLEEP,
				    FM_VERSION,
				    DATA_TYPE_UINT8, FM_EREPORT_VERS0,
				    "driver-assessment",
				    DATA_TYPE_STRING,
				    drv_assess == SD_FM_DRV_FATAL ?
				    (sense_key == 0x4 ?
				    "fatal" : "fail") : assessment,
				    "op-code",
				    DATA_TYPE_UINT8, op_code,
				    "cdb",
				    DATA_TYPE_UINT8_ARRAY, cdblen,
				    ssc->ssc_uscsi_cmd->uscsi_cdb,
				    "pkt-reason",
				    DATA_TYPE_UINT8, uscsi_pkt_reason,
				    "pkt-state",
				    DATA_TYPE_UINT8, uscsi_pkt_state,
				    "pkt-stats",
				    DATA_TYPE_UINT32,
				    uscsi_pkt_statistics,
				    "stat-code",
				    DATA_TYPE_UINT8,
				    ssc->ssc_uscsi_cmd->uscsi_status,
				    "key",
				    DATA_TYPE_UINT8,
				    scsi_sense_key(sensep),
				    "asc",
				    DATA_TYPE_UINT8,
				    scsi_sense_asc(sensep),
				    "ascq",
				    DATA_TYPE_UINT8,
				    scsi_sense_ascq(sensep),
				    "sense-data",
				    DATA_TYPE_UINT8_ARRAY,
				    senlen, sensep,
				    NULL);
			}
		} else {
			/*
			 * For stat_code == STATUS_GOOD, this is not a
			 * hardware error.
			 */
			if (ssc->ssc_uscsi_cmd->uscsi_status == STATUS_GOOD)
				return;

			/*
			 * Post ereport.io.scsi.cmd.disk.dev.serr if we got
			 * a stat-code but the sense data is unavailable.
			 * driver-assessment will be set based on the
			 * parameter drv_assess.
			 */
			scsi_fm_ereport_post(un->un_sd,
			    uscsi_path_instance, "cmd.disk.dev.serr",
			    uscsi_ena,
			    devid, DDI_NOSLEEP, FM_VERSION, DATA_TYPE_UINT8,
			    FM_EREPORT_VERS0,
			    "driver-assessment", DATA_TYPE_STRING,
			    drv_assess == SD_FM_DRV_FATAL ?
			    "fail" : assessment,
			    "op-code", DATA_TYPE_UINT8, op_code,
			    "cdb",
			    DATA_TYPE_UINT8_ARRAY,
			    cdblen, ssc->ssc_uscsi_cmd->uscsi_cdb,
			    "pkt-reason",
			    DATA_TYPE_UINT8, uscsi_pkt_reason,
			    "pkt-state",
			    DATA_TYPE_UINT8, uscsi_pkt_state,
			    "pkt-stats",
			    DATA_TYPE_UINT32, uscsi_pkt_statistics,
			    "stat-code",
			    DATA_TYPE_UINT8,
			    ssc->ssc_uscsi_cmd->uscsi_status,
			    NULL);
		}
	}
}
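
/*
 * How the two routines around this point fit together (a sketch, not a
 * verbatim excerpt of any caller): command-completion paths first capture
 * the packet state with sd_ssc_extract_info() below while holding
 * SD_MUTEX, and the ereport assembled from that state is then posted
 * through the ssc assessment machinery, roughly:
 *
 *	mutex_enter(SD_MUTEX(un));
 *	sd_ssc_extract_info(ssc, un, pktp, bp, xp);
 *	mutex_exit(SD_MUTEX(un));
 *	...
 *	sd_ssc_assessment(ssc, SD_FMT_STATUS_CHECK);
 *
 * as seen with the sd_ssc_assessment() calls in sd_tg_rdwr() above.
 */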

/*
 * Function: sd_ssc_extract_info
 *
 * Description: Extract information available to help generate ereports.
 *
 * Context: Kernel thread or interrupt context.
 */
static void
sd_ssc_extract_info(sd_ssc_t *ssc, struct sd_lun *un, struct scsi_pkt *pktp,
    struct buf *bp, struct sd_xbuf *xp)
{
	size_t senlen = 0;
	union scsi_cdb *cdbp;
	int path_instance;
	/*
	 * Need the scsi_cdb_size array to determine the cdb length.
	 */
	extern uchar_t scsi_cdb_size[];

	ASSERT(un != NULL);
	ASSERT(pktp != NULL);
	ASSERT(bp != NULL);
	ASSERT(xp != NULL);
	ASSERT(ssc != NULL);
	ASSERT(mutex_owned(SD_MUTEX(un)));

	/*
	 * Transfer the cdb buffer pointer here.
	 */
	cdbp = (union scsi_cdb *)pktp->pkt_cdbp;

	ssc->ssc_uscsi_cmd->uscsi_cdblen = scsi_cdb_size[GETGROUP(cdbp)];
	ssc->ssc_uscsi_cmd->uscsi_cdb = (caddr_t)cdbp;

	/*
	 * Transfer the sense data buffer pointer if sense data is available;
	 * calculate the sense data length first. (A worked example follows
	 * this function.)
	 */
	if ((xp->xb_sense_state & STATE_XARQ_DONE) ||
	    (xp->xb_sense_state & STATE_ARQ_DONE)) {
		/*
		 * For the arq case, we will enter here.
		 */
		if (xp->xb_sense_state & STATE_XARQ_DONE) {
			senlen = MAX_SENSE_LENGTH - xp->xb_sense_resid;
		} else {
			senlen = SENSE_LENGTH;
		}
	} else {
		/*
		 * For the non-arq case, we will enter this branch.
		 */
		if (SD_GET_PKT_STATUS(pktp) == STATUS_CHECK &&
		    (xp->xb_sense_state & STATE_XFERRED_DATA)) {
			senlen = SENSE_LENGTH - xp->xb_sense_resid;
		}

	}

	ssc->ssc_uscsi_cmd->uscsi_rqlen = (senlen & 0xff);
	ssc->ssc_uscsi_cmd->uscsi_rqresid = 0;
	ssc->ssc_uscsi_cmd->uscsi_rqbuf = (caddr_t)xp->xb_sense_data;

	ssc->ssc_uscsi_cmd->uscsi_status = ((*(pktp)->pkt_scbp) & STATUS_MASK);

	/*
	 * Only transfer path_instance when the scsi_pkt was properly
	 * allocated.
	 */
	path_instance = pktp->pkt_path_instance;
	if (scsi_pkt_allocated_correctly(pktp) && path_instance)
		ssc->ssc_uscsi_cmd->uscsi_path_instance = path_instance;
	else
		ssc->ssc_uscsi_cmd->uscsi_path_instance = 0;

	/*
	 * Copy in the other fields we may need when posting an ereport.
	 */
	ssc->ssc_uscsi_info->ui_pkt_reason = pktp->pkt_reason;
	ssc->ssc_uscsi_info->ui_pkt_state = pktp->pkt_state;
	ssc->ssc_uscsi_info->ui_pkt_statistics = pktp->pkt_statistics;
	ssc->ssc_uscsi_info->ui_lba = (uint64_t)SD_GET_BLKNO(bp);

	/*
	 * For a partial read/write command, we will not create an ena,
	 * so that a successful command is not mistakenly recognized as
	 * recovered.
	 */
	if ((pktp->pkt_reason == CMD_CMPLT) &&
	    (ssc->ssc_uscsi_cmd->uscsi_status == STATUS_GOOD) &&
	    (senlen == 0)) {
		return;
	}

	/*
	 * To associate the ereports of a single command execution flow, we
	 * need a shared ena for a specific command.
	 */
	if (xp->xb_ena == 0)
		xp->xb_ena = fm_ena_generate(0, FM_ENA_FMT1);
	ssc->ssc_uscsi_info->ui_ena = xp->xb_ena;
}
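
/*
 * Worked example of the sense-length computation above (illustrative
 * numbers only): if MAX_SENSE_LENGTH were 255 and an XARQ completion
 * left xb_sense_resid = 200, then senlen = 255 - 200 = 55 valid sense
 * bytes; a plain ARQ completion always reports the fixed SENSE_LENGTH.
 * Note that uscsi_rqlen keeps only the low 8 bits of senlen
 * (senlen & 0xff).
 */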