/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

/*
 * SCSI disk target driver.
 */
#include <sys/scsi/scsi.h>
#include <sys/dkbad.h>
#include <sys/dklabel.h>
#include <sys/dkio.h>
#include <sys/fdio.h>
#include <sys/cdio.h>
#include <sys/mhd.h>
#include <sys/vtoc.h>
#include <sys/dktp/fdisk.h>
#include <sys/kstat.h>
#include <sys/vtrace.h>
#include <sys/note.h>
#include <sys/thread.h>
#include <sys/proc.h>
#include <sys/efi_partition.h>
#include <sys/var.h>
#include <sys/aio_req.h>

#ifdef __lock_lint
#define	_LP64
#define	__amd64
#endif

#if (defined(__fibre))
/* Note: is there a leadville version of the following? */
#include <sys/fc4/fcal_linkapp.h>
#endif
#include <sys/taskq.h>
#include <sys/uuid.h>
#include <sys/byteorder.h>
#include <sys/sdt.h>

#include "sd_xbuf.h"

#include <sys/scsi/targets/sddef.h>
#include <sys/cmlb.h>
#include <sys/sysevent/eventdefs.h>
#include <sys/sysevent/dev.h>

#include <sys/fm/protocol.h>

/*
 * Loadable module info.
 */
#if (defined(__fibre))
#define	SD_MODULE_NAME		"SCSI SSA/FCAL Disk Driver"
char _depends_on[] = "misc/scsi misc/cmlb drv/fcp";
#else
#define	SD_MODULE_NAME		"SCSI Disk Driver"
char _depends_on[] = "misc/scsi misc/cmlb";
#endif

/*
 * Define the interconnect type, to allow the driver to distinguish
 * between parallel SCSI (sd) and fibre channel (ssd) behaviors.
 *
 * This is really for backward compatibility. In the future, the driver
 * should actually check the "interconnect-type" property as reported by
 * the HBA; however at present this property is not defined by all HBAs,
 * so we will use this #define (1) to permit the driver to run in
 * backward-compatibility mode; and (2) to print a notification message
 * if an FC HBA does not support the "interconnect-type" property. The
 * behavior of the driver will be to assume parallel SCSI behaviors unless
 * the "interconnect-type" property is defined by the HBA **AND** has a
 * value of either INTERCONNECT_FIBRE, INTERCONNECT_SSA, or
 * INTERCONNECT_FABRIC, in which case the driver will assume Fibre
 * Channel behaviors (as per the old ssd). (Note that the
 * INTERCONNECT_1394 and INTERCONNECT_USB types are not supported and
 * will result in the driver assuming parallel SCSI behaviors.)
 *
 * (see common/sys/scsi/impl/services.h)
 *
 * Note: For ssd semantics, don't use INTERCONNECT_FABRIC as the default
 * since some FC HBAs may already support that, and there is some code in
 * the driver that already looks for it. Using INTERCONNECT_FABRIC as the
 * default would confuse that code, and besides things should work fine
 * anyways if the FC HBA already reports INTERCONNECT_FABRIC for the
 * "interconnect_type" property.
 */
#if (defined(__fibre))
#define	SD_DEFAULT_INTERCONNECT_TYPE	SD_INTERCONNECT_FIBRE
#else
#define	SD_DEFAULT_INTERCONNECT_TYPE	SD_INTERCONNECT_PARALLEL
#endif
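/*
 * Illustrative sketch only (the actual attach-time check lives elsewhere
 * in this file and differs in detail): the interconnect type is derived
 * from the HBA's "interconnect-type" capability when available, falling
 * back to the compile-time default above. The SD_INTERCONNECT_* values
 * are defined further below.
 *
 *	switch (scsi_ifgetcap(SD_ADDRESS(un), "interconnect-type", -1)) {
 *	case INTERCONNECT_FIBRE:
 *		un->un_interconnect_type = SD_INTERCONNECT_FIBRE;
 *		break;
 *	case INTERCONNECT_FABRIC:
 *		un->un_interconnect_type = SD_INTERCONNECT_FABRIC;
 *		break;
 *	case INTERCONNECT_SSA:
 *		un->un_interconnect_type = SD_INTERCONNECT_SSA;
 *		break;
 *	default:
 *		un->un_interconnect_type = SD_DEFAULT_INTERCONNECT_TYPE;
 *		break;
 *	}
 */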
/*
 * The name of the driver, established from the module name in _init.
 */
static char *sd_label = NULL;

/*
 * Driver name is unfortunately prefixed on some driver.conf properties.
 */
#if (defined(__fibre))
#define	sd_max_xfer_size	ssd_max_xfer_size
#define	sd_config_list		ssd_config_list
static char *sd_max_xfer_size = "ssd_max_xfer_size";
static char *sd_config_list = "ssd-config-list";
#else
static char *sd_max_xfer_size = "sd_max_xfer_size";
static char *sd_config_list = "sd-config-list";
#endif

/*
 * Driver global variables
 */

#if (defined(__fibre))
/*
 * These #defines are to avoid namespace collisions that occur because this
 * code is currently used to compile two separate driver modules: sd and ssd.
 * All global variables need to be treated this way (even if declared static)
 * in order to allow the debugger to resolve the names properly.
 * It is anticipated that in the near future the ssd module will be obsoleted,
 * at which time this namespace issue should go away.
 */
#define	sd_state			ssd_state
#define	sd_io_time			ssd_io_time
#define	sd_failfast_enable		ssd_failfast_enable
#define	sd_ua_retry_count		ssd_ua_retry_count
#define	sd_report_pfa			ssd_report_pfa
#define	sd_max_throttle			ssd_max_throttle
#define	sd_min_throttle			ssd_min_throttle
#define	sd_rot_delay			ssd_rot_delay

#define	sd_retry_on_reservation_conflict	\
					ssd_retry_on_reservation_conflict
#define	sd_reinstate_resv_delay		ssd_reinstate_resv_delay
#define	sd_resv_conflict_name		ssd_resv_conflict_name

#define	sd_component_mask		ssd_component_mask
#define	sd_level_mask			ssd_level_mask
#define	sd_debug_un			ssd_debug_un
#define	sd_error_level			ssd_error_level

#define	sd_xbuf_active_limit		ssd_xbuf_active_limit
#define	sd_xbuf_reserve_limit		ssd_xbuf_reserve_limit

#define	sd_tr				ssd_tr
#define	sd_reset_throttle_timeout	ssd_reset_throttle_timeout
#define	sd_qfull_throttle_timeout	ssd_qfull_throttle_timeout
#define	sd_qfull_throttle_enable	ssd_qfull_throttle_enable
#define	sd_check_media_time		ssd_check_media_time
#define	sd_wait_cmds_complete		ssd_wait_cmds_complete
#define	sd_label_mutex			ssd_label_mutex
#define	sd_detach_mutex			ssd_detach_mutex
#define	sd_log_buf			ssd_log_buf
#define	sd_log_mutex			ssd_log_mutex

#define	sd_disk_table			ssd_disk_table
#define	sd_disk_table_size		ssd_disk_table_size
#define	sd_sense_mutex			ssd_sense_mutex
#define	sd_cdbtab			ssd_cdbtab

#define	sd_cb_ops			ssd_cb_ops
#define	sd_ops				ssd_ops
#define	sd_additional_codes		ssd_additional_codes
#define	sd_tgops			ssd_tgops

#define	sd_minor_data			ssd_minor_data
#define	sd_minor_data_efi		ssd_minor_data_efi

#define	sd_tq				ssd_tq
#define	sd_wmr_tq			ssd_wmr_tq
#define	sd_taskq_name			ssd_taskq_name
#define	sd_wmr_taskq_name		ssd_wmr_taskq_name
#define	sd_taskq_minalloc		ssd_taskq_minalloc
#define	sd_taskq_maxalloc		ssd_taskq_maxalloc

#define	sd_dump_format_string		ssd_dump_format_string

#define	sd_iostart_chain		ssd_iostart_chain
#define	sd_iodone_chain			ssd_iodone_chain

#define	sd_pm_idletime			ssd_pm_idletime

#define	sd_force_pm_supported		ssd_force_pm_supported

#define	sd_dtype_optical_bind		ssd_dtype_optical_bind

#define	sd_ssc_init			ssd_ssc_init
#define	sd_ssc_send			ssd_ssc_send
#define	sd_ssc_fini			ssd_ssc_fini
#define	sd_ssc_assessment		ssd_ssc_assessment
#define	sd_ssc_post			ssd_ssc_post
#define	sd_ssc_print			ssd_ssc_print
#define	sd_ssc_ereport_post		ssd_ssc_ereport_post
#define	sd_ssc_set_info			ssd_ssc_set_info
#define	sd_ssc_extract_info		ssd_ssc_extract_info

#endif

#ifdef SDDEBUG
int sd_force_pm_supported = 0;
#endif /* SDDEBUG */

void *sd_state = NULL;
int sd_io_time = SD_IO_TIME;
int sd_failfast_enable = 1;
int sd_ua_retry_count = SD_UA_RETRY_COUNT;
int sd_report_pfa = 1;
int sd_max_throttle = SD_MAX_THROTTLE;
int sd_min_throttle = SD_MIN_THROTTLE;
int sd_rot_delay = 4; /* Default 4ms Rotation delay */
int sd_qfull_throttle_enable = TRUE;

int sd_retry_on_reservation_conflict = 1;
int sd_reinstate_resv_delay = SD_REINSTATE_RESV_DELAY;
_NOTE(SCHEME_PROTECTS_DATA("safe sharing", sd_reinstate_resv_delay))

static int sd_dtype_optical_bind = -1;

/* Note: the following is not a bug, it really is "sd_" and not "ssd_" */
static char *sd_resv_conflict_name = "sd_retry_on_reservation_conflict";

/*
 * Global data for debug logging. To enable debug printing, sd_component_mask
 * and sd_level_mask should be set to the desired bit patterns as outlined in
 * sddef.h.
 */
uint_t sd_component_mask = 0x0;
uint_t sd_level_mask = 0x0;
struct sd_lun *sd_debug_un = NULL;
uint_t sd_error_level = SCSI_ERR_RETRYABLE;

/* Note: these may go away in the future... */
static uint32_t sd_xbuf_active_limit = 512;
static uint32_t sd_xbuf_reserve_limit = 16;

static struct sd_resv_reclaim_request sd_tr = { NULL, NULL, NULL, 0, 0, 0 };

/*
 * Timer value used to reset the throttle after it has been reduced
 * (typically in response to TRAN_BUSY or STATUS_QFULL)
 */
static int sd_reset_throttle_timeout = SD_RESET_THROTTLE_TIMEOUT;
static int sd_qfull_throttle_timeout = SD_QFULL_THROTTLE_TIMEOUT;

/*
 * Interval value associated with the media change scsi watch.
 */
static int sd_check_media_time = 3000000;

/*
 * Wait value used for in progress operations during a DDI_SUSPEND
 */
static int sd_wait_cmds_complete = SD_WAIT_CMDS_COMPLETE;

/*
 * sd_label_mutex protects a static buffer used in the disk label
 * component of the driver
 */
static kmutex_t sd_label_mutex;

/*
 * sd_detach_mutex protects un_layer_count, un_detach_count, and
 * un_opens_in_progress in the sd_lun structure.
 */
static kmutex_t sd_detach_mutex;

_NOTE(MUTEX_PROTECTS_DATA(sd_detach_mutex,
	sd_lun::{un_layer_count un_detach_count un_opens_in_progress}))

/*
 * Global buffer and mutex for debug logging
 */
static char sd_log_buf[1024];
static kmutex_t sd_log_mutex;

/*
 * Structs and globals for recording attached lun information.
 * This maintains a chain. Each node in the chain represents a SCSI controller.
 * The structure records the number of luns attached to each target connected
 * with the controller.
 * For parallel scsi devices only.
 */
struct sd_scsi_hba_tgt_lun {
	struct sd_scsi_hba_tgt_lun	*next;
	dev_info_t			*pdip;
	int				nlun[NTARGETS_WIDE];
};

/*
 * Flag to indicate the lun is attached or detached
 */
#define	SD_SCSI_LUN_ATTACH	0
#define	SD_SCSI_LUN_DETACH	1

static kmutex_t sd_scsi_target_lun_mutex;
static struct sd_scsi_hba_tgt_lun *sd_scsi_target_lun_head = NULL;

_NOTE(MUTEX_PROTECTS_DATA(sd_scsi_target_lun_mutex,
	sd_scsi_hba_tgt_lun::next sd_scsi_hba_tgt_lun::pdip))

_NOTE(MUTEX_PROTECTS_DATA(sd_scsi_target_lun_mutex,
	sd_scsi_target_lun_head))

/*
 * "Smart" Probe Caching structs, globals, #defines, etc.
 * For parallel scsi and non-self-identifying devices only.
 */

/*
 * The following resources and routines are implemented to support
 * "smart" probing, which caches the scsi_probe() results in an array,
 * in order to help avoid long probe times.
 */
struct sd_scsi_probe_cache {
	struct sd_scsi_probe_cache	*next;
	dev_info_t			*pdip;
	int				cache[NTARGETS_WIDE];
};

static kmutex_t sd_scsi_probe_cache_mutex;
static struct sd_scsi_probe_cache *sd_scsi_probe_cache_head = NULL;

/*
 * Really we only need protection on the head of the linked list, but
 * better safe than sorry.
 */
_NOTE(MUTEX_PROTECTS_DATA(sd_scsi_probe_cache_mutex,
	sd_scsi_probe_cache::next sd_scsi_probe_cache::pdip))

_NOTE(MUTEX_PROTECTS_DATA(sd_scsi_probe_cache_mutex,
	sd_scsi_probe_cache_head))
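/*
 * The caching idea, in outline (a sketch only; sd_scsi_probe_with_cache(),
 * prototyped below, is the authoritative implementation and differs in
 * detail): each HBA node gets one cache entry, keyed by its dev_info
 * pointer, holding the last scsi_probe() result per target, so that a
 * repeated probe of the same target can be answered from the cache rather
 * than by touching the bus again.
 *
 *	mutex_enter(&sd_scsi_probe_cache_mutex);
 *	for (cp = sd_scsi_probe_cache_head; cp != NULL; cp = cp->next) {
 *		if (cp->pdip == pdip)
 *			break;			(found this HBA's entry)
 *	}
 *	...
 *	rval = cp->cache[tgt];			(reuse the cached result,
 *	...					 or scsi_probe() and store)
 *	mutex_exit(&sd_scsi_probe_cache_mutex);
 */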
/*
 * Vendor specific data name property declarations
 */

#if defined(__fibre) || defined(__i386) || defined(__amd64)

static sd_tunables seagate_properties = {
	SEAGATE_THROTTLE_VALUE,
	0,
	0,
	0,
	0,
	0,
	0,
	0,
	0
};


static sd_tunables fujitsu_properties = {
	FUJITSU_THROTTLE_VALUE,
	0,
	0,
	0,
	0,
	0,
	0,
	0,
	0
};

static sd_tunables ibm_properties = {
	IBM_THROTTLE_VALUE,
	0,
	0,
	0,
	0,
	0,
	0,
	0,
	0
};

static sd_tunables purple_properties = {
	PURPLE_THROTTLE_VALUE,
	0,
	0,
	PURPLE_BUSY_RETRIES,
	PURPLE_RESET_RETRY_COUNT,
	PURPLE_RESERVE_RELEASE_TIME,
	0,
	0,
	0
};

static sd_tunables sve_properties = {
	SVE_THROTTLE_VALUE,
	0,
	0,
	SVE_BUSY_RETRIES,
	SVE_RESET_RETRY_COUNT,
	SVE_RESERVE_RELEASE_TIME,
	SVE_MIN_THROTTLE_VALUE,
	SVE_DISKSORT_DISABLED_FLAG,
	0
};

static sd_tunables maserati_properties = {
	0,
	0,
	0,
	0,
	0,
	0,
	0,
	MASERATI_DISKSORT_DISABLED_FLAG,
	MASERATI_LUN_RESET_ENABLED_FLAG
};

static sd_tunables pirus_properties = {
	PIRUS_THROTTLE_VALUE,
	0,
	PIRUS_NRR_COUNT,
	PIRUS_BUSY_RETRIES,
	PIRUS_RESET_RETRY_COUNT,
	0,
	PIRUS_MIN_THROTTLE_VALUE,
	PIRUS_DISKSORT_DISABLED_FLAG,
	PIRUS_LUN_RESET_ENABLED_FLAG
};

#endif

#if (defined(__sparc) && !defined(__fibre)) || \
	(defined(__i386) || defined(__amd64))


static sd_tunables elite_properties = {
	ELITE_THROTTLE_VALUE,
	0,
	0,
	0,
	0,
	0,
	0,
	0,
	0
};

static sd_tunables st31200n_properties = {
	ST31200N_THROTTLE_VALUE,
	0,
	0,
	0,
	0,
	0,
	0,
	0,
	0
};

#endif /* Fibre or not */

static sd_tunables lsi_properties_scsi = {
	LSI_THROTTLE_VALUE,
	0,
	LSI_NOTREADY_RETRIES,
	0,
	0,
	0,
	0,
	0,
	0
};

static sd_tunables symbios_properties = {
	SYMBIOS_THROTTLE_VALUE,
	0,
	SYMBIOS_NOTREADY_RETRIES,
	0,
	0,
	0,
	0,
	0,
	0
};

static sd_tunables lsi_properties = {
	0,
	0,
	LSI_NOTREADY_RETRIES,
	0,
	0,
	0,
	0,
	0,
	0
};

static sd_tunables lsi_oem_properties = {
	0,
	0,
	LSI_OEM_NOTREADY_RETRIES,
	0,
	0,
	0,
	0,
	0,
	0,
	1
};



#if (defined(SD_PROP_TST))

#define	SD_TST_CTYPE_VAL	CTYPE_CDROM
#define	SD_TST_THROTTLE_VAL	16
#define	SD_TST_NOTREADY_VAL	12
#define	SD_TST_BUSY_VAL		60
#define	SD_TST_RST_RETRY_VAL	36
#define	SD_TST_RSV_REL_TIME	60

static sd_tunables tst_properties = {
	SD_TST_THROTTLE_VAL,
	SD_TST_CTYPE_VAL,
	SD_TST_NOTREADY_VAL,
	SD_TST_BUSY_VAL,
	SD_TST_RST_RETRY_VAL,
	SD_TST_RSV_REL_TIME,
	0,
	0,
	0
};
#endif

/* This is similar to the ANSI toupper implementation */
#define	SD_TOUPPER(C)	(((C) >= 'a' && (C) <= 'z') ? (C) - 'a' + 'A' : (C))

/*
 * Static Driver Configuration Table
 *
 * This is the table of disks which need throttle adjustment (or, perhaps
 * something else as defined by the flags at a future time.)  device_id
 * is a string consisting of concatenated vid (vendor), pid (product/model)
 * and revision strings as defined in the scsi_inquiry structure. Offsets of
 * the parts of the string are as defined by the sizes in the scsi_inquiry
 * structure. Device type is searched as far as the device_id string is
 * defined. Flags defines which values are to be set in the driver from the
 * properties list.
 *
 * Entries below which begin and end with a "*" are a special case.
 * These do not have a specific vendor, and the string which follows
 * can appear anywhere in the 16 byte PID portion of the inquiry data.
 *
 * Entries below which begin and end with a " " (blank) are a special
 * case. The comparison function will treat multiple consecutive blanks
 * as equivalent to a single blank. For example, this causes a
 * sd_disk_table entry of " NEC CDROM " to match a device's id string
 * of "NEC CDROM".
 *
 * Note: The MD21 controller type has been obsoleted.
 *	 ST318202F is a Legacy device
 *	 MAM3182FC, MAM3364FC, MAM3738FC do not appear to have ever been
 *	 made with an FC connection. The entries here are a legacy.
 */
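/*
 * A concrete reading of the rules above (illustrative only; the
 * sd_sdconf_id_match() and sd_blank_cmp() routines prototyped below are
 * the authoritative comparisons):
 *
 *	"SEAGATE ST34371FC"	vid "SEAGATE " plus pid "ST34371FC";
 *				compared only as far as the entry is defined
 *	"*CSM100_*"		no specific vid; matches if "CSM100_"
 *				appears anywhere in the 16-byte pid field
 *	" NEC CD-ROM DRIVE:260 "  blank-delimited; runs of consecutive
 *				blanks in the device's id string compare
 *				equal to a single blank
 */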
static sd_disk_config_t sd_disk_table[] = {
#if defined(__fibre) || defined(__i386) || defined(__amd64)
	{ "SEAGATE ST34371FC", SD_CONF_BSET_THROTTLE, &seagate_properties },
	{ "SEAGATE ST19171FC", SD_CONF_BSET_THROTTLE, &seagate_properties },
	{ "SEAGATE ST39102FC", SD_CONF_BSET_THROTTLE, &seagate_properties },
	{ "SEAGATE ST39103FC", SD_CONF_BSET_THROTTLE, &seagate_properties },
	{ "SEAGATE ST118273F", SD_CONF_BSET_THROTTLE, &seagate_properties },
	{ "SEAGATE ST318202F", SD_CONF_BSET_THROTTLE, &seagate_properties },
	{ "SEAGATE ST318203F", SD_CONF_BSET_THROTTLE, &seagate_properties },
	{ "SEAGATE ST136403F", SD_CONF_BSET_THROTTLE, &seagate_properties },
	{ "SEAGATE ST318304F", SD_CONF_BSET_THROTTLE, &seagate_properties },
	{ "SEAGATE ST336704F", SD_CONF_BSET_THROTTLE, &seagate_properties },
	{ "SEAGATE ST373405F", SD_CONF_BSET_THROTTLE, &seagate_properties },
	{ "SEAGATE ST336605F", SD_CONF_BSET_THROTTLE, &seagate_properties },
	{ "SEAGATE ST336752F", SD_CONF_BSET_THROTTLE, &seagate_properties },
	{ "SEAGATE ST318452F", SD_CONF_BSET_THROTTLE, &seagate_properties },
	{ "FUJITSU MAG3091F",  SD_CONF_BSET_THROTTLE, &fujitsu_properties },
	{ "FUJITSU MAG3182F",  SD_CONF_BSET_THROTTLE, &fujitsu_properties },
	{ "FUJITSU MAA3182F",  SD_CONF_BSET_THROTTLE, &fujitsu_properties },
	{ "FUJITSU MAF3364F",  SD_CONF_BSET_THROTTLE, &fujitsu_properties },
	{ "FUJITSU MAL3364F",  SD_CONF_BSET_THROTTLE, &fujitsu_properties },
	{ "FUJITSU MAL3738F",  SD_CONF_BSET_THROTTLE, &fujitsu_properties },
	{ "FUJITSU MAM3182FC", SD_CONF_BSET_THROTTLE, &fujitsu_properties },
	{ "FUJITSU MAM3364FC", SD_CONF_BSET_THROTTLE, &fujitsu_properties },
	{ "FUJITSU MAM3738FC", SD_CONF_BSET_THROTTLE, &fujitsu_properties },
	{ "IBM     DDYFT1835", SD_CONF_BSET_THROTTLE, &ibm_properties },
	{ "IBM     DDYFT3695", SD_CONF_BSET_THROTTLE, &ibm_properties },
	{ "IBM     IC35LF2D2", SD_CONF_BSET_THROTTLE, &ibm_properties },
	{ "IBM     IC35LF2PR", SD_CONF_BSET_THROTTLE, &ibm_properties },
	{ "IBM     1724-100",  SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "IBM     1726-2xx",  SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "IBM     1726-22x",  SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "IBM     1726-4xx",  SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "IBM     1726-42x",  SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "IBM     1726-3xx",  SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "IBM     3526",      SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "IBM     3542",      SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "IBM     3552",      SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "IBM     1722",      SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "IBM     1742",      SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "IBM     1815",      SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "IBM     FAStT",     SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "IBM     1814",      SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "IBM     1814-200",  SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "IBM     1818",      SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "DELL    MD3000",    SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "DELL    MD3000i",   SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "LSI     INF",       SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "ENGENIO INF",       SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "SGI     TP",        SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "SGI     IS",        SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "*CSM100_*",         SD_CONF_BSET_NRR_COUNT |
			SD_CONF_BSET_CACHE_IS_NV, &lsi_oem_properties },
	{ "*CSM200_*",         SD_CONF_BSET_NRR_COUNT |
			SD_CONF_BSET_CACHE_IS_NV, &lsi_oem_properties },
	{ "Fujitsu SX300",     SD_CONF_BSET_THROTTLE,  &lsi_oem_properties },
	{ "LSI",               SD_CONF_BSET_NRR_COUNT, &lsi_properties },
	{ "SUN     T3", SD_CONF_BSET_THROTTLE |
			SD_CONF_BSET_BSY_RETRY_COUNT|
			SD_CONF_BSET_RST_RETRIES|
			SD_CONF_BSET_RSV_REL_TIME,
		&purple_properties },
	{ "SUN     SESS01", SD_CONF_BSET_THROTTLE |
			SD_CONF_BSET_BSY_RETRY_COUNT|
			SD_CONF_BSET_RST_RETRIES|
			SD_CONF_BSET_RSV_REL_TIME|
			SD_CONF_BSET_MIN_THROTTLE|
			SD_CONF_BSET_DISKSORT_DISABLED,
		&sve_properties },
	{ "SUN     T4", SD_CONF_BSET_THROTTLE |
			SD_CONF_BSET_BSY_RETRY_COUNT|
			SD_CONF_BSET_RST_RETRIES|
			SD_CONF_BSET_RSV_REL_TIME,
		&purple_properties },
	{ "SUN     SVE01", SD_CONF_BSET_DISKSORT_DISABLED |
			SD_CONF_BSET_LUN_RESET_ENABLED,
		&maserati_properties },
	{ "SUN     SE6920", SD_CONF_BSET_THROTTLE |
			SD_CONF_BSET_NRR_COUNT|
			SD_CONF_BSET_BSY_RETRY_COUNT|
			SD_CONF_BSET_RST_RETRIES|
			SD_CONF_BSET_MIN_THROTTLE|
			SD_CONF_BSET_DISKSORT_DISABLED|
			SD_CONF_BSET_LUN_RESET_ENABLED,
		&pirus_properties },
	{ "SUN     SE6940", SD_CONF_BSET_THROTTLE |
			SD_CONF_BSET_NRR_COUNT|
			SD_CONF_BSET_BSY_RETRY_COUNT|
			SD_CONF_BSET_RST_RETRIES|
			SD_CONF_BSET_MIN_THROTTLE|
			SD_CONF_BSET_DISKSORT_DISABLED|
			SD_CONF_BSET_LUN_RESET_ENABLED,
		&pirus_properties },
	{ "SUN     StorageTek 6920", SD_CONF_BSET_THROTTLE |
			SD_CONF_BSET_NRR_COUNT|
			SD_CONF_BSET_BSY_RETRY_COUNT|
			SD_CONF_BSET_RST_RETRIES|
			SD_CONF_BSET_MIN_THROTTLE|
			SD_CONF_BSET_DISKSORT_DISABLED|
			SD_CONF_BSET_LUN_RESET_ENABLED,
		&pirus_properties },
	{ "SUN     StorageTek 6940", SD_CONF_BSET_THROTTLE |
			SD_CONF_BSET_NRR_COUNT|
			SD_CONF_BSET_BSY_RETRY_COUNT|
			SD_CONF_BSET_RST_RETRIES|
			SD_CONF_BSET_MIN_THROTTLE|
			SD_CONF_BSET_DISKSORT_DISABLED|
			SD_CONF_BSET_LUN_RESET_ENABLED,
		&pirus_properties },
	{ "SUN     PSX1000", SD_CONF_BSET_THROTTLE |
			SD_CONF_BSET_NRR_COUNT|
			SD_CONF_BSET_BSY_RETRY_COUNT|
			SD_CONF_BSET_RST_RETRIES|
			SD_CONF_BSET_MIN_THROTTLE|
			SD_CONF_BSET_DISKSORT_DISABLED|
			SD_CONF_BSET_LUN_RESET_ENABLED,
		&pirus_properties },
	{ "SUN     SE6330", SD_CONF_BSET_THROTTLE |
			SD_CONF_BSET_NRR_COUNT|
			SD_CONF_BSET_BSY_RETRY_COUNT|
			SD_CONF_BSET_RST_RETRIES|
			SD_CONF_BSET_MIN_THROTTLE|
			SD_CONF_BSET_DISKSORT_DISABLED|
			SD_CONF_BSET_LUN_RESET_ENABLED,
		&pirus_properties },
	{ "SUN     STK6580_6780", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "STK     OPENstorage", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "STK     OpenStorage", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "STK     BladeCtlr",   SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "STK     FLEXLINE",    SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "SYMBIOS", SD_CONF_BSET_NRR_COUNT, &symbios_properties },
#endif /* fibre or NON-sparc platforms */
#if ((defined(__sparc) && !defined(__fibre)) || \
	(defined(__i386) || defined(__amd64)))
	{ "SEAGATE ST42400N", SD_CONF_BSET_THROTTLE, &elite_properties },
	{ "SEAGATE ST31200N", SD_CONF_BSET_THROTTLE, &st31200n_properties },
	{ "SEAGATE ST41600N", SD_CONF_BSET_TUR_CHECK, NULL },
	{ "CONNER  CP30540",  SD_CONF_BSET_NOCACHE,  NULL },
	{ "*SUN0104*", SD_CONF_BSET_FAB_DEVID, NULL },
	{ "*SUN0207*", SD_CONF_BSET_FAB_DEVID, NULL },
	{ "*SUN0327*", SD_CONF_BSET_FAB_DEVID, NULL },
	{ "*SUN0340*", SD_CONF_BSET_FAB_DEVID, NULL },
	{ "*SUN0424*", SD_CONF_BSET_FAB_DEVID, NULL },
	{ "*SUN0669*", SD_CONF_BSET_FAB_DEVID, NULL },
	{ "*SUN1.0G*", SD_CONF_BSET_FAB_DEVID, NULL },
	{ "SYMBIOS INF-01-00 ", SD_CONF_BSET_FAB_DEVID, NULL },
	{ "SYMBIOS", SD_CONF_BSET_THROTTLE|SD_CONF_BSET_NRR_COUNT,
		&symbios_properties },
	{ "LSI", SD_CONF_BSET_THROTTLE | SD_CONF_BSET_NRR_COUNT,
		&lsi_properties_scsi },
#if defined(__i386) || defined(__amd64)
	{ " NEC CD-ROM DRIVE:260 ", (SD_CONF_BSET_PLAYMSF_BCD
				| SD_CONF_BSET_READSUB_BCD
				| SD_CONF_BSET_READ_TOC_ADDR_BCD
				| SD_CONF_BSET_NO_READ_HEADER
				| SD_CONF_BSET_READ_CD_XD4), NULL },

	{ " NEC CD-ROM DRIVE:270 ", (SD_CONF_BSET_PLAYMSF_BCD
				| SD_CONF_BSET_READSUB_BCD
				| SD_CONF_BSET_READ_TOC_ADDR_BCD
				| SD_CONF_BSET_NO_READ_HEADER
				| SD_CONF_BSET_READ_CD_XD4), NULL },
#endif /* __i386 || __amd64 */
#endif /* sparc NON-fibre or NON-sparc platforms */

#if (defined(SD_PROP_TST))
	{ "VENDOR  PRODUCT ", (SD_CONF_BSET_THROTTLE
				| SD_CONF_BSET_CTYPE
				| SD_CONF_BSET_NRR_COUNT
				| SD_CONF_BSET_FAB_DEVID
				| SD_CONF_BSET_NOCACHE
				| SD_CONF_BSET_BSY_RETRY_COUNT
				| SD_CONF_BSET_PLAYMSF_BCD
				| SD_CONF_BSET_READSUB_BCD
				| SD_CONF_BSET_READ_TOC_TRK_BCD
				| SD_CONF_BSET_READ_TOC_ADDR_BCD
				| SD_CONF_BSET_NO_READ_HEADER
				| SD_CONF_BSET_READ_CD_XD4
				| SD_CONF_BSET_RST_RETRIES
				| SD_CONF_BSET_RSV_REL_TIME
				| SD_CONF_BSET_TUR_CHECK), &tst_properties},
#endif
};

static const int sd_disk_table_size =
	sizeof (sd_disk_table) / sizeof (sd_disk_config_t);



#define	SD_INTERCONNECT_PARALLEL	0
#define	SD_INTERCONNECT_FABRIC		1
#define	SD_INTERCONNECT_FIBRE		2
#define	SD_INTERCONNECT_SSA		3
#define	SD_INTERCONNECT_SATA		4
#define	SD_IS_PARALLEL_SCSI(un)		\
	((un)->un_interconnect_type == SD_INTERCONNECT_PARALLEL)
#define	SD_IS_SERIAL(un)		\
	((un)->un_interconnect_type == SD_INTERCONNECT_SATA)

/*
 * Definitions used by device id registration routines
 */
#define	VPD_HEAD_OFFSET		3	/* size of head for vpd page */
#define	VPD_PAGE_LENGTH		3	/* offset for page length data */
#define	VPD_MODE_PAGE		1	/* offset into vpd pg for "page code" */

static kmutex_t sd_sense_mutex = {0};

/*
 * Macros for updates of the driver state
 */
#define	New_state(un, s)	\
	(un)->un_last_state = (un)->un_state, (un)->un_state = (s)
#define	Restore_state(un)	\
	{ uchar_t tmp = (un)->un_last_state; New_state((un), tmp); }
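/*
 * Usage sketch (SD_STATE_RWAIT is one of the un_state values from sddef.h):
 * to park a unit in the resource-wait state and later return to whatever
 * state preceded it:
 *
 *	New_state(un, SD_STATE_RWAIT);	(un_last_state keeps the old state)
 *	...
 *	Restore_state(un);		(swaps un_state and un_last_state)
 *
 * Only one level of history is kept, so nested New_state() calls overwrite
 * un_last_state.
 */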
static struct sd_cdbinfo sd_cdbtab[] = {
	{ CDB_GROUP0, 0x00,	   0x1FFFFF,   0xFF,	    },
	{ CDB_GROUP1, SCMD_GROUP1, 0xFFFFFFFF, 0xFFFF,	    },
	{ CDB_GROUP5, SCMD_GROUP5, 0xFFFFFFFF, 0xFFFFFFFF, },
	{ CDB_GROUP4, SCMD_GROUP4, 0xFFFFFFFFFFFFFFFF, 0xFFFFFFFF, },
};

/*
 * Specifies the number of seconds that must have elapsed since the last
 * command completed for a device to be declared idle to the PM framework.
 */
static int sd_pm_idletime = 1;

/*
 * Internal function prototypes
 */

#if (defined(__fibre))
/*
 * These #defines are to avoid namespace collisions that occur because this
 * code is currently used to compile two separate driver modules: sd and ssd.
 * All function names need to be treated this way (even if declared static)
 * in order to allow the debugger to resolve the names properly.
 * It is anticipated that in the near future the ssd module will be obsoleted,
 * at which time this ugliness should go away.
 */
#define	sd_log_trace			ssd_log_trace
#define	sd_log_info			ssd_log_info
#define	sd_log_err			ssd_log_err
#define	sdprobe				ssdprobe
#define	sdinfo				ssdinfo
#define	sd_prop_op			ssd_prop_op
#define	sd_scsi_probe_cache_init	ssd_scsi_probe_cache_init
#define	sd_scsi_probe_cache_fini	ssd_scsi_probe_cache_fini
#define	sd_scsi_clear_probe_cache	ssd_scsi_clear_probe_cache
#define	sd_scsi_probe_with_cache	ssd_scsi_probe_with_cache
#define	sd_scsi_target_lun_init		ssd_scsi_target_lun_init
#define	sd_scsi_target_lun_fini		ssd_scsi_target_lun_fini
#define	sd_scsi_get_target_lun_count	ssd_scsi_get_target_lun_count
#define	sd_scsi_update_lun_on_target	ssd_scsi_update_lun_on_target
#define	sd_spin_up_unit			ssd_spin_up_unit
#define	sd_enable_descr_sense		ssd_enable_descr_sense
#define	sd_reenable_dsense_task		ssd_reenable_dsense_task
#define	sd_set_mmc_caps			ssd_set_mmc_caps
#define	sd_read_unit_properties		ssd_read_unit_properties
#define	sd_process_sdconf_file		ssd_process_sdconf_file
#define	sd_process_sdconf_table		ssd_process_sdconf_table
#define	sd_sdconf_id_match		ssd_sdconf_id_match
#define	sd_blank_cmp			ssd_blank_cmp
#define	sd_chk_vers1_data		ssd_chk_vers1_data
#define	sd_set_vers1_properties		ssd_set_vers1_properties

#define	sd_get_physical_geometry	ssd_get_physical_geometry
#define	sd_get_virtual_geometry		ssd_get_virtual_geometry
#define	sd_update_block_info		ssd_update_block_info
#define	sd_register_devid		ssd_register_devid
#define	sd_get_devid			ssd_get_devid
#define	sd_create_devid			ssd_create_devid
#define	sd_write_deviceid		ssd_write_deviceid
#define	sd_check_vpd_page_support	ssd_check_vpd_page_support
#define	sd_setup_pm			ssd_setup_pm
#define	sd_create_pm_components		ssd_create_pm_components
#define	sd_ddi_suspend			ssd_ddi_suspend
#define	sd_ddi_pm_suspend		ssd_ddi_pm_suspend
#define	sd_ddi_resume			ssd_ddi_resume
#define	sd_ddi_pm_resume		ssd_ddi_pm_resume
#define	sdpower				ssdpower
#define	sdattach			ssdattach
#define	sddetach			ssddetach
#define	sd_unit_attach			ssd_unit_attach
#define	sd_unit_detach			ssd_unit_detach
#define	sd_set_unit_attributes		ssd_set_unit_attributes
#define	sd_create_errstats		ssd_create_errstats
#define	sd_set_errstats			ssd_set_errstats
#define	sd_set_pstats			ssd_set_pstats
#define	sddump				ssddump
#define	sd_scsi_poll			ssd_scsi_poll
#define	sd_send_polled_RQS		ssd_send_polled_RQS
#define	sd_ddi_scsi_poll		ssd_ddi_scsi_poll
#define	sd_init_event_callbacks		ssd_init_event_callbacks
#define	sd_event_callback		ssd_event_callback
#define	sd_cache_control		ssd_cache_control
#define	sd_get_write_cache_enabled	ssd_get_write_cache_enabled
#define	sd_get_nv_sup			ssd_get_nv_sup
#define	sd_make_device			ssd_make_device
#define	sdopen				ssdopen
#define	sdclose				ssdclose
#define	sd_ready_and_valid		ssd_ready_and_valid
#define	sdmin				ssdmin
#define	sdread				ssdread
#define	sdwrite				ssdwrite
#define	sdaread				ssdaread
#define	sdawrite			ssdawrite
#define	sdstrategy			ssdstrategy
#define	sdioctl				ssdioctl
#define	sd_mapblockaddr_iostart		ssd_mapblockaddr_iostart
#define	sd_mapblocksize_iostart		ssd_mapblocksize_iostart
#define	sd_checksum_iostart		ssd_checksum_iostart
#define	sd_checksum_uscsi_iostart	ssd_checksum_uscsi_iostart
#define	sd_pm_iostart			ssd_pm_iostart
#define	sd_core_iostart			ssd_core_iostart
#define	sd_mapblockaddr_iodone		ssd_mapblockaddr_iodone
#define	sd_mapblocksize_iodone		ssd_mapblocksize_iodone
#define	sd_checksum_iodone		ssd_checksum_iodone
#define	sd_checksum_uscsi_iodone	ssd_checksum_uscsi_iodone
#define	sd_pm_iodone			ssd_pm_iodone
#define	sd_initpkt_for_buf		ssd_initpkt_for_buf
#define	sd_destroypkt_for_buf		ssd_destroypkt_for_buf
#define	sd_setup_rw_pkt			ssd_setup_rw_pkt
#define	sd_setup_next_rw_pkt		ssd_setup_next_rw_pkt
#define	sd_buf_iodone			ssd_buf_iodone
#define	sd_uscsi_strategy		ssd_uscsi_strategy
#define	sd_initpkt_for_uscsi		ssd_initpkt_for_uscsi
#define	sd_destroypkt_for_uscsi		ssd_destroypkt_for_uscsi
#define	sd_uscsi_iodone			ssd_uscsi_iodone
#define	sd_xbuf_strategy		ssd_xbuf_strategy
#define	sd_xbuf_init			ssd_xbuf_init
#define	sd_pm_entry			ssd_pm_entry
#define	sd_pm_exit			ssd_pm_exit

#define	sd_pm_idletimeout_handler	ssd_pm_idletimeout_handler
#define	sd_pm_timeout_handler		ssd_pm_timeout_handler

#define	sd_add_buf_to_waitq		ssd_add_buf_to_waitq
#define	sdintr				ssdintr
#define	sd_start_cmds			ssd_start_cmds
#define	sd_send_scsi_cmd		ssd_send_scsi_cmd
#define	sd_bioclone_alloc		ssd_bioclone_alloc
#define	sd_bioclone_free		ssd_bioclone_free
#define	sd_shadow_buf_alloc		ssd_shadow_buf_alloc
#define	sd_shadow_buf_free		ssd_shadow_buf_free
#define	sd_print_transport_rejected_message	\
					ssd_print_transport_rejected_message
#define	sd_retry_command		ssd_retry_command
#define	sd_set_retry_bp			ssd_set_retry_bp
#define	sd_send_request_sense_command	ssd_send_request_sense_command
#define	sd_start_retry_command		ssd_start_retry_command
#define	sd_start_direct_priority_command	\
					ssd_start_direct_priority_command
#define	sd_return_failed_command	ssd_return_failed_command
#define	sd_return_failed_command_no_restart	\
					ssd_return_failed_command_no_restart
#define	sd_return_command		ssd_return_command
#define	sd_sync_with_callback		ssd_sync_with_callback
#define	sdrunout			ssdrunout
#define	sd_mark_rqs_busy		ssd_mark_rqs_busy
#define	sd_mark_rqs_idle		ssd_mark_rqs_idle
#define	sd_reduce_throttle		ssd_reduce_throttle
#define	sd_restore_throttle		ssd_restore_throttle
#define	sd_print_incomplete_msg		ssd_print_incomplete_msg
#define	sd_init_cdb_limits		ssd_init_cdb_limits
#define	sd_pkt_status_good		ssd_pkt_status_good
#define	sd_pkt_status_check_condition	ssd_pkt_status_check_condition
#define	sd_pkt_status_busy		ssd_pkt_status_busy
#define	sd_pkt_status_reservation_conflict	\
					ssd_pkt_status_reservation_conflict
#define	sd_pkt_status_qfull		ssd_pkt_status_qfull
#define	sd_handle_request_sense		ssd_handle_request_sense
#define	sd_handle_auto_request_sense	ssd_handle_auto_request_sense
#define	sd_print_sense_failed_msg	ssd_print_sense_failed_msg
#define	sd_validate_sense_data		ssd_validate_sense_data
#define	sd_decode_sense			ssd_decode_sense
#define	sd_print_sense_msg		ssd_print_sense_msg
#define	sd_sense_key_no_sense		ssd_sense_key_no_sense
#define	sd_sense_key_recoverable_error	ssd_sense_key_recoverable_error
#define	sd_sense_key_not_ready		ssd_sense_key_not_ready
#define	sd_sense_key_medium_or_hardware_error	\
					ssd_sense_key_medium_or_hardware_error
#define	sd_sense_key_illegal_request	ssd_sense_key_illegal_request
#define	sd_sense_key_unit_attention	ssd_sense_key_unit_attention
#define	sd_sense_key_fail_command	ssd_sense_key_fail_command
#define	sd_sense_key_blank_check	ssd_sense_key_blank_check
#define	sd_sense_key_aborted_command	ssd_sense_key_aborted_command
#define	sd_sense_key_default		ssd_sense_key_default
#define	sd_print_retry_msg		ssd_print_retry_msg
#define	sd_print_cmd_incomplete_msg	ssd_print_cmd_incomplete_msg
#define	sd_pkt_reason_cmd_incomplete	ssd_pkt_reason_cmd_incomplete
#define	sd_pkt_reason_cmd_tran_err	ssd_pkt_reason_cmd_tran_err
#define	sd_pkt_reason_cmd_reset		ssd_pkt_reason_cmd_reset
#define	sd_pkt_reason_cmd_aborted	ssd_pkt_reason_cmd_aborted
#define	sd_pkt_reason_cmd_timeout	ssd_pkt_reason_cmd_timeout
#define	sd_pkt_reason_cmd_unx_bus_free	ssd_pkt_reason_cmd_unx_bus_free
#define	sd_pkt_reason_cmd_tag_reject	ssd_pkt_reason_cmd_tag_reject
#define	sd_pkt_reason_default		ssd_pkt_reason_default
#define	sd_reset_target			ssd_reset_target
#define	sd_start_stop_unit_callback	ssd_start_stop_unit_callback
#define	sd_start_stop_unit_task		ssd_start_stop_unit_task
#define	sd_taskq_create			ssd_taskq_create
#define	sd_taskq_delete			ssd_taskq_delete
#define	sd_target_change_task		ssd_target_change_task
#define	sd_log_lun_expansion_event	ssd_log_lun_expansion_event
#define	sd_media_change_task		ssd_media_change_task
#define	sd_handle_mchange		ssd_handle_mchange
#define	sd_send_scsi_DOORLOCK		ssd_send_scsi_DOORLOCK
#define	sd_send_scsi_READ_CAPACITY	ssd_send_scsi_READ_CAPACITY
#define	sd_send_scsi_READ_CAPACITY_16	ssd_send_scsi_READ_CAPACITY_16
#define	sd_send_scsi_GET_CONFIGURATION	ssd_send_scsi_GET_CONFIGURATION
#define	sd_send_scsi_feature_GET_CONFIGURATION	\
					ssd_send_scsi_feature_GET_CONFIGURATION
#define	sd_send_scsi_START_STOP_UNIT	ssd_send_scsi_START_STOP_UNIT
#define	sd_send_scsi_INQUIRY		ssd_send_scsi_INQUIRY
#define	sd_send_scsi_TEST_UNIT_READY	ssd_send_scsi_TEST_UNIT_READY
#define	sd_send_scsi_PERSISTENT_RESERVE_IN	\
					ssd_send_scsi_PERSISTENT_RESERVE_IN
#define	sd_send_scsi_PERSISTENT_RESERVE_OUT	\
					ssd_send_scsi_PERSISTENT_RESERVE_OUT
#define	sd_send_scsi_SYNCHRONIZE_CACHE	ssd_send_scsi_SYNCHRONIZE_CACHE
#define	sd_send_scsi_SYNCHRONIZE_CACHE_biodone	\
					ssd_send_scsi_SYNCHRONIZE_CACHE_biodone
#define	sd_send_scsi_MODE_SENSE		ssd_send_scsi_MODE_SENSE
#define	sd_send_scsi_MODE_SELECT	ssd_send_scsi_MODE_SELECT
#define	sd_send_scsi_RDWR		ssd_send_scsi_RDWR
#define	sd_send_scsi_LOG_SENSE		ssd_send_scsi_LOG_SENSE
#define	sd_alloc_rqs			ssd_alloc_rqs
#define	sd_free_rqs			ssd_free_rqs
#define	sd_dump_memory			ssd_dump_memory
#define	sd_get_media_info		ssd_get_media_info
#define	sd_dkio_ctrl_info		ssd_dkio_ctrl_info
#define	sd_nvpair_str_decode		ssd_nvpair_str_decode
#define	sd_strtok_r			ssd_strtok_r
#define	sd_set_properties		ssd_set_properties
#define	sd_get_tunables_from_conf	ssd_get_tunables_from_conf
#define	sd_setup_next_xfer		ssd_setup_next_xfer
#define	sd_dkio_get_temp		ssd_dkio_get_temp
#define	sd_check_mhd			ssd_check_mhd
#define	sd_mhd_watch_cb			ssd_mhd_watch_cb
#define	sd_mhd_watch_incomplete		ssd_mhd_watch_incomplete
#define	sd_sname			ssd_sname
#define	sd_mhd_resvd_recover		ssd_mhd_resvd_recover
#define	sd_resv_reclaim_thread		ssd_resv_reclaim_thread
#define	sd_take_ownership		ssd_take_ownership
#define	sd_reserve_release		ssd_reserve_release
#define	sd_rmv_resv_reclaim_req		ssd_rmv_resv_reclaim_req
#define	sd_mhd_reset_notify_cb		ssd_mhd_reset_notify_cb
#define	sd_persistent_reservation_in_read_keys	\
					ssd_persistent_reservation_in_read_keys
#define	sd_persistent_reservation_in_read_resv	\
					ssd_persistent_reservation_in_read_resv
#define	sd_mhdioc_takeown		ssd_mhdioc_takeown
#define	sd_mhdioc_failfast		ssd_mhdioc_failfast
#define	sd_mhdioc_release		ssd_mhdioc_release
#define	sd_mhdioc_register_devid	ssd_mhdioc_register_devid
#define	sd_mhdioc_inkeys		ssd_mhdioc_inkeys
#define	sd_mhdioc_inresv		ssd_mhdioc_inresv
#define	sr_change_blkmode		ssr_change_blkmode
#define	sr_change_speed			ssr_change_speed
#define	sr_atapi_change_speed		ssr_atapi_change_speed
#define	sr_pause_resume			ssr_pause_resume
#define	sr_play_msf			ssr_play_msf
#define	sr_play_trkind			ssr_play_trkind
#define	sr_read_all_subcodes		ssr_read_all_subcodes
#define	sr_read_subchannel		ssr_read_subchannel
#define	sr_read_tocentry		ssr_read_tocentry
#define	sr_read_tochdr			ssr_read_tochdr
#define	sr_read_cdda			ssr_read_cdda
#define	sr_read_cdxa			ssr_read_cdxa
#define	sr_read_mode1			ssr_read_mode1
#define	sr_read_mode2			ssr_read_mode2
#define	sr_read_cd_mode2		ssr_read_cd_mode2
#define	sr_sector_mode			ssr_sector_mode
#define	sr_eject			ssr_eject
#define	sr_ejected			ssr_ejected
#define	sr_check_wp			ssr_check_wp
#define	sd_check_media			ssd_check_media
#define	sd_media_watch_cb		ssd_media_watch_cb
#define	sd_delayed_cv_broadcast		ssd_delayed_cv_broadcast
#define	sr_volume_ctrl			ssr_volume_ctrl
#define	sr_read_sony_session_offset	ssr_read_sony_session_offset
#define	sd_log_page_supported		ssd_log_page_supported
#define	sd_check_for_writable_cd	ssd_check_for_writable_cd
#define	sd_wm_cache_constructor		ssd_wm_cache_constructor
#define	sd_wm_cache_destructor		ssd_wm_cache_destructor
#define	sd_range_lock			ssd_range_lock
#define	sd_get_range			ssd_get_range
#define	sd_free_inlist_wmap		ssd_free_inlist_wmap
#define	sd_range_unlock			ssd_range_unlock
#define	sd_read_modify_write_task	ssd_read_modify_write_task
#define	sddump_do_read_of_rmw		ssddump_do_read_of_rmw

#define	sd_iostart_chain		ssd_iostart_chain
#define	sd_iodone_chain			ssd_iodone_chain
#define	sd_initpkt_map			ssd_initpkt_map
#define	sd_destroypkt_map		ssd_destroypkt_map
#define	sd_chain_type_map		ssd_chain_type_map
#define	sd_chain_index_map		ssd_chain_index_map

#define	sd_failfast_flushctl		ssd_failfast_flushctl
#define	sd_failfast_flushq		ssd_failfast_flushq
#define	sd_failfast_flushq_callback	ssd_failfast_flushq_callback

#define	sd_is_lsi			ssd_is_lsi
#define	sd_tg_rdwr			ssd_tg_rdwr
#define	sd_tg_getinfo			ssd_tg_getinfo

#endif	/* #if (defined(__fibre)) */


int _init(void);
int _fini(void);
int _info(struct modinfo *modinfop);

/*PRINTFLIKE3*/
static void sd_log_trace(uint_t comp, struct sd_lun *un, const char *fmt, ...);
/*PRINTFLIKE3*/
static void sd_log_info(uint_t comp, struct sd_lun *un, const char *fmt, ...);
/*PRINTFLIKE3*/
static void sd_log_err(uint_t comp, struct sd_lun *un, const char *fmt, ...);

static int sdprobe(dev_info_t *devi);
static int sdinfo(dev_info_t *dip, ddi_info_cmd_t infocmd, void *arg,
    void **result);
static int sd_prop_op(dev_t dev, dev_info_t *dip, ddi_prop_op_t prop_op,
    int mod_flags, char *name, caddr_t valuep, int *lengthp);

/*
 * Smart probe for parallel scsi
 */
static void sd_scsi_probe_cache_init(void);
static void sd_scsi_probe_cache_fini(void);
static void sd_scsi_clear_probe_cache(void);
static int sd_scsi_probe_with_cache(struct scsi_device *devp, int (*fn)());

/*
 * Attached luns on target for parallel scsi
 */
static void sd_scsi_target_lun_init(void);
static void sd_scsi_target_lun_fini(void);
static int sd_scsi_get_target_lun_count(dev_info_t *dip, int target);
static void sd_scsi_update_lun_on_target(dev_info_t *dip, int target,
    int flag);

static int sd_spin_up_unit(sd_ssc_t *ssc);

/*
 * Use sd_ssc_init to establish the sd_ssc_t struct,
 * sd_ssc_send to send a uscsi internal command, and
 * sd_ssc_fini to free the sd_ssc_t struct.
 */
static sd_ssc_t *sd_ssc_init(struct sd_lun *un);
static int sd_ssc_send(sd_ssc_t *ssc, struct uscsi_cmd *incmd,
    int flag, enum uio_seg dataspace, int path_flag);
static void sd_ssc_fini(sd_ssc_t *ssc);

/*
 * Use sd_ssc_assessment to set the correct type-of-assessment, and
 * sd_ssc_post to post an ereport & system log. sd_ssc_post calls
 * sd_ssc_print to print the system log and sd_ssc_ereport_post to
 * post the ereport.
 */
static void sd_ssc_assessment(sd_ssc_t *ssc,
    enum sd_type_assessment tp_assess);

static void sd_ssc_post(sd_ssc_t *ssc, enum sd_driver_assessment sd_assess);
static void sd_ssc_print(sd_ssc_t *ssc, int sd_severity);
static void sd_ssc_ereport_post(sd_ssc_t *ssc,
    enum sd_driver_assessment drv_assess);

/*
 * Use sd_ssc_set_info to mark an un-decodable-data error and
 * sd_ssc_extract_info to transfer information from internal
 * data structures to the sd_ssc_t.
 */
static void sd_ssc_set_info(sd_ssc_t *ssc, int ssc_flags,
    const char *fmt, ...);
static void sd_ssc_extract_info(sd_ssc_t *ssc, struct sd_lun *un,
    struct scsi_pkt *pktp, struct buf *bp, struct sd_xbuf *xp);
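/*
 * Typical calling sequence for the sd_ssc_* interfaces above (a sketch
 * only; real callers in this file also build the uscsi_cmd and check
 * intermediate states, and the SD_FMT_* values come from the
 * sd_type_assessment enum in sddef.h):
 *
 *	sd_ssc_t	*ssc;
 *	int		rval;
 *
 *	ssc = sd_ssc_init(un);
 *	rval = sd_ssc_send(ssc, &ucmd, FKIOCTL, UIO_SYSSPACE,
 *	    SD_PATH_DIRECT);
 *	if (rval == 0)
 *		sd_ssc_assessment(ssc, SD_FMT_STANDARD);
 *	else
 *		sd_ssc_assessment(ssc, SD_FMT_IGNORE);
 *	sd_ssc_fini(ssc);
 */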
static int sd_send_scsi_cmd(dev_t dev, struct uscsi_cmd *incmd, int flag,
    enum uio_seg dataspace, int path_flag);

#ifdef _LP64
static void sd_enable_descr_sense(sd_ssc_t *ssc);
static void sd_reenable_dsense_task(void *arg);
#endif /* _LP64 */

static void sd_set_mmc_caps(sd_ssc_t *ssc);

static void sd_read_unit_properties(struct sd_lun *un);
static int sd_process_sdconf_file(struct sd_lun *un);
static void sd_nvpair_str_decode(struct sd_lun *un, char *nvpair_str);
static char *sd_strtok_r(char *string, const char *sepset, char **lasts);
static void sd_set_properties(struct sd_lun *un, char *name, char *value);
static void sd_get_tunables_from_conf(struct sd_lun *un, int flags,
    int *data_list, sd_tunables *values);
static void sd_process_sdconf_table(struct sd_lun *un);
static int sd_sdconf_id_match(struct sd_lun *un, char *id, int idlen);
static int sd_blank_cmp(struct sd_lun *un, char *id, int idlen);
static int sd_chk_vers1_data(struct sd_lun *un, int flags, int *prop_list,
    int list_len, char *dataname_ptr);
static void sd_set_vers1_properties(struct sd_lun *un, int flags,
    sd_tunables *prop_list);

static void sd_register_devid(sd_ssc_t *ssc, dev_info_t *devi,
    int reservation_flag);
static int sd_get_devid(sd_ssc_t *ssc);
static ddi_devid_t sd_create_devid(sd_ssc_t *ssc);
static int sd_write_deviceid(sd_ssc_t *ssc);
static int sd_get_devid_page(struct sd_lun *un, uchar_t *wwn, int *len);
static int sd_check_vpd_page_support(sd_ssc_t *ssc);

static void sd_setup_pm(sd_ssc_t *ssc, dev_info_t *devi);
static void sd_create_pm_components(dev_info_t *devi, struct sd_lun *un);

static int sd_ddi_suspend(dev_info_t *devi);
static int sd_ddi_pm_suspend(struct sd_lun *un);
static int sd_ddi_resume(dev_info_t *devi);
static int sd_ddi_pm_resume(struct sd_lun *un);
static int sdpower(dev_info_t *devi, int component, int level);

static int sdattach(dev_info_t *devi, ddi_attach_cmd_t cmd);
static int sddetach(dev_info_t *devi, ddi_detach_cmd_t cmd);
static int sd_unit_attach(dev_info_t *devi);
static int sd_unit_detach(dev_info_t *devi);

static void sd_set_unit_attributes(struct sd_lun *un, dev_info_t *devi);
static void sd_create_errstats(struct sd_lun *un, int instance);
static void sd_set_errstats(struct sd_lun *un);
static void sd_set_pstats(struct sd_lun *un);

static int sddump(dev_t dev, caddr_t addr, daddr_t blkno, int nblk);
static int sd_scsi_poll(struct sd_lun *un, struct scsi_pkt *pkt);
static int sd_send_polled_RQS(struct sd_lun *un);
static int sd_ddi_scsi_poll(struct scsi_pkt *pkt);

#if (defined(__fibre))
/*
 * Event callbacks (photon)
 */
static void sd_init_event_callbacks(struct sd_lun *un);
static void sd_event_callback(dev_info_t *, ddi_eventcookie_t, void *, void *);
#endif

/*
 * Defines for sd_cache_control
 */

#define	SD_CACHE_ENABLE		1
#define	SD_CACHE_DISABLE	0
#define	SD_CACHE_NOCHANGE	-1

static int sd_cache_control(sd_ssc_t *ssc, int rcd_flag, int wce_flag);
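/*
 * The two flag arguments apparently track the RCD (read cache disable) and
 * WCE (write cache enable) bits of the SCSI caching mode page; passing
 * SD_CACHE_NOCHANGE leaves the corresponding cache setting as it is. A
 * sketch of a typical call, enabling the write cache without touching the
 * read cache:
 *
 *	if (sd_cache_control(ssc, SD_CACHE_NOCHANGE, SD_CACHE_ENABLE) != 0) {
 *		(failed to update the caching mode page)
 *	}
 */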
static int sd_get_write_cache_enabled(sd_ssc_t *ssc, int *is_enabled);
static void sd_get_nv_sup(sd_ssc_t *ssc);
static dev_t sd_make_device(dev_info_t *devi);

static void sd_update_block_info(struct sd_lun *un, uint32_t lbasize,
    uint64_t capacity);

/*
 * Driver entry point functions.
 */
static int sdopen(dev_t *dev_p, int flag, int otyp, cred_t *cred_p);
static int sdclose(dev_t dev, int flag, int otyp, cred_t *cred_p);
static int sd_ready_and_valid(sd_ssc_t *ssc, int part);

static void sdmin(struct buf *bp);
static int sdread(dev_t dev, struct uio *uio, cred_t *cred_p);
static int sdwrite(dev_t dev, struct uio *uio, cred_t *cred_p);
static int sdaread(dev_t dev, struct aio_req *aio, cred_t *cred_p);
static int sdawrite(dev_t dev, struct aio_req *aio, cred_t *cred_p);

static int sdstrategy(struct buf *bp);
static int sdioctl(dev_t, int, intptr_t, int, cred_t *, int *);

/*
 * Function prototypes for layering functions in the iostart chain.
 */
static void sd_mapblockaddr_iostart(int index, struct sd_lun *un,
    struct buf *bp);
static void sd_mapblocksize_iostart(int index, struct sd_lun *un,
    struct buf *bp);
static void sd_checksum_iostart(int index, struct sd_lun *un, struct buf *bp);
static void sd_checksum_uscsi_iostart(int index, struct sd_lun *un,
    struct buf *bp);
static void sd_pm_iostart(int index, struct sd_lun *un, struct buf *bp);
static void sd_core_iostart(int index, struct sd_lun *un, struct buf *bp);

/*
 * Function prototypes for layering functions in the iodone chain.
 */
static void sd_buf_iodone(int index, struct sd_lun *un, struct buf *bp);
static void sd_uscsi_iodone(int index, struct sd_lun *un, struct buf *bp);
static void sd_mapblockaddr_iodone(int index, struct sd_lun *un,
    struct buf *bp);
static void sd_mapblocksize_iodone(int index, struct sd_lun *un,
    struct buf *bp);
static void sd_checksum_iodone(int index, struct sd_lun *un, struct buf *bp);
static void sd_checksum_uscsi_iodone(int index, struct sd_lun *un,
    struct buf *bp);
static void sd_pm_iodone(int index, struct sd_lun *un, struct buf *bp);

/*
 * Prototypes for functions to support buf(9S) based IO.
 */
static void sd_xbuf_strategy(struct buf *bp, ddi_xbuf_t xp, void *arg);
static int sd_initpkt_for_buf(struct buf *, struct scsi_pkt **);
static void sd_destroypkt_for_buf(struct buf *);
static int sd_setup_rw_pkt(struct sd_lun *un, struct scsi_pkt **pktpp,
    struct buf *bp, int flags,
    int (*callback)(caddr_t), caddr_t callback_arg,
    diskaddr_t lba, uint32_t blockcount);
static int sd_setup_next_rw_pkt(struct sd_lun *un, struct scsi_pkt *pktp,
    struct buf *bp, diskaddr_t lba, uint32_t blockcount);

/*
 * Prototypes for functions to support USCSI IO.
 */
static int sd_uscsi_strategy(struct buf *bp);
static int sd_initpkt_for_uscsi(struct buf *, struct scsi_pkt **);
static void sd_destroypkt_for_uscsi(struct buf *);

static void sd_xbuf_init(struct sd_lun *un, struct buf *bp, struct sd_xbuf *xp,
    uchar_t chain_type, void *pktinfop);

static int sd_pm_entry(struct sd_lun *un);
static void sd_pm_exit(struct sd_lun *un);

static void sd_pm_idletimeout_handler(void *arg);

/*
 * sd_core internal functions (used at the sd_core_io layer).
 */
static void sd_add_buf_to_waitq(struct sd_lun *un, struct buf *bp);
static void sdintr(struct scsi_pkt *pktp);
static void sd_start_cmds(struct sd_lun *un, struct buf *immed_bp);

static int sd_send_scsi_cmd(dev_t dev, struct uscsi_cmd *incmd, int flag,
    enum uio_seg dataspace, int path_flag);

static struct buf *sd_bioclone_alloc(struct buf *bp, size_t datalen,
    daddr_t blkno, int (*func)(struct buf *));
static struct buf *sd_shadow_buf_alloc(struct buf *bp, size_t datalen,
    uint_t bflags, daddr_t blkno, int (*func)(struct buf *));
static void sd_bioclone_free(struct buf *bp);
static void sd_shadow_buf_free(struct buf *bp);

static void sd_print_transport_rejected_message(struct sd_lun *un,
    struct sd_xbuf *xp, int code);
static void sd_print_incomplete_msg(struct sd_lun *un, struct buf *bp,
    void *arg, int code);
static void sd_print_sense_failed_msg(struct sd_lun *un, struct buf *bp,
    void *arg, int code);
static void sd_print_cmd_incomplete_msg(struct sd_lun *un, struct buf *bp,
    void *arg, int code);

static void sd_retry_command(struct sd_lun *un, struct buf *bp,
    int retry_check_flag,
    void (*user_funcp)(struct sd_lun *un, struct buf *bp, void *argp,
	int c),
    void *user_arg, int failure_code, clock_t retry_delay,
    void (*statp)(kstat_io_t *));

static void sd_set_retry_bp(struct sd_lun *un, struct buf *bp,
    clock_t retry_delay, void (*statp)(kstat_io_t *));

static void sd_send_request_sense_command(struct sd_lun *un, struct buf *bp,
    struct scsi_pkt *pktp);
static void sd_start_retry_command(void *arg);
static void sd_start_direct_priority_command(void *arg);
static void sd_return_failed_command(struct sd_lun *un, struct buf *bp,
    int errcode);
static void sd_return_failed_command_no_restart(struct sd_lun *un,
    struct buf *bp, int errcode);
static void sd_return_command(struct sd_lun *un, struct buf *bp);
static void sd_sync_with_callback(struct sd_lun *un);
static int sdrunout(caddr_t arg);

static void sd_mark_rqs_busy(struct sd_lun *un, struct buf *bp);
static struct buf *sd_mark_rqs_idle(struct sd_lun *un, struct sd_xbuf *xp);

static void sd_reduce_throttle(struct sd_lun *un, int throttle_type);
static void sd_restore_throttle(void *arg);

static void sd_init_cdb_limits(struct sd_lun *un);

static void sd_pkt_status_good(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);

/*
 * Error handling functions
 */
static void sd_pkt_status_check_condition(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_pkt_status_busy(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_pkt_status_reservation_conflict(struct sd_lun *un,
    struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_pkt_status_qfull(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);

static void sd_handle_request_sense(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_handle_auto_request_sense(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);
static int sd_validate_sense_data(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, size_t actual_len);
static void sd_decode_sense(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);

static void sd_print_sense_msg(struct sd_lun *un, struct buf *bp,
    void *arg, int code);

static void sd_sense_key_no_sense(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_sense_key_recoverable_error(struct sd_lun *un,
    uint8_t *sense_datap,
    struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_sense_key_not_ready(struct sd_lun *un,
    uint8_t *sense_datap,
    struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_sense_key_medium_or_hardware_error(struct sd_lun *un,
    uint8_t *sense_datap,
    struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_sense_key_illegal_request(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_sense_key_unit_attention(struct sd_lun *un,
    uint8_t *sense_datap,
    struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_sense_key_fail_command(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_sense_key_blank_check(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_sense_key_aborted_command(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_sense_key_default(struct sd_lun *un,
    uint8_t *sense_datap,
    struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp);

static void sd_print_retry_msg(struct sd_lun *un, struct buf *bp,
    void *arg, int flag);

static void sd_pkt_reason_cmd_incomplete(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_pkt_reason_cmd_tran_err(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_pkt_reason_cmd_reset(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_pkt_reason_cmd_aborted(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_pkt_reason_cmd_timeout(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_pkt_reason_cmd_unx_bus_free(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_pkt_reason_cmd_tag_reject(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_pkt_reason_default(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);

static void sd_reset_target(struct sd_lun *un, struct scsi_pkt *pktp);

static void sd_start_stop_unit_callback(void *arg);
static void sd_start_stop_unit_task(void *arg);

static void sd_taskq_create(void);
static void sd_taskq_delete(void);
static void sd_target_change_task(void *arg);
static void sd_log_lun_expansion_event(struct sd_lun *un, int km_flag);
static void sd_media_change_task(void *arg);

static int sd_handle_mchange(struct sd_lun *un);
static int sd_send_scsi_DOORLOCK(sd_ssc_t *ssc, int flag, int path_flag);
static int sd_send_scsi_READ_CAPACITY(sd_ssc_t *ssc, uint64_t *capp,
    uint32_t *lbap, int path_flag);
static int sd_send_scsi_READ_CAPACITY_16(sd_ssc_t *ssc, uint64_t *capp,
*capp, 1463 uint32_t *lbap, int path_flag); 1464 static int sd_send_scsi_START_STOP_UNIT(sd_ssc_t *ssc, int flag, 1465 int path_flag); 1466 static int sd_send_scsi_INQUIRY(sd_ssc_t *ssc, uchar_t *bufaddr, 1467 size_t buflen, uchar_t evpd, uchar_t page_code, size_t *residp); 1468 static int sd_send_scsi_TEST_UNIT_READY(sd_ssc_t *ssc, int flag); 1469 static int sd_send_scsi_PERSISTENT_RESERVE_IN(sd_ssc_t *ssc, 1470 uchar_t usr_cmd, uint16_t data_len, uchar_t *data_bufp); 1471 static int sd_send_scsi_PERSISTENT_RESERVE_OUT(sd_ssc_t *ssc, 1472 uchar_t usr_cmd, uchar_t *usr_bufp); 1473 static int sd_send_scsi_SYNCHRONIZE_CACHE(struct sd_lun *un, 1474 struct dk_callback *dkc); 1475 static int sd_send_scsi_SYNCHRONIZE_CACHE_biodone(struct buf *bp); 1476 static int sd_send_scsi_GET_CONFIGURATION(sd_ssc_t *ssc, 1477 struct uscsi_cmd *ucmdbuf, uchar_t *rqbuf, uint_t rqbuflen, 1478 uchar_t *bufaddr, uint_t buflen, int path_flag); 1479 static int sd_send_scsi_feature_GET_CONFIGURATION(sd_ssc_t *ssc, 1480 struct uscsi_cmd *ucmdbuf, uchar_t *rqbuf, uint_t rqbuflen, 1481 uchar_t *bufaddr, uint_t buflen, char feature, int path_flag); 1482 static int sd_send_scsi_MODE_SENSE(sd_ssc_t *ssc, int cdbsize, 1483 uchar_t *bufaddr, size_t buflen, uchar_t page_code, int path_flag); 1484 static int sd_send_scsi_MODE_SELECT(sd_ssc_t *ssc, int cdbsize, 1485 uchar_t *bufaddr, size_t buflen, uchar_t save_page, int path_flag); 1486 static int sd_send_scsi_RDWR(sd_ssc_t *ssc, uchar_t cmd, void *bufaddr, 1487 size_t buflen, daddr_t start_block, int path_flag); 1488 #define sd_send_scsi_READ(ssc, bufaddr, buflen, start_block, path_flag) \ 1489 sd_send_scsi_RDWR(ssc, SCMD_READ, bufaddr, buflen, start_block, \ 1490 path_flag) 1491 #define sd_send_scsi_WRITE(ssc, bufaddr, buflen, start_block, path_flag)\ 1492 sd_send_scsi_RDWR(ssc, SCMD_WRITE, bufaddr, buflen, start_block,\ 1493 path_flag) 1494 1495 static int sd_send_scsi_LOG_SENSE(sd_ssc_t *ssc, uchar_t *bufaddr, 1496 uint16_t buflen, uchar_t page_code, uchar_t page_control, 1497 uint16_t param_ptr, int path_flag); 1498 1499 static int sd_alloc_rqs(struct scsi_device *devp, struct sd_lun *un); 1500 static void sd_free_rqs(struct sd_lun *un); 1501 1502 static void sd_dump_memory(struct sd_lun *un, uint_t comp, char *title, 1503 uchar_t *data, int len, int fmt); 1504 static void sd_panic_for_res_conflict(struct sd_lun *un); 1505 1506 /* 1507 * Disk Ioctl Function Prototypes 1508 */ 1509 static int sd_get_media_info(dev_t dev, caddr_t arg, int flag); 1510 static int sd_dkio_ctrl_info(dev_t dev, caddr_t arg, int flag); 1511 static int sd_dkio_get_temp(dev_t dev, caddr_t arg, int flag); 1512 1513 /* 1514 * Multi-host Ioctl Prototypes 1515 */ 1516 static int sd_check_mhd(dev_t dev, int interval); 1517 static int sd_mhd_watch_cb(caddr_t arg, struct scsi_watch_result *resultp); 1518 static void sd_mhd_watch_incomplete(struct sd_lun *un, struct scsi_pkt *pkt); 1519 static char *sd_sname(uchar_t status); 1520 static void sd_mhd_resvd_recover(void *arg); 1521 static void sd_resv_reclaim_thread(); 1522 static int sd_take_ownership(dev_t dev, struct mhioctkown *p); 1523 static int sd_reserve_release(dev_t dev, int cmd); 1524 static void sd_rmv_resv_reclaim_req(dev_t dev); 1525 static void sd_mhd_reset_notify_cb(caddr_t arg); 1526 static int sd_persistent_reservation_in_read_keys(struct sd_lun *un, 1527 mhioc_inkeys_t *usrp, int flag); 1528 static int sd_persistent_reservation_in_read_resv(struct sd_lun *un, 1529 mhioc_inresvs_t *usrp, int flag); 1530 static int sd_mhdioc_takeown(dev_t 
dev, caddr_t arg, int flag); 1531 static int sd_mhdioc_failfast(dev_t dev, caddr_t arg, int flag); 1532 static int sd_mhdioc_release(dev_t dev); 1533 static int sd_mhdioc_register_devid(dev_t dev); 1534 static int sd_mhdioc_inkeys(dev_t dev, caddr_t arg, int flag); 1535 static int sd_mhdioc_inresv(dev_t dev, caddr_t arg, int flag); 1536 1537 /* 1538 * SCSI removable prototypes 1539 */ 1540 static int sr_change_blkmode(dev_t dev, int cmd, intptr_t data, int flag); 1541 static int sr_change_speed(dev_t dev, int cmd, intptr_t data, int flag); 1542 static int sr_atapi_change_speed(dev_t dev, int cmd, intptr_t data, int flag); 1543 static int sr_pause_resume(dev_t dev, int mode); 1544 static int sr_play_msf(dev_t dev, caddr_t data, int flag); 1545 static int sr_play_trkind(dev_t dev, caddr_t data, int flag); 1546 static int sr_read_all_subcodes(dev_t dev, caddr_t data, int flag); 1547 static int sr_read_subchannel(dev_t dev, caddr_t data, int flag); 1548 static int sr_read_tocentry(dev_t dev, caddr_t data, int flag); 1549 static int sr_read_tochdr(dev_t dev, caddr_t data, int flag); 1550 static int sr_read_cdda(dev_t dev, caddr_t data, int flag); 1551 static int sr_read_cdxa(dev_t dev, caddr_t data, int flag); 1552 static int sr_read_mode1(dev_t dev, caddr_t data, int flag); 1553 static int sr_read_mode2(dev_t dev, caddr_t data, int flag); 1554 static int sr_read_cd_mode2(dev_t dev, caddr_t data, int flag); 1555 static int sr_sector_mode(dev_t dev, uint32_t blksize); 1556 static int sr_eject(dev_t dev); 1557 static void sr_ejected(register struct sd_lun *un); 1558 static int sr_check_wp(dev_t dev); 1559 static int sd_check_media(dev_t dev, enum dkio_state state); 1560 static int sd_media_watch_cb(caddr_t arg, struct scsi_watch_result *resultp); 1561 static void sd_delayed_cv_broadcast(void *arg); 1562 static int sr_volume_ctrl(dev_t dev, caddr_t data, int flag); 1563 static int sr_read_sony_session_offset(dev_t dev, caddr_t data, int flag); 1564 1565 static int sd_log_page_supported(sd_ssc_t *ssc, int log_page); 1566 1567 /* 1568 * Function Prototype for the non-512 support (DVDRAM, MO etc.) functions. 1569 */ 1570 static void sd_check_for_writable_cd(sd_ssc_t *ssc, int path_flag); 1571 static int sd_wm_cache_constructor(void *wm, void *un, int flags); 1572 static void sd_wm_cache_destructor(void *wm, void *un); 1573 static struct sd_w_map *sd_range_lock(struct sd_lun *un, daddr_t startb, 1574 daddr_t endb, ushort_t typ); 1575 static struct sd_w_map *sd_get_range(struct sd_lun *un, daddr_t startb, 1576 daddr_t endb); 1577 static void sd_free_inlist_wmap(struct sd_lun *un, struct sd_w_map *wmp); 1578 static void sd_range_unlock(struct sd_lun *un, struct sd_w_map *wm); 1579 static void sd_read_modify_write_task(void * arg); 1580 static int 1581 sddump_do_read_of_rmw(struct sd_lun *un, uint64_t blkno, uint64_t nblk, 1582 struct buf **bpp); 1583 1584 1585 /* 1586 * Function prototypes for failfast support. 
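 *
 * A tuning sketch (hypothetical value; see the sd_failfast_flushctl
 * discussion below): the failfast flush behavior is controlled by a
 * global that could be adjusted from /etc/system, e.g.
 *
 *     set sd:sd_failfast_flushctl = 0x3
 *
 * which would select both SD_FAILFAST_FLUSH_ALL_BUFS (0x01) and
 * SD_FAILFAST_FLUSH_ALL_QUEUES (0x02).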
1587 */ 1588 static void sd_failfast_flushq(struct sd_lun *un); 1589 static int sd_failfast_flushq_callback(struct buf *bp); 1590 1591 /* 1592 * Function prototypes to check for lsi devices 1593 */ 1594 static void sd_is_lsi(struct sd_lun *un); 1595 1596 /* 1597 * Function prototypes for partial DMA support 1598 */ 1599 static int sd_setup_next_xfer(struct sd_lun *un, struct buf *bp, 1600 struct scsi_pkt *pkt, struct sd_xbuf *xp); 1601 1602 1603 /* Function prototypes for cmlb */ 1604 static int sd_tg_rdwr(dev_info_t *devi, uchar_t cmd, void *bufaddr, 1605 diskaddr_t start_block, size_t reqlength, void *tg_cookie); 1606 1607 static int sd_tg_getinfo(dev_info_t *devi, int cmd, void *arg, void *tg_cookie); 1608 1609 /* 1610 * Constants for failfast support: 1611 * 1612 * SD_FAILFAST_INACTIVE: Instance is currently in a normal state, with NO 1613 * failfast processing being performed. 1614 * 1615 * SD_FAILFAST_ACTIVE: Instance is in the failfast state and is performing 1616 * failfast processing on all bufs with B_FAILFAST set. 1617 */ 1618 1619 #define SD_FAILFAST_INACTIVE 0 1620 #define SD_FAILFAST_ACTIVE 1 1621 1622 /* 1623 * Bitmask to control behavior of buf(9S) flushes when a transition to 1624 * the failfast state occurs. Optional bits include: 1625 * 1626 * SD_FAILFAST_FLUSH_ALL_BUFS: When set, flush ALL bufs including those that 1627 * do NOT have B_FAILFAST set. When clear, only bufs with B_FAILFAST will 1628 * be flushed. 1629 * 1630 * SD_FAILFAST_FLUSH_ALL_QUEUES: When set, flush any/all other queues in the 1631 * driver, in addition to the regular wait queue. This includes the xbuf 1632 * queues. When clear, only the driver's wait queue will be flushed. 1633 */ 1634 #define SD_FAILFAST_FLUSH_ALL_BUFS 0x01 1635 #define SD_FAILFAST_FLUSH_ALL_QUEUES 0x02 1636 1637 /* 1638 * The default behavior is to only flush bufs that have B_FAILFAST set, but 1639 * to flush all queues within the driver. 1640 */ 1641 static int sd_failfast_flushctl = SD_FAILFAST_FLUSH_ALL_QUEUES; 1642 1643 1644 /* 1645 * SD Testing Fault Injection 1646 */ 1647 #ifdef SD_FAULT_INJECTION 1648 static void sd_faultinjection_ioctl(int cmd, intptr_t arg, struct sd_lun *un); 1649 static void sd_faultinjection(struct scsi_pkt *pktp); 1650 static void sd_injection_log(char *buf, struct sd_lun *un); 1651 #endif 1652 1653 /* 1654 * Device driver ops vector 1655 */ 1656 static struct cb_ops sd_cb_ops = { 1657 sdopen, /* open */ 1658 sdclose, /* close */ 1659 sdstrategy, /* strategy */ 1660 nodev, /* print */ 1661 sddump, /* dump */ 1662 sdread, /* read */ 1663 sdwrite, /* write */ 1664 sdioctl, /* ioctl */ 1665 nodev, /* devmap */ 1666 nodev, /* mmap */ 1667 nodev, /* segmap */ 1668 nochpoll, /* poll */ 1669 sd_prop_op, /* cb_prop_op */ 1670 0, /* streamtab */ 1671 D_64BIT | D_MP | D_NEW | D_HOTPLUG, /* Driver compatibility flags */ 1672 CB_REV, /* cb_rev */ 1673 sdaread, /* async I/O read entry point */ 1674 sdawrite /* async I/O write entry point */ 1675 }; 1676 1677 static struct dev_ops sd_ops = { 1678 DEVO_REV, /* devo_rev, */ 1679 0, /* refcnt */ 1680 sdinfo, /* info */ 1681 nulldev, /* identify */ 1682 sdprobe, /* probe */ 1683 sdattach, /* attach */ 1684 sddetach, /* detach */ 1685 nodev, /* reset */ 1686 &sd_cb_ops, /* driver operations */ 1687 NULL, /* bus operations */ 1688 sdpower, /* power */ 1689 ddi_quiesce_not_needed, /* quiesce */ 1690 }; 1691 1692 1693 /* 1694 * This is the loadable module wrapper. 
 */

#include <sys/modctl.h>

static struct modldrv modldrv = {
    &mod_driverops,   /* Type of module. This one is a driver */
    SD_MODULE_NAME,   /* Module name. */
    &sd_ops           /* driver ops */
};

static struct modlinkage modlinkage = {
    MODREV_1,
    &modldrv,
    NULL
};

static cmlb_tg_ops_t sd_tgops = {
    TG_DK_OPS_VERSION_1,
    sd_tg_rdwr,
    sd_tg_getinfo
};

static struct scsi_asq_key_strings sd_additional_codes[] = {
    0x81, 0, "Logical Unit is Reserved",
    0x85, 0, "Audio Address Not Valid",
    0xb6, 0, "Media Load Mechanism Failed",
    0xB9, 0, "Audio Play Operation Aborted",
    0xbf, 0, "Buffer Overflow for Read All Subcodes Command",
    0x53, 2, "Medium removal prevented",
    0x6f, 0, "Authentication failed during key exchange",
    0x6f, 1, "Key not present",
    0x6f, 2, "Key not established",
    0x6f, 3, "Read without proper authentication",
    0x6f, 4, "Mismatched region to this logical unit",
    0x6f, 5, "Region reset count error",
    0xffff, 0x0, NULL
};

/*
 * Struct for passing printing information for sense data messages
 */
struct sd_sense_info {
    int ssi_severity;
    int ssi_pfa_flag;
};

/*
 * Table of function pointers for iostart-side routines. Separate "chains"
 * of layered function calls are formed by placing the function pointers
 * sequentially in the desired order. Functions are called according to an
 * incrementing table index ordering. The last function in each chain must
 * be sd_core_iostart(). The corresponding iodone-side routines are expected
 * in the sd_iodone_chain[] array.
 *
 * Note: It may seem more natural to organize both the iostart and iodone
 * functions together, into an array of structures (or some similar
 * organization) with a common index, rather than two separate arrays which
 * must be maintained in synchronization. The purpose of this division is
 * to achieve improved performance: individual arrays allow for more
 * effective cache line utilization on certain platforms.
1756 */ 1757 1758 typedef void (*sd_chain_t)(int index, struct sd_lun *un, struct buf *bp); 1759 1760 1761 static sd_chain_t sd_iostart_chain[] = { 1762 1763 /* Chain for buf IO for disk drive targets (PM enabled) */ 1764 sd_mapblockaddr_iostart, /* Index: 0 */ 1765 sd_pm_iostart, /* Index: 1 */ 1766 sd_core_iostart, /* Index: 2 */ 1767 1768 /* Chain for buf IO for disk drive targets (PM disabled) */ 1769 sd_mapblockaddr_iostart, /* Index: 3 */ 1770 sd_core_iostart, /* Index: 4 */ 1771 1772 /* Chain for buf IO for removable-media targets (PM enabled) */ 1773 sd_mapblockaddr_iostart, /* Index: 5 */ 1774 sd_mapblocksize_iostart, /* Index: 6 */ 1775 sd_pm_iostart, /* Index: 7 */ 1776 sd_core_iostart, /* Index: 8 */ 1777 1778 /* Chain for buf IO for removable-media targets (PM disabled) */ 1779 sd_mapblockaddr_iostart, /* Index: 9 */ 1780 sd_mapblocksize_iostart, /* Index: 10 */ 1781 sd_core_iostart, /* Index: 11 */ 1782 1783 /* Chain for buf IO for disk drives with checksumming (PM enabled) */ 1784 sd_mapblockaddr_iostart, /* Index: 12 */ 1785 sd_checksum_iostart, /* Index: 13 */ 1786 sd_pm_iostart, /* Index: 14 */ 1787 sd_core_iostart, /* Index: 15 */ 1788 1789 /* Chain for buf IO for disk drives with checksumming (PM disabled) */ 1790 sd_mapblockaddr_iostart, /* Index: 16 */ 1791 sd_checksum_iostart, /* Index: 17 */ 1792 sd_core_iostart, /* Index: 18 */ 1793 1794 /* Chain for USCSI commands (all targets) */ 1795 sd_pm_iostart, /* Index: 19 */ 1796 sd_core_iostart, /* Index: 20 */ 1797 1798 /* Chain for checksumming USCSI commands (all targets) */ 1799 sd_checksum_uscsi_iostart, /* Index: 21 */ 1800 sd_pm_iostart, /* Index: 22 */ 1801 sd_core_iostart, /* Index: 23 */ 1802 1803 /* Chain for "direct" USCSI commands (all targets) */ 1804 sd_core_iostart, /* Index: 24 */ 1805 1806 /* Chain for "direct priority" USCSI commands (all targets) */ 1807 sd_core_iostart, /* Index: 25 */ 1808 }; 1809 1810 /* 1811 * Macros to locate the first function of each iostart chain in the 1812 * sd_iostart_chain[] array. These are located by the index in the array. 1813 */ 1814 #define SD_CHAIN_DISK_IOSTART 0 1815 #define SD_CHAIN_DISK_IOSTART_NO_PM 3 1816 #define SD_CHAIN_RMMEDIA_IOSTART 5 1817 #define SD_CHAIN_RMMEDIA_IOSTART_NO_PM 9 1818 #define SD_CHAIN_CHKSUM_IOSTART 12 1819 #define SD_CHAIN_CHKSUM_IOSTART_NO_PM 16 1820 #define SD_CHAIN_USCSI_CMD_IOSTART 19 1821 #define SD_CHAIN_USCSI_CHKSUM_IOSTART 21 1822 #define SD_CHAIN_DIRECT_CMD_IOSTART 24 1823 #define SD_CHAIN_PRIORITY_CMD_IOSTART 25 1824 1825 1826 /* 1827 * Table of function pointers for the iodone-side routines for the driver- 1828 * internal layering mechanism. The calling sequence for iodone routines 1829 * uses a decrementing table index, so the last routine called in a chain 1830 * must be at the lowest array index location for that chain. The last 1831 * routine for each chain must be either sd_buf_iodone() (for buf(9S) IOs) 1832 * or sd_uscsi_iodone() (for uscsi IOs). Other than this, the ordering 1833 * of the functions in an iodone side chain must correspond to the ordering 1834 * of the iostart routines for that chain. Note that there is no iodone 1835 * side routine that corresponds to sd_core_iostart(), so there is no 1836 * entry in the table for this. 
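 *
 * As a worked example: a buf(9S) IO on the plain disk chain starts at
 * SD_CHAIN_DISK_IOSTART (index 0) and runs the iostart side with an
 * incrementing index:
 *
 *     sd_mapblockaddr_iostart (0) -> sd_pm_iostart (1) -> sd_core_iostart (2)
 *
 * At completion the iodone side begins at SD_CHAIN_DISK_IODONE (index 2)
 * and runs with a decrementing index:
 *
 *     sd_pm_iodone (2) -> sd_mapblockaddr_iodone (1) -> sd_buf_iodone (0)
 *
 * (Traversal is driven by the SD_NEXT_IOSTART()/SD_NEXT_IODONE() macros
 * defined further below.)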
 */

static sd_chain_t sd_iodone_chain[] = {

    /* Chain for buf IO for disk drive targets (PM enabled) */
    sd_buf_iodone,              /* Index: 0 */
    sd_mapblockaddr_iodone,     /* Index: 1 */
    sd_pm_iodone,               /* Index: 2 */

    /* Chain for buf IO for disk drive targets (PM disabled) */
    sd_buf_iodone,              /* Index: 3 */
    sd_mapblockaddr_iodone,     /* Index: 4 */

    /* Chain for buf IO for removable-media targets (PM enabled) */
    sd_buf_iodone,              /* Index: 5 */
    sd_mapblockaddr_iodone,     /* Index: 6 */
    sd_mapblocksize_iodone,     /* Index: 7 */
    sd_pm_iodone,               /* Index: 8 */

    /* Chain for buf IO for removable-media targets (PM disabled) */
    sd_buf_iodone,              /* Index: 9 */
    sd_mapblockaddr_iodone,     /* Index: 10 */
    sd_mapblocksize_iodone,     /* Index: 11 */

    /* Chain for buf IO for disk drives with checksumming (PM enabled) */
    sd_buf_iodone,              /* Index: 12 */
    sd_mapblockaddr_iodone,     /* Index: 13 */
    sd_checksum_iodone,         /* Index: 14 */
    sd_pm_iodone,               /* Index: 15 */

    /* Chain for buf IO for disk drives with checksumming (PM disabled) */
    sd_buf_iodone,              /* Index: 16 */
    sd_mapblockaddr_iodone,     /* Index: 17 */
    sd_checksum_iodone,         /* Index: 18 */

    /* Chain for USCSI commands (non-checksum targets) */
    sd_uscsi_iodone,            /* Index: 19 */
    sd_pm_iodone,               /* Index: 20 */

    /* Chain for USCSI commands (checksum targets) */
    sd_uscsi_iodone,            /* Index: 21 */
    sd_checksum_uscsi_iodone,   /* Index: 22 */
    sd_pm_iodone,               /* Index: 23 */

    /* Chain for "direct" USCSI commands (all targets) */
    sd_uscsi_iodone,            /* Index: 24 */

    /* Chain for "direct priority" USCSI commands (all targets) */
    sd_uscsi_iodone,            /* Index: 25 */
};


/*
 * Macros to locate the "first" function in the sd_iodone_chain[] array for
 * each iodone-side chain. These are located by the array index, but as the
 * iodone side functions are called in a decrementing-index order, the
 * highest index number in each chain must be specified (as these correspond
 * to the first function in the iodone chain that will be called by the core
 * at IO completion time).
 */

#define SD_CHAIN_DISK_IODONE            2
#define SD_CHAIN_DISK_IODONE_NO_PM      4
#define SD_CHAIN_RMMEDIA_IODONE         8
#define SD_CHAIN_RMMEDIA_IODONE_NO_PM   11
#define SD_CHAIN_CHKSUM_IODONE          15
#define SD_CHAIN_CHKSUM_IODONE_NO_PM    18
#define SD_CHAIN_USCSI_CMD_IODONE       20
#define SD_CHAIN_USCSI_CHKSUM_IODONE    22
#define SD_CHAIN_DIRECT_CMD_IODONE      24
#define SD_CHAIN_PRIORITY_CMD_IODONE    25




/*
 * Array to map a layering chain index to the appropriate initpkt routine.
 * The redundant entries are present so that the index used for accessing
 * the above sd_iostart_chain and sd_iodone_chain tables can be used directly
 * with this table as well.
 */
typedef int (*sd_initpkt_t)(struct buf *, struct scsi_pkt **);

static sd_initpkt_t sd_initpkt_map[] = {

    /* Chain for buf IO for disk drive targets (PM enabled) */
    sd_initpkt_for_buf,         /* Index: 0 */
    sd_initpkt_for_buf,         /* Index: 1 */
    sd_initpkt_for_buf,         /* Index: 2 */

    /* Chain for buf IO for disk drive targets (PM disabled) */
    sd_initpkt_for_buf,         /* Index: 3 */
    sd_initpkt_for_buf,         /* Index: 4 */

    /* Chain for buf IO for removable-media targets (PM enabled) */
    sd_initpkt_for_buf,         /* Index: 5 */
    sd_initpkt_for_buf,         /* Index: 6 */
    sd_initpkt_for_buf,         /* Index: 7 */
    sd_initpkt_for_buf,         /* Index: 8 */

    /* Chain for buf IO for removable-media targets (PM disabled) */
    sd_initpkt_for_buf,         /* Index: 9 */
    sd_initpkt_for_buf,         /* Index: 10 */
    sd_initpkt_for_buf,         /* Index: 11 */

    /* Chain for buf IO for disk drives with checksumming (PM enabled) */
    sd_initpkt_for_buf,         /* Index: 12 */
    sd_initpkt_for_buf,         /* Index: 13 */
    sd_initpkt_for_buf,         /* Index: 14 */
    sd_initpkt_for_buf,         /* Index: 15 */

    /* Chain for buf IO for disk drives with checksumming (PM disabled) */
    sd_initpkt_for_buf,         /* Index: 16 */
    sd_initpkt_for_buf,         /* Index: 17 */
    sd_initpkt_for_buf,         /* Index: 18 */

    /* Chain for USCSI commands (non-checksum targets) */
    sd_initpkt_for_uscsi,       /* Index: 19 */
    sd_initpkt_for_uscsi,       /* Index: 20 */

    /* Chain for USCSI commands (checksum targets) */
    sd_initpkt_for_uscsi,       /* Index: 21 */
    sd_initpkt_for_uscsi,       /* Index: 22 */
    sd_initpkt_for_uscsi,       /* Index: 23 */

    /* Chain for "direct" USCSI commands (all targets) */
    sd_initpkt_for_uscsi,       /* Index: 24 */

    /* Chain for "direct priority" USCSI commands (all targets) */
    sd_initpkt_for_uscsi,       /* Index: 25 */

};


/*
 * Array to map a layering chain index to the appropriate destroypkt routine.
 * The redundant entries are present so that the index used for accessing
 * the above sd_iostart_chain and sd_iodone_chain tables can be used directly
 * with this table as well.
 */
typedef void (*sd_destroypkt_t)(struct buf *);

static sd_destroypkt_t sd_destroypkt_map[] = {

    /* Chain for buf IO for disk drive targets (PM enabled) */
    sd_destroypkt_for_buf,      /* Index: 0 */
    sd_destroypkt_for_buf,      /* Index: 1 */
    sd_destroypkt_for_buf,      /* Index: 2 */

    /* Chain for buf IO for disk drive targets (PM disabled) */
    sd_destroypkt_for_buf,      /* Index: 3 */
    sd_destroypkt_for_buf,      /* Index: 4 */

    /* Chain for buf IO for removable-media targets (PM enabled) */
    sd_destroypkt_for_buf,      /* Index: 5 */
    sd_destroypkt_for_buf,      /* Index: 6 */
    sd_destroypkt_for_buf,      /* Index: 7 */
    sd_destroypkt_for_buf,      /* Index: 8 */

    /* Chain for buf IO for removable-media targets (PM disabled) */
    sd_destroypkt_for_buf,      /* Index: 9 */
    sd_destroypkt_for_buf,      /* Index: 10 */
    sd_destroypkt_for_buf,      /* Index: 11 */

    /* Chain for buf IO for disk drives with checksumming (PM enabled) */
    sd_destroypkt_for_buf,      /* Index: 12 */
    sd_destroypkt_for_buf,      /* Index: 13 */
    sd_destroypkt_for_buf,      /* Index: 14 */
    sd_destroypkt_for_buf,      /* Index: 15 */

    /* Chain for buf IO for disk drives with checksumming (PM disabled) */
    sd_destroypkt_for_buf,      /* Index: 16 */
    sd_destroypkt_for_buf,      /* Index: 17 */
    sd_destroypkt_for_buf,      /* Index: 18 */

    /* Chain for USCSI commands (non-checksum targets) */
    sd_destroypkt_for_uscsi,    /* Index: 19 */
    sd_destroypkt_for_uscsi,    /* Index: 20 */

    /* Chain for USCSI commands (checksum targets) */
    sd_destroypkt_for_uscsi,    /* Index: 21 */
    sd_destroypkt_for_uscsi,    /* Index: 22 */
    sd_destroypkt_for_uscsi,    /* Index: 23 */

    /* Chain for "direct" USCSI commands (all targets) */
    sd_destroypkt_for_uscsi,    /* Index: 24 */

    /* Chain for "direct priority" USCSI commands (all targets) */
    sd_destroypkt_for_uscsi,    /* Index: 25 */

};



/*
 * Array to map a layering chain index to the appropriate chain "type".
 * The chain type indicates a specific property/usage of the chain.
 * The redundant entries are present so that the index used for accessing
 * the above sd_iostart_chain and sd_iodone_chain tables can be used directly
 * with this table as well.
 */

#define SD_CHAIN_NULL             0  /* for the special RQS cmd */
#define SD_CHAIN_BUFIO            1  /* regular buf IO */
#define SD_CHAIN_USCSI            2  /* regular USCSI commands */
#define SD_CHAIN_DIRECT           3  /* uscsi, w/ bypass power mgt */
#define SD_CHAIN_DIRECT_PRIORITY  4  /* uscsi, w/ bypass power mgt */
                                     /* (for error recovery) */

static int sd_chain_type_map[] = {

    /* Chain for buf IO for disk drive targets (PM enabled) */
    SD_CHAIN_BUFIO,             /* Index: 0 */
    SD_CHAIN_BUFIO,             /* Index: 1 */
    SD_CHAIN_BUFIO,             /* Index: 2 */

    /* Chain for buf IO for disk drive targets (PM disabled) */
    SD_CHAIN_BUFIO,             /* Index: 3 */
    SD_CHAIN_BUFIO,             /* Index: 4 */

    /* Chain for buf IO for removable-media targets (PM enabled) */
    SD_CHAIN_BUFIO,             /* Index: 5 */
    SD_CHAIN_BUFIO,             /* Index: 6 */
    SD_CHAIN_BUFIO,             /* Index: 7 */
    SD_CHAIN_BUFIO,             /* Index: 8 */

    /* Chain for buf IO for removable-media targets (PM disabled) */
    SD_CHAIN_BUFIO,             /* Index: 9 */
    SD_CHAIN_BUFIO,             /* Index: 10 */
    SD_CHAIN_BUFIO,             /* Index: 11 */

    /* Chain for buf IO for disk drives with checksumming (PM enabled) */
    SD_CHAIN_BUFIO,             /* Index: 12 */
    SD_CHAIN_BUFIO,             /* Index: 13 */
    SD_CHAIN_BUFIO,             /* Index: 14 */
    SD_CHAIN_BUFIO,             /* Index: 15 */

    /* Chain for buf IO for disk drives with checksumming (PM disabled) */
    SD_CHAIN_BUFIO,             /* Index: 16 */
    SD_CHAIN_BUFIO,             /* Index: 17 */
    SD_CHAIN_BUFIO,             /* Index: 18 */

    /* Chain for USCSI commands (non-checksum targets) */
    SD_CHAIN_USCSI,             /* Index: 19 */
    SD_CHAIN_USCSI,             /* Index: 20 */

    /* Chain for USCSI commands (checksum targets) */
    SD_CHAIN_USCSI,             /* Index: 21 */
    SD_CHAIN_USCSI,             /* Index: 22 */
    SD_CHAIN_USCSI,             /* Index: 23 */

    /* Chain for "direct" USCSI commands (all targets) */
    SD_CHAIN_DIRECT,            /* Index: 24 */

    /* Chain for "direct priority" USCSI commands (all targets) */
    SD_CHAIN_DIRECT_PRIORITY,   /* Index: 25 */
};


/* Macro to return TRUE if the IO has come from the sd_buf_iostart() chain. */
#define SD_IS_BUFIO(xp) \
    (sd_chain_type_map[(xp)->xb_chain_iostart] == SD_CHAIN_BUFIO)

/* Macro to return TRUE if the IO has come from the "direct priority" chain. */
#define SD_IS_DIRECT_PRIORITY(xp) \
    (sd_chain_type_map[(xp)->xb_chain_iostart] == SD_CHAIN_DIRECT_PRIORITY)



/*
 * Struct, array, and macros to map a specific chain to the appropriate
 * layering indexes in the sd_iostart_chain[] and sd_iodone_chain[] arrays.
 *
 * The sd_chain_index_map[] array is used at attach time to set the various
 * un_xxx_chain type members of the sd_lun softstate to the specific layering
 * chain to be used with the instance. This allows different instances to use
 * different chains for buf IO, uscsi IO, etc. Also, since the xb_chain_iostart
 * and xb_chain_iodone index values in the sd_xbuf are initialized to these
 * values at sd_xbuf init time, this allows (1) layering chains to be changed
 * dynamically and without the use of locking, and (2) a layer to update the
 * xb_chain_io[start|done] member in a given xbuf with its current index value,
 * to allow for deferred processing of an IO within the same chain from a
 * different execution context.
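 *
 * A sketch of the attach-time usage (illustrative, not a verbatim quote of
 * the attach code): a plain disk instance would be configured as
 *
 *     un->un_buf_chain_type = SD_CHAIN_INFO_DISK;
 *
 * and xbuf initialization would then seed each xbuf from the map:
 *
 *     xp->xb_chain_iostart =
 *         sd_chain_index_map[un->un_buf_chain_type].sci_iostart_index;
 *     xp->xb_chain_iodone =
 *         sd_chain_index_map[un->un_buf_chain_type].sci_iodone_index;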
2120 */ 2121 2122 struct sd_chain_index { 2123 int sci_iostart_index; 2124 int sci_iodone_index; 2125 }; 2126 2127 static struct sd_chain_index sd_chain_index_map[] = { 2128 { SD_CHAIN_DISK_IOSTART, SD_CHAIN_DISK_IODONE }, 2129 { SD_CHAIN_DISK_IOSTART_NO_PM, SD_CHAIN_DISK_IODONE_NO_PM }, 2130 { SD_CHAIN_RMMEDIA_IOSTART, SD_CHAIN_RMMEDIA_IODONE }, 2131 { SD_CHAIN_RMMEDIA_IOSTART_NO_PM, SD_CHAIN_RMMEDIA_IODONE_NO_PM }, 2132 { SD_CHAIN_CHKSUM_IOSTART, SD_CHAIN_CHKSUM_IODONE }, 2133 { SD_CHAIN_CHKSUM_IOSTART_NO_PM, SD_CHAIN_CHKSUM_IODONE_NO_PM }, 2134 { SD_CHAIN_USCSI_CMD_IOSTART, SD_CHAIN_USCSI_CMD_IODONE }, 2135 { SD_CHAIN_USCSI_CHKSUM_IOSTART, SD_CHAIN_USCSI_CHKSUM_IODONE }, 2136 { SD_CHAIN_DIRECT_CMD_IOSTART, SD_CHAIN_DIRECT_CMD_IODONE }, 2137 { SD_CHAIN_PRIORITY_CMD_IOSTART, SD_CHAIN_PRIORITY_CMD_IODONE }, 2138 }; 2139 2140 2141 /* 2142 * The following are indexes into the sd_chain_index_map[] array. 2143 */ 2144 2145 /* un->un_buf_chain_type must be set to one of these */ 2146 #define SD_CHAIN_INFO_DISK 0 2147 #define SD_CHAIN_INFO_DISK_NO_PM 1 2148 #define SD_CHAIN_INFO_RMMEDIA 2 2149 #define SD_CHAIN_INFO_RMMEDIA_NO_PM 3 2150 #define SD_CHAIN_INFO_CHKSUM 4 2151 #define SD_CHAIN_INFO_CHKSUM_NO_PM 5 2152 2153 /* un->un_uscsi_chain_type must be set to one of these */ 2154 #define SD_CHAIN_INFO_USCSI_CMD 6 2155 /* USCSI with PM disabled is the same as DIRECT */ 2156 #define SD_CHAIN_INFO_USCSI_CMD_NO_PM 8 2157 #define SD_CHAIN_INFO_USCSI_CHKSUM 7 2158 2159 /* un->un_direct_chain_type must be set to one of these */ 2160 #define SD_CHAIN_INFO_DIRECT_CMD 8 2161 2162 /* un->un_priority_chain_type must be set to one of these */ 2163 #define SD_CHAIN_INFO_PRIORITY_CMD 9 2164 2165 /* size for devid inquiries */ 2166 #define MAX_INQUIRY_SIZE 0xF0 2167 2168 /* 2169 * Macros used by functions to pass a given buf(9S) struct along to the 2170 * next function in the layering chain for further processing. 2171 * 2172 * In the following macros, passing more than three arguments to the called 2173 * routines causes the optimizer for the SPARC compiler to stop doing tail 2174 * call elimination which results in significant performance degradation. 2175 */ 2176 #define SD_BEGIN_IOSTART(index, un, bp) \ 2177 ((*(sd_iostart_chain[index]))(index, un, bp)) 2178 2179 #define SD_BEGIN_IODONE(index, un, bp) \ 2180 ((*(sd_iodone_chain[index]))(index, un, bp)) 2181 2182 #define SD_NEXT_IOSTART(index, un, bp) \ 2183 ((*(sd_iostart_chain[(index) + 1]))((index) + 1, un, bp)) 2184 2185 #define SD_NEXT_IODONE(index, un, bp) \ 2186 ((*(sd_iodone_chain[(index) - 1]))((index) - 1, un, bp)) 2187 2188 /* 2189 * Function: _init 2190 * 2191 * Description: This is the driver _init(9E) entry point. 2192 * 2193 * Return Code: Returns the value from mod_install(9F) or 2194 * ddi_soft_state_init(9F) as appropriate. 2195 * 2196 * Context: Called when driver module loaded. 
2197 */ 2198 2199 int 2200 _init(void) 2201 { 2202 int err; 2203 2204 /* establish driver name from module name */ 2205 sd_label = (char *)mod_modname(&modlinkage); 2206 2207 err = ddi_soft_state_init(&sd_state, sizeof (struct sd_lun), 2208 SD_MAXUNIT); 2209 2210 if (err != 0) { 2211 return (err); 2212 } 2213 2214 mutex_init(&sd_detach_mutex, NULL, MUTEX_DRIVER, NULL); 2215 mutex_init(&sd_log_mutex, NULL, MUTEX_DRIVER, NULL); 2216 mutex_init(&sd_label_mutex, NULL, MUTEX_DRIVER, NULL); 2217 2218 mutex_init(&sd_tr.srq_resv_reclaim_mutex, NULL, MUTEX_DRIVER, NULL); 2219 cv_init(&sd_tr.srq_resv_reclaim_cv, NULL, CV_DRIVER, NULL); 2220 cv_init(&sd_tr.srq_inprocess_cv, NULL, CV_DRIVER, NULL); 2221 2222 /* 2223 * it's ok to init here even for fibre device 2224 */ 2225 sd_scsi_probe_cache_init(); 2226 2227 sd_scsi_target_lun_init(); 2228 2229 /* 2230 * Creating taskq before mod_install ensures that all callers (threads) 2231 * that enter the module after a successful mod_install encounter 2232 * a valid taskq. 2233 */ 2234 sd_taskq_create(); 2235 2236 err = mod_install(&modlinkage); 2237 if (err != 0) { 2238 /* delete taskq if install fails */ 2239 sd_taskq_delete(); 2240 2241 mutex_destroy(&sd_detach_mutex); 2242 mutex_destroy(&sd_log_mutex); 2243 mutex_destroy(&sd_label_mutex); 2244 2245 mutex_destroy(&sd_tr.srq_resv_reclaim_mutex); 2246 cv_destroy(&sd_tr.srq_resv_reclaim_cv); 2247 cv_destroy(&sd_tr.srq_inprocess_cv); 2248 2249 sd_scsi_probe_cache_fini(); 2250 2251 sd_scsi_target_lun_fini(); 2252 2253 ddi_soft_state_fini(&sd_state); 2254 return (err); 2255 } 2256 2257 return (err); 2258 } 2259 2260 2261 /* 2262 * Function: _fini 2263 * 2264 * Description: This is the driver _fini(9E) entry point. 2265 * 2266 * Return Code: Returns the value from mod_remove(9F) 2267 * 2268 * Context: Called when driver module is unloaded. 2269 */ 2270 2271 int 2272 _fini(void) 2273 { 2274 int err; 2275 2276 if ((err = mod_remove(&modlinkage)) != 0) { 2277 return (err); 2278 } 2279 2280 sd_taskq_delete(); 2281 2282 mutex_destroy(&sd_detach_mutex); 2283 mutex_destroy(&sd_log_mutex); 2284 mutex_destroy(&sd_label_mutex); 2285 mutex_destroy(&sd_tr.srq_resv_reclaim_mutex); 2286 2287 sd_scsi_probe_cache_fini(); 2288 2289 sd_scsi_target_lun_fini(); 2290 2291 cv_destroy(&sd_tr.srq_resv_reclaim_cv); 2292 cv_destroy(&sd_tr.srq_inprocess_cv); 2293 2294 ddi_soft_state_fini(&sd_state); 2295 2296 return (err); 2297 } 2298 2299 2300 /* 2301 * Function: _info 2302 * 2303 * Description: This is the driver _info(9E) entry point. 2304 * 2305 * Arguments: modinfop - pointer to the driver modinfo structure 2306 * 2307 * Return Code: Returns the value from mod_info(9F). 2308 * 2309 * Context: Kernel thread context 2310 */ 2311 2312 int 2313 _info(struct modinfo *modinfop) 2314 { 2315 return (mod_info(&modlinkage, modinfop)); 2316 } 2317 2318 2319 /* 2320 * The following routines implement the driver message logging facility. 2321 * They provide component- and level- based debug output filtering. 2322 * Output may also be restricted to messages for a single instance by 2323 * specifying a soft state pointer in sd_debug_un. If sd_debug_un is set 2324 * to NULL, then messages for all instances are printed. 2325 * 2326 * These routines have been cloned from each other due to the language 2327 * constraints of macros and variable argument list processing. 2328 */ 2329 2330 2331 /* 2332 * Function: sd_log_err 2333 * 2334 * Description: This routine is called by the SD_ERROR macro for debug 2335 * logging of error conditions. 
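 *
 *		A usage sketch (the message text here is hypothetical;
 *		component names such as SD_LOG_IO_CORE come from sddef.h):
 *
 *		    SD_ERROR(SD_LOG_IO_CORE, un,
 *			"sdintr: unexpected pkt state\n");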
 *
 * Arguments:	comp - driver component being logged
 *		un   - pointer to the driver soft state (unit) structure
 *		fmt  - error string and format to be logged
 */

static void
sd_log_err(uint_t comp, struct sd_lun *un, const char *fmt, ...)
{
    va_list     ap;
    dev_info_t  *dev;

    ASSERT(un != NULL);
    dev = SD_DEVINFO(un);
    ASSERT(dev != NULL);

    /*
     * Filter messages based on the global component and level masks.
     * Also print if un matches the value of sd_debug_un, or if
     * sd_debug_un is set to NULL.
     */
    if ((sd_component_mask & comp) && (sd_level_mask & SD_LOGMASK_ERROR) &&
        ((sd_debug_un == NULL) || (sd_debug_un == un))) {
        mutex_enter(&sd_log_mutex);
        va_start(ap, fmt);
        (void) vsprintf(sd_log_buf, fmt, ap);
        va_end(ap);
        scsi_log(dev, sd_label, CE_CONT, "%s", sd_log_buf);
        mutex_exit(&sd_log_mutex);
    }
#ifdef SD_FAULT_INJECTION
    _NOTE(DATA_READABLE_WITHOUT_LOCK(sd_lun::sd_injection_mask));
    if (un->sd_injection_mask & comp) {
        mutex_enter(&sd_log_mutex);
        va_start(ap, fmt);
        (void) vsprintf(sd_log_buf, fmt, ap);
        va_end(ap);
        sd_injection_log(sd_log_buf, un);
        mutex_exit(&sd_log_mutex);
    }
#endif
}


/*
 * Function:	sd_log_info
 *
 * Description:	This routine is called by the SD_INFO macro for debug
 *		logging of general purpose informational conditions.
 *
 * Arguments:	comp - driver component being logged
 *		un   - pointer to the driver soft state (unit) structure
 *		fmt  - info string and format to be logged
 */

static void
sd_log_info(uint_t component, struct sd_lun *un, const char *fmt, ...)
{
    va_list     ap;
    dev_info_t  *dev;

    ASSERT(un != NULL);
    dev = SD_DEVINFO(un);
    ASSERT(dev != NULL);

    /*
     * Filter messages based on the global component and level masks.
     * Also print if un matches the value of sd_debug_un, or if
     * sd_debug_un is set to NULL.
     */
    if ((sd_component_mask & component) &&
        (sd_level_mask & SD_LOGMASK_INFO) &&
        ((sd_debug_un == NULL) || (sd_debug_un == un))) {
        mutex_enter(&sd_log_mutex);
        va_start(ap, fmt);
        (void) vsprintf(sd_log_buf, fmt, ap);
        va_end(ap);
        scsi_log(dev, sd_label, CE_CONT, "%s", sd_log_buf);
        mutex_exit(&sd_log_mutex);
    }
#ifdef SD_FAULT_INJECTION
    _NOTE(DATA_READABLE_WITHOUT_LOCK(sd_lun::sd_injection_mask));
    if (un->sd_injection_mask & component) {
        mutex_enter(&sd_log_mutex);
        va_start(ap, fmt);
        (void) vsprintf(sd_log_buf, fmt, ap);
        va_end(ap);
        sd_injection_log(sd_log_buf, un);
        mutex_exit(&sd_log_mutex);
    }
#endif
}


/*
 * Function:	sd_log_trace
 *
 * Description:	This routine is called by the SD_TRACE macro for debug
 *		logging of trace conditions (i.e. function entry/exit).
 *
 * Arguments:	comp - driver component being logged
 *		un   - pointer to the driver soft state (unit) structure
 *		fmt  - trace string and format to be logged
 */

static void
sd_log_trace(uint_t component, struct sd_lun *un, const char *fmt, ...)
{
    va_list     ap;
    dev_info_t  *dev;

    ASSERT(un != NULL);
    dev = SD_DEVINFO(un);
    ASSERT(dev != NULL);

    /*
     * Filter messages based on the global component and level masks.
     * Also print if un matches the value of sd_debug_un, or if
     * sd_debug_un is set to NULL.
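     *
     * For example (a debugging sketch; the mask names and their values
     * are defined in sddef.h), trace output for all components and all
     * instances might be enabled from /etc/system with something like:
     *
     *     set sd:sd_component_mask = 0xffffffff
     *     set sd:sd_level_mask = 0xf
     *
     * and then restricted to a single instance by setting sd_debug_un to
     * that instance's soft state pointer (e.g. with mdb -kw).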
2455 */ 2456 if ((sd_component_mask & component) && 2457 (sd_level_mask & SD_LOGMASK_TRACE) && 2458 ((sd_debug_un == NULL) || (sd_debug_un == un))) { 2459 mutex_enter(&sd_log_mutex); 2460 va_start(ap, fmt); 2461 (void) vsprintf(sd_log_buf, fmt, ap); 2462 va_end(ap); 2463 scsi_log(dev, sd_label, CE_CONT, "%s", sd_log_buf); 2464 mutex_exit(&sd_log_mutex); 2465 } 2466 #ifdef SD_FAULT_INJECTION 2467 _NOTE(DATA_READABLE_WITHOUT_LOCK(sd_lun::sd_injection_mask)); 2468 if (un->sd_injection_mask & component) { 2469 mutex_enter(&sd_log_mutex); 2470 va_start(ap, fmt); 2471 (void) vsprintf(sd_log_buf, fmt, ap); 2472 va_end(ap); 2473 sd_injection_log(sd_log_buf, un); 2474 mutex_exit(&sd_log_mutex); 2475 } 2476 #endif 2477 } 2478 2479 2480 /* 2481 * Function: sdprobe 2482 * 2483 * Description: This is the driver probe(9e) entry point function. 2484 * 2485 * Arguments: devi - opaque device info handle 2486 * 2487 * Return Code: DDI_PROBE_SUCCESS: If the probe was successful. 2488 * DDI_PROBE_FAILURE: If the probe failed. 2489 * DDI_PROBE_PARTIAL: If the instance is not present now, 2490 * but may be present in the future. 2491 */ 2492 2493 static int 2494 sdprobe(dev_info_t *devi) 2495 { 2496 struct scsi_device *devp; 2497 int rval; 2498 int instance; 2499 2500 /* 2501 * if it wasn't for pln, sdprobe could actually be nulldev 2502 * in the "__fibre" case. 2503 */ 2504 if (ddi_dev_is_sid(devi) == DDI_SUCCESS) { 2505 return (DDI_PROBE_DONTCARE); 2506 } 2507 2508 devp = ddi_get_driver_private(devi); 2509 2510 if (devp == NULL) { 2511 /* Ooops... nexus driver is mis-configured... */ 2512 return (DDI_PROBE_FAILURE); 2513 } 2514 2515 instance = ddi_get_instance(devi); 2516 2517 if (ddi_get_soft_state(sd_state, instance) != NULL) { 2518 return (DDI_PROBE_PARTIAL); 2519 } 2520 2521 /* 2522 * Call the SCSA utility probe routine to see if we actually 2523 * have a target at this SCSI nexus. 2524 */ 2525 switch (sd_scsi_probe_with_cache(devp, NULL_FUNC)) { 2526 case SCSIPROBE_EXISTS: 2527 switch (devp->sd_inq->inq_dtype) { 2528 case DTYPE_DIRECT: 2529 rval = DDI_PROBE_SUCCESS; 2530 break; 2531 case DTYPE_RODIRECT: 2532 /* CDs etc. Can be removable media */ 2533 rval = DDI_PROBE_SUCCESS; 2534 break; 2535 case DTYPE_OPTICAL: 2536 /* 2537 * Rewritable optical driver HP115AA 2538 * Can also be removable media 2539 */ 2540 2541 /* 2542 * Do not attempt to bind to DTYPE_OPTICAL if 2543 * pre solaris 9 sparc sd behavior is required 2544 * 2545 * If first time through and sd_dtype_optical_bind 2546 * has not been set in /etc/system check properties 2547 */ 2548 2549 if (sd_dtype_optical_bind < 0) { 2550 sd_dtype_optical_bind = ddi_prop_get_int 2551 (DDI_DEV_T_ANY, devi, 0, 2552 "optical-device-bind", 1); 2553 } 2554 2555 if (sd_dtype_optical_bind == 0) { 2556 rval = DDI_PROBE_FAILURE; 2557 } else { 2558 rval = DDI_PROBE_SUCCESS; 2559 } 2560 break; 2561 2562 case DTYPE_NOTPRESENT: 2563 default: 2564 rval = DDI_PROBE_FAILURE; 2565 break; 2566 } 2567 break; 2568 default: 2569 rval = DDI_PROBE_PARTIAL; 2570 break; 2571 } 2572 2573 /* 2574 * This routine checks for resource allocation prior to freeing, 2575 * so it will take care of the "smart probing" case where a 2576 * scsi_probe() may or may not have been issued and will *not* 2577 * free previously-freed resources. 2578 */ 2579 scsi_unprobe(devp); 2580 return (rval); 2581 } 2582 2583 2584 /* 2585 * Function: sdinfo 2586 * 2587 * Description: This is the driver getinfo(9e) entry point function. 
2588 * Given the device number, return the devinfo pointer from 2589 * the scsi_device structure or the instance number 2590 * associated with the dev_t. 2591 * 2592 * Arguments: dip - pointer to device info structure 2593 * infocmd - command argument (DDI_INFO_DEVT2DEVINFO, 2594 * DDI_INFO_DEVT2INSTANCE) 2595 * arg - driver dev_t 2596 * resultp - user buffer for request response 2597 * 2598 * Return Code: DDI_SUCCESS 2599 * DDI_FAILURE 2600 */ 2601 /* ARGSUSED */ 2602 static int 2603 sdinfo(dev_info_t *dip, ddi_info_cmd_t infocmd, void *arg, void **result) 2604 { 2605 struct sd_lun *un; 2606 dev_t dev; 2607 int instance; 2608 int error; 2609 2610 switch (infocmd) { 2611 case DDI_INFO_DEVT2DEVINFO: 2612 dev = (dev_t)arg; 2613 instance = SDUNIT(dev); 2614 if ((un = ddi_get_soft_state(sd_state, instance)) == NULL) { 2615 return (DDI_FAILURE); 2616 } 2617 *result = (void *) SD_DEVINFO(un); 2618 error = DDI_SUCCESS; 2619 break; 2620 case DDI_INFO_DEVT2INSTANCE: 2621 dev = (dev_t)arg; 2622 instance = SDUNIT(dev); 2623 *result = (void *)(uintptr_t)instance; 2624 error = DDI_SUCCESS; 2625 break; 2626 default: 2627 error = DDI_FAILURE; 2628 } 2629 return (error); 2630 } 2631 2632 /* 2633 * Function: sd_prop_op 2634 * 2635 * Description: This is the driver prop_op(9e) entry point function. 2636 * Return the number of blocks for the partition in question 2637 * or forward the request to the property facilities. 2638 * 2639 * Arguments: dev - device number 2640 * dip - pointer to device info structure 2641 * prop_op - property operator 2642 * mod_flags - DDI_PROP_DONTPASS, don't pass to parent 2643 * name - pointer to property name 2644 * valuep - pointer or address of the user buffer 2645 * lengthp - property length 2646 * 2647 * Return Code: DDI_PROP_SUCCESS 2648 * DDI_PROP_NOT_FOUND 2649 * DDI_PROP_UNDEFINED 2650 * DDI_PROP_NO_MEMORY 2651 * DDI_PROP_BUF_TOO_SMALL 2652 */ 2653 2654 static int 2655 sd_prop_op(dev_t dev, dev_info_t *dip, ddi_prop_op_t prop_op, int mod_flags, 2656 char *name, caddr_t valuep, int *lengthp) 2657 { 2658 struct sd_lun *un; 2659 2660 if ((un = ddi_get_soft_state(sd_state, ddi_get_instance(dip))) == NULL) 2661 return (ddi_prop_op(dev, dip, prop_op, mod_flags, 2662 name, valuep, lengthp)); 2663 2664 return (cmlb_prop_op(un->un_cmlbhandle, 2665 dev, dip, prop_op, mod_flags, name, valuep, lengthp, 2666 SDPART(dev), (void *)SD_PATH_DIRECT)); 2667 } 2668 2669 /* 2670 * The following functions are for smart probing: 2671 * sd_scsi_probe_cache_init() 2672 * sd_scsi_probe_cache_fini() 2673 * sd_scsi_clear_probe_cache() 2674 * sd_scsi_probe_with_cache() 2675 */ 2676 2677 /* 2678 * Function: sd_scsi_probe_cache_init 2679 * 2680 * Description: Initializes the probe response cache mutex and head pointer. 2681 * 2682 * Context: Kernel thread context 2683 */ 2684 2685 static void 2686 sd_scsi_probe_cache_init(void) 2687 { 2688 mutex_init(&sd_scsi_probe_cache_mutex, NULL, MUTEX_DRIVER, NULL); 2689 sd_scsi_probe_cache_head = NULL; 2690 } 2691 2692 2693 /* 2694 * Function: sd_scsi_probe_cache_fini 2695 * 2696 * Description: Frees all resources associated with the probe response cache. 
2697 * 2698 * Context: Kernel thread context 2699 */ 2700 2701 static void 2702 sd_scsi_probe_cache_fini(void) 2703 { 2704 struct sd_scsi_probe_cache *cp; 2705 struct sd_scsi_probe_cache *ncp; 2706 2707 /* Clean up our smart probing linked list */ 2708 for (cp = sd_scsi_probe_cache_head; cp != NULL; cp = ncp) { 2709 ncp = cp->next; 2710 kmem_free(cp, sizeof (struct sd_scsi_probe_cache)); 2711 } 2712 sd_scsi_probe_cache_head = NULL; 2713 mutex_destroy(&sd_scsi_probe_cache_mutex); 2714 } 2715 2716 2717 /* 2718 * Function: sd_scsi_clear_probe_cache 2719 * 2720 * Description: This routine clears the probe response cache. This is 2721 * done when open() returns ENXIO so that when deferred 2722 * attach is attempted (possibly after a device has been 2723 * turned on) we will retry the probe. Since we don't know 2724 * which target we failed to open, we just clear the 2725 * entire cache. 2726 * 2727 * Context: Kernel thread context 2728 */ 2729 2730 static void 2731 sd_scsi_clear_probe_cache(void) 2732 { 2733 struct sd_scsi_probe_cache *cp; 2734 int i; 2735 2736 mutex_enter(&sd_scsi_probe_cache_mutex); 2737 for (cp = sd_scsi_probe_cache_head; cp != NULL; cp = cp->next) { 2738 /* 2739 * Reset all entries to SCSIPROBE_EXISTS. This will 2740 * force probing to be performed the next time 2741 * sd_scsi_probe_with_cache is called. 2742 */ 2743 for (i = 0; i < NTARGETS_WIDE; i++) { 2744 cp->cache[i] = SCSIPROBE_EXISTS; 2745 } 2746 } 2747 mutex_exit(&sd_scsi_probe_cache_mutex); 2748 } 2749 2750 2751 /* 2752 * Function: sd_scsi_probe_with_cache 2753 * 2754 * Description: This routine implements support for a scsi device probe 2755 * with cache. The driver maintains a cache of the target 2756 * responses to scsi probes. If we get no response from a 2757 * target during a probe inquiry, we remember that, and we 2758 * avoid additional calls to scsi_probe on non-zero LUNs 2759 * on the same target until the cache is cleared. By doing 2760 * so we avoid the 1/4 sec selection timeout for nonzero 2761 * LUNs. lun0 of a target is always probed. 2762 * 2763 * Arguments: devp - Pointer to a scsi_device(9S) structure 2764 * waitfunc - indicates what the allocator routines should 2765 * do when resources are not available. This value 2766 * is passed on to scsi_probe() when that routine 2767 * is called. 2768 * 2769 * Return Code: SCSIPROBE_NORESP if a NORESP in probe response cache; 2770 * otherwise the value returned by scsi_probe(9F). 
 *
 * Context:	Kernel thread context
 */

static int
sd_scsi_probe_with_cache(struct scsi_device *devp, int (*waitfn)())
{
    struct sd_scsi_probe_cache  *cp;
    dev_info_t  *pdip = ddi_get_parent(devp->sd_dev);
    int         lun, tgt;

    lun = ddi_prop_get_int(DDI_DEV_T_ANY, devp->sd_dev, DDI_PROP_DONTPASS,
        SCSI_ADDR_PROP_LUN, 0);
    tgt = ddi_prop_get_int(DDI_DEV_T_ANY, devp->sd_dev, DDI_PROP_DONTPASS,
        SCSI_ADDR_PROP_TARGET, -1);

    /* Make sure caching is enabled and the target is in range */
    if ((tgt < 0) || (tgt >= NTARGETS_WIDE)) {
        /* do it the old way (no cache) */
        return (scsi_probe(devp, waitfn));
    }

    mutex_enter(&sd_scsi_probe_cache_mutex);

    /* Find the cache for this scsi bus instance */
    for (cp = sd_scsi_probe_cache_head; cp != NULL; cp = cp->next) {
        if (cp->pdip == pdip) {
            break;
        }
    }

    /* If we can't find a cache for this pdip, create one */
    if (cp == NULL) {
        int i;

        cp = kmem_zalloc(sizeof (struct sd_scsi_probe_cache),
            KM_SLEEP);
        cp->pdip = pdip;
        cp->next = sd_scsi_probe_cache_head;
        sd_scsi_probe_cache_head = cp;
        for (i = 0; i < NTARGETS_WIDE; i++) {
            cp->cache[i] = SCSIPROBE_EXISTS;
        }
    }

    mutex_exit(&sd_scsi_probe_cache_mutex);

    /* Recompute the cache for this target if LUN zero */
    if (lun == 0) {
        cp->cache[tgt] = SCSIPROBE_EXISTS;
    }

    /* Don't probe if cache remembers a NORESP from a previous LUN. */
    if (cp->cache[tgt] != SCSIPROBE_EXISTS) {
        return (SCSIPROBE_NORESP);
    }

    /* Do the actual probe; save & return the result */
    return (cp->cache[tgt] = scsi_probe(devp, waitfn));
}


/*
 * Function:	sd_scsi_target_lun_init
 *
 * Description:	Initializes the attached lun chain mutex and head pointer.
 *
 * Context:	Kernel thread context
 */

static void
sd_scsi_target_lun_init(void)
{
    mutex_init(&sd_scsi_target_lun_mutex, NULL, MUTEX_DRIVER, NULL);
    sd_scsi_target_lun_head = NULL;
}


/*
 * Function:	sd_scsi_target_lun_fini
 *
 * Description:	Frees all resources associated with the attached lun
 *		chain.
 *
 * Context:	Kernel thread context
 */

static void
sd_scsi_target_lun_fini(void)
{
    struct sd_scsi_hba_tgt_lun  *cp;
    struct sd_scsi_hba_tgt_lun  *ncp;

    for (cp = sd_scsi_target_lun_head; cp != NULL; cp = ncp) {
        ncp = cp->next;
        kmem_free(cp, sizeof (struct sd_scsi_hba_tgt_lun));
    }
    sd_scsi_target_lun_head = NULL;
    mutex_destroy(&sd_scsi_target_lun_mutex);
}


/*
 * Function:	sd_scsi_get_target_lun_count
 *
 * Description:	This routine checks the attached lun chain to see how many
 *		luns are attached on the given SCSI controller and target.
 *		Currently, some capabilities (such as tagged queueing) are
 *		set up per target by the HBA, so all luns on a target share
 *		the same capabilities. Based on this assumption, sd should
 *		set these capabilities only once per target. This function
 *		is called when sd needs to know how many luns are already
 *		attached on a target.
 *
 * Arguments:	dip	- Pointer to the system's dev_info_t for the SCSI
 *			  controller device.
 *		target	- The target ID on the controller's SCSI bus.
2888 * 2889 * Return Code: The number of luns attached on the required target and 2890 * controller. 2891 * -1 if target ID is not in parallel SCSI scope or the given 2892 * dip is not in the chain. 2893 * 2894 * Context: Kernel thread context 2895 */ 2896 2897 static int 2898 sd_scsi_get_target_lun_count(dev_info_t *dip, int target) 2899 { 2900 struct sd_scsi_hba_tgt_lun *cp; 2901 2902 if ((target < 0) || (target >= NTARGETS_WIDE)) { 2903 return (-1); 2904 } 2905 2906 mutex_enter(&sd_scsi_target_lun_mutex); 2907 2908 for (cp = sd_scsi_target_lun_head; cp != NULL; cp = cp->next) { 2909 if (cp->pdip == dip) { 2910 break; 2911 } 2912 } 2913 2914 mutex_exit(&sd_scsi_target_lun_mutex); 2915 2916 if (cp == NULL) { 2917 return (-1); 2918 } 2919 2920 return (cp->nlun[target]); 2921 } 2922 2923 2924 /* 2925 * Function: sd_scsi_update_lun_on_target 2926 * 2927 * Description: This routine is used to update the attached lun chain when a 2928 * lun is attached or detached on a target. 2929 * 2930 * Arguments: dip - Pointer to the system's dev_info_t for the SCSI 2931 * controller device. 2932 * target - The target ID on the controller's SCSI bus. 2933 * flag - Indicate the lun is attached or detached. 2934 * 2935 * Context: Kernel thread context 2936 */ 2937 2938 static void 2939 sd_scsi_update_lun_on_target(dev_info_t *dip, int target, int flag) 2940 { 2941 struct sd_scsi_hba_tgt_lun *cp; 2942 2943 mutex_enter(&sd_scsi_target_lun_mutex); 2944 2945 for (cp = sd_scsi_target_lun_head; cp != NULL; cp = cp->next) { 2946 if (cp->pdip == dip) { 2947 break; 2948 } 2949 } 2950 2951 if ((cp == NULL) && (flag == SD_SCSI_LUN_ATTACH)) { 2952 cp = kmem_zalloc(sizeof (struct sd_scsi_hba_tgt_lun), 2953 KM_SLEEP); 2954 cp->pdip = dip; 2955 cp->next = sd_scsi_target_lun_head; 2956 sd_scsi_target_lun_head = cp; 2957 } 2958 2959 mutex_exit(&sd_scsi_target_lun_mutex); 2960 2961 if (cp != NULL) { 2962 if (flag == SD_SCSI_LUN_ATTACH) { 2963 cp->nlun[target] ++; 2964 } else { 2965 cp->nlun[target] --; 2966 } 2967 } 2968 } 2969 2970 2971 /* 2972 * Function: sd_spin_up_unit 2973 * 2974 * Description: Issues the following commands to spin-up the device: 2975 * START STOP UNIT, and INQUIRY. 2976 * 2977 * Arguments: ssc - ssc contains pointer to driver soft state (unit) 2978 * structure for this target. 2979 * 2980 * Return Code: 0 - success 2981 * EIO - failure 2982 * EACCES - reservation conflict 2983 * 2984 * Context: Kernel thread context 2985 */ 2986 2987 static int 2988 sd_spin_up_unit(sd_ssc_t *ssc) 2989 { 2990 size_t resid = 0; 2991 int has_conflict = FALSE; 2992 uchar_t *bufaddr; 2993 int status; 2994 struct sd_lun *un; 2995 2996 ASSERT(ssc != NULL); 2997 un = ssc->ssc_un; 2998 ASSERT(un != NULL); 2999 3000 /* 3001 * Send a throwaway START UNIT command. 3002 * 3003 * If we fail on this, we don't care presently what precisely 3004 * is wrong. EMC's arrays will also fail this with a check 3005 * condition (0x2/0x4/0x3) if the device is "inactive," but 3006 * we don't want to fail the attach because it may become 3007 * "active" later. 3008 */ 3009 status = sd_send_scsi_START_STOP_UNIT(ssc, SD_TARGET_START, 3010 SD_PATH_DIRECT); 3011 3012 if (status != 0) { 3013 if (status == EACCES) 3014 has_conflict = TRUE; 3015 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 3016 } 3017 3018 /* 3019 * Send another INQUIRY command to the target. This is necessary for 3020 * non-removable media direct access devices because their INQUIRY data 3021 * may not be fully qualified until they are spun up (perhaps via the 3022 * START command above). 
Note: This seems to be needed for some
	 * legacy devices only. The INQUIRY command should succeed even if a
	 * Reservation Conflict is present.
	 */
    bufaddr = kmem_zalloc(SUN_INQSIZE, KM_SLEEP);

    if (sd_send_scsi_INQUIRY(ssc, bufaddr, SUN_INQSIZE, 0, 0, &resid)
        != 0) {
        kmem_free(bufaddr, SUN_INQSIZE);
        sd_ssc_assessment(ssc, SD_FMT_STATUS_CHECK);
        return (EIO);
    }

    /*
     * If we got enough INQUIRY data, copy it over the old INQUIRY data.
     * Note that this routine does not return a failure here even if the
     * INQUIRY command did not return any data. This is a legacy behavior.
     */
    if ((SUN_INQSIZE - resid) >= SUN_MIN_INQLEN) {
        bcopy(bufaddr, SD_INQUIRY(un), SUN_INQSIZE);
    }

    kmem_free(bufaddr, SUN_INQSIZE);

    /* If we hit a reservation conflict above, tell the caller. */
    if (has_conflict == TRUE) {
        return (EACCES);
    }

    return (0);
}

#ifdef _LP64
/*
 * Function:	sd_enable_descr_sense
 *
 * Description:	This routine attempts to select descriptor sense format
 *		using the Control mode page. Devices that support 64 bit
 *		LBAs (for >2TB luns) should also implement descriptor
 *		sense data, so we will call this function whenever we see
 *		a lun larger than 2TB. If for some reason the device
 *		supports 64 bit LBAs but doesn't support descriptor sense,
 *		presumably the mode select will fail. Everything will
 *		continue to work normally except that we will not get
 *		complete sense data for commands that fail with an LBA
 *		larger than 32 bits.
 *
 * Arguments:	ssc - ssc contains pointer to driver soft state (unit)
 *		structure for this target.
 *
 * Context:	Kernel thread context only
 */

static void
sd_enable_descr_sense(sd_ssc_t *ssc)
{
    uchar_t                     *header;
    struct mode_control_scsi3   *ctrl_bufp;
    size_t                      buflen;
    size_t                      bd_len;
    int                         status;
    struct sd_lun               *un;

    ASSERT(ssc != NULL);
    un = ssc->ssc_un;
    ASSERT(un != NULL);

    /*
     * Read MODE SENSE page 0xA, Control Mode Page
     */
    buflen = MODE_HEADER_LENGTH + MODE_BLK_DESC_LENGTH +
        sizeof (struct mode_control_scsi3);
    header = kmem_zalloc(buflen, KM_SLEEP);

    status = sd_send_scsi_MODE_SENSE(ssc, CDB_GROUP0, header, buflen,
        MODEPAGE_CTRL_MODE, SD_PATH_DIRECT);

    if (status != 0) {
        SD_ERROR(SD_LOG_COMMON, un,
            "sd_enable_descr_sense: mode sense ctrl page failed\n");
        goto eds_exit;
    }

    /*
     * Determine size of Block Descriptors in order to locate
     * the mode page data. ATAPI devices return 0, SCSI devices
     * should return MODE_BLK_DESC_LENGTH.
     */
    bd_len = ((struct mode_header *)header)->bdesc_length;

    /* Clear the mode data length field for MODE SELECT */
    ((struct mode_header *)header)->length = 0;

    ctrl_bufp = (struct mode_control_scsi3 *)
        (header + MODE_HEADER_LENGTH + bd_len);

    /*
     * If the page length is smaller than the expected value,
     * the target device doesn't support D_SENSE. Bail out here.
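     * (As a point of reference, SPC-3 defines the control mode page
     * length as 0x0A; the check below compares against
     * sizeof (struct mode_control_scsi3) - 2, which should match.)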
3121 */ 3122 if (ctrl_bufp->mode_page.length < 3123 sizeof (struct mode_control_scsi3) - 2) { 3124 SD_ERROR(SD_LOG_COMMON, un, 3125 "sd_enable_descr_sense: enable D_SENSE failed\n"); 3126 goto eds_exit; 3127 } 3128 3129 /* 3130 * Clear PS bit for MODE SELECT 3131 */ 3132 ctrl_bufp->mode_page.ps = 0; 3133 3134 /* 3135 * Set D_SENSE to enable descriptor sense format. 3136 */ 3137 ctrl_bufp->d_sense = 1; 3138 3139 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 3140 3141 /* 3142 * Use MODE SELECT to commit the change to the D_SENSE bit 3143 */ 3144 status = sd_send_scsi_MODE_SELECT(ssc, CDB_GROUP0, header, 3145 buflen, SD_DONTSAVE_PAGE, SD_PATH_DIRECT); 3146 3147 if (status != 0) { 3148 SD_INFO(SD_LOG_COMMON, un, 3149 "sd_enable_descr_sense: mode select ctrl page failed\n"); 3150 } else { 3151 kmem_free(header, buflen); 3152 return; 3153 } 3154 3155 eds_exit: 3156 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 3157 kmem_free(header, buflen); 3158 } 3159 3160 /* 3161 * Function: sd_reenable_dsense_task 3162 * 3163 * Description: Re-enable descriptor sense after device or bus reset 3164 * 3165 * Context: Executes in a taskq() thread context 3166 */ 3167 static void 3168 sd_reenable_dsense_task(void *arg) 3169 { 3170 struct sd_lun *un = arg; 3171 sd_ssc_t *ssc; 3172 3173 ASSERT(un != NULL); 3174 3175 ssc = sd_ssc_init(un); 3176 sd_enable_descr_sense(ssc); 3177 sd_ssc_fini(ssc); 3178 } 3179 #endif /* _LP64 */ 3180 3181 /* 3182 * Function: sd_set_mmc_caps 3183 * 3184 * Description: This routine determines if the device is MMC compliant and if 3185 * the device supports CDDA via a mode sense of the CDVD 3186 * capabilities mode page. Also checks if the device is a 3187 * dvdram writable device. 3188 * 3189 * Arguments: ssc - ssc contains pointer to driver soft state (unit) 3190 * structure for this target. 3191 * 3192 * Context: Kernel thread context only 3193 */ 3194 3195 static void 3196 sd_set_mmc_caps(sd_ssc_t *ssc) 3197 { 3198 struct mode_header_grp2 *sense_mhp; 3199 uchar_t *sense_page; 3200 caddr_t buf; 3201 int bd_len; 3202 int status; 3203 struct uscsi_cmd com; 3204 int rtn; 3205 uchar_t *out_data_rw, *out_data_hd; 3206 uchar_t *rqbuf_rw, *rqbuf_hd; 3207 struct sd_lun *un; 3208 3209 ASSERT(ssc != NULL); 3210 un = ssc->ssc_un; 3211 ASSERT(un != NULL); 3212 3213 /* 3214 * The flags which will be set in this function are - mmc compliant, 3215 * dvdram writable device, cdda support. Initialize them to FALSE 3216 * and if a capability is detected - it will be set to TRUE. 3217 */ 3218 un->un_f_mmc_cap = FALSE; 3219 un->un_f_dvdram_writable_device = FALSE; 3220 un->un_f_cfg_cdda = FALSE; 3221 3222 buf = kmem_zalloc(BUFLEN_MODE_CDROM_CAP, KM_SLEEP); 3223 status = sd_send_scsi_MODE_SENSE(ssc, CDB_GROUP1, (uchar_t *)buf, 3224 BUFLEN_MODE_CDROM_CAP, MODEPAGE_CDROM_CAP, SD_PATH_DIRECT); 3225 3226 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 3227 3228 if (status != 0) { 3229 /* command failed; just return */ 3230 kmem_free(buf, BUFLEN_MODE_CDROM_CAP); 3231 return; 3232 } 3233 /* 3234 * If the mode sense request for the CDROM CAPABILITIES 3235 * page (0x2A) succeeds the device is assumed to be MMC. 3236 */ 3237 un->un_f_mmc_cap = TRUE; 3238 3239 /* Get to the page data */ 3240 sense_mhp = (struct mode_header_grp2 *)buf; 3241 bd_len = (sense_mhp->bdesc_length_hi << 8) | 3242 sense_mhp->bdesc_length_lo; 3243 if (bd_len > MODE_BLK_DESC_LENGTH) { 3244 /* 3245 * We did not get back the expected block descriptor 3246 * length so we cannot determine if the device supports 3247 * CDDA. 
However, we still indicate the device is MMC, 3248 * based on the successful response to the page 3249 * 0x2A mode sense request. 3250 */ 3251 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 3252 "sd_set_mmc_caps: Mode Sense returned " 3253 "invalid block descriptor length\n"); 3254 kmem_free(buf, BUFLEN_MODE_CDROM_CAP); 3255 return; 3256 } 3257 3258 /* See if read CDDA is supported */ 3259 sense_page = (uchar_t *)(buf + MODE_HEADER_LENGTH_GRP2 + 3260 bd_len); 3261 un->un_f_cfg_cdda = (sense_page[5] & 0x01) ? TRUE : FALSE; 3262 3263 /* See if writing DVD RAM is supported. */ 3264 un->un_f_dvdram_writable_device = (sense_page[3] & 0x20) ? TRUE : FALSE; 3265 if (un->un_f_dvdram_writable_device == TRUE) { 3266 kmem_free(buf, BUFLEN_MODE_CDROM_CAP); 3267 return; 3268 } 3269 3270 /* 3271 * If the device presents DVD or CD capabilities in the mode 3272 * page, we can return here since an RRD will not have 3273 * these capabilities. 3274 */ 3275 if ((sense_page[2] & 0x3f) || (sense_page[3] & 0x3f)) { 3276 kmem_free(buf, BUFLEN_MODE_CDROM_CAP); 3277 return; 3278 } 3279 kmem_free(buf, BUFLEN_MODE_CDROM_CAP); 3280 3281 /* 3282 * If un->un_f_dvdram_writable_device is still FALSE, 3283 * check for a Removable Rigid Disk (RRD). An RRD 3284 * device is identified by the features RANDOM_WRITABLE and 3285 * HARDWARE_DEFECT_MANAGEMENT. 3286 */ 3287 out_data_rw = kmem_zalloc(SD_CURRENT_FEATURE_LEN, KM_SLEEP); 3288 rqbuf_rw = kmem_zalloc(SENSE_LENGTH, KM_SLEEP); 3289 3290 rtn = sd_send_scsi_feature_GET_CONFIGURATION(ssc, &com, rqbuf_rw, 3291 SENSE_LENGTH, out_data_rw, SD_CURRENT_FEATURE_LEN, 3292 RANDOM_WRITABLE, SD_PATH_STANDARD); 3293 3294 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 3295 3296 if (rtn != 0) { 3297 kmem_free(out_data_rw, SD_CURRENT_FEATURE_LEN); 3298 kmem_free(rqbuf_rw, SENSE_LENGTH); 3299 return; 3300 } 3301 3302 out_data_hd = kmem_zalloc(SD_CURRENT_FEATURE_LEN, KM_SLEEP); 3303 rqbuf_hd = kmem_zalloc(SENSE_LENGTH, KM_SLEEP); 3304 3305 rtn = sd_send_scsi_feature_GET_CONFIGURATION(ssc, &com, rqbuf_hd, 3306 SENSE_LENGTH, out_data_hd, SD_CURRENT_FEATURE_LEN, 3307 HARDWARE_DEFECT_MANAGEMENT, SD_PATH_STANDARD); 3308 3309 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 3310 3311 if (rtn == 0) { 3312 /* 3313 * We have good information, check for random writable 3314 * and hardware defect features. 3315 */ 3316 if ((out_data_rw[9] & RANDOM_WRITABLE) && 3317 (out_data_hd[9] & HARDWARE_DEFECT_MANAGEMENT)) { 3318 un->un_f_dvdram_writable_device = TRUE; 3319 } 3320 } 3321 3322 kmem_free(out_data_rw, SD_CURRENT_FEATURE_LEN); 3323 kmem_free(rqbuf_rw, SENSE_LENGTH); 3324 kmem_free(out_data_hd, SD_CURRENT_FEATURE_LEN); 3325 kmem_free(rqbuf_hd, SENSE_LENGTH); 3326 } 3327 3328 /* 3329 * Function: sd_check_for_writable_cd 3330 * 3331 * Description: This routine determines if the media in the device is 3332 * writable or not. It uses the GET CONFIGURATION command (0x46) 3333 * to determine if the media is writable. 3334 * 3335 * Arguments: ssc - ssc contains pointer to driver soft state (unit) 3336 * structure for this target. 3337 * path_flag - SD_PATH_DIRECT to use the USCSI "direct" 3338 * chain and the normal command waitq, or 3339 * SD_PATH_DIRECT_PRIORITY to use the USCSI 3340 * "direct" chain and bypass the normal command 3341 * waitq. 3342 * Context: Never called at interrupt context.
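 *
 *		Note: In a GET CONFIGURATION response, bytes 6-7 of the
 *		feature header hold the current profile; the check in the
 *		body for out_data[6] == 0 and out_data[7] == 0x12 matches
 *		profile 0x0012 (DVD-RAM), i.e. writable media.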
3343 */ 3344 3345 static void 3346 sd_check_for_writable_cd(sd_ssc_t *ssc, int path_flag) 3347 { 3348 struct uscsi_cmd com; 3349 uchar_t *out_data; 3350 uchar_t *rqbuf; 3351 int rtn; 3352 uchar_t *out_data_rw, *out_data_hd; 3353 uchar_t *rqbuf_rw, *rqbuf_hd; 3354 struct mode_header_grp2 *sense_mhp; 3355 uchar_t *sense_page; 3356 caddr_t buf; 3357 int bd_len; 3358 int status; 3359 struct sd_lun *un; 3360 3361 ASSERT(ssc != NULL); 3362 un = ssc->ssc_un; 3363 ASSERT(un != NULL); 3364 ASSERT(mutex_owned(SD_MUTEX(un))); 3365 3366 /* 3367 * Initialize writable media to FALSE; it is set to TRUE below 3368 * only if the configuration info indicates the media is writable. 3369 */ 3370 un->un_f_mmc_writable_media = FALSE; 3371 mutex_exit(SD_MUTEX(un)); 3372 3373 out_data = kmem_zalloc(SD_PROFILE_HEADER_LEN, KM_SLEEP); 3374 rqbuf = kmem_zalloc(SENSE_LENGTH, KM_SLEEP); 3375 3376 rtn = sd_send_scsi_GET_CONFIGURATION(ssc, &com, rqbuf, SENSE_LENGTH, 3377 out_data, SD_PROFILE_HEADER_LEN, path_flag); 3378 3379 if (rtn != 0) 3380 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 3381 3382 mutex_enter(SD_MUTEX(un)); 3383 if (rtn == 0) { 3384 /* 3385 * We have good information, check for writable DVD. 3386 */ 3387 if ((out_data[6] == 0) && (out_data[7] == 0x12)) { 3388 un->un_f_mmc_writable_media = TRUE; 3389 kmem_free(out_data, SD_PROFILE_HEADER_LEN); 3390 kmem_free(rqbuf, SENSE_LENGTH); 3391 return; 3392 } 3393 } 3394 3395 kmem_free(out_data, SD_PROFILE_HEADER_LEN); 3396 kmem_free(rqbuf, SENSE_LENGTH); 3397 3398 /* 3399 * Determine if this is an RRD type device. 3400 */ 3401 mutex_exit(SD_MUTEX(un)); 3402 buf = kmem_zalloc(BUFLEN_MODE_CDROM_CAP, KM_SLEEP); 3403 status = sd_send_scsi_MODE_SENSE(ssc, CDB_GROUP1, (uchar_t *)buf, 3404 BUFLEN_MODE_CDROM_CAP, MODEPAGE_CDROM_CAP, path_flag); 3405 3406 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 3407 3408 mutex_enter(SD_MUTEX(un)); 3409 if (status != 0) { 3410 /* command failed; just return */ 3411 kmem_free(buf, BUFLEN_MODE_CDROM_CAP); 3412 return; 3413 } 3414 3415 /* Get to the page data */ 3416 sense_mhp = (struct mode_header_grp2 *)buf; 3417 bd_len = (sense_mhp->bdesc_length_hi << 8) | sense_mhp->bdesc_length_lo; 3418 if (bd_len > MODE_BLK_DESC_LENGTH) { 3419 /* 3420 * We did not get back the expected block descriptor length so 3421 * we cannot check the mode page. 3422 */ 3423 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 3424 "sd_check_for_writable_cd: Mode Sense returned " 3425 "invalid block descriptor length\n"); 3426 kmem_free(buf, BUFLEN_MODE_CDROM_CAP); 3427 return; 3428 } 3429 3430 /* 3431 * If the device presents DVD or CD capabilities in the mode 3432 * page, we can return here since an RRD device will not have 3433 * these capabilities. 3434 */ 3435 sense_page = (uchar_t *)(buf + MODE_HEADER_LENGTH_GRP2 + bd_len); 3436 if ((sense_page[2] & 0x3f) || (sense_page[3] & 0x3f)) { 3437 kmem_free(buf, BUFLEN_MODE_CDROM_CAP); 3438 return; 3439 } 3440 kmem_free(buf, BUFLEN_MODE_CDROM_CAP); 3441 3442 /* 3443 * If un->un_f_mmc_writable_media is still FALSE, 3444 * check for RRD type media. An RRD device is identified 3445 * by the features RANDOM_WRITABLE and HARDWARE_DEFECT_MANAGEMENT.
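 * In the GET CONFIGURATION reply, the feature descriptor follows the
 * 8-byte feature header, so byte 9 of the response holds the low byte
 * of the returned feature code and bit 0 of byte 10 is the "current"
 * bit; that is what the out_data[9] and out_data[10] tests below check.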
3446 */ 3447 mutex_exit(SD_MUTEX(un)); 3448 out_data_rw = kmem_zalloc(SD_CURRENT_FEATURE_LEN, KM_SLEEP); 3449 rqbuf_rw = kmem_zalloc(SENSE_LENGTH, KM_SLEEP); 3450 3451 rtn = sd_send_scsi_feature_GET_CONFIGURATION(ssc, &com, rqbuf_rw, 3452 SENSE_LENGTH, out_data_rw, SD_CURRENT_FEATURE_LEN, 3453 RANDOM_WRITABLE, path_flag); 3454 3455 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 3456 if (rtn != 0) { 3457 kmem_free(out_data_rw, SD_CURRENT_FEATURE_LEN); 3458 kmem_free(rqbuf_rw, SENSE_LENGTH); 3459 mutex_enter(SD_MUTEX(un)); 3460 return; 3461 } 3462 3463 out_data_hd = kmem_zalloc(SD_CURRENT_FEATURE_LEN, KM_SLEEP); 3464 rqbuf_hd = kmem_zalloc(SENSE_LENGTH, KM_SLEEP); 3465 3466 rtn = sd_send_scsi_feature_GET_CONFIGURATION(ssc, &com, rqbuf_hd, 3467 SENSE_LENGTH, out_data_hd, SD_CURRENT_FEATURE_LEN, 3468 HARDWARE_DEFECT_MANAGEMENT, path_flag); 3469 3470 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 3471 mutex_enter(SD_MUTEX(un)); 3472 if (rtn == 0) { 3473 /* 3474 * We have good information; check that the random writable 3475 * and hardware defect features are reported as current. 3476 */ 3477 if ((out_data_rw[9] & RANDOM_WRITABLE) && 3478 (out_data_rw[10] & 0x1) && 3479 (out_data_hd[9] & HARDWARE_DEFECT_MANAGEMENT) && 3480 (out_data_hd[10] & 0x1)) { 3481 un->un_f_mmc_writable_media = TRUE; 3482 } 3483 } 3484 3485 kmem_free(out_data_rw, SD_CURRENT_FEATURE_LEN); 3486 kmem_free(rqbuf_rw, SENSE_LENGTH); 3487 kmem_free(out_data_hd, SD_CURRENT_FEATURE_LEN); 3488 kmem_free(rqbuf_hd, SENSE_LENGTH); 3489 } 3490 3491 /* 3492 * Function: sd_read_unit_properties 3493 * 3494 * Description: The following implements a property lookup mechanism. 3495 * Properties for particular disks (keyed on vendor, model 3496 * and rev numbers) are sought in the sd.conf file via 3497 * sd_process_sdconf_file(), and if not found there, are 3498 * looked for in a list hardcoded in this driver via 3499 * sd_process_sdconf_table(). Once located, the properties 3500 * are used to update the driver unit structure. 3501 * 3502 * Arguments: un - driver soft state (unit) structure 3503 */ 3504 3505 static void 3506 sd_read_unit_properties(struct sd_lun *un) 3507 { 3508 /* 3509 * sd_process_sdconf_file returns SD_FAILURE if it cannot find 3510 * the "sd-config-list" property (from the sd.conf file) or if 3511 * there was not a match for the inquiry vid/pid. If this 3512 * occurs, the static driver configuration table is searched 3513 * for a match. 3514 */ 3515 ASSERT(un != NULL); 3516 if (sd_process_sdconf_file(un) == SD_FAILURE) { 3517 sd_process_sdconf_table(un); 3518 } 3519 3520 /* check for LSI device */ 3521 sd_is_lsi(un); 3522 3523 3524 } 3525 3526 3527 /* 3528 * Function: sd_process_sdconf_file 3529 * 3530 * Description: Use ddi_prop_lookup(9F) to obtain the properties from the 3531 * driver's config file (i.e., sd.conf) and update the driver 3532 * soft state structure accordingly. 3533 * 3534 * Arguments: un - driver soft state (unit) structure 3535 * 3536 * Return Code: SD_SUCCESS - The properties were successfully set according 3537 * to the driver configuration file. 3538 * SD_FAILURE - The driver config list was not obtained or 3539 * there was no vid/pid match. This indicates that 3540 * the static config table should be used. 3541 * 3542 * The config file has a property, "sd-config-list". Currently we support 3543 * two formats.
For both formats, the value of this property 3544 * is a list of duplets: 3545 * 3546 * sd-config-list= 3547 * <duplet>, 3548 * [,<duplet>]*; 3549 * 3550 * In the improved format, each duplet is defined as: 3551 * 3552 * <duplet>:= "<vid+pid>","<tunable-list>" 3553 * 3554 * where 3555 * 3556 * <tunable-list>:= <tunable> [, <tunable> ]*; 3557 * <tunable> = <name> : <value> 3558 * 3559 * The <vid+pid> is the string that is returned by the target device on a 3560 * SCSI inquiry command, and the <tunable-list> contains one or more tunables 3561 * to apply to all target devices with the specified <vid+pid>. 3562 * 3563 * Each <tunable> is a "<name> : <value>" pair. 3564 * 3565 * For the old format, the structure of each duplet is as follows: 3566 * 3567 * <duplet>:= "<vid+pid>","<data-property-name_list>" 3568 * 3569 * The first entry of the duplet is the device ID string (the concatenated 3570 * vid & pid; not to be confused with a device_id). This is defined in 3571 * the same way as in the sd_disk_table. 3572 * 3573 * The second part of the duplet is a string that identifies a 3574 * data-property-name-list. The data-property-name-list is defined as 3575 * follows: 3576 * 3577 * <data-property-name-list>:=<data-property-name> [<data-property-name>] 3578 * 3579 * The syntax of <data-property-name> depends on the <version> field. 3580 * 3581 * If version = SD_CONF_VERSION_1 we have the following syntax: 3582 * 3583 * <data-property-name>:=<version>,<flags>,<prop0>,<prop1>,.....<propN> 3584 * 3585 * where the prop0 value will be used to set prop0 if bit0 is set in the 3586 * flags, prop1 if bit1 is set, etc., and N = SD_CONF_MAX_ITEMS - 1 3587 * 3588 */ 3589 3590 static int 3591 sd_process_sdconf_file(struct sd_lun *un) 3592 { 3593 char **config_list = NULL; 3594 uint_t nelements; 3595 char *vidptr; 3596 int vidlen; 3597 char *dnlist_ptr; 3598 char *dataname_ptr; 3599 char *dataname_lasts; 3600 int *data_list = NULL; 3601 uint_t data_list_len; 3602 int rval = SD_FAILURE; 3603 int i; 3604 3605 ASSERT(un != NULL); 3606 3607 /* Obtain the configuration list associated with the .conf file */ 3608 if (ddi_prop_lookup_string_array(DDI_DEV_T_ANY, SD_DEVINFO(un), 3609 DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, sd_config_list, 3610 &config_list, &nelements) != DDI_PROP_SUCCESS) { 3611 return (SD_FAILURE); 3612 } 3613 3614 /* 3615 * Compare vids in each duplet to the inquiry vid - if a match is 3616 * made, get the data value and update the soft state structure 3617 * accordingly. 3618 * 3619 * Each duplet must appear as a pair of strings; return SD_FAILURE 3620 * otherwise. 3621 */ 3622 if (nelements & 1) { 3623 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 3624 "sd-config-list should show as pairs of strings.\n"); 3625 if (config_list) 3626 ddi_prop_free(config_list); 3627 return (SD_FAILURE); 3628 } 3629 3630 for (i = 0; i < nelements; i += 2) { 3631 /* 3632 * Note: The assumption here is that each vid entry is on 3633 * a unique line from its associated duplet. 3634 */ 3635 vidptr = config_list[i]; 3636 vidlen = (int)strlen(vidptr); 3637 if ((vidlen == 0) || 3638 (sd_sdconf_id_match(un, vidptr, vidlen) != SD_SUCCESS)) { 3639 continue; 3640 } 3641 3642 /* 3643 * dnlist contains 1 or more blank separated 3644 * data-property-name entries 3645 */ 3646 dnlist_ptr = config_list[i + 1]; 3647 3648 if (strchr(dnlist_ptr, ':') != NULL) { 3649 /* 3650 * Decode the improved format sd-config-list.
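 * For example (vid/pid and values are hypothetical):
 *
 *   sd-config-list = "SUN     T300", "throttle-max:16, disksort:false";
 *
 * would be decoded here as the two tunables "throttle-max:16" and
 * "disksort:false".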
3651 */ 3652 sd_nvpair_str_decode(un, dnlist_ptr); 3653 } else { 3654 /* 3655 * This is the old format sd-config-list: loop 3656 * through all data-property-name entries in the 3657 * data-property-name-list, 3658 * setting the properties for each. 3659 */ 3660 for (dataname_ptr = sd_strtok_r(dnlist_ptr, " \t", 3661 &dataname_lasts); dataname_ptr != NULL; 3662 dataname_ptr = sd_strtok_r(NULL, " \t", 3663 &dataname_lasts)) { 3664 int version; 3665 3666 SD_INFO(SD_LOG_ATTACH_DETACH, un, 3667 "sd_process_sdconf_file: disk:%s, " 3668 "data:%s\n", vidptr, dataname_ptr); 3669 3670 /* Get the data list */ 3671 if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, 3672 SD_DEVINFO(un), 0, dataname_ptr, &data_list, 3673 &data_list_len) != DDI_PROP_SUCCESS) { 3674 SD_INFO(SD_LOG_ATTACH_DETACH, un, 3675 "sd_process_sdconf_file: data " 3676 "property (%s) has no value\n", 3677 dataname_ptr); 3678 continue; 3679 } 3680 3681 version = data_list[0]; 3682 3683 if (version == SD_CONF_VERSION_1) { 3684 sd_tunables values; 3685 3686 /* Set the properties */ 3687 if (sd_chk_vers1_data(un, data_list[1], 3688 &data_list[2], data_list_len, 3689 dataname_ptr) == SD_SUCCESS) { 3690 sd_get_tunables_from_conf(un, 3691 data_list[1], &data_list[2], 3692 &values); 3693 sd_set_vers1_properties(un, 3694 data_list[1], &values); 3695 rval = SD_SUCCESS; 3696 } else { 3697 rval = SD_FAILURE; 3698 } 3699 } else { 3700 scsi_log(SD_DEVINFO(un), sd_label, 3701 CE_WARN, "data property %s version " 3702 "0x%x is invalid.", 3703 dataname_ptr, version); 3704 rval = SD_FAILURE; 3705 } 3706 if (data_list) 3707 ddi_prop_free(data_list); 3708 } 3709 } 3710 } 3711 3712 /* free up the memory allocated by ddi_prop_lookup_string_array(). */ 3713 if (config_list) { 3714 ddi_prop_free(config_list); 3715 } 3716 3717 return (rval); 3718 } 3719 3720 /* 3721 * Function: sd_nvpair_str_decode() 3722 * 3723 * Description: Parse the improved format sd-config-list to extract each 3724 * tunable entry, which is a name-value pair, then call 3725 * sd_set_properties() to set each property. 3726 * 3727 * Arguments: un - driver soft state (unit) structure 3728 * nvpair_str - the tunable list 3729 */ 3730 static void 3731 sd_nvpair_str_decode(struct sd_lun *un, char *nvpair_str) 3732 { 3733 char *nv, *name, *value, *token; 3734 char *nv_lasts, *v_lasts, *x_lasts; 3735 3736 for (nv = sd_strtok_r(nvpair_str, ",", &nv_lasts); nv != NULL; 3737 nv = sd_strtok_r(NULL, ",", &nv_lasts)) { 3738 token = sd_strtok_r(nv, ":", &v_lasts); 3739 name = sd_strtok_r(token, " \t", &x_lasts); 3740 token = sd_strtok_r(NULL, ":", &v_lasts); 3741 value = sd_strtok_r(token, " \t", &x_lasts); 3742 if (name == NULL || value == NULL) { 3743 SD_INFO(SD_LOG_ATTACH_DETACH, un, 3744 "sd_nvpair_str_decode: " 3745 "name or value is not valid!\n"); 3746 } else { 3747 sd_set_properties(un, name, value); 3748 } 3749 } 3750 } 3751 3752 /* 3753 * Function: sd_strtok_r() 3754 * 3755 * Description: This function uses strpbrk and strspn to break a 3756 * string into tokens on sequential calls, returning NULL 3757 * when no non-separator characters remain. Pass NULL as 3758 * the first argument on subsequent calls.
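 *
 *		Illustrative usage (semantics mirror strtok_r(3C)):
 *
 *		    char buf[] = "name : value", *lasts;
 *		    sd_strtok_r(buf, ":", &lasts);	returns "name "
 *		    sd_strtok_r(NULL, ":", &lasts);	returns " value"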
3759 */ 3760 static char * 3761 sd_strtok_r(char *string, const char *sepset, char **lasts) 3762 { 3763 char *q, *r; 3764 3765 /* First or subsequent call */ 3766 if (string == NULL) 3767 string = *lasts; 3768 3769 if (string == NULL) 3770 return (NULL); 3771 3772 /* Skip leading separators */ 3773 q = string + strspn(string, sepset); 3774 3775 if (*q == '\0') 3776 return (NULL); 3777 3778 if ((r = strpbrk(q, sepset)) == NULL) 3779 *lasts = NULL; 3780 else { 3781 *r = '\0'; 3782 *lasts = r + 1; 3783 } 3784 return (q); 3785 } 3786 3787 /* 3788 * Function: sd_set_properties() 3789 * 3790 * Description: Set device properties based on the improved 3791 * format sd-config-list. 3792 * 3793 * Arguments: un - driver soft state (unit) structure 3794 * name - supported tunable name 3795 * value - tunable value 3796 */ 3797 static void 3798 sd_set_properties(struct sd_lun *un, char *name, char *value) 3799 { 3800 char *endptr = NULL; 3801 long val = 0; 3802 3803 if (strcasecmp(name, "cache-nonvolatile") == 0) { 3804 if (strcasecmp(value, "true") == 0) { 3805 un->un_f_suppress_cache_flush = TRUE; 3806 } else if (strcasecmp(value, "false") == 0) { 3807 un->un_f_suppress_cache_flush = FALSE; 3808 } else { 3809 goto value_invalid; 3810 } 3811 SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_set_properties: " 3812 "suppress_cache_flush flag set to %d\n", 3813 un->un_f_suppress_cache_flush); 3814 return; 3815 } 3816 3817 if (strcasecmp(name, "controller-type") == 0) { 3818 if (ddi_strtol(value, &endptr, 0, &val) == 0) { 3819 un->un_ctype = val; 3820 } else { 3821 goto value_invalid; 3822 } 3823 SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_set_properties: " 3824 "ctype set to %d\n", un->un_ctype); 3825 return; 3826 } 3827 3828 if (strcasecmp(name, "delay-busy") == 0) { 3829 if (ddi_strtol(value, &endptr, 0, &val) == 0) { 3830 un->un_busy_timeout = drv_usectohz(val / 1000); 3831 } else { 3832 goto value_invalid; 3833 } 3834 SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_set_properties: " 3835 "busy_timeout set to %d\n", un->un_busy_timeout); 3836 return; 3837 } 3838 3839 if (strcasecmp(name, "disksort") == 0) { 3840 if (strcasecmp(value, "true") == 0) { 3841 un->un_f_disksort_disabled = FALSE; 3842 } else if (strcasecmp(value, "false") == 0) { 3843 un->un_f_disksort_disabled = TRUE; 3844 } else { 3845 goto value_invalid; 3846 } 3847 SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_set_properties: " 3848 "disksort disabled flag set to %d\n", 3849 un->un_f_disksort_disabled); 3850 return; 3851 } 3852 3853 if (strcasecmp(name, "timeout-releasereservation") == 0) { 3854 if (ddi_strtol(value, &endptr, 0, &val) == 0) { 3855 un->un_reserve_release_time = val; 3856 } else { 3857 goto value_invalid; 3858 } 3859 SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_set_properties: " 3860 "reservation release timeout set to %d\n", 3861 un->un_reserve_release_time); 3862 return; 3863 } 3864 3865 if (strcasecmp(name, "reset-lun") == 0) { 3866 if (strcasecmp(value, "true") == 0) { 3867 un->un_f_lun_reset_enabled = TRUE; 3868 } else if (strcasecmp(value, "false") == 0) { 3869 un->un_f_lun_reset_enabled = FALSE; 3870 } else { 3871 goto value_invalid; 3872 } 3873 SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_set_properties: " 3874 "lun reset enabled flag set to %d\n", 3875 un->un_f_lun_reset_enabled); 3876 return; 3877 } 3878 3879 if (strcasecmp(name, "retries-busy") == 0) { 3880 if (ddi_strtol(value, &endptr, 0, &val) == 0) { 3881 un->un_busy_retry_count = val; 3882 } else { 3883 goto value_invalid; 3884 } 3885 SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_set_properties: " 3886 "busy retry 
count set to %d\n", un->un_busy_retry_count); 3887 return; 3888 } 3889 3890 if (strcasecmp(name, "retries-timeout") == 0) { 3891 if (ddi_strtol(value, &endptr, 0, &val) == 0) { 3892 un->un_retry_count = val; 3893 } else { 3894 goto value_invalid; 3895 } 3896 SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_set_properties: " 3897 "timeout retry count set to %d\n", un->un_retry_count); 3898 return; 3899 } 3900 3901 if (strcasecmp(name, "retries-notready") == 0) { 3902 if (ddi_strtol(value, &endptr, 0, &val) == 0) { 3903 un->un_notready_retry_count = val; 3904 } else { 3905 goto value_invalid; 3906 } 3907 SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_set_properties: " 3908 "notready retry count set to %d\n", 3909 un->un_notready_retry_count); 3910 return; 3911 } 3912 3913 if (strcasecmp(name, "retries-reset") == 0) { 3914 if (ddi_strtol(value, &endptr, 0, &val) == 0) { 3915 un->un_reset_retry_count = val; 3916 } else { 3917 goto value_invalid; 3918 } 3919 SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_set_properties: " 3920 "reset retry count set to %d\n", 3921 un->un_reset_retry_count); 3922 return; 3923 } 3924 3925 if (strcasecmp(name, "throttle-max") == 0) { 3926 if (ddi_strtol(value, &endptr, 0, &val) == 0) { 3927 un->un_saved_throttle = un->un_throttle = val; 3928 } else { 3929 goto value_invalid; 3930 } 3931 SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_set_properties: " 3932 "throttle set to %d\n", un->un_throttle); 3933 } 3934 3935 if (strcasecmp(name, "throttle-min") == 0) { 3936 if (ddi_strtol(value, &endptr, 0, &val) == 0) { 3937 un->un_min_throttle = val; 3938 } else { 3939 goto value_invalid; 3940 } 3941 SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_set_properties: " 3942 "min throttle set to %d\n", un->un_min_throttle); 3943 } 3944 3945 /* 3946 * Validate the throttle values. 3947 * If any of the numbers are invalid, set everything to defaults. 3948 */ 3949 if ((un->un_throttle < SD_LOWEST_VALID_THROTTLE) || 3950 (un->un_min_throttle < SD_LOWEST_VALID_THROTTLE) || 3951 (un->un_min_throttle > un->un_throttle)) { 3952 un->un_saved_throttle = un->un_throttle = sd_max_throttle; 3953 un->un_min_throttle = sd_min_throttle; 3954 } 3955 return; 3956 3957 value_invalid: 3958 SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_set_properties: " 3959 "value of prop %s is invalid\n", name); 3960 } 3961 3962 /* 3963 * Function: sd_get_tunables_from_conf() 3964 * 3965 * Description: This function reads the data list from the sd.conf file and 3966 * places the values that can be taken as numeric arguments 3967 * into the appropriate sd_tunables members. 3968 * Since the order of the data list members varies across 3969 * platforms, this function reads them from the data list in a 3970 * platform-specific order and places them into the correct 3971 * sd_tunables member, which is consistent across all 3972 * platforms.
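 *
 * For example, assuming SD_CONF_BSET_THROTTLE is bit 0 and
 * SD_CONF_BSET_CTYPE is bit 1 (bit positions shown here only for
 * illustration), a version-1 data list of 1,0x3,32,2 (version, flags,
 * prop0, prop1) would set sdt_throttle to 32 and sdt_ctype to 2.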
3973 */ 3974 static void 3975 sd_get_tunables_from_conf(struct sd_lun *un, int flags, int *data_list, 3976 sd_tunables *values) 3977 { 3978 int i; 3979 int mask; 3980 3981 bzero(values, sizeof (sd_tunables)); 3982 3983 for (i = 0; i < SD_CONF_MAX_ITEMS; i++) { 3984 3985 mask = 1 << i; 3986 if (mask > flags) { 3987 break; 3988 } 3989 3990 switch (mask & flags) { 3991 case 0: /* This mask bit not set in flags */ 3992 continue; 3993 case SD_CONF_BSET_THROTTLE: 3994 values->sdt_throttle = data_list[i]; 3995 SD_INFO(SD_LOG_ATTACH_DETACH, un, 3996 "sd_get_tunables_from_conf: throttle = %d\n", 3997 values->sdt_throttle); 3998 break; 3999 case SD_CONF_BSET_CTYPE: 4000 values->sdt_ctype = data_list[i]; 4001 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4002 "sd_get_tunables_from_conf: ctype = %d\n", 4003 values->sdt_ctype); 4004 break; 4005 case SD_CONF_BSET_NRR_COUNT: 4006 values->sdt_not_rdy_retries = data_list[i]; 4007 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4008 "sd_get_tunables_from_conf: not_rdy_retries = %d\n", 4009 values->sdt_not_rdy_retries); 4010 break; 4011 case SD_CONF_BSET_BSY_RETRY_COUNT: 4012 values->sdt_busy_retries = data_list[i]; 4013 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4014 "sd_get_tunables_from_conf: busy_retries = %d\n", 4015 values->sdt_busy_retries); 4016 break; 4017 case SD_CONF_BSET_RST_RETRIES: 4018 values->sdt_reset_retries = data_list[i]; 4019 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4020 "sd_get_tunables_from_conf: reset_retries = %d\n", 4021 values->sdt_reset_retries); 4022 break; 4023 case SD_CONF_BSET_RSV_REL_TIME: 4024 values->sdt_reserv_rel_time = data_list[i]; 4025 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4026 "sd_get_tunables_from_conf: reserv_rel_time = %d\n", 4027 values->sdt_reserv_rel_time); 4028 break; 4029 case SD_CONF_BSET_MIN_THROTTLE: 4030 values->sdt_min_throttle = data_list[i]; 4031 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4032 "sd_get_tunables_from_conf: min_throttle = %d\n", 4033 values->sdt_min_throttle); 4034 break; 4035 case SD_CONF_BSET_DISKSORT_DISABLED: 4036 values->sdt_disk_sort_dis = data_list[i]; 4037 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4038 "sd_get_tunables_from_conf: disk_sort_dis = %d\n", 4039 values->sdt_disk_sort_dis); 4040 break; 4041 case SD_CONF_BSET_LUN_RESET_ENABLED: 4042 values->sdt_lun_reset_enable = data_list[i]; 4043 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4044 "sd_get_tunables_from_conf: lun_reset_enable = %d" 4045 "\n", values->sdt_lun_reset_enable); 4046 break; 4047 case SD_CONF_BSET_CACHE_IS_NV: 4048 values->sdt_suppress_cache_flush = data_list[i]; 4049 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4050 "sd_get_tunables_from_conf: " 4051 "suppress_cache_flush = %d\n", 4052 values->sdt_suppress_cache_flush); 4053 break; 4054 } 4055 } 4056 } 4057 4058 /* 4059 * Function: sd_process_sdconf_table 4060 * 4061 * Description: Search the static configuration table for a match on the 4062 * inquiry vid/pid and update the driver soft state structure 4063 * according to the table property values for the device.
4064 * 4065 * The form of a configuration table entry is: 4066 * <vid+pid>,<flags>,<property-data> 4067 * "SEAGATE ST42400N",1,0x40000, 4068 * 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1; 4069 * 4070 * Arguments: un - driver soft state (unit) structure 4071 */ 4072 4073 static void 4074 sd_process_sdconf_table(struct sd_lun *un) 4075 { 4076 char *id = NULL; 4077 int table_index; 4078 int idlen; 4079 4080 ASSERT(un != NULL); 4081 for (table_index = 0; table_index < sd_disk_table_size; 4082 table_index++) { 4083 id = sd_disk_table[table_index].device_id; 4084 idlen = strlen(id); 4085 if (idlen == 0) { 4086 continue; 4087 } 4088 4089 /* 4090 * The static configuration table currently does not 4091 * implement version 10 properties. Additionally, 4092 * multiple data-property-name entries are not 4093 * implemented in the static configuration table. 4094 */ 4095 if (sd_sdconf_id_match(un, id, idlen) == SD_SUCCESS) { 4096 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4097 "sd_process_sdconf_table: disk %s\n", id); 4098 sd_set_vers1_properties(un, 4099 sd_disk_table[table_index].flags, 4100 sd_disk_table[table_index].properties); 4101 break; 4102 } 4103 } 4104 } 4105 4106 4107 /* 4108 * Function: sd_sdconf_id_match 4109 * 4110 * Description: This local function implements a case-insensitive vid/pid 4111 * comparison, as well as the boundary cases of wild cards and 4112 * multiple blanks. 4113 * 4114 * Note: An implicit assumption made here is that the scsi 4115 * inquiry structure will always keep the vid, pid and 4116 * revision strings in consecutive sequence, so they can be 4117 * read as a single string. If this assumption is not the 4118 * case, a separate string, to be used for the check, needs 4119 * to be built with these strings concatenated. 4120 * 4121 * Arguments: un - driver soft state (unit) structure 4122 * id - table or config file vid/pid 4123 * idlen - length of the vid/pid (bytes) 4124 * 4125 * Return Code: SD_SUCCESS - Indicates a match with the inquiry vid/pid 4126 * SD_FAILURE - Indicates no match with the inquiry vid/pid 4127 */ 4128 4129 static int 4130 sd_sdconf_id_match(struct sd_lun *un, char *id, int idlen) 4131 { 4132 struct scsi_inquiry *sd_inq; 4133 int rval = SD_SUCCESS; 4134 4135 ASSERT(un != NULL); 4136 sd_inq = un->un_sd->sd_inq; 4137 ASSERT(id != NULL); 4138 4139 /* 4140 * We use the inq_vid as a pointer to a buffer containing the 4141 * vid and pid and use the entire vid/pid length of the table 4142 * entry for the comparison. This works because the inq_pid 4143 * data member follows inq_vid in the scsi_inquiry structure. 4144 */ 4145 if (strncasecmp(sd_inq->inq_vid, id, idlen) != 0) { 4146 /* 4147 * The user id string is compared to the inquiry vid/pid 4148 * using a case insensitive comparison and ignoring 4149 * multiple spaces. 4150 */ 4151 rval = sd_blank_cmp(un, id, idlen); 4152 if (rval != SD_SUCCESS) { 4153 /* 4154 * User id strings that start and end with a "*" 4155 * are a special case. These do not have a 4156 * specific vendor, and the product string can 4157 * appear anywhere in the 16 byte PID portion of 4158 * the inquiry data. This is a simple strstr() 4159 * type search for the user id in the inquiry data.
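 * For example, a (hypothetical) entry of "*ST31200N*" would
 * match any target whose 16-byte PID contains the substring
 * "ST31200N".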
4160 */ 4161 if ((id[0] == '*') && (id[idlen - 1] == '*')) { 4162 char *pidptr = &id[1]; 4163 int i; 4164 int j; 4165 int pidstrlen = idlen - 2; 4166 j = sizeof (SD_INQUIRY(un)->inq_pid) - 4167 pidstrlen; 4168 4169 if (j < 0) { 4170 return (SD_FAILURE); 4171 } 4172 for (i = 0; i < j; i++) { 4173 if (bcmp(&SD_INQUIRY(un)->inq_pid[i], 4174 pidptr, pidstrlen) == 0) { 4175 rval = SD_SUCCESS; 4176 break; 4177 } 4178 } 4179 } 4180 } 4181 } 4182 return (rval); 4183 } 4184 4185 4186 /* 4187 * Function: sd_blank_cmp 4188 * 4189 * Description: If the id string starts and ends with a space, treat 4190 * multiple consecutive spaces as equivalent to a single 4191 * space. For example, this causes a sd_disk_table entry 4192 * of " NEC CDROM " to match a device's id string of 4193 * "NEC CDROM". 4194 * 4195 * Note: The success exit condition for this routine is if 4196 * the pointer to the table entry is '\0' and the cnt of 4197 * the inquiry length is zero. This will happen if the inquiry 4198 * string returned by the device is padded with spaces to be 4199 * exactly 24 bytes in length (8 byte vid + 16 byte pid). The 4200 * SCSI spec states that the inquiry string is to be padded with 4201 * spaces. 4202 * 4203 * Arguments: un - driver soft state (unit) structure 4204 * id - table or config file vid/pid 4205 * idlen - length of the vid/pid (bytes) 4206 * 4207 * Return Code: SD_SUCCESS - Indicates a match with the inquiry vid/pid 4208 * SD_FAILURE - Indicates no match with the inquiry vid/pid 4209 */ 4210 4211 static int 4212 sd_blank_cmp(struct sd_lun *un, char *id, int idlen) 4213 { 4214 char *p1; 4215 char *p2; 4216 int cnt; 4217 cnt = sizeof (SD_INQUIRY(un)->inq_vid) + 4218 sizeof (SD_INQUIRY(un)->inq_pid); 4219 4220 ASSERT(un != NULL); 4221 p2 = un->un_sd->sd_inq->inq_vid; 4222 ASSERT(id != NULL); 4223 p1 = id; 4224 4225 if ((id[0] == ' ') && (id[idlen - 1] == ' ')) { 4226 /* 4227 * Note: string p1 is terminated by a NUL but string p2 4228 * isn't. The end of p2 is determined by cnt. 4229 */ 4230 for (;;) { 4231 /* skip over any extra blanks in both strings */ 4232 while ((*p1 != '\0') && (*p1 == ' ')) { 4233 p1++; 4234 } 4235 while ((cnt != 0) && (*p2 == ' ')) { 4236 p2++; 4237 cnt--; 4238 } 4239 4240 /* compare the two strings */ 4241 if ((cnt == 0) || 4242 (SD_TOUPPER(*p1) != SD_TOUPPER(*p2))) { 4243 break; 4244 } 4245 while ((cnt > 0) && 4246 (SD_TOUPPER(*p1) == SD_TOUPPER(*p2))) { 4247 p1++; 4248 p2++; 4249 cnt--; 4250 } 4251 } 4252 } 4253 4254 /* return SD_SUCCESS if both strings match */ 4255 return (((*p1 == '\0') && (cnt == 0)) ? 
SD_SUCCESS : SD_FAILURE); 4256 } 4257 4258 4259 /* 4260 * Function: sd_chk_vers1_data 4261 * 4262 * Description: Verify the version 1 device properties provided by the 4263 * user via the configuration file 4264 * 4265 * Arguments: un - driver soft state (unit) structure 4266 * flags - integer mask indicating properties to be set 4267 * prop_list - integer list of property values 4268 * list_len - number of elements in the list 4269 * 4270 * Return Code: SD_SUCCESS - Indicates the user provided data is valid 4271 * SD_FAILURE - Indicates the user provided data is invalid 4272 */ 4273 4274 static int 4275 sd_chk_vers1_data(struct sd_lun *un, int flags, int *prop_list, 4276 int list_len, char *dataname_ptr) 4277 { 4278 int i; 4279 int mask; 4280 int index = 0; 4281 4282 ASSERT(un != NULL); 4283 4284 /* Check for a NULL property name and list */ 4285 if (dataname_ptr == NULL) { 4286 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 4287 "sd_chk_vers1_data: NULL data property name."); 4288 return (SD_FAILURE); 4289 } 4290 if (prop_list == NULL) { 4291 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 4292 "sd_chk_vers1_data: %s NULL data property list.", 4293 dataname_ptr); 4294 return (SD_FAILURE); 4295 } 4296 4297 /* Display a warning if undefined bits are set in the flags */ 4298 if (flags & ~SD_CONF_BIT_MASK) { 4299 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 4300 "sd_chk_vers1_data: invalid bits 0x%x in data list %s. " 4301 "Properties not set.", 4302 (flags & ~SD_CONF_BIT_MASK), dataname_ptr); 4303 return (SD_FAILURE); 4304 } 4305 4306 /* 4307 * Verify the length of the list by locating the highest bit set 4308 * in the flags: the version-1 property list is positional (prop i 4309 * corresponds to bit i), so entries are required up through that bit. 4310 */ 4311 for (i = 0; i < SD_CONF_MAX_ITEMS; i++) { 4312 mask = 1 << i; 4313 if (flags & mask) { 4314 index = i + 1; 4315 } 4316 } 4317 if (list_len < (index + 2)) { 4318 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 4319 "sd_chk_vers1_data: " 4320 "Data property list %s size is incorrect. " 4321 "Properties not set.", dataname_ptr); 4322 scsi_log(SD_DEVINFO(un), sd_label, CE_CONT, "Size expected: " 4323 "version + 1 flagword + %d properties", SD_CONF_MAX_ITEMS); 4324 return (SD_FAILURE); 4325 } 4326 return (SD_SUCCESS); 4327 } 4328 4329 4330 /* 4331 * Function: sd_set_vers1_properties 4332 * 4333 * Description: Set version 1 device properties based on a property list 4334 * retrieved from the driver configuration file or static 4335 * configuration table. Version 1 properties have the format: 4336 * 4337 * <data-property-name>:=<version>,<flags>,<prop0>,<prop1>,.....<propN> 4338 * 4339 * where the prop0 value will be used to set prop0 if bit0 4340 * is set in the flags 4341 * 4342 * Arguments: un - driver soft state (unit) structure 4343 * flags - integer mask indicating properties to be set 4344 * prop_list - integer list of property values 4345 */ 4346 4347 static void 4348 sd_set_vers1_properties(struct sd_lun *un, int flags, sd_tunables *prop_list) 4349 { 4350 ASSERT(un != NULL); 4351 4352 /* 4353 * Set the flag to indicate cache is to be disabled. An attempt 4354 * to disable the cache via sd_cache_control() will be made 4355 * later during attach once the basic initialization is complete.
4356 */ 4357 if (flags & SD_CONF_BSET_NOCACHE) { 4358 un->un_f_opt_disable_cache = TRUE; 4359 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4360 "sd_set_vers1_properties: caching disabled flag set\n"); 4361 } 4362 4363 /* CD-specific configuration parameters */ 4364 if (flags & SD_CONF_BSET_PLAYMSF_BCD) { 4365 un->un_f_cfg_playmsf_bcd = TRUE; 4366 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4367 "sd_set_vers1_properties: playmsf_bcd set\n"); 4368 } 4369 if (flags & SD_CONF_BSET_READSUB_BCD) { 4370 un->un_f_cfg_readsub_bcd = TRUE; 4371 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4372 "sd_set_vers1_properties: readsub_bcd set\n"); 4373 } 4374 if (flags & SD_CONF_BSET_READ_TOC_TRK_BCD) { 4375 un->un_f_cfg_read_toc_trk_bcd = TRUE; 4376 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4377 "sd_set_vers1_properties: read_toc_trk_bcd set\n"); 4378 } 4379 if (flags & SD_CONF_BSET_READ_TOC_ADDR_BCD) { 4380 un->un_f_cfg_read_toc_addr_bcd = TRUE; 4381 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4382 "sd_set_vers1_properties: read_toc_addr_bcd set\n"); 4383 } 4384 if (flags & SD_CONF_BSET_NO_READ_HEADER) { 4385 un->un_f_cfg_no_read_header = TRUE; 4386 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4387 "sd_set_vers1_properties: no_read_header set\n"); 4388 } 4389 if (flags & SD_CONF_BSET_READ_CD_XD4) { 4390 un->un_f_cfg_read_cd_xd4 = TRUE; 4391 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4392 "sd_set_vers1_properties: read_cd_xd4 set\n"); 4393 } 4394 4395 /* Support for devices which do not have valid/unique serial numbers */ 4396 if (flags & SD_CONF_BSET_FAB_DEVID) { 4397 un->un_f_opt_fab_devid = TRUE; 4398 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4399 "sd_set_vers1_properties: fab_devid bit set\n"); 4400 } 4401 4402 /* Support for user throttle configuration */ 4403 if (flags & SD_CONF_BSET_THROTTLE) { 4404 ASSERT(prop_list != NULL); 4405 un->un_saved_throttle = un->un_throttle = 4406 prop_list->sdt_throttle; 4407 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4408 "sd_set_vers1_properties: throttle set to %d\n", 4409 prop_list->sdt_throttle); 4410 } 4411 4412 /* Set the per disk retry count according to the conf file or table. 
*/ 4413 if (flags & SD_CONF_BSET_NRR_COUNT) { 4414 ASSERT(prop_list != NULL); 4415 if (prop_list->sdt_not_rdy_retries) { 4416 un->un_notready_retry_count = 4417 prop_list->sdt_not_rdy_retries; 4418 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4419 "sd_set_vers1_properties: not ready retry count" 4420 " set to %d\n", un->un_notready_retry_count); 4421 } 4422 } 4423 4424 /* The controller type is reported for generic disk driver ioctls */ 4425 if (flags & SD_CONF_BSET_CTYPE) { 4426 ASSERT(prop_list != NULL); 4427 switch (prop_list->sdt_ctype) { 4428 case CTYPE_CDROM: 4429 un->un_ctype = prop_list->sdt_ctype; 4430 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4431 "sd_set_vers1_properties: ctype set to " 4432 "CTYPE_CDROM\n"); 4433 break; 4434 case CTYPE_CCS: 4435 un->un_ctype = prop_list->sdt_ctype; 4436 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4437 "sd_set_vers1_properties: ctype set to " 4438 "CTYPE_CCS\n"); 4439 break; 4440 case CTYPE_ROD: /* RW optical */ 4441 un->un_ctype = prop_list->sdt_ctype; 4442 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4443 "sd_set_vers1_properties: ctype set to " 4444 "CTYPE_ROD\n"); 4445 break; 4446 default: 4447 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 4448 "sd_set_vers1_properties: Could not set " 4449 "invalid ctype value (%d)", 4450 prop_list->sdt_ctype); 4451 } 4452 } 4453 4454 /* Purple failover timeout */ 4455 if (flags & SD_CONF_BSET_BSY_RETRY_COUNT) { 4456 ASSERT(prop_list != NULL); 4457 un->un_busy_retry_count = 4458 prop_list->sdt_busy_retries; 4459 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4460 "sd_set_vers1_properties: " 4461 "busy retry count set to %d\n", 4462 un->un_busy_retry_count); 4463 } 4464 4465 /* Purple reset retry count */ 4466 if (flags & SD_CONF_BSET_RST_RETRIES) { 4467 ASSERT(prop_list != NULL); 4468 un->un_reset_retry_count = 4469 prop_list->sdt_reset_retries; 4470 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4471 "sd_set_vers1_properties: " 4472 "reset retry count set to %d\n", 4473 un->un_reset_retry_count); 4474 } 4475 4476 /* Purple reservation release timeout */ 4477 if (flags & SD_CONF_BSET_RSV_REL_TIME) { 4478 ASSERT(prop_list != NULL); 4479 un->un_reserve_release_time = 4480 prop_list->sdt_reserv_rel_time; 4481 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4482 "sd_set_vers1_properties: " 4483 "reservation release timeout set to %d\n", 4484 un->un_reserve_release_time); 4485 } 4486 4487 /* 4488 * Driver flag telling the driver to verify that no commands are pending 4489 * for a device before issuing a Test Unit Ready. This is a workaround 4490 * for a firmware bug in some Seagate eliteI drives. 4491 */ 4492 if (flags & SD_CONF_BSET_TUR_CHECK) { 4493 un->un_f_cfg_tur_check = TRUE; 4494 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4495 "sd_set_vers1_properties: tur queue check set\n"); 4496 } 4497 4498 if (flags & SD_CONF_BSET_MIN_THROTTLE) { 4499 un->un_min_throttle = prop_list->sdt_min_throttle; 4500 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4501 "sd_set_vers1_properties: min throttle set to %d\n", 4502 un->un_min_throttle); 4503 } 4504 4505 if (flags & SD_CONF_BSET_DISKSORT_DISABLED) { 4506 un->un_f_disksort_disabled = 4507 (prop_list->sdt_disk_sort_dis != 0) ? 4508 TRUE : FALSE; 4509 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4510 "sd_set_vers1_properties: disksort disabled " 4511 "flag set to %d\n", 4512 prop_list->sdt_disk_sort_dis); 4513 } 4514 4515 if (flags & SD_CONF_BSET_LUN_RESET_ENABLED) { 4516 un->un_f_lun_reset_enabled = 4517 (prop_list->sdt_lun_reset_enable != 0) ? 
4518 TRUE : FALSE; 4519 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4520 "sd_set_vers1_properties: lun reset enabled " 4521 "flag set to %d\n", 4522 prop_list->sdt_lun_reset_enable); 4523 } 4524 4525 if (flags & SD_CONF_BSET_CACHE_IS_NV) { 4526 un->un_f_suppress_cache_flush = 4527 (prop_list->sdt_suppress_cache_flush != 0) ? 4528 TRUE : FALSE; 4529 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4530 "sd_set_vers1_properties: suppress_cache_flush " 4531 "flag set to %d\n", 4532 prop_list->sdt_suppress_cache_flush); 4533 } 4534 4535 /* 4536 * Validate the throttle values. 4537 * If any of the numbers are invalid, set everything to defaults. 4538 */ 4539 if ((un->un_throttle < SD_LOWEST_VALID_THROTTLE) || 4540 (un->un_min_throttle < SD_LOWEST_VALID_THROTTLE) || 4541 (un->un_min_throttle > un->un_throttle)) { 4542 un->un_saved_throttle = un->un_throttle = sd_max_throttle; 4543 un->un_min_throttle = sd_min_throttle; 4544 } 4545 } 4546 4547 /* 4548 * Function: sd_is_lsi() 4549 * 4550 * Description: Check for LSI devices by stepping through the static 4551 * device table to match vid/pid. 4552 * 4553 * Args: un - ptr to sd_lun 4554 * 4555 * Notes: When creating a new LSI property, the new property must also 4556 * be added to this function. 4557 */ 4558 static void 4559 sd_is_lsi(struct sd_lun *un) 4560 { 4561 char *id = NULL; 4562 int table_index; 4563 int idlen; 4564 void *prop; 4565 4566 ASSERT(un != NULL); 4567 for (table_index = 0; table_index < sd_disk_table_size; 4568 table_index++) { 4569 id = sd_disk_table[table_index].device_id; 4570 idlen = strlen(id); 4571 if (idlen == 0) { 4572 continue; 4573 } 4574 4575 if (sd_sdconf_id_match(un, id, idlen) == SD_SUCCESS) { 4576 prop = sd_disk_table[table_index].properties; 4577 if (prop == &lsi_properties || 4578 prop == &lsi_oem_properties || 4579 prop == &lsi_properties_scsi || 4580 prop == &symbios_properties) { 4581 un->un_f_cfg_is_lsi = TRUE; 4582 } 4583 break; 4584 } 4585 } 4586 } 4587 4588 /* 4589 * Function: sd_get_physical_geometry 4590 * 4591 * Description: Retrieve the MODE SENSE page 3 (Format Device Page) and 4592 * MODE SENSE page 4 (Rigid Disk Drive Geometry Page) from the 4593 * target, and use this information to initialize the physical 4594 * geometry cache specified by pgeom_p. 4595 * 4596 * MODE SENSE is an optional command, so failure in this case 4597 * does not necessarily denote an error. We want to use the 4598 * MODE SENSE commands to derive the physical geometry of the 4599 * device, but if either command fails, the logical geometry is 4600 * used as the fallback for disk label geometry in cmlb. 4601 * 4602 * This requires that un->un_blockcount and un->un_tgt_blocksize 4603 * have already been initialized for the current target and 4604 * that the current values be passed as args so that we don't 4605 * end up ever trying to use -1 as a valid value. This could 4606 * happen if either value is reset while we're not holding 4607 * the mutex. 4608 * 4609 * Arguments: un - driver soft state (unit) structure 4610 * pgeom_p - physical geometry cache to be initialized 4611 * capacity - disk capacity (block count); lbasize - block size in bytes 4612 * path_flag - SD_PATH_DIRECT to use the USCSI "direct" chain and the 4613 * normal command waitq, or SD_PATH_DIRECT_PRIORITY to use the 4614 * USCSI "direct" chain and bypass the normal command waitq. 4615 * Context: Kernel thread only (can sleep).
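 *
 *		Return Code: 0 if both mode pages were successfully read and
 *		the geometry cache updated; EIO otherwise (including the
 *		CD/DVD case, where pages 3 and 4 are reserved and only the
 *		default g_secsize is set).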
4616 */ 4617 4618 static int 4619 sd_get_physical_geometry(struct sd_lun *un, cmlb_geom_t *pgeom_p, 4620 diskaddr_t capacity, int lbasize, int path_flag) 4621 { 4622 struct mode_format *page3p; 4623 struct mode_geometry *page4p; 4624 struct mode_header *headerp; 4625 int sector_size; 4626 int nsect; 4627 int nhead; 4628 int ncyl; 4629 int intrlv; 4630 int spc; 4631 diskaddr_t modesense_capacity; 4632 int rpm; 4633 int bd_len; 4634 int mode_header_length; 4635 uchar_t *p3bufp; 4636 uchar_t *p4bufp; 4637 int cdbsize; 4638 int ret = EIO; 4639 sd_ssc_t *ssc; 4640 int status; 4641 4642 ASSERT(un != NULL); 4643 4644 if (lbasize == 0) { 4645 if (ISCD(un)) { 4646 lbasize = 2048; 4647 } else { 4648 lbasize = un->un_sys_blocksize; 4649 } 4650 } 4651 pgeom_p->g_secsize = (unsigned short)lbasize; 4652 4653 /* 4654 * If the unit is a cd/dvd drive MODE SENSE page three 4655 * and MODE SENSE page four are reserved (see SBC spec 4656 * and MMC spec). To prevent soft errors just return 4657 * using the default LBA size. 4658 */ 4659 if (ISCD(un)) 4660 return (ret); 4661 4662 cdbsize = (un->un_f_cfg_is_atapi == TRUE) ? CDB_GROUP2 : CDB_GROUP0; 4663 4664 /* 4665 * Retrieve MODE SENSE page 3 - Format Device Page 4666 */ 4667 p3bufp = kmem_zalloc(SD_MODE_SENSE_PAGE3_LENGTH, KM_SLEEP); 4668 ssc = sd_ssc_init(un); 4669 status = sd_send_scsi_MODE_SENSE(ssc, cdbsize, p3bufp, 4670 SD_MODE_SENSE_PAGE3_LENGTH, SD_MODE_SENSE_PAGE3_CODE, path_flag); 4671 if (status != 0) { 4672 SD_ERROR(SD_LOG_COMMON, un, 4673 "sd_get_physical_geometry: mode sense page 3 failed\n"); 4674 goto page3_exit; 4675 } 4676 4677 /* 4678 * Determine size of Block Descriptors in order to locate the mode 4679 * page data. ATAPI devices return 0, SCSI devices should return 4680 * MODE_BLK_DESC_LENGTH. 4681 */ 4682 headerp = (struct mode_header *)p3bufp; 4683 if (un->un_f_cfg_is_atapi == TRUE) { 4684 struct mode_header_grp2 *mhp = 4685 (struct mode_header_grp2 *)headerp; 4686 mode_header_length = MODE_HEADER_LENGTH_GRP2; 4687 bd_len = (mhp->bdesc_length_hi << 8) | mhp->bdesc_length_lo; 4688 } else { 4689 mode_header_length = MODE_HEADER_LENGTH; 4690 bd_len = ((struct mode_header *)headerp)->bdesc_length; 4691 } 4692 4693 if (bd_len > MODE_BLK_DESC_LENGTH) { 4694 SD_ERROR(SD_LOG_COMMON, un, "sd_get_physical_geometry: " 4695 "received unexpected bd_len of %d, page3\n", bd_len); 4696 sd_ssc_set_info(ssc, SSC_FLAGS_INVALID_DATA, 4697 "sd_get_physical_geometry: received unexpected " 4698 "bd_len of %d, page3", bd_len); 4699 status = EIO; 4700 goto page3_exit; 4701 } 4702 4703 page3p = (struct mode_format *) 4704 ((caddr_t)headerp + mode_header_length + bd_len); 4705 4706 if (page3p->mode_page.code != SD_MODE_SENSE_PAGE3_CODE) { 4707 SD_ERROR(SD_LOG_COMMON, un, "sd_get_physical_geometry: " 4708 "mode sense pg3 code mismatch %d\n", 4709 page3p->mode_page.code); 4710 sd_ssc_set_info(ssc, SSC_FLAGS_INVALID_DATA, 4711 "sd_get_physical_geometry: mode sense pg3 code " 4712 "mismatch %d", page3p->mode_page.code); 4713 status = EIO; 4714 goto page3_exit; 4715 } 4716 4717 /* 4718 * Use this physical geometry data only if BOTH MODE SENSE commands 4719 * complete successfully; otherwise, revert to the logical geometry. 4720 * So, we need to save everything in temporary variables. 
4721 */ 4722 sector_size = BE_16(page3p->data_bytes_sect); 4723 4724 /* 4725 * 1243403: The NEC D38x7 drives do not support MODE SENSE sector size 4726 */ 4727 if (sector_size == 0) { 4728 sector_size = un->un_sys_blocksize; 4729 } else { 4730 sector_size &= ~(un->un_sys_blocksize - 1); 4731 } 4732 4733 nsect = BE_16(page3p->sect_track); 4734 intrlv = BE_16(page3p->interleave); 4735 4736 SD_INFO(SD_LOG_COMMON, un, 4737 "sd_get_physical_geometry: Format Parameters (page 3)\n"); 4738 SD_INFO(SD_LOG_COMMON, un, 4739 " mode page: %d; nsect: %d; sector size: %d;\n", 4740 page3p->mode_page.code, nsect, sector_size); 4741 SD_INFO(SD_LOG_COMMON, un, 4742 " interleave: %d; track skew: %d; cylinder skew: %d;\n", intrlv, 4743 BE_16(page3p->track_skew), 4744 BE_16(page3p->cylinder_skew)); 4745 4746 sd_ssc_assessment(ssc, SD_FMT_STANDARD); 4747 4748 /* 4749 * Retrieve MODE SENSE page 4 - Rigid Disk Drive Geometry Page 4750 */ 4751 p4bufp = kmem_zalloc(SD_MODE_SENSE_PAGE4_LENGTH, KM_SLEEP); 4752 status = sd_send_scsi_MODE_SENSE(ssc, cdbsize, p4bufp, 4753 SD_MODE_SENSE_PAGE4_LENGTH, SD_MODE_SENSE_PAGE4_CODE, path_flag); 4754 if (status != 0) { 4755 SD_ERROR(SD_LOG_COMMON, un, 4756 "sd_get_physical_geometry: mode sense page 4 failed\n"); 4757 goto page4_exit; 4758 } 4759 4760 /* 4761 * Determine size of Block Descriptors in order to locate the mode 4762 * page data. ATAPI devices return 0, SCSI devices should return 4763 * MODE_BLK_DESC_LENGTH. 4764 */ 4765 headerp = (struct mode_header *)p4bufp; 4766 if (un->un_f_cfg_is_atapi == TRUE) { 4767 struct mode_header_grp2 *mhp = 4768 (struct mode_header_grp2 *)headerp; 4769 bd_len = (mhp->bdesc_length_hi << 8) | mhp->bdesc_length_lo; 4770 } else { 4771 bd_len = ((struct mode_header *)headerp)->bdesc_length; 4772 } 4773 4774 if (bd_len > MODE_BLK_DESC_LENGTH) { 4775 SD_ERROR(SD_LOG_COMMON, un, "sd_get_physical_geometry: " 4776 "received unexpected bd_len of %d, page4\n", bd_len); 4777 sd_ssc_set_info(ssc, SSC_FLAGS_INVALID_DATA, 4778 "sd_get_physical_geometry: received unexpected " 4779 "bd_len of %d, page4", bd_len); 4780 status = EIO; 4781 goto page4_exit; 4782 } 4783 4784 page4p = (struct mode_geometry *) 4785 ((caddr_t)headerp + mode_header_length + bd_len); 4786 4787 if (page4p->mode_page.code != SD_MODE_SENSE_PAGE4_CODE) { 4788 SD_ERROR(SD_LOG_COMMON, un, "sd_get_physical_geometry: " 4789 "mode sense pg4 code mismatch %d\n", 4790 page4p->mode_page.code); 4791 sd_ssc_set_info(ssc, SSC_FLAGS_INVALID_DATA, 4792 "sd_get_physical_geometry: mode sense pg4 code " 4793 "mismatch %d", page4p->mode_page.code); 4794 status = EIO; 4795 goto page4_exit; 4796 } 4797 4798 /* 4799 * Stash the data now, after we know that both commands completed. 4800 */ 4801 4802 4803 nhead = (int)page4p->heads; /* uchar, so no conversion needed */ 4804 spc = nhead * nsect; 4805 ncyl = (page4p->cyl_ub << 16) + (page4p->cyl_mb << 8) + page4p->cyl_lb; 4806 rpm = BE_16(page4p->rpm); 4807 4808 modesense_capacity = spc * ncyl; 4809 4810 SD_INFO(SD_LOG_COMMON, un, 4811 "sd_get_physical_geometry: Geometry Parameters (page 4)\n"); 4812 SD_INFO(SD_LOG_COMMON, un, 4813 " cylinders: %d; heads: %d; rpm: %d;\n", ncyl, nhead, rpm); 4814 SD_INFO(SD_LOG_COMMON, un, 4815 " computed capacity(h*s*c): %d;\n", modesense_capacity); 4816 SD_INFO(SD_LOG_COMMON, un, " pgeom_p: %p; read cap: %d\n", 4817 (void *)pgeom_p, capacity); 4818 4819 /* 4820 * Compensate if the drive's geometry is not rectangular, i.e., 4821 * the product of C * H * S returned by MODE SENSE >= that returned 4822 * by read capacity. 
This is an idiosyncrasy of the original x86 4823 * disk subsystem. 4824 */ 4825 if (modesense_capacity >= capacity) { 4826 SD_INFO(SD_LOG_COMMON, un, 4827 "sd_get_physical_geometry: adjusting acyl; " 4828 "old: %d; new: %d\n", pgeom_p->g_acyl, 4829 (modesense_capacity - capacity + spc - 1) / spc); 4830 if (sector_size != 0) { 4831 /* 1243403: NEC D38x7 drives don't support sec size */ 4832 pgeom_p->g_secsize = (unsigned short)sector_size; 4833 } 4834 pgeom_p->g_nsect = (unsigned short)nsect; 4835 pgeom_p->g_nhead = (unsigned short)nhead; 4836 pgeom_p->g_capacity = capacity; 4837 pgeom_p->g_acyl = 4838 (modesense_capacity - pgeom_p->g_capacity + spc - 1) / spc; 4839 pgeom_p->g_ncyl = ncyl - pgeom_p->g_acyl; 4840 } 4841 4842 pgeom_p->g_rpm = (unsigned short)rpm; 4843 pgeom_p->g_intrlv = (unsigned short)intrlv; 4844 ret = 0; 4845 4846 SD_INFO(SD_LOG_COMMON, un, 4847 "sd_get_physical_geometry: mode sense geometry:\n"); 4848 SD_INFO(SD_LOG_COMMON, un, 4849 " nsect: %d; sector size: %d; interlv: %d\n", 4850 nsect, sector_size, intrlv); 4851 SD_INFO(SD_LOG_COMMON, un, 4852 " nhead: %d; ncyl: %d; rpm: %d; capacity(ms): %d\n", 4853 nhead, ncyl, rpm, modesense_capacity); 4854 SD_INFO(SD_LOG_COMMON, un, 4855 "sd_get_physical_geometry: (cached)\n"); 4856 SD_INFO(SD_LOG_COMMON, un, 4857 " ncyl: %ld; acyl: %d; nhead: %d; nsect: %d\n", 4858 pgeom_p->g_ncyl, pgeom_p->g_acyl, 4859 pgeom_p->g_nhead, pgeom_p->g_nsect); 4860 SD_INFO(SD_LOG_COMMON, un, 4861 " lbasize: %d; capacity: %ld; intrlv: %d; rpm: %d\n", 4862 pgeom_p->g_secsize, pgeom_p->g_capacity, 4863 pgeom_p->g_intrlv, pgeom_p->g_rpm); 4864 sd_ssc_assessment(ssc, SD_FMT_STANDARD); 4865 4866 page4_exit: 4867 kmem_free(p4bufp, SD_MODE_SENSE_PAGE4_LENGTH); 4868 4869 page3_exit: 4870 kmem_free(p3bufp, SD_MODE_SENSE_PAGE3_LENGTH); 4871 4872 if (status != 0) { 4873 if (status == EIO) { 4874 /* 4875 * Some disks do not support mode sense(6); we 4876 * should ignore this kind of error (sense key is 4877 * 0x5 - illegal request). 4878 */ 4879 uint8_t *sensep; 4880 int senlen; 4881 4882 sensep = (uint8_t *)ssc->ssc_uscsi_cmd->uscsi_rqbuf; 4883 senlen = (int)(ssc->ssc_uscsi_cmd->uscsi_rqlen - 4884 ssc->ssc_uscsi_cmd->uscsi_rqresid); 4885 4886 if (senlen > 0 && 4887 scsi_sense_key(sensep) == KEY_ILLEGAL_REQUEST) { 4888 sd_ssc_assessment(ssc, 4889 SD_FMT_IGNORE_COMPROMISE); 4890 } else { 4891 sd_ssc_assessment(ssc, SD_FMT_STATUS_CHECK); 4892 } 4893 } else { 4894 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 4895 } 4896 } 4897 sd_ssc_fini(ssc); 4898 return (ret); 4899 } 4900 4901 /* 4902 * Function: sd_get_virtual_geometry 4903 * 4904 * Description: Ask the controller to tell us about the target device.
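 *
 *		The "geometry" capability is returned as a packed word with
 *		the head count in the upper 16 bits and the sectors-per-track
 *		count in the lower 16 bits; e.g. a returned value of
 *		0x00ff003f decodes to 255 heads and 63 sectors per track.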
4905 * 4906 * Arguments: un - pointer to softstate 4907 * lgeom_p - logical geometry cache to be initialized 4908 * capacity - disk capacity in #blocks; lbasize - block size in bytes 4909 * 4910 * Context: Kernel thread only 4911 */ 4912 4913 static int 4914 sd_get_virtual_geometry(struct sd_lun *un, cmlb_geom_t *lgeom_p, 4915 diskaddr_t capacity, int lbasize) 4916 { 4917 uint_t geombuf; 4918 int spc; 4919 4920 ASSERT(un != NULL); 4921 4922 /* Set sector size, and total number of sectors */ 4923 (void) scsi_ifsetcap(SD_ADDRESS(un), "sector-size", lbasize, 1); 4924 (void) scsi_ifsetcap(SD_ADDRESS(un), "total-sectors", capacity, 1); 4925 4926 /* Let the HBA tell us its geometry */ 4927 geombuf = (uint_t)scsi_ifgetcap(SD_ADDRESS(un), "geometry", 1); 4928 4929 /* A value of -1 indicates an undefined "geometry" property */ 4930 if (geombuf == (-1)) { 4931 return (EINVAL); 4932 } 4933 4934 /* Initialize the logical geometry cache. */ 4935 lgeom_p->g_nhead = (geombuf >> 16) & 0xffff; 4936 lgeom_p->g_nsect = geombuf & 0xffff; 4937 lgeom_p->g_secsize = un->un_sys_blocksize; 4938 4939 spc = lgeom_p->g_nhead * lgeom_p->g_nsect; 4940 4941 /* 4942 * Note: The driver originally converted the capacity value from 4943 * target blocks to system blocks. However, the capacity value passed 4944 * to this routine is already in terms of system blocks (this scaling 4945 * is done when the READ CAPACITY command is issued and processed). 4946 * This 'error' may have gone undetected because the usage of g_ncyl 4947 * (which is based upon g_capacity) is very limited within the driver. 4948 */ 4949 lgeom_p->g_capacity = capacity; 4950 4951 /* 4952 * Set ncyl to zero if the HBA returned a zero nhead or nsect value; 4953 * the HBA may return zero values if the device has been removed. 4954 */ 4955 if (spc == 0) { 4956 lgeom_p->g_ncyl = 0; 4957 } else { 4958 lgeom_p->g_ncyl = lgeom_p->g_capacity / spc; 4959 } 4960 lgeom_p->g_acyl = 0; 4961 4962 SD_INFO(SD_LOG_COMMON, un, "sd_get_virtual_geometry: (cached)\n"); 4963 return (0); 4964 4965 } 4966 /* 4967 * Function: sd_update_block_info 4968 * 4969 * Description: Record the new target sector size and block count in the 4970 * driver soft state and mark them valid. 4971 * 4972 * Arguments: un: unit struct. 4973 * lbasize: new target sector size 4974 * capacity: new target capacity, i.e. block count 4975 * 4976 * Context: Kernel thread context 4977 */ 4978 4979 static void 4980 sd_update_block_info(struct sd_lun *un, uint32_t lbasize, uint64_t capacity) 4981 { 4982 if (lbasize != 0) { 4983 un->un_tgt_blocksize = lbasize; 4984 un->un_f_tgt_blocksize_is_valid = TRUE; 4985 } 4986 4987 if (capacity != 0) { 4988 un->un_blockcount = capacity; 4989 un->un_f_blockcount_is_valid = TRUE; 4990 } 4991 } 4992 4993 4994 /* 4995 * Function: sd_register_devid 4996 * 4997 * Description: This routine will obtain the device id information from the 4998 * target, obtain the serial number, and register the device 4999 * id with the ddi framework. 5000 * 5001 * Arguments: devi - the system's dev_info_t for the device.
5002 * ssc - ssc contains pointer to driver soft state (unit) structure 5003 * reservation_flag - indicates if a reservation conflict 5004 * occurred during attach 5005 * 5006 * Context: Kernel Thread 5007 */ 5008 static void 5009 sd_register_devid(sd_ssc_t *ssc, dev_info_t *devi, int reservation_flag) 5010 { 5011 int rval = 0; 5012 uchar_t *inq80 = NULL; 5013 size_t inq80_len = MAX_INQUIRY_SIZE; 5014 size_t inq80_resid = 0; 5015 uchar_t *inq83 = NULL; 5016 size_t inq83_len = MAX_INQUIRY_SIZE; 5017 size_t inq83_resid = 0; 5018 int dlen, len; 5019 char *sn; 5020 struct sd_lun *un; 5021 5022 ASSERT(ssc != NULL); 5023 un = ssc->ssc_un; 5024 ASSERT(un != NULL); 5025 ASSERT(mutex_owned(SD_MUTEX(un))); 5026 ASSERT((SD_DEVINFO(un)) == devi); 5027 5028 /* 5029 * If the transport has already registered a devid for this target, 5030 * that registration takes precedence over the driver's 5031 * determination of the devid. 5032 */ 5033 if (ddi_devid_get(SD_DEVINFO(un), &un->un_devid) == DDI_SUCCESS) { 5034 ASSERT(un->un_devid); 5035 return; /* use devid registered by the transport */ 5036 } 5037 5038 /* 5039 * This is the case of antiquated Sun disk drives that have the 5040 * FAB_DEVID property set in the disk_table. These drives manage 5041 * their devids by storing them in the last 2 available sectors on 5042 * the drive and have them fabricated by the ddi layer by calling 5043 * ddi_devid_init and passing the DEVID_FAB flag. 5044 */ 5045 if (un->un_f_opt_fab_devid == TRUE) { 5046 /* 5047 * Relying on EINVAL alone isn't reliable, since a reserved 5048 * disk may result in invalid geometry, so check to make sure 5049 * a reservation conflict did not occur during attach. 5050 */ 5051 if ((sd_get_devid(ssc) == EINVAL) && 5052 (reservation_flag != SD_TARGET_IS_RESERVED)) { 5053 /* 5054 * The devid is invalid AND there is no reservation 5055 * conflict. Fabricate a new devid. 5056 */ 5057 (void) sd_create_devid(ssc); 5058 } 5059 5060 /* Register the devid if it exists */ 5061 if (un->un_devid != NULL) { 5062 (void) ddi_devid_register(SD_DEVINFO(un), 5063 un->un_devid); 5064 SD_INFO(SD_LOG_ATTACH_DETACH, un, 5065 "sd_register_devid: Devid Fabricated\n"); 5066 } 5067 return; 5068 } 5069 5070 /* 5071 * We check the availability of the World Wide Name (0x83) and Unit 5072 * Serial Number (0x80) pages in sd_check_vpd_page_support(), which 5073 * records the result in un_vpd_page_mask; from that we decide how to 5074 * get the WWN. If 0x83 is available, that is the best choice. Our 5075 * next choice is 0x80. If neither is available, we munge the devid 5076 * from the device vid/pid/serial # for Sun qualified disks, or use 5077 * the ddi framework to fabricate a devid for non-Sun qualified disks. 5078 */ 5079 if (sd_check_vpd_page_support(ssc) == 0) { 5080 /* collect page 80 data if available */ 5081 if (un->un_vpd_page_mask & SD_VPD_UNIT_SERIAL_PG) { 5082 5083 mutex_exit(SD_MUTEX(un)); 5084 inq80 = kmem_zalloc(inq80_len, KM_SLEEP); 5085 5086 rval = sd_send_scsi_INQUIRY(ssc, inq80, inq80_len, 5087 0x01, 0x80, &inq80_resid); 5088 5089 if (rval != 0) { 5090 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 5091 kmem_free(inq80, inq80_len); 5092 inq80 = NULL; 5093 inq80_len = 0; 5094 } else if (ddi_prop_exists( 5095 DDI_DEV_T_NONE, SD_DEVINFO(un), 5096 DDI_PROP_NOTPROM | DDI_PROP_DONTPASS, 5097 INQUIRY_SERIAL_NO) == 0) { 5098 /* 5099 * If we don't already have a serial number 5100 * property, do a quick verification of the 5101 * returned data and define the property.
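 *
 *				 In an INQUIRY page 0x80 response,
 *				 byte 3 holds the page length and the
 *				 serial number string begins at byte 4;
 *				 the dlen/len checks below validate
 *				 exactly that layout.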
5102 */ 5103 dlen = inq80_len - inq80_resid; 5104 len = (size_t)inq80[3]; 5105 if ((dlen >= 4) && ((len + 4) <= dlen)) { 5106 /* 5107 * Ensure sn termination, skip leading 5108 * blanks, and create property 5109 * 'inquiry-serial-no'. 5110 */ 5111 sn = (char *)&inq80[4]; 5112 sn[len] = 0; 5113 while (*sn && (*sn == ' ')) 5114 sn++; 5115 if (*sn) { 5116 (void) ddi_prop_update_string( 5117 DDI_DEV_T_NONE, 5118 SD_DEVINFO(un), 5119 INQUIRY_SERIAL_NO, sn); 5120 } 5121 } 5122 } 5123 mutex_enter(SD_MUTEX(un)); 5124 } 5125 5126 /* collect page 83 data if available */ 5127 if (un->un_vpd_page_mask & SD_VPD_DEVID_WWN_PG) { 5128 mutex_exit(SD_MUTEX(un)); 5129 inq83 = kmem_zalloc(inq83_len, KM_SLEEP); 5130 5131 rval = sd_send_scsi_INQUIRY(ssc, inq83, inq83_len, 5132 0x01, 0x83, &inq83_resid); 5133 5134 if (rval != 0) { 5135 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 5136 kmem_free(inq83, inq83_len); 5137 inq83 = NULL; 5138 inq83_len = 0; 5139 } 5140 mutex_enter(SD_MUTEX(un)); 5141 } 5142 } 5143 5144 /* encode best devid possible based on data available */ 5145 if (ddi_devid_scsi_encode(DEVID_SCSI_ENCODE_VERSION_LATEST, 5146 (char *)ddi_driver_name(SD_DEVINFO(un)), 5147 (uchar_t *)SD_INQUIRY(un), sizeof (*SD_INQUIRY(un)), 5148 inq80, inq80_len - inq80_resid, inq83, inq83_len - 5149 inq83_resid, &un->un_devid) == DDI_SUCCESS) { 5150 5151 /* devid successfully encoded, register devid */ 5152 (void) ddi_devid_register(SD_DEVINFO(un), un->un_devid); 5153 5154 } else { 5155 /* 5156 * Unable to encode a devid based on data available. 5157 * This is not a Sun qualified disk. Older Sun disk 5158 * drives that have the SD_FAB_DEVID property 5159 * set in the disk_table and non Sun qualified 5160 * disks are treated in the same manner. These 5161 * drives manage the devid's by storing them in 5162 * last 2 available sectors on the drive and 5163 * have them fabricated by the ddi layer by 5164 * calling ddi_devid_init and passing the 5165 * DEVID_FAB flag. 5166 * Create a fabricate devid only if there's no 5167 * fabricate devid existed. 5168 */ 5169 if (sd_get_devid(ssc) == EINVAL) { 5170 (void) sd_create_devid(ssc); 5171 } 5172 un->un_f_opt_fab_devid = TRUE; 5173 5174 /* Register the devid if it exists */ 5175 if (un->un_devid != NULL) { 5176 (void) ddi_devid_register(SD_DEVINFO(un), 5177 un->un_devid); 5178 SD_INFO(SD_LOG_ATTACH_DETACH, un, 5179 "sd_register_devid: devid fabricated using " 5180 "ddi framework\n"); 5181 } 5182 } 5183 5184 /* clean up resources */ 5185 if (inq80 != NULL) { 5186 kmem_free(inq80, inq80_len); 5187 } 5188 if (inq83 != NULL) { 5189 kmem_free(inq83, inq83_len); 5190 } 5191 } 5192 5193 5194 5195 /* 5196 * Function: sd_get_devid 5197 * 5198 * Description: This routine will return 0 if a valid device id has been 5199 * obtained from the target and stored in the soft state. If a 5200 * valid device id has not been previously read and stored, a 5201 * read attempt will be made. 
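/*
 * Summary (descriptive note, added for clarity): sd_register_devid()
 * tries devid sources in this order --
 *
 *	1. a devid already registered by the transport (ddi_devid_get)
 *	2. for FAB_DEVID drives, the fabricated devid stored on disk
 *	3. a devid encoded from standard INQUIRY plus VPD pages 0x83/0x80
 *	4. as a last resort, a newly fabricated devid written to disk
 */
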
/*
 * Function: sd_get_devid
 *
 * Description: This routine will return 0 if a valid device id has been
 *		obtained from the target and stored in the soft state. If a
 *		valid device id has not been previously read and stored, a
 *		read attempt will be made.
 *
 * Arguments: un - driver soft state (unit) structure
 *
 * Return Code: 0 if we successfully get the device id
 *
 * Context: Kernel Thread
 */

static int
sd_get_devid(sd_ssc_t *ssc)
{
	struct dk_devid		*dkdevid;
	ddi_devid_t		tmpid;
	uint_t			*ip;
	size_t			sz;
	diskaddr_t		blk;
	int			status;
	int			chksum;
	int			i;
	size_t			buffer_size;
	struct sd_lun		*un;

	ASSERT(ssc != NULL);
	un = ssc->ssc_un;
	ASSERT(un != NULL);
	ASSERT(mutex_owned(SD_MUTEX(un)));

	SD_TRACE(SD_LOG_ATTACH_DETACH, un, "sd_get_devid: entry: un: 0x%p\n",
	    un);

	if (un->un_devid != NULL) {
		return (0);
	}

	mutex_exit(SD_MUTEX(un));
	if (cmlb_get_devid_block(un->un_cmlbhandle, &blk,
	    (void *)SD_PATH_DIRECT) != 0) {
		mutex_enter(SD_MUTEX(un));
		return (EINVAL);
	}

	/*
	 * Read and verify the device id, stored in the reserved cylinders
	 * at the end of the disk. The backup label is on the odd sectors of
	 * the last track of the last cylinder. The device id is on a track
	 * of the next-to-last cylinder.
	 */
	mutex_enter(SD_MUTEX(un));
	buffer_size = SD_REQBYTES2TGTBYTES(un, sizeof (struct dk_devid));
	mutex_exit(SD_MUTEX(un));
	dkdevid = kmem_alloc(buffer_size, KM_SLEEP);
	status = sd_send_scsi_READ(ssc, dkdevid, buffer_size, blk,
	    SD_PATH_DIRECT);

	if (status != 0) {
		sd_ssc_assessment(ssc, SD_FMT_IGNORE);
		goto error;
	}

	/* Validate the revision */
	if ((dkdevid->dkd_rev_hi != DK_DEVID_REV_MSB) ||
	    (dkdevid->dkd_rev_lo != DK_DEVID_REV_LSB)) {
		status = EINVAL;
		goto error;
	}

	/* Calculate the checksum */
	chksum = 0;
	ip = (uint_t *)dkdevid;
	for (i = 0; i < ((un->un_sys_blocksize - sizeof (int)) / sizeof (int));
	    i++) {
		chksum ^= ip[i];
	}

	/* Compare the checksums */
	if (DKD_GETCHKSUM(dkdevid) != chksum) {
		status = EINVAL;
		goto error;
	}

	/* Validate the device id */
	if (ddi_devid_valid((ddi_devid_t)&dkdevid->dkd_devid) != DDI_SUCCESS) {
		status = EINVAL;
		goto error;
	}

	/* Store the device id in the driver soft state */
	sz = ddi_devid_sizeof((ddi_devid_t)&dkdevid->dkd_devid);
	tmpid = kmem_alloc(sz, KM_SLEEP);

	mutex_enter(SD_MUTEX(un));

	un->un_devid = tmpid;
	bcopy(&dkdevid->dkd_devid, un->un_devid, sz);

	kmem_free(dkdevid, buffer_size);

	SD_TRACE(SD_LOG_ATTACH_DETACH, un, "sd_get_devid: exit: un:0x%p\n",
	    un);

	return (status);
error:
	mutex_enter(SD_MUTEX(un));
	kmem_free(dkdevid, buffer_size);
	return (status);
}

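/*
 * Sketch (added; a reading of the code above and of sd_write_deviceid()
 * below, not an authoritative format spec): the dk_devid sector carries
 * a revision, the encoded devid, and, in its final word, the XOR of
 * every preceding word in the block:
 *
 *	uint_t *ip = (uint_t *)dkdevid;
 *	uint_t words = (blocksize - sizeof (int)) / sizeof (int);
 *	uint_t chksum = 0;
 *	for (i = 0; i < words; i++)
 *		chksum ^= ip[i];
 *	valid = (DKD_GETCHKSUM(dkdevid) == chksum);
 */
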
/*
 * Function: sd_create_devid
 *
 * Description: This routine will fabricate the device id and write it
 *		to the disk.
 *
 * Arguments: un - driver soft state (unit) structure
 *
 * Return Code: value of the fabricated device id
 *
 * Context: Kernel Thread
 */

static ddi_devid_t
sd_create_devid(sd_ssc_t *ssc)
{
	struct sd_lun	*un;

	ASSERT(ssc != NULL);
	un = ssc->ssc_un;
	ASSERT(un != NULL);

	/* Fabricate the devid */
	if (ddi_devid_init(SD_DEVINFO(un), DEVID_FAB, 0, NULL, &un->un_devid)
	    == DDI_FAILURE) {
		return (NULL);
	}

	/* Write the devid to disk */
	if (sd_write_deviceid(ssc) != 0) {
		ddi_devid_free(un->un_devid);
		un->un_devid = NULL;
	}

	return (un->un_devid);
}

/*
 * Function: sd_write_deviceid
 *
 * Description: This routine will write the device id to the disk
 *		reserved sector.
 *
 * Arguments: un - driver soft state (unit) structure
 *
 * Return Code: -1 if the devid block cannot be located;
 *		otherwise the value returned by sd_send_scsi_WRITE
 *
 * Context: Kernel Thread
 */

static int
sd_write_deviceid(sd_ssc_t *ssc)
{
	struct dk_devid		*dkdevid;
	diskaddr_t		blk;
	uint_t			*ip, chksum;
	int			status;
	int			i;
	struct sd_lun		*un;

	ASSERT(ssc != NULL);
	un = ssc->ssc_un;
	ASSERT(un != NULL);
	ASSERT(mutex_owned(SD_MUTEX(un)));

	mutex_exit(SD_MUTEX(un));
	if (cmlb_get_devid_block(un->un_cmlbhandle, &blk,
	    (void *)SD_PATH_DIRECT) != 0) {
		mutex_enter(SD_MUTEX(un));
		return (-1);
	}

	/* Allocate the buffer */
	dkdevid = kmem_zalloc(un->un_sys_blocksize, KM_SLEEP);

	/* Fill in the revision */
	dkdevid->dkd_rev_hi = DK_DEVID_REV_MSB;
	dkdevid->dkd_rev_lo = DK_DEVID_REV_LSB;

	/* Copy in the device id */
	mutex_enter(SD_MUTEX(un));
	bcopy(un->un_devid, &dkdevid->dkd_devid,
	    ddi_devid_sizeof(un->un_devid));
	mutex_exit(SD_MUTEX(un));

	/* Calculate the checksum */
	chksum = 0;
	ip = (uint_t *)dkdevid;
	for (i = 0; i < ((un->un_sys_blocksize - sizeof (int)) / sizeof (int));
	    i++) {
		chksum ^= ip[i];
	}

	/* Fill in the checksum */
	DKD_FORMCHKSUM(chksum, dkdevid);

	/* Write the reserved sector */
	status = sd_send_scsi_WRITE(ssc, dkdevid, un->un_sys_blocksize, blk,
	    SD_PATH_DIRECT);
	if (status != 0)
		sd_ssc_assessment(ssc, SD_FMT_IGNORE);

	kmem_free(dkdevid, un->un_sys_blocksize);

	mutex_enter(SD_MUTEX(un));
	return (status);
}

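/*
 * Descriptive note (added): a fabricated devid round-trips through the
 * two routines above. sd_create_devid() asks the ddi framework for a
 * DEVID_FAB id, persists it via sd_write_deviceid(), and frees it again
 * if the write fails, so un_devid is only ever non-NULL when the id is
 * also on disk. A later attach then recovers it through sd_get_devid().
 */
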
/*
 * Function: sd_check_vpd_page_support
 *
 * Description: This routine sends an inquiry command with the EVPD bit
 *		set and a page code of 0x00 to the device. It is used to
 *		determine which vital product pages are available to find
 *		the devid. We are looking for pages 0x83 or 0x80. If we
 *		return -1, the device does not support that command.
 *
 * Arguments: un - driver soft state (unit) structure
 *
 * Return Code: 0  - success
 *		-1 - the device does not support VPD pages
 *
 * Context: This routine can sleep.
 */

static int
sd_check_vpd_page_support(sd_ssc_t *ssc)
{
	uchar_t	*page_list	= NULL;
	uchar_t	page_length	= 0xff;	/* Use max possible length */
	uchar_t	evpd		= 0x01;	/* Set the EVPD bit */
	uchar_t	page_code	= 0x00;	/* Supported VPD Pages */
	int	rval		= 0;
	int	counter;
	struct sd_lun	*un;

	ASSERT(ssc != NULL);
	un = ssc->ssc_un;
	ASSERT(un != NULL);
	ASSERT(mutex_owned(SD_MUTEX(un)));

	mutex_exit(SD_MUTEX(un));

	/*
	 * We'll set the page length to the maximum to save figuring it out
	 * with an additional call.
	 */
	page_list = kmem_zalloc(page_length, KM_SLEEP);

	rval = sd_send_scsi_INQUIRY(ssc, page_list, page_length, evpd,
	    page_code, NULL);

	if (rval != 0)
		sd_ssc_assessment(ssc, SD_FMT_IGNORE);

	mutex_enter(SD_MUTEX(un));

	/*
	 * Now we must validate that the device accepted the command, as
	 * some drives do not support it. If the drive does support it, we
	 * will return 0, and the supported pages will be in
	 * un_vpd_page_mask. If not, we return -1.
	 */
	if ((rval == 0) && (page_list[VPD_MODE_PAGE] == 0x00)) {
		/* Loop to find one of the 2 pages we need */
		counter = 4;	/* Supported pages start at byte 4, with 0x00 */

		/*
		 * Pages are returned in ascending order, and 0x83 is what
		 * we are hoping for. Byte 3 (VPD_PAGE_LENGTH) holds the
		 * number of page codes that follow the header, so the last
		 * valid index is VPD_HEAD_OFFSET plus that count.
		 */
		while ((page_list[counter] <= 0x86) &&
		    (counter <= (page_list[VPD_PAGE_LENGTH] +
		    VPD_HEAD_OFFSET))) {
			switch (page_list[counter]) {
			case 0x00:
				un->un_vpd_page_mask |= SD_VPD_SUPPORTED_PG;
				break;
			case 0x80:
				un->un_vpd_page_mask |= SD_VPD_UNIT_SERIAL_PG;
				break;
			case 0x81:
				un->un_vpd_page_mask |= SD_VPD_OPERATING_PG;
				break;
			case 0x82:
				un->un_vpd_page_mask |= SD_VPD_ASCII_OP_PG;
				break;
			case 0x83:
				un->un_vpd_page_mask |= SD_VPD_DEVID_WWN_PG;
				break;
			case 0x86:
				un->un_vpd_page_mask |=
				    SD_VPD_EXTENDED_DATA_PG;
				break;
			}
			counter++;
		}

	} else {
		rval = -1;

		SD_INFO(SD_LOG_ATTACH_DETACH, un,
		    "sd_check_vpd_page_support: This drive does not implement "
		    "VPD pages.\n");
	}

	kmem_free(page_list, page_length);

	return (rval);
}

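/*
 * Sketch (added) of the EVPD page 0x00 response parsed above, per the
 * SPC "Supported VPD pages" format as the parsing code uses it:
 *
 *	byte 0:     peripheral qualifier / device type
 *	byte 1:     page code (0x00)
 *	byte 3:     number of supported page codes that follow
 *	byte 4..n:  supported page codes, in ascending order
 *
 * e.g. a response of 00 00 00 03 00 80 83 advertises pages 0x00, 0x80
 * and 0x83, which would set SD_VPD_SUPPORTED_PG, SD_VPD_UNIT_SERIAL_PG
 * and SD_VPD_DEVID_WWN_PG above.
 */
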
/*
 * Function: sd_setup_pm
 *
 * Description: Initialize Power Management on the device
 *
 * Context: Kernel Thread
 */

static void
sd_setup_pm(sd_ssc_t *ssc, dev_info_t *devi)
{
	uint_t		log_page_size;
	uchar_t		*log_page_data;
	int		rval = 0;
	struct sd_lun	*un;

	ASSERT(ssc != NULL);
	un = ssc->ssc_un;
	ASSERT(un != NULL);

	/*
	 * Since we are called from attach, holding a mutex for
	 * un is unnecessary. Because some of the routines called
	 * from here require SD_MUTEX to not be held, assert this
	 * right up front.
	 */
	ASSERT(!mutex_owned(SD_MUTEX(un)));
	/*
	 * Since the sd device does not have the 'reg' property,
	 * cpr will not call its DDI_SUSPEND/DDI_RESUME entries.
	 * The following code is to tell cpr that this device
	 * DOES need to be suspended and resumed.
	 */
	(void) ddi_prop_update_string(DDI_DEV_T_NONE, devi,
	    "pm-hardware-state", "needs-suspend-resume");

	/*
	 * This complies with the new power management framework
	 * for certain desktop machines. Create the pm_components
	 * property as a string array property.
	 */
	if (un->un_f_pm_supported) {
		/*
		 * Not all devices have a motor; try it first. Some devices
		 * may return ILLEGAL REQUEST, and some will hang. The
		 * following START_STOP_UNIT is used to check whether the
		 * target device has a motor.
		 */
		un->un_f_start_stop_supported = TRUE;
		rval = sd_send_scsi_START_STOP_UNIT(ssc, SD_TARGET_START,
		    SD_PATH_DIRECT);

		if (rval != 0) {
			sd_ssc_assessment(ssc, SD_FMT_IGNORE);
			un->un_f_start_stop_supported = FALSE;
		}

		/*
		 * Create the pm properties anyway; otherwise the parent
		 * can't go to sleep.
		 */
		(void) sd_create_pm_components(devi, un);
		un->un_f_pm_is_enabled = TRUE;
		return;
	}

	if (!un->un_f_log_sense_supported) {
		un->un_power_level = SD_SPINDLE_ON;
		un->un_f_pm_is_enabled = FALSE;
		return;
	}

	rval = sd_log_page_supported(ssc, START_STOP_CYCLE_PAGE);

#ifdef	SDDEBUG
	if (sd_force_pm_supported) {
		/* Force a successful result */
		rval = 1;
	}
#endif

	/*
	 * If the start-stop cycle counter log page is not supported
	 * or if the pm-capable property is SD_PM_CAPABLE_FALSE (0)
	 * then we should not create the pm_components property.
	 */
	if (rval == -1) {
		/*
		 * Error.
		 * Reading log sense failed; most likely this is an older
		 * drive that does not support log sense. If this fails,
		 * auto-pm is not supported.
		 */
		un->un_power_level = SD_SPINDLE_ON;
		un->un_f_pm_is_enabled = FALSE;

	} else if (rval == 0) {
		/*
		 * Page not found.
		 * The start stop cycle counter is implemented as page
		 * START_STOP_CYCLE_VU_PAGE (0x31) in older disks. For
		 * newer disks it is implemented as START_STOP_CYCLE_PAGE
		 * (0xE).
		 */
		if (sd_log_page_supported(ssc,
		    START_STOP_CYCLE_VU_PAGE) == 1) {
			/* Page found, use this one. */
			un->un_start_stop_cycle_page =
			    START_STOP_CYCLE_VU_PAGE;
			un->un_f_pm_is_enabled = TRUE;
		} else {
			/*
			 * Error or page not found.
			 * auto-pm is not supported for this device.
			 */
			un->un_power_level = SD_SPINDLE_ON;
			un->un_f_pm_is_enabled = FALSE;
		}
	} else {
		/* Page found, use it. */
		un->un_start_stop_cycle_page = START_STOP_CYCLE_PAGE;
		un->un_f_pm_is_enabled = TRUE;
	}

	if (un->un_f_pm_is_enabled == TRUE) {
		log_page_size = START_STOP_CYCLE_COUNTER_PAGE_SIZE;
		log_page_data = kmem_zalloc(log_page_size, KM_SLEEP);

		rval = sd_send_scsi_LOG_SENSE(ssc, log_page_data,
		    log_page_size, un->un_start_stop_cycle_page,
		    0x01, 0, SD_PATH_DIRECT);

		if (rval != 0) {
			sd_ssc_assessment(ssc, SD_FMT_IGNORE);
		}

#ifdef	SDDEBUG
		if (sd_force_pm_supported) {
			/* Force a successful result */
			rval = 0;
		}
#endif

		/*
		 * If the log sense for the start/stop cycle counter page
		 * succeeds, then power management is supported and we can
		 * enable auto-pm.
		 */
		if (rval == 0) {
			(void) sd_create_pm_components(devi, un);
		} else {
			un->un_power_level = SD_SPINDLE_ON;
			un->un_f_pm_is_enabled = FALSE;
		}

		kmem_free(log_page_data, log_page_size);
	}
}

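/*
 * Decision summary (added note): the routine above enables auto-pm as
 * follows --
 *
 *	un_f_pm_supported set        -> create pm components unconditionally
 *	log sense not supported      -> pm disabled, spindle left on
 *	page 0xE supported           -> use START_STOP_CYCLE_PAGE
 *	else page 0x31 supported     -> use START_STOP_CYCLE_VU_PAGE
 *	neither page supported       -> pm disabled, spindle left on
 *
 * and, in the page cases, a successful LOG SENSE of the chosen page is
 * still required before the pm components are created.
 */
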
/*
 * Function: sd_create_pm_components
 *
 * Description: Initialize PM property.
 *
 * Context: Kernel thread context
 */

static void
sd_create_pm_components(dev_info_t *devi, struct sd_lun *un)
{
	char *pm_comp[] = { "NAME=spindle-motor", "0=off", "1=on", NULL };

	ASSERT(!mutex_owned(SD_MUTEX(un)));

	if (ddi_prop_update_string_array(DDI_DEV_T_NONE, devi,
	    "pm-components", pm_comp, 3) == DDI_PROP_SUCCESS) {
		/*
		 * When components are initially created they are idle;
		 * power up any non-removables.
		 * Note: the return value of pm_raise_power can't be used
		 * for determining if PM should be enabled for this device.
		 * Even if you check the return values and remove the
		 * property created above, the PM framework will not honor
		 * the change after the first call to pm_raise_power. Hence,
		 * removal of that property does not help if pm_raise_power
		 * fails. In the case of removable media, the start/stop
		 * will fail if the media is not present.
		 */
		if (un->un_f_attach_spinup && (pm_raise_power(SD_DEVINFO(un),
		    0, SD_SPINDLE_ON) == DDI_SUCCESS)) {
			mutex_enter(SD_MUTEX(un));
			un->un_power_level = SD_SPINDLE_ON;
			mutex_enter(&un->un_pm_mutex);
			/* Set to on and not busy. */
			un->un_pm_count = 0;
		} else {
			mutex_enter(SD_MUTEX(un));
			un->un_power_level = SD_SPINDLE_OFF;
			mutex_enter(&un->un_pm_mutex);
			/* Set to off. */
			un->un_pm_count = -1;
		}
		mutex_exit(&un->un_pm_mutex);
		mutex_exit(SD_MUTEX(un));
	} else {
		un->un_power_level = SD_SPINDLE_ON;
		un->un_f_pm_is_enabled = FALSE;
	}
}

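/*
 * Note (added): the "pm-components" property created above uses the
 * standard pm(9P) string-array form, one component with two levels:
 *
 *	pm-components = "NAME=spindle-motor", "0=off", "1=on";
 *
 * Levels 0 and 1 here correspond to the SD_SPINDLE_OFF/SD_SPINDLE_ON
 * targets later passed to sdpower() by the framework.
 */
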
/*
 * Function: sd_ddi_suspend
 *
 * Description: Performs system power-down operations. This includes
 *		setting the drive state to indicate it is suspended so
 *		that no new commands will be accepted. Also, wait for
 *		all commands that are in transport or queued to a timer
 *		for retry to complete. All timeout threads are cancelled.
 *
 * Return Code: DDI_FAILURE or DDI_SUCCESS
 *
 * Context: Kernel thread context
 */

static int
sd_ddi_suspend(dev_info_t *devi)
{
	struct sd_lun	*un;
	clock_t		wait_cmds_complete;

	un = ddi_get_soft_state(sd_state, ddi_get_instance(devi));
	if (un == NULL) {
		return (DDI_FAILURE);
	}

	SD_TRACE(SD_LOG_IO_PM, un, "sd_ddi_suspend: entry\n");

	mutex_enter(SD_MUTEX(un));

	/* Return success if the device is already suspended. */
	if (un->un_state == SD_STATE_SUSPENDED) {
		mutex_exit(SD_MUTEX(un));
		SD_TRACE(SD_LOG_IO_PM, un, "sd_ddi_suspend: "
		    "device already suspended, exiting\n");
		return (DDI_SUCCESS);
	}

	/* Return failure if the device is being used by HA */
	if (un->un_resvd_status &
	    (SD_RESERVE | SD_WANT_RESERVE | SD_LOST_RESERVE)) {
		mutex_exit(SD_MUTEX(un));
		SD_TRACE(SD_LOG_IO_PM, un, "sd_ddi_suspend: "
		    "device in use by HA, exiting\n");
		return (DDI_FAILURE);
	}

	/*
	 * Return failure if the device is in a resource wait
	 * or power changing state.
	 */
	if ((un->un_state == SD_STATE_RWAIT) ||
	    (un->un_state == SD_STATE_PM_CHANGING)) {
		mutex_exit(SD_MUTEX(un));
		SD_TRACE(SD_LOG_IO_PM, un, "sd_ddi_suspend: "
		    "device in resource wait state, exiting\n");
		return (DDI_FAILURE);
	}

	un->un_save_state = un->un_last_state;
	New_state(un, SD_STATE_SUSPENDED);

	/*
	 * Wait for all commands that are in transport or queued to a timer
	 * for retry to complete.
	 *
	 * While waiting, no new commands will be accepted or sent because
	 * of the new state we set above.
	 *
	 * Wait until the current operation has completed. If we are in the
	 * resource wait state (with an intr outstanding) then we need to
	 * wait until the intr completes and starts the next cmd. We want to
	 * wait for SD_WAIT_CMDS_COMPLETE seconds before failing the
	 * DDI_SUSPEND.
	 */
	wait_cmds_complete = ddi_get_lbolt() +
	    (sd_wait_cmds_complete * drv_usectohz(1000000));

	while (un->un_ncmds_in_transport != 0) {
		/* Fail if commands do not finish in the specified time. */
		if (cv_timedwait(&un->un_disk_busy_cv, SD_MUTEX(un),
		    wait_cmds_complete) == -1) {
			/*
			 * Undo the state changes made above. Everything
			 * must go back to its original value.
			 */
			Restore_state(un);
			un->un_last_state = un->un_save_state;
			/* Wake up any threads that might be waiting. */
			cv_broadcast(&un->un_suspend_cv);
			mutex_exit(SD_MUTEX(un));
			SD_ERROR(SD_LOG_IO_PM, un,
			    "sd_ddi_suspend: failed due to outstanding "
			    "cmds\n");
			SD_TRACE(SD_LOG_IO_PM, un,
			    "sd_ddi_suspend: exiting\n");
			return (DDI_FAILURE);
		}
	}

	/*
	 * Cancel SCSI watch thread and timeouts, if any are active.
	 */

	if (SD_OK_TO_SUSPEND_SCSI_WATCHER(un)) {
		opaque_t temp_token = un->un_swr_token;
		mutex_exit(SD_MUTEX(un));
		scsi_watch_suspend(temp_token);
		mutex_enter(SD_MUTEX(un));
	}

	if (un->un_reset_throttle_timeid != NULL) {
		timeout_id_t temp_id = un->un_reset_throttle_timeid;
		un->un_reset_throttle_timeid = NULL;
		mutex_exit(SD_MUTEX(un));
		(void) untimeout(temp_id);
		mutex_enter(SD_MUTEX(un));
	}

	if (un->un_dcvb_timeid != NULL) {
		timeout_id_t temp_id = un->un_dcvb_timeid;
		un->un_dcvb_timeid = NULL;
		mutex_exit(SD_MUTEX(un));
		(void) untimeout(temp_id);
		mutex_enter(SD_MUTEX(un));
	}

	mutex_enter(&un->un_pm_mutex);
	if (un->un_pm_timeid != NULL) {
		timeout_id_t temp_id = un->un_pm_timeid;
		un->un_pm_timeid = NULL;
		mutex_exit(&un->un_pm_mutex);
		mutex_exit(SD_MUTEX(un));
		(void) untimeout(temp_id);
		mutex_enter(SD_MUTEX(un));
	} else {
		mutex_exit(&un->un_pm_mutex);
	}

	if (un->un_retry_timeid != NULL) {
		timeout_id_t temp_id = un->un_retry_timeid;
		un->un_retry_timeid = NULL;
		mutex_exit(SD_MUTEX(un));
		(void) untimeout(temp_id);
		mutex_enter(SD_MUTEX(un));

		if (un->un_retry_bp != NULL) {
			un->un_retry_bp->av_forw = un->un_waitq_headp;
			un->un_waitq_headp = un->un_retry_bp;
			if (un->un_waitq_tailp == NULL) {
				un->un_waitq_tailp = un->un_retry_bp;
			}
			un->un_retry_bp = NULL;
			un->un_retry_statp = NULL;
		}
	}

	if (un->un_direct_priority_timeid != NULL) {
		timeout_id_t temp_id = un->un_direct_priority_timeid;
		un->un_direct_priority_timeid = NULL;
		mutex_exit(SD_MUTEX(un));
		(void) untimeout(temp_id);
		mutex_enter(SD_MUTEX(un));
	}

	if (un->un_f_is_fibre == TRUE) {
		/*
		 * Remove callbacks for insert and remove events
		 */
		if (un->un_insert_event != NULL) {
			mutex_exit(SD_MUTEX(un));
			(void) ddi_remove_event_handler(un->un_insert_cb_id);
			mutex_enter(SD_MUTEX(un));
			un->un_insert_event = NULL;
		}

		if (un->un_remove_event != NULL) {
			mutex_exit(SD_MUTEX(un));
			(void) ddi_remove_event_handler(un->un_remove_cb_id);
			mutex_enter(SD_MUTEX(un));
			un->un_remove_event = NULL;
		}
	}

	mutex_exit(SD_MUTEX(un));

	SD_TRACE(SD_LOG_IO_PM, un, "sd_ddi_suspend: exit\n");

	return (DDI_SUCCESS);
}

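/*
 * Pattern note (added): the timeout cancellations above all use the
 * same idiom, which avoids deadlocking against a handler that itself
 * takes SD_MUTEX -- clear the id first, drop the mutex, then untimeout
 * (un_foo_timeid below is a placeholder for any of the real fields):
 *
 *	if (un->un_foo_timeid != NULL) {
 *		timeout_id_t temp_id = un->un_foo_timeid;
 *		un->un_foo_timeid = NULL;
 *		mutex_exit(SD_MUTEX(un));
 *		(void) untimeout(temp_id);
 *		mutex_enter(SD_MUTEX(un));
 *	}
 */
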
/*
 * Function: sd_ddi_pm_suspend
 *
 * Description: Set the drive state to low power.
 *		Someone else is required to actually change the drive
 *		power level.
 *
 * Arguments: un - driver soft state (unit) structure
 *
 * Return Code: DDI_FAILURE or DDI_SUCCESS
 *
 * Context: Kernel thread context
 */

static int
sd_ddi_pm_suspend(struct sd_lun *un)
{
	ASSERT(un != NULL);
	SD_TRACE(SD_LOG_POWER, un, "sd_ddi_pm_suspend: entry\n");

	ASSERT(!mutex_owned(SD_MUTEX(un)));
	mutex_enter(SD_MUTEX(un));

	/*
	 * Exit if power management is not enabled for this device, or if
	 * the device is being used by HA.
	 */
	if ((un->un_f_pm_is_enabled == FALSE) || (un->un_resvd_status &
	    (SD_RESERVE | SD_WANT_RESERVE | SD_LOST_RESERVE))) {
		mutex_exit(SD_MUTEX(un));
		SD_TRACE(SD_LOG_POWER, un, "sd_ddi_pm_suspend: exiting\n");
		return (DDI_SUCCESS);
	}

	SD_INFO(SD_LOG_POWER, un,
	    "sd_ddi_pm_suspend: un_ncmds_in_driver=%ld\n",
	    un->un_ncmds_in_driver);

	/*
	 * See if the device is not busy, i.e.:
	 *    - we have no commands in the driver for this device
	 *    - not waiting for resources
	 */
	if ((un->un_ncmds_in_driver == 0) &&
	    (un->un_state != SD_STATE_RWAIT)) {
		/*
		 * The device is not busy, so it is OK to go to the low
		 * power state. Indicate low power, but rely on someone
		 * else to actually change it.
		 */
		mutex_enter(&un->un_pm_mutex);
		un->un_pm_count = -1;
		mutex_exit(&un->un_pm_mutex);
		un->un_power_level = SD_SPINDLE_OFF;
	}

	mutex_exit(SD_MUTEX(un));

	SD_TRACE(SD_LOG_POWER, un, "sd_ddi_pm_suspend: exit\n");

	return (DDI_SUCCESS);
}

/*
 * Function: sd_ddi_resume
 *
 * Description: Performs system power-up operations.
 *
 * Return Code: DDI_SUCCESS
 *		DDI_FAILURE
 *
 * Context: Kernel thread context
 */

static int
sd_ddi_resume(dev_info_t *devi)
{
	struct sd_lun	*un;

	un = ddi_get_soft_state(sd_state, ddi_get_instance(devi));
	if (un == NULL) {
		return (DDI_FAILURE);
	}

	SD_TRACE(SD_LOG_IO_PM, un, "sd_ddi_resume: entry\n");

	mutex_enter(SD_MUTEX(un));
	Restore_state(un);

	/*
	 * Restore the state which was saved to give
	 * the right state in un_last_state.
	 */
	un->un_last_state = un->un_save_state;
	/*
	 * Note: throttle comes back at full.
	 * Also note: this MUST be done before calling pm_raise_power,
	 * otherwise the system can get hung in biowait. The scenario where
	 * this will happen is under cpr suspend. Writing of the system
	 * state goes through sddump, which writes 0 to un_throttle. If
	 * writing the system state then fails, for example if the
	 * partition is too small, then cpr attempts a resume. If the
	 * throttle isn't restored from the saved value until after calling
	 * pm_raise_power, then cmds sent in sdpower are not transported
	 * and sd_send_scsi_cmd hangs in biowait.
	 */
	un->un_throttle = un->un_saved_throttle;

	/*
	 * The chance of failure is very rare as the only command done in
	 * the power entry point is the START command when you transition
	 * from 0->1 or unknown->1. Put it to SPINDLE ON state irrespective
	 * of the state at which suspend was done. Ignore the return value
	 * as the resume should not be failed. In the case of removable
	 * media the media need not be inserted and hence there is a chance
	 * that raise power will fail with media not present.
	 */
	if (un->un_f_attach_spinup) {
		mutex_exit(SD_MUTEX(un));
		(void) pm_raise_power(SD_DEVINFO(un), 0, SD_SPINDLE_ON);
		mutex_enter(SD_MUTEX(un));
	}

	/*
	 * Don't broadcast to the suspend cv and therefore possibly
	 * start I/O until after power has been restored.
	 */
	cv_broadcast(&un->un_suspend_cv);
	cv_broadcast(&un->un_state_cv);

	/* restart thread */
	if (SD_OK_TO_RESUME_SCSI_WATCHER(un)) {
		scsi_watch_resume(un->un_swr_token);
	}

#if (defined(__fibre))
	if (un->un_f_is_fibre == TRUE) {
		/*
		 * Add callbacks for insert and remove events
		 */
		if (strcmp(un->un_node_type, DDI_NT_BLOCK_CHAN)) {
			sd_init_event_callbacks(un);
		}
	}
#endif

	/*
	 * Transport any pending commands to the target.
	 *
	 * If this is a low-activity device, commands in queue will have to
	 * wait until new commands come in, which may take awhile. Also, we
	 * specifically don't check un_ncmds_in_transport because we know
	 * that there really are no commands in progress after the unit was
	 * suspended, and we could have reached the throttle level, been
	 * suspended, and have no new commands coming in for awhile. Highly
	 * unlikely, but so is the low-activity disk scenario.
	 */
	ddi_xbuf_dispatch(un->un_xbuf_attr);

	sd_start_cmds(un, NULL);
	mutex_exit(SD_MUTEX(un));

	SD_TRACE(SD_LOG_IO_PM, un, "sd_ddi_resume: exit\n");

	return (DDI_SUCCESS);
}

/*
 * Function: sd_ddi_pm_resume
 *
 * Description: Set the drive state to powered on.
 *		Someone else is required to actually change the drive
 *		power level.
 *
 * Arguments: un - driver soft state (unit) structure
 *
 * Return Code: DDI_SUCCESS
 *
 * Context: Kernel thread context
 */

static int
sd_ddi_pm_resume(struct sd_lun *un)
{
	ASSERT(un != NULL);

	ASSERT(!mutex_owned(SD_MUTEX(un)));
	mutex_enter(SD_MUTEX(un));
	un->un_power_level = SD_SPINDLE_ON;

	ASSERT(!mutex_owned(&un->un_pm_mutex));
	mutex_enter(&un->un_pm_mutex);
	if (SD_DEVICE_IS_IN_LOW_POWER(un)) {
		un->un_pm_count++;
		ASSERT(un->un_pm_count == 0);
		/*
		 * Note: no longer do the cv_broadcast on un_suspend_cv. The
		 * un_suspend_cv is for a system resume, not a power
		 * management device resume. (4297749)
		 *	 cv_broadcast(&un->un_suspend_cv);
		 */
	}
	mutex_exit(&un->un_pm_mutex);
	mutex_exit(SD_MUTEX(un));

	return (DDI_SUCCESS);
}

/*
 * Function: sd_pm_idletimeout_handler
 *
 * Description: A timer routine that's active only while a device is busy.
 *		The purpose is to extend slightly the pm framework's busy
 *		view of the device to prevent busy/idle thrashing for
 *		back-to-back commands. Do this by comparing the current
 *		time to the time at which the last command completed and,
 *		when the difference is greater than sd_pm_idletime, call
 *		pm_idle_component. In addition to indicating idle to the
 *		pm framework, update the chain type to again use the
 *		internal pm layers of the driver.
 *
 * Arguments: arg - driver soft state (unit) structure
 *
 * Context: Executes in a timeout(9F) thread context
 */

static void
sd_pm_idletimeout_handler(void *arg)
{
	struct sd_lun	*un = arg;
	time_t		now;

	mutex_enter(&sd_detach_mutex);
	if (un->un_detach_count != 0) {
		/* Abort if the instance is detaching */
		mutex_exit(&sd_detach_mutex);
		return;
	}
	mutex_exit(&sd_detach_mutex);

	now = ddi_get_time();
	/*
	 * Grab both mutexes, in the proper order, since we're accessing
	 * both PM and softstate variables.
	 */
	mutex_enter(SD_MUTEX(un));
	mutex_enter(&un->un_pm_mutex);
	if (((now - un->un_pm_idle_time) > sd_pm_idletime) &&
	    (un->un_ncmds_in_driver == 0) && (un->un_pm_count == 0)) {
		/*
		 * Update the chain types.
		 * This takes effect on the next new command received.
		 */
		if (un->un_f_non_devbsize_supported) {
			un->un_buf_chain_type = SD_CHAIN_INFO_RMMEDIA;
		} else {
			un->un_buf_chain_type = SD_CHAIN_INFO_DISK;
		}
		un->un_uscsi_chain_type = SD_CHAIN_INFO_USCSI_CMD;

		SD_TRACE(SD_LOG_IO_PM, un,
		    "sd_pm_idletimeout_handler: idling device\n");
		(void) pm_idle_component(SD_DEVINFO(un), 0);
		un->un_pm_idle_timeid = NULL;
	} else {
		un->un_pm_idle_timeid =
		    timeout(sd_pm_idletimeout_handler, un,
		    (drv_usectohz((clock_t)300000))); /* 300 ms. */
	}
	mutex_exit(&un->un_pm_mutex);
	mutex_exit(SD_MUTEX(un));
}

/*
 * Function: sd_pm_timeout_handler
 *
 * Description: Callback to tell the framework we are idle.
 *
 * Context: timeout(9F) thread context.
 */

static void
sd_pm_timeout_handler(void *arg)
{
	struct sd_lun *un = arg;

	(void) pm_idle_component(SD_DEVINFO(un), 0);
	mutex_enter(&un->un_pm_mutex);
	un->un_pm_timeid = NULL;
	mutex_exit(&un->un_pm_mutex);
}

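/*
 * Pairing note (added): the pm(9F) busy/idle calls used by this driver
 * must balance. A sketch of the protocol as these handlers use it:
 *
 *	(void) pm_busy_component(SD_DEVINFO(un), 0);
 *		... the device is kept powered while busy ...
 *	(void) pm_idle_component(SD_DEVINFO(un), 0);
 *
 * sd_pm_timeout_handler() above exists purely to deliver the matching
 * idle call after the interval recommended by pm_trans_check() in
 * sdpower() below has elapsed.
 */
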
/*
 * Function: sdpower
 *
 * Description: PM entry point.
 *
 * Return Code: DDI_SUCCESS
 *		DDI_FAILURE
 *
 * Context: Kernel thread context
 */

static int
sdpower(dev_info_t *devi, int component, int level)
{
	struct sd_lun	*un;
	int		instance;
	int		rval = DDI_SUCCESS;
	uint_t		i, log_page_size, maxcycles, ncycles;
	uchar_t		*log_page_data;
	int		log_sense_page;
	int		medium_present;
	time_t		intvlp;
	dev_t		dev;
	struct pm_trans_data	sd_pm_tran_data;
	uchar_t		save_state;
	int		sval;
	uchar_t		state_before_pm;
	int		got_semaphore_here;
	sd_ssc_t	*ssc;

	instance = ddi_get_instance(devi);

	if (((un = ddi_get_soft_state(sd_state, instance)) == NULL) ||
	    (SD_SPINDLE_OFF > level) || (level > SD_SPINDLE_ON) ||
	    component != 0) {
		return (DDI_FAILURE);
	}

	dev = sd_make_device(SD_DEVINFO(un));
	ssc = sd_ssc_init(un);

	SD_TRACE(SD_LOG_IO_PM, un, "sdpower: entry, level = %d\n", level);

	/*
	 * Must synchronize power down with close.
	 * Attempt to decrement/acquire the open/close semaphore,
	 * but do NOT wait on it. If it's not greater than zero,
	 * i.e. it can't be decremented without waiting, then
	 * someone else, either open or close, already has it
	 * and the try returns 0. Use that knowledge here to determine
	 * if it's OK to change the device power level.
	 * Also, only increment it on exit if it was decremented, i.e.
	 * gotten, here.
	 */
	got_semaphore_here = sema_tryp(&un->un_semoclose);

	mutex_enter(SD_MUTEX(un));

	SD_INFO(SD_LOG_POWER, un, "sdpower: un_ncmds_in_driver = %ld\n",
	    un->un_ncmds_in_driver);

	/*
	 * If un_ncmds_in_driver is non-zero it indicates commands are
	 * already being processed in the driver; if the semaphore was not
	 * gotten here it indicates an open or close is being processed.
	 * At the same time somebody is requesting to go to low power, which
	 * can't happen; therefore we need to return failure.
	 */
	if ((level == SD_SPINDLE_OFF) &&
	    ((un->un_ncmds_in_driver != 0) || (got_semaphore_here == 0))) {
		mutex_exit(SD_MUTEX(un));

		if (got_semaphore_here != 0) {
			sema_v(&un->un_semoclose);
		}
		SD_TRACE(SD_LOG_IO_PM, un,
		    "sdpower: exit, device has queued cmds.\n");

		goto sdpower_failed;
	}

	/*
	 * If the state is OFFLINE, the disk is completely dead; in our case
	 * we would have to put the disk on or off by sending commands,
	 * which will fail anyway, so return here.
	 *
	 * Power changes to a device that's OFFLINE or SUSPENDED
	 * are not allowed.
	 */
	if ((un->un_state == SD_STATE_OFFLINE) ||
	    (un->un_state == SD_STATE_SUSPENDED)) {
		mutex_exit(SD_MUTEX(un));

		if (got_semaphore_here != 0) {
			sema_v(&un->un_semoclose);
		}
		SD_TRACE(SD_LOG_IO_PM, un,
		    "sdpower: exit, device is off-line.\n");

		goto sdpower_failed;
	}

	/*
	 * Change the device's state to indicate its power level
	 * is being changed. Do this to prevent a power off in the
	 * middle of commands, which is especially bad on devices
	 * that are really powered off instead of just spun down.
	 */
	state_before_pm = un->un_state;
	un->un_state = SD_STATE_PM_CHANGING;

	mutex_exit(SD_MUTEX(un));

	/*
	 * If the "pm-capable" property is set to TRUE by HBA drivers,
	 * bypass the following checking; otherwise, check the log
	 * sense information for this device.
	 */
	if ((level == SD_SPINDLE_OFF) && un->un_f_log_sense_supported) {
		/*
		 * Get the log sense information to understand whether the
		 * powercycle counts have gone beyond the threshold.
		 */
		log_page_size = START_STOP_CYCLE_COUNTER_PAGE_SIZE;
		log_page_data = kmem_zalloc(log_page_size, KM_SLEEP);

		mutex_enter(SD_MUTEX(un));
		log_sense_page = un->un_start_stop_cycle_page;
		mutex_exit(SD_MUTEX(un));

		rval = sd_send_scsi_LOG_SENSE(ssc, log_page_data,
		    log_page_size, log_sense_page, 0x01, 0, SD_PATH_DIRECT);

		if (rval != 0) {
			if (rval == EIO)
				sd_ssc_assessment(ssc, SD_FMT_STATUS_CHECK);
			else
				sd_ssc_assessment(ssc, SD_FMT_IGNORE);
		}

#ifdef	SDDEBUG
		if (sd_force_pm_supported) {
			/* Force a successful result */
			rval = 0;
		}
#endif
		if (rval != 0) {
			scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
			    "Log Sense Failed\n");

			kmem_free(log_page_data, log_page_size);
			/* Cannot support power management on those drives */

			if (got_semaphore_here != 0) {
				sema_v(&un->un_semoclose);
			}
			/*
			 * On exit put the state back to its original value
			 * and broadcast to anyone waiting for the power
			 * change completion.
			 */
			mutex_enter(SD_MUTEX(un));
			un->un_state = state_before_pm;
			cv_broadcast(&un->un_suspend_cv);
			mutex_exit(SD_MUTEX(un));
			SD_TRACE(SD_LOG_IO_PM, un,
			    "sdpower: exit, Log Sense Failed.\n");

			goto sdpower_failed;
		}

		/*
		 * From the page data, convert the essential information to
		 * pm_trans_data.
		 */
		maxcycles =
		    (log_page_data[0x1c] << 24) | (log_page_data[0x1d] << 16) |
		    (log_page_data[0x1E] << 8)  | log_page_data[0x1F];

		sd_pm_tran_data.un.scsi_cycles.lifemax = maxcycles;

		ncycles =
		    (log_page_data[0x24] << 24) | (log_page_data[0x25] << 16) |
		    (log_page_data[0x26] << 8)  | log_page_data[0x27];

		sd_pm_tran_data.un.scsi_cycles.ncycles = ncycles;

		for (i = 0; i < DC_SCSI_MFR_LEN; i++) {
			sd_pm_tran_data.un.scsi_cycles.svc_date[i] =
			    log_page_data[8+i];
		}

		kmem_free(log_page_data, log_page_size);

		/*
		 * Call the pm_trans_check routine to get the OK from
		 * the global policy.
		 */

		sd_pm_tran_data.format = DC_SCSI_FORMAT;
		sd_pm_tran_data.un.scsi_cycles.flag = 0;

		rval = pm_trans_check(&sd_pm_tran_data, &intvlp);
#ifdef	SDDEBUG
		if (sd_force_pm_supported) {
			/* Force a successful result */
			rval = 1;
		}
#endif
		switch (rval) {
		case 0:
			/*
			 * Not OK to power cycle, or an error in the
			 * parameters passed. pm_trans_check has returned,
			 * in intvlp, the advised time to wait before
			 * considering a power cycle. Based on that we are
			 * supposed to pretend we are busy so that the pm
			 * framework will never call our power entry point.
			 * Install a timeout handler and wait for the
			 * recommended time to elapse so that power
			 * management can be effective again.
			 *
			 * To effect this behavior, call pm_busy_component
			 * to indicate to the framework this device is busy.
			 * By not adjusting un_pm_count the rest of PM in
			 * the driver will function normally, and
			 * independent of this, but because the framework is
			 * told the device is busy it won't attempt powering
			 * down until it gets a matching idle. The timeout
			 * handler sends this.
			 * Note: sd_pm_entry can't be called here to do this
			 * because sdpower may have been called as a result
			 * of a call to pm_raise_power from within
			 * sd_pm_entry.
			 *
			 * If a timeout handler is already active then
			 * don't install another.
			 */
			mutex_enter(&un->un_pm_mutex);
			if (un->un_pm_timeid == NULL) {
				un->un_pm_timeid =
				    timeout(sd_pm_timeout_handler,
				    un, intvlp * drv_usectohz(1000000));
				mutex_exit(&un->un_pm_mutex);
				(void) pm_busy_component(SD_DEVINFO(un), 0);
			} else {
				mutex_exit(&un->un_pm_mutex);
			}
			if (got_semaphore_here != 0) {
				sema_v(&un->un_semoclose);
			}
			/*
			 * On exit put the state back to its original value
			 * and broadcast to anyone waiting for the power
			 * change completion.
			 */
			mutex_enter(SD_MUTEX(un));
			un->un_state = state_before_pm;
			cv_broadcast(&un->un_suspend_cv);
			mutex_exit(SD_MUTEX(un));

			SD_TRACE(SD_LOG_IO_PM, un, "sdpower: exit, "
			    "trans check Failed, not ok to power cycle.\n");

			goto sdpower_failed;
		case -1:
			if (got_semaphore_here != 0) {
				sema_v(&un->un_semoclose);
			}
			/*
			 * On exit put the state back to its original value
			 * and broadcast to anyone waiting for the power
			 * change completion.
			 */
			mutex_enter(SD_MUTEX(un));
			un->un_state = state_before_pm;
			cv_broadcast(&un->un_suspend_cv);
			mutex_exit(SD_MUTEX(un));
			SD_TRACE(SD_LOG_IO_PM, un,
			    "sdpower: exit, trans check command Failed.\n");

			goto sdpower_failed;
		}
	}

	if (level == SD_SPINDLE_OFF) {
		/*
		 * Save the last state... if the STOP FAILS we need it
		 * for restoring.
		 */
		mutex_enter(SD_MUTEX(un));
		save_state = un->un_last_state;
		/*
		 * There must not be any cmds getting processed
		 * in the driver when we get here. Power to the
		 * device is potentially going off.
		 */
		ASSERT(un->un_ncmds_in_driver == 0);
		mutex_exit(SD_MUTEX(un));

		/*
		 * For now suspend the device completely before the spindle
		 * is turned off.
		 */
		if ((rval = sd_ddi_pm_suspend(un)) == DDI_FAILURE) {
			if (got_semaphore_here != 0) {
				sema_v(&un->un_semoclose);
			}
			/*
			 * On exit put the state back to its original value
			 * and broadcast to anyone waiting for the power
			 * change completion.
			 */
			mutex_enter(SD_MUTEX(un));
			un->un_state = state_before_pm;
			cv_broadcast(&un->un_suspend_cv);
			mutex_exit(SD_MUTEX(un));
			SD_TRACE(SD_LOG_IO_PM, un,
			    "sdpower: exit, PM suspend Failed.\n");

			goto sdpower_failed;
		}
	}

	/*
	 * The transition from SPINDLE_OFF to SPINDLE_ON can happen in open,
	 * close, or strategy. Dump no longer uses this routine; it uses its
	 * own code so it can be done in polled mode.
	 */

	medium_present = TRUE;

	/*
	 * When powering up, issue a TUR in case the device is at unit
	 * attention. Don't do retries. Bypass the PM layer, otherwise
	 * a deadlock on un_pm_busy_cv will occur.
	 */
	if (level == SD_SPINDLE_ON) {
		sval = sd_send_scsi_TEST_UNIT_READY(ssc,
		    SD_DONT_RETRY_TUR | SD_BYPASS_PM);
		if (sval != 0)
			sd_ssc_assessment(ssc, SD_FMT_IGNORE);
	}

	SD_TRACE(SD_LOG_IO_PM, un, "sdpower: sending \'%s\' unit\n",
	    ((level == SD_SPINDLE_ON) ? "START" : "STOP"));

	sval = sd_send_scsi_START_STOP_UNIT(ssc,
	    ((level == SD_SPINDLE_ON) ? SD_TARGET_START : SD_TARGET_STOP),
	    SD_PATH_DIRECT);
	if (sval != 0) {
		if (sval == EIO)
			sd_ssc_assessment(ssc, SD_FMT_STATUS_CHECK);
		else
			sd_ssc_assessment(ssc, SD_FMT_IGNORE);
	}

	/* Command failed, check for media present. */
	if ((sval == ENXIO) && un->un_f_has_removable_media) {
		medium_present = FALSE;
	}

	/*
	 * The conditions of interest here are:
	 *   if a spindle off with media present fails,
	 *	then restore the state and return an error.
	 *   else if a spindle on fails,
	 *	then return an error (there's no state to restore).
	 * In all other cases we setup for the new state
	 * and return success.
	 */
	switch (level) {
	case SD_SPINDLE_OFF:
		if ((medium_present == TRUE) && (sval != 0)) {
			/* The stop command from above failed */
			rval = DDI_FAILURE;
			/*
			 * The stop command failed, and we have media
			 * present. Put the level back by calling
			 * sd_ddi_pm_resume() and set the state back to
			 * its previous value.
			 */
			(void) sd_ddi_pm_resume(un);
			mutex_enter(SD_MUTEX(un));
			un->un_last_state = save_state;
			mutex_exit(SD_MUTEX(un));
			break;
		}
		/*
		 * The stop command from above succeeded.
		 */
		if (un->un_f_monitor_media_state) {
			/*
			 * Terminate the watch thread in case of removable
			 * media devices going into the low power state. This
			 * is as per the requirements of the pm framework;
			 * otherwise commands will be generated for the
			 * device (through the watch thread) even when the
			 * device is in the low power state.
			 */
			mutex_enter(SD_MUTEX(un));
			un->un_f_watcht_stopped = FALSE;
			if (un->un_swr_token != NULL) {
				opaque_t temp_token = un->un_swr_token;
				un->un_f_watcht_stopped = TRUE;
				un->un_swr_token = NULL;
				mutex_exit(SD_MUTEX(un));
				(void) scsi_watch_request_terminate(temp_token,
				    SCSI_WATCH_TERMINATE_ALL_WAIT);
			} else {
				mutex_exit(SD_MUTEX(un));
			}
		}
		break;

	default:	/* The level requested is spindle on... */
		/*
		 * Legacy behavior: return success on a failed spinup
		 * if there is no media in the drive.
		 * Do this by looking at medium_present here.
		 */
		if ((sval != 0) && medium_present) {
			/* The start command from above failed */
			rval = DDI_FAILURE;
			break;
		}
		/*
		 * The start command from above succeeded.
		 * Resume the devices now that we have
		 * started the disks.
		 */
		(void) sd_ddi_pm_resume(un);

		/*
		 * Resume the watch thread since it was suspended
		 * when the device went into low power mode.
		 */
		if (un->un_f_monitor_media_state) {
			mutex_enter(SD_MUTEX(un));
			if (un->un_f_watcht_stopped == TRUE) {
				opaque_t temp_token;

				un->un_f_watcht_stopped = FALSE;
				mutex_exit(SD_MUTEX(un));
				temp_token = scsi_watch_request_submit(
				    SD_SCSI_DEVP(un),
				    sd_check_media_time,
				    SENSE_LENGTH, sd_media_watch_cb,
				    (caddr_t)dev);
				mutex_enter(SD_MUTEX(un));
				un->un_swr_token = temp_token;
			}
			mutex_exit(SD_MUTEX(un));
		}
	}
	if (got_semaphore_here != 0) {
		sema_v(&un->un_semoclose);
	}
	/*
	 * On exit put the state back to its original value
	 * and broadcast to anyone waiting for the power
	 * change completion.
	 */
	mutex_enter(SD_MUTEX(un));
	un->un_state = state_before_pm;
	cv_broadcast(&un->un_suspend_cv);
	mutex_exit(SD_MUTEX(un));

	SD_TRACE(SD_LOG_IO_PM, un, "sdpower: exit, status = 0x%x\n", rval);

	sd_ssc_fini(ssc);
	return (rval);

sdpower_failed:

	sd_ssc_fini(ssc);
	return (DDI_FAILURE);
}

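/*
 * Sketch (added) of the open/close synchronization used above, per the
 * semaphore(9F) try semantics the code relies on (sema_tryp returns
 * non-zero only when it acquired the semaphore):
 *
 *	got_semaphore_here = sema_tryp(&un->un_semoclose);
 *	if (got_semaphore_here == 0) {
 *		... an open or close is in progress; refuse to power off ...
 *	}
 *	...
 *	if (got_semaphore_here != 0)
 *		sema_v(&un->un_semoclose);	(release only if acquired)
 */
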
/*
 * Function: sdattach
 *
 * Description: Driver's attach(9E) entry point function.
 *
 * Arguments: devi - opaque device info handle
 *	      cmd  - attach type
 *
 * Return Code: DDI_SUCCESS
 *		DDI_FAILURE
 *
 * Context: Kernel thread context
 */

static int
sdattach(dev_info_t *devi, ddi_attach_cmd_t cmd)
{
	switch (cmd) {
	case DDI_ATTACH:
		return (sd_unit_attach(devi));
	case DDI_RESUME:
		return (sd_ddi_resume(devi));
	default:
		break;
	}
	return (DDI_FAILURE);
}

/*
 * Function: sddetach
 *
 * Description: Driver's detach(9E) entry point function.
 *
 * Arguments: devi - opaque device info handle
 *	      cmd  - detach type
 *
 * Return Code: DDI_SUCCESS
 *		DDI_FAILURE
 *
 * Context: Kernel thread context
 */

static int
sddetach(dev_info_t *devi, ddi_detach_cmd_t cmd)
{
	switch (cmd) {
	case DDI_DETACH:
		return (sd_unit_detach(devi));
	case DDI_SUSPEND:
		return (sd_ddi_suspend(devi));
	default:
		break;
	}
	return (DDI_FAILURE);
}

/*
 * Function: sd_sync_with_callback
 *
 * Description: Prevents sd_unit_attach or sd_unit_detach from freeing
 *		the soft state while the callback routine is active.
 *
 * Arguments: un: softstate structure for the instance
 *
 * Context: Kernel thread context
 */

static void
sd_sync_with_callback(struct sd_lun *un)
{
	ASSERT(un != NULL);

	mutex_enter(SD_MUTEX(un));

	ASSERT(un->un_in_callback >= 0);

	while (un->un_in_callback > 0) {
		mutex_exit(SD_MUTEX(un));
		delay(2);
		mutex_enter(SD_MUTEX(un));
	}

	mutex_exit(SD_MUTEX(un));
}

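/*
 * Sketch (added; an assumption about the callback side rather than code
 * in this section): sd_sync_with_callback() only works if every callback
 * brackets its work with the un_in_callback counter under SD_MUTEX:
 *
 *	mutex_enter(SD_MUTEX(un));
 *	un->un_in_callback++;
 *	mutex_exit(SD_MUTEX(un));
 *	... callback work ...
 *	mutex_enter(SD_MUTEX(un));
 *	un->un_in_callback--;
 *	mutex_exit(SD_MUTEX(un));
 */
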
/*
 * Function: sd_unit_attach
 *
 * Description: Performs DDI_ATTACH processing for sdattach(). Allocates
 *		the soft state structure for the device and performs
 *		all necessary structure and device initializations.
 *
 * Arguments: devi: the system's dev_info_t for the device.
 *
 * Return Code: DDI_SUCCESS if attach is successful.
 *		DDI_FAILURE if any part of the attach fails.
 *
 * Context: Called at attach(9e) time for the DDI_ATTACH flag.
 *	    Kernel thread context only. Can sleep.
 */

static int
sd_unit_attach(dev_info_t *devi)
{
	struct scsi_device	*devp;
	struct sd_lun		*un;
	char			*variantp;
	int	reservation_flag = SD_TARGET_IS_UNRESERVED;
	int	instance;
	int	rval;
	int	wc_enabled;
	int	tgt;
	uint64_t	capacity;
	uint_t		lbasize = 0;
	dev_info_t	*pdip = ddi_get_parent(devi);
	int		offbyone = 0;
	int		geom_label_valid = 0;
	sd_ssc_t	*ssc;
	int		status;
	struct sd_fm_internal	*sfip = NULL;
#if defined(__sparc)
	int		max_xfer_size;
#endif

	/*
	 * Retrieve the target driver's private data area. This was set
	 * up by the HBA.
	 */
	devp = ddi_get_driver_private(devi);

	/*
	 * Retrieve the target ID of the device.
	 */
	tgt = ddi_prop_get_int(DDI_DEV_T_ANY, devi, DDI_PROP_DONTPASS,
	    SCSI_ADDR_PROP_TARGET, -1);

	/*
	 * Since we have no idea what state things were left in by the last
	 * user of the device, set up some 'default' settings, i.e. turn 'em
	 * off. The scsi_ifsetcap calls force re-negotiations with the
	 * drive. Do this before the scsi_probe, which sends an inquiry.
	 * This is a fix for bug (4430280).
	 * Of special importance is wide-xfer. The drive could have been
	 * left in wide transfer mode by the last driver to communicate with
	 * it, and this includes us. If that's the case, and if the
	 * following is not set up properly or we don't re-negotiate with
	 * the drive prior to transferring data to/from the drive, it causes
	 * bus parity errors, data overruns, and unexpected interrupts. This
	 * first occurred when the fix for bug (4378686) was made.
	 */
	(void) scsi_ifsetcap(&devp->sd_address, "lun-reset", 0, 1);
	(void) scsi_ifsetcap(&devp->sd_address, "wide-xfer", 0, 1);
	(void) scsi_ifsetcap(&devp->sd_address, "auto-rqsense", 0, 1);

	/*
	 * Currently, scsi_ifsetcap sets the tagged-qing capability for all
	 * LUNs on a target. Setting it per lun instance actually sets the
	 * capability of this target, which affects those luns already
	 * attached on the same target. So during attach, we can only
	 * disable this capability when no other lun has been attached on
	 * this target. By doing this, we assume a target has the same
	 * tagged-qing capability for every lun. The condition can be
	 * removed when the HBA is changed to support per-lun based
	 * tagged-qing capability.
	 */
	if (sd_scsi_get_target_lun_count(pdip, tgt) < 1) {
		(void) scsi_ifsetcap(&devp->sd_address, "tagged-qing", 0, 1);
	}

	/*
	 * Use scsi_probe() to issue an INQUIRY command to the device.
	 * This call will allocate and fill in the scsi_inquiry structure
	 * and point the sd_inq member of the scsi_device structure to it.
	 * If the attach succeeds, then this memory will not be de-allocated
	 * (via scsi_unprobe()) until the instance is detached.
	 */
	if (scsi_probe(devp, SLEEP_FUNC) != SCSIPROBE_EXISTS) {
		goto probe_failed;
	}

	/*
	 * Check the device type as specified in the inquiry data and
	 * claim it if it is of a type that we support.
	 */
	switch (devp->sd_inq->inq_dtype) {
	case DTYPE_DIRECT:
		break;
	case DTYPE_RODIRECT:
		break;
	case DTYPE_OPTICAL:
		break;
	case DTYPE_NOTPRESENT:
	default:
		/* Unsupported device type; fail the attach. */
		goto probe_failed;
	}

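/*
 * Note (added): the inq_dtype values claimed above are the standard
 * SCSI peripheral device type codes (see the SPC inquiry data
 * definitions): DTYPE_DIRECT (0x00, disks), DTYPE_RODIRECT (0x05,
 * CD/DVD), and DTYPE_OPTICAL (0x07, optical memory). Everything else,
 * including DTYPE_NOTPRESENT, is left for other target drivers.
 */
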
	/*
	 * Allocate the soft state structure for this unit.
	 *
	 * We rely upon this memory being set to all zeroes by
	 * ddi_soft_state_zalloc(). We assume that any member of the
	 * soft state structure that is not explicitly initialized by
	 * this routine will have a value of zero.
	 */
	instance = ddi_get_instance(devp->sd_dev);
	if (ddi_soft_state_zalloc(sd_state, instance) != DDI_SUCCESS) {
		goto probe_failed;
	}

	/*
	 * Retrieve a pointer to the newly-allocated soft state.
	 *
	 * This should NEVER fail if the ddi_soft_state_zalloc() call above
	 * was successful, unless something has gone horribly wrong and the
	 * ddi's soft state internals are corrupt (in which case it is
	 * probably better to halt here than just fail the attach....)
	 */
	if ((un = ddi_get_soft_state(sd_state, instance)) == NULL) {
		panic("sd_unit_attach: NULL soft state on instance:0x%x",
		    instance);
		/*NOTREACHED*/
	}

	/*
	 * Link the back ptr of the driver soft state to the scsi_device
	 * struct for this lun.
	 * Save a pointer to the softstate in the driver-private area of
	 * the scsi_device struct.
	 * Note: We cannot call SD_INFO, SD_TRACE, SD_ERROR, or SD_DIAG
	 * until we first set un->un_sd below.
	 */
	un->un_sd = devp;
	devp->sd_private = (opaque_t)un;

	/*
	 * The following must be after devp is stored in the soft state
	 * struct.
	 */
#ifdef SDDEBUG
	SD_TRACE(SD_LOG_ATTACH_DETACH, un,
	    "%s_unit_attach: un:0x%p instance:%d\n",
	    ddi_driver_name(devi), un, instance);
#endif

	/*
	 * Set up the device type and node type (for the minor nodes).
	 * By default we assume that the device can at least support the
	 * Common Command Set. Call it a CD-ROM if it reports itself
	 * as a RODIRECT device.
	 */
	switch (devp->sd_inq->inq_dtype) {
	case DTYPE_RODIRECT:
		un->un_node_type = DDI_NT_CD_CHAN;
		un->un_ctype	 = CTYPE_CDROM;
		break;
	case DTYPE_OPTICAL:
		un->un_node_type = DDI_NT_BLOCK_CHAN;
		un->un_ctype	 = CTYPE_ROD;
		break;
	default:
		un->un_node_type = DDI_NT_BLOCK_CHAN;
		un->un_ctype	 = CTYPE_CCS;
		break;
	}

6987 * 6988 * set un_f_is_fiber to TRUE ( default fiber ) 6989 */ 6990 un->un_f_is_fibre = TRUE; 6991 switch (scsi_ifgetcap(SD_ADDRESS(un), "interconnect-type", -1)) { 6992 case INTERCONNECT_SSA: 6993 un->un_interconnect_type = SD_INTERCONNECT_SSA; 6994 SD_INFO(SD_LOG_ATTACH_DETACH, un, 6995 "sd_unit_attach: un:0x%p SD_INTERCONNECT_SSA\n", un); 6996 break; 6997 case INTERCONNECT_PARALLEL: 6998 un->un_f_is_fibre = FALSE; 6999 un->un_interconnect_type = SD_INTERCONNECT_PARALLEL; 7000 SD_INFO(SD_LOG_ATTACH_DETACH, un, 7001 "sd_unit_attach: un:0x%p SD_INTERCONNECT_PARALLEL\n", un); 7002 break; 7003 case INTERCONNECT_SATA: 7004 un->un_f_is_fibre = FALSE; 7005 un->un_interconnect_type = SD_INTERCONNECT_SATA; 7006 SD_INFO(SD_LOG_ATTACH_DETACH, un, 7007 "sd_unit_attach: un:0x%p SD_INTERCONNECT_SATA\n", un); 7008 break; 7009 case INTERCONNECT_FIBRE: 7010 un->un_interconnect_type = SD_INTERCONNECT_FIBRE; 7011 SD_INFO(SD_LOG_ATTACH_DETACH, un, 7012 "sd_unit_attach: un:0x%p SD_INTERCONNECT_FIBRE\n", un); 7013 break; 7014 case INTERCONNECT_FABRIC: 7015 un->un_interconnect_type = SD_INTERCONNECT_FABRIC; 7016 un->un_node_type = DDI_NT_BLOCK_FABRIC; 7017 SD_INFO(SD_LOG_ATTACH_DETACH, un, 7018 "sd_unit_attach: un:0x%p SD_INTERCONNECT_FABRIC\n", un); 7019 break; 7020 default: 7021 #ifdef SD_DEFAULT_INTERCONNECT_TYPE 7022 /* 7023 * The HBA does not support the "interconnect-type" property 7024 * (or did not provide a recognized type). 7025 * 7026 * Note: This will be obsoleted when a single fibre channel 7027 * and parallel scsi driver is delivered. In the meantime the 7028 * interconnect type will be set to the platform default.If that 7029 * type is not parallel SCSI, it means that we should be 7030 * assuming "ssd" semantics. However, here this also means that 7031 * the FC HBA is not supporting the "interconnect-type" property 7032 * like we expect it to, so log this occurrence. 7033 */ 7034 un->un_interconnect_type = SD_DEFAULT_INTERCONNECT_TYPE; 7035 if (!SD_IS_PARALLEL_SCSI(un)) { 7036 SD_INFO(SD_LOG_ATTACH_DETACH, un, 7037 "sd_unit_attach: un:0x%p Assuming " 7038 "INTERCONNECT_FIBRE\n", un); 7039 } else { 7040 SD_INFO(SD_LOG_ATTACH_DETACH, un, 7041 "sd_unit_attach: un:0x%p Assuming " 7042 "INTERCONNECT_PARALLEL\n", un); 7043 un->un_f_is_fibre = FALSE; 7044 } 7045 #else 7046 /* 7047 * Note: This source will be implemented when a single fibre 7048 * channel and parallel scsi driver is delivered. The default 7049 * will be to assume that if a device does not support the 7050 * "interconnect-type" property it is a parallel SCSI HBA and 7051 * we will set the interconnect type for parallel scsi. 7052 */ 7053 un->un_interconnect_type = SD_INTERCONNECT_PARALLEL; 7054 un->un_f_is_fibre = FALSE; 7055 #endif 7056 break; 7057 } 7058 7059 if (un->un_f_is_fibre == TRUE) { 7060 if (scsi_ifgetcap(SD_ADDRESS(un), "scsi-version", 1) == 7061 SCSI_VERSION_3) { 7062 switch (un->un_interconnect_type) { 7063 case SD_INTERCONNECT_FIBRE: 7064 case SD_INTERCONNECT_SSA: 7065 un->un_node_type = DDI_NT_BLOCK_WWN; 7066 break; 7067 default: 7068 break; 7069 } 7070 } 7071 } 7072 7073 /* 7074 * Initialize the Request Sense command for the target 7075 */ 7076 if (sd_alloc_rqs(devp, un) != DDI_SUCCESS) { 7077 goto alloc_rqs_failed; 7078 } 7079 7080 /* 7081 * Set un_retry_count with SD_RETRY_COUNT, this is ok for Sparc 7082 * with separate binary for sd and ssd. 7083 * 7084 * x86 has 1 binary, un_retry_count is set base on connection type. 7085 * The hardcoded values will go away when Sparc uses 1 binary 7086 * for sd and ssd. 
These hardcoded values need to match 7087 * SD_RETRY_COUNT in sddef.h. 7088 * The value used is based on the interconnect type: 7089 * fibre = 3, parallel = 5. 7090 */ 7091 #if defined(__i386) || defined(__amd64) 7092 un->un_retry_count = un->un_f_is_fibre ? 3 : 5; 7093 #else 7094 un->un_retry_count = SD_RETRY_COUNT; 7095 #endif 7096 7097 /* 7098 * Set the per disk retry count to the default number of retries 7099 * for disks and CDROMs. This value can be overridden by the 7100 * disk property list or an entry in sd.conf. 7101 */ 7102 un->un_notready_retry_count = 7103 ISCD(un) ? CD_NOT_READY_RETRY_COUNT(un) 7104 : DISK_NOT_READY_RETRY_COUNT(un); 7105 7106 /* 7107 * Set the busy retry count to the default value of un_retry_count. 7108 * This can be overridden by entries in sd.conf or the device 7109 * config table. 7110 */ 7111 un->un_busy_retry_count = un->un_retry_count; 7112 7113 /* 7114 * Init the reset threshold for retries. This number determines 7115 * how many retries must be performed before a reset can be issued 7116 * (for certain error conditions). This can be overridden by entries 7117 * in sd.conf or the device config table. 7118 */ 7119 un->un_reset_retry_count = (un->un_retry_count / 2); 7120 7121 /* 7122 * Set the victim_retry_count to twice the default un_retry_count. 7123 */ 7124 un->un_victim_retry_count = (2 * un->un_retry_count); 7125 7126 /* 7127 * Set the reservation release timeout to the default value of 7128 * 5 seconds. This can be overridden by entries in ssd.conf or the 7129 * device config table. 7130 */ 7131 un->un_reserve_release_time = 5; 7132 7133 /* 7134 * Set up the default maximum transfer size. Note that this may 7135 * get updated later in the attach, when setting up default wide 7136 * operations for disks. 7137 */ 7138 #if defined(__i386) || defined(__amd64) 7139 un->un_max_xfer_size = (uint_t)SD_DEFAULT_MAX_XFER_SIZE; 7140 un->un_partial_dma_supported = 1; 7141 #else 7142 un->un_max_xfer_size = (uint_t)maxphys; 7143 #endif 7144 7145 /* 7146 * Get "allow bus device reset" property (defaults to "enabled" if 7147 * the property was not defined). This is to disable bus resets for 7148 * certain kinds of error recovery. Note: In the future when a run-time 7149 * fibre check is available the soft state flag should default to 7150 * enabled. 7151 */ 7152 if (un->un_f_is_fibre == TRUE) { 7153 un->un_f_allow_bus_device_reset = TRUE; 7154 } else { 7155 if (ddi_getprop(DDI_DEV_T_ANY, devi, DDI_PROP_DONTPASS, 7156 "allow-bus-device-reset", 1) != 0) { 7157 un->un_f_allow_bus_device_reset = TRUE; 7158 SD_INFO(SD_LOG_ATTACH_DETACH, un, 7159 "sd_unit_attach: un:0x%p Bus device reset " 7160 "enabled\n", un); 7161 } else { 7162 un->un_f_allow_bus_device_reset = FALSE; 7163 SD_INFO(SD_LOG_ATTACH_DETACH, un, 7164 "sd_unit_attach: un:0x%p Bus device reset " 7165 "disabled\n", un); 7166 } 7167 } 7168 7169 /* 7170 * Check if this is an ATAPI device. ATAPI devices use Group 1 7171 * Read/Write commands and Group 2 Mode Sense/Select commands. 7172 * 7173 * Note: The "obsolete" way of doing this is to check for the "atapi" 7174 * property. The new "variant" property with a value of "atapi" has been 7175 * introduced so that future 'variants' of standard SCSI behavior (like 7176 * atapi) could be specified by the underlying HBA drivers by supplying 7177 * a new value for the "variant" property, instead of having to define a 7178 * new property.
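 *
 * For illustration only: an HBA driver that provides ATAPI semantics
 * would typically export the property on the child device node with
 * the standard DDI property interface, e.g. (the placement and the
 * child_dip variable are hypothetical):
 *
 *	(void) ddi_prop_update_string(DDI_DEV_T_NONE, child_dip,
 *	    "variant", "atapi");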
7179 */ 7180 if (ddi_prop_get_int(DDI_DEV_T_ANY, devi, 0, "atapi", -1) != -1) { 7181 un->un_f_cfg_is_atapi = TRUE; 7182 SD_INFO(SD_LOG_ATTACH_DETACH, un, 7183 "sd_unit_attach: un:0x%p Atapi device\n", un); 7184 } 7185 if (ddi_prop_lookup_string(DDI_DEV_T_ANY, devi, 0, "variant", 7186 &variantp) == DDI_PROP_SUCCESS) { 7187 if (strcmp(variantp, "atapi") == 0) { 7188 un->un_f_cfg_is_atapi = TRUE; 7189 SD_INFO(SD_LOG_ATTACH_DETACH, un, 7190 "sd_unit_attach: un:0x%p Atapi device\n", un); 7191 } 7192 ddi_prop_free(variantp); 7193 } 7194 7195 un->un_cmd_timeout = SD_IO_TIME; 7196 7197 un->un_busy_timeout = SD_BSY_TIMEOUT; 7198 7199 /* Info on current states, statuses, etc. (Updated frequently) */ 7200 un->un_state = SD_STATE_NORMAL; 7201 un->un_last_state = SD_STATE_NORMAL; 7202 7203 /* Control & status info for command throttling */ 7204 un->un_throttle = sd_max_throttle; 7205 un->un_saved_throttle = sd_max_throttle; 7206 un->un_min_throttle = sd_min_throttle; 7207 7208 if (un->un_f_is_fibre == TRUE) { 7209 un->un_f_use_adaptive_throttle = TRUE; 7210 } else { 7211 un->un_f_use_adaptive_throttle = FALSE; 7212 } 7213 7214 /* Removable media support. */ 7215 cv_init(&un->un_state_cv, NULL, CV_DRIVER, NULL); 7216 un->un_mediastate = DKIO_NONE; 7217 un->un_specified_mediastate = DKIO_NONE; 7218 7219 /* CVs for suspend/resume (PM or DR) */ 7220 cv_init(&un->un_suspend_cv, NULL, CV_DRIVER, NULL); 7221 cv_init(&un->un_disk_busy_cv, NULL, CV_DRIVER, NULL); 7222 7223 /* Power management support. */ 7224 un->un_power_level = SD_SPINDLE_UNINIT; 7225 7226 cv_init(&un->un_wcc_cv, NULL, CV_DRIVER, NULL); 7227 un->un_f_wcc_inprog = 0; 7228 7229 /* 7230 * The open/close semaphore is used to serialize threads executing 7231 * in the driver's open & close entry point routines for a given 7232 * instance. 7233 */ 7234 (void) sema_init(&un->un_semoclose, 1, NULL, SEMA_DRIVER, NULL); 7235 7236 /* 7237 * The conf file entry and softstate variable are a forceful override, 7238 * meaning a non-zero value must be entered to change the default. 7239 */ 7240 un->un_f_disksort_disabled = FALSE; 7241 7242 /* 7243 * Retrieve the properties from the static driver table or the driver 7244 * configuration file (.conf) for this unit and update the soft state 7245 * for the device as needed for the indicated properties. 7246 * Note: the property configuration needs to occur here as some of the 7247 * following routines may have dependencies on soft state flags set 7248 * as part of the driver property configuration. 7249 */ 7250 sd_read_unit_properties(un); 7251 SD_TRACE(SD_LOG_ATTACH_DETACH, un, 7252 "sd_unit_attach: un:0x%p property configuration complete.\n", un); 7253 7254 /* 7255 * A device is treated as hotpluggable only if it has the 7256 * "hotpluggable" property; otherwise it is regarded as 7257 * non-hotpluggable. 7258 */ 7259 if (ddi_prop_get_int(DDI_DEV_T_ANY, devi, 0, "hotpluggable", 7260 -1) != -1) { 7261 un->un_f_is_hotpluggable = TRUE; 7262 } 7263 7264 /* 7265 * Set the unit's attributes (flags) according to "hotpluggable" and 7266 * the RMB bit in the INQUIRY data. 7267 */ 7268 sd_set_unit_attributes(un, devi); 7269 7270 /* 7271 * By default, we mark the capacity, lbasize, and geometry 7272 * as invalid. Only if we successfully read a valid capacity 7273 * will we update the un_blockcount and un_tgt_blocksize with the 7274 * valid values (the geometry will be validated later).
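 *
 * For example (assuming the capacity read succeeds later in attach):
 * a device reporting 2048-byte blocks has its reported block count
 * scaled by (un_tgt_blocksize / un_sys_blocksize) = 2048 / 512 = 4,
 * so that un_blockcount is kept in units of 512-byte system blocks.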
7275 */ 7276 un->un_f_blockcount_is_valid = FALSE; 7277 un->un_f_tgt_blocksize_is_valid = FALSE; 7278 7279 /* 7280 * Use DEV_BSIZE and DEV_BSHIFT as defaults, until we can determine 7281 * otherwise. 7282 */ 7283 un->un_tgt_blocksize = un->un_sys_blocksize = DEV_BSIZE; 7284 un->un_blockcount = 0; 7285 7286 /* 7287 * Set up the per-instance info needed to determine the correct 7288 * CDBs and other info for issuing commands to the target. 7289 */ 7290 sd_init_cdb_limits(un); 7291 7292 /* 7293 * Set up the IO chains to use, based upon the target type. 7294 */ 7295 if (un->un_f_non_devbsize_supported) { 7296 un->un_buf_chain_type = SD_CHAIN_INFO_RMMEDIA; 7297 } else { 7298 un->un_buf_chain_type = SD_CHAIN_INFO_DISK; 7299 } 7300 un->un_uscsi_chain_type = SD_CHAIN_INFO_USCSI_CMD; 7301 un->un_direct_chain_type = SD_CHAIN_INFO_DIRECT_CMD; 7302 un->un_priority_chain_type = SD_CHAIN_INFO_PRIORITY_CMD; 7303 7304 un->un_xbuf_attr = ddi_xbuf_attr_create(sizeof (struct sd_xbuf), 7305 sd_xbuf_strategy, un, sd_xbuf_active_limit, sd_xbuf_reserve_limit, 7306 ddi_driver_major(devi), DDI_XBUF_QTHREAD_DRIVER); 7307 ddi_xbuf_attr_register_devinfo(un->un_xbuf_attr, devi); 7308 7309 7310 if (ISCD(un)) { 7311 un->un_additional_codes = sd_additional_codes; 7312 } else { 7313 un->un_additional_codes = NULL; 7314 } 7315 7316 /* 7317 * Create the kstats here so they can be available for attach-time 7318 * routines that send commands to the unit (either polled or via 7319 * sd_send_scsi_cmd). 7320 * 7321 * Note: This is a critical sequence that needs to be maintained: 7322 * 1) Instantiate the kstats here, before any routines using the 7323 * iopath (i.e. sd_send_scsi_cmd). 7324 * 2) Instantiate and initialize the partition stats 7325 * (sd_set_pstats). 7326 * 3) Initialize the error stats (sd_set_errstats), following 7327 * sd_validate_geometry(),sd_register_devid(), 7328 * and sd_cache_control(). 7329 */ 7330 7331 un->un_stats = kstat_create(sd_label, instance, 7332 NULL, "disk", KSTAT_TYPE_IO, 1, KSTAT_FLAG_PERSISTENT); 7333 if (un->un_stats != NULL) { 7334 un->un_stats->ks_lock = SD_MUTEX(un); 7335 kstat_install(un->un_stats); 7336 } 7337 SD_TRACE(SD_LOG_ATTACH_DETACH, un, 7338 "sd_unit_attach: un:0x%p un_stats created\n", un); 7339 7340 sd_create_errstats(un, instance); 7341 if (un->un_errstats == NULL) { 7342 goto create_errstats_failed; 7343 } 7344 SD_TRACE(SD_LOG_ATTACH_DETACH, un, 7345 "sd_unit_attach: un:0x%p errstats created\n", un); 7346 7347 /* 7348 * The following if/else code was relocated here from below as part 7349 * of the fix for bug (4430280). However with the default setup added 7350 * on entry to this routine, it's no longer absolutely necessary for 7351 * this to be before the call to sd_spin_up_unit. 
7352 */ 7353 if (SD_IS_PARALLEL_SCSI(un) || SD_IS_SERIAL(un)) { 7354 int tq_trigger_flag = (((devp->sd_inq->inq_ansi == 4) || 7355 (devp->sd_inq->inq_ansi == 5)) && 7356 devp->sd_inq->inq_bque) || devp->sd_inq->inq_cmdque; 7357 7358 /* 7359 * If tagged queueing is supported by the target 7360 * and by the host adapter then we will enable it 7361 */ 7362 un->un_tagflags = 0; 7363 if ((devp->sd_inq->inq_rdf == RDF_SCSI2) && tq_trigger_flag && 7364 (un->un_f_arq_enabled == TRUE)) { 7365 if (scsi_ifsetcap(SD_ADDRESS(un), "tagged-qing", 7366 1, 1) == 1) { 7367 un->un_tagflags = FLAG_STAG; 7368 SD_INFO(SD_LOG_ATTACH_DETACH, un, 7369 "sd_unit_attach: un:0x%p tag queueing " 7370 "enabled\n", un); 7371 } else if (scsi_ifgetcap(SD_ADDRESS(un), 7372 "untagged-qing", 0) == 1) { 7373 un->un_f_opt_queueing = TRUE; 7374 un->un_saved_throttle = un->un_throttle = 7375 min(un->un_throttle, 3); 7376 } else { 7377 un->un_f_opt_queueing = FALSE; 7378 un->un_saved_throttle = un->un_throttle = 1; 7379 } 7380 } else if ((scsi_ifgetcap(SD_ADDRESS(un), "untagged-qing", 0) 7381 == 1) && (un->un_f_arq_enabled == TRUE)) { 7382 /* The Host Adapter supports internal queueing. */ 7383 un->un_f_opt_queueing = TRUE; 7384 un->un_saved_throttle = un->un_throttle = 7385 min(un->un_throttle, 3); 7386 } else { 7387 un->un_f_opt_queueing = FALSE; 7388 un->un_saved_throttle = un->un_throttle = 1; 7389 SD_INFO(SD_LOG_ATTACH_DETACH, un, 7390 "sd_unit_attach: un:0x%p no tag queueing\n", un); 7391 } 7392 7393 /* 7394 * Enable large transfers for SATA/SAS drives 7395 */ 7396 if (SD_IS_SERIAL(un)) { 7397 un->un_max_xfer_size = 7398 ddi_getprop(DDI_DEV_T_ANY, devi, 0, 7399 sd_max_xfer_size, SD_MAX_XFER_SIZE); 7400 SD_INFO(SD_LOG_ATTACH_DETACH, un, 7401 "sd_unit_attach: un:0x%p max transfer " 7402 "size=0x%x\n", un, un->un_max_xfer_size); 7403 7404 } 7405 7406 /* Setup or tear down default wide operations for disks */ 7407 7408 /* 7409 * Note: Legacy: it may be possible for both "sd_max_xfer_size" 7410 * and "ssd_max_xfer_size" to exist simultaneously on the same 7411 * system and be set to different values. In the future this 7412 * code may need to be updated when the ssd module is 7413 * obsoleted and removed from the system. (4299588) 7414 */ 7415 if (SD_IS_PARALLEL_SCSI(un) && 7416 (devp->sd_inq->inq_rdf == RDF_SCSI2) && 7417 (devp->sd_inq->inq_wbus16 || devp->sd_inq->inq_wbus32)) { 7418 if (scsi_ifsetcap(SD_ADDRESS(un), "wide-xfer", 7419 1, 1) == 1) { 7420 SD_INFO(SD_LOG_ATTACH_DETACH, un, 7421 "sd_unit_attach: un:0x%p Wide Transfer " 7422 "enabled\n", un); 7423 } 7424 7425 /* 7426 * If tagged queuing has also been enabled, then 7427 * enable large xfers 7428 */ 7429 if (un->un_saved_throttle == sd_max_throttle) { 7430 un->un_max_xfer_size = 7431 ddi_getprop(DDI_DEV_T_ANY, devi, 0, 7432 sd_max_xfer_size, SD_MAX_XFER_SIZE); 7433 SD_INFO(SD_LOG_ATTACH_DETACH, un, 7434 "sd_unit_attach: un:0x%p max transfer " 7435 "size=0x%x\n", un, un->un_max_xfer_size); 7436 } 7437 } else { 7438 if (scsi_ifsetcap(SD_ADDRESS(un), "wide-xfer", 7439 0, 1) == 1) { 7440 SD_INFO(SD_LOG_ATTACH_DETACH, un, 7441 "sd_unit_attach: un:0x%p " 7442 "Wide Transfer disabled\n", un); 7443 } 7444 } 7445 } else { 7446 un->un_tagflags = FLAG_STAG; 7447 un->un_max_xfer_size = ddi_getprop(DDI_DEV_T_ANY, 7448 devi, 0, sd_max_xfer_size, SD_MAX_XFER_SIZE); 7449 } 7450 7451 /* 7452 * If this target supports LUN reset, try to enable it. 
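 *
 * For illustration only: on the HBA side this corresponds to a
 * tran_setcap(9E) handler that accepts the "lun-reset" capability.
 * A hypothetical sketch (xx_addr_to_tgt() and the target field are
 * made up for the example):
 *
 *	static int
 *	xx_setcap(struct scsi_address *ap, char *cap, int value, int whom)
 *	{
 *		if (strcmp(cap, "lun-reset") == 0) {
 *			xx_addr_to_tgt(ap)->t_lun_reset_enable = value;
 *			return (1);
 *		}
 *		return (-1);
 *	}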
7453 */ 7454 if (un->un_f_lun_reset_enabled) { 7455 if (scsi_ifsetcap(SD_ADDRESS(un), "lun-reset", 1, 1) == 1) { 7456 SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_unit_attach: " 7457 "un:0x%p lun_reset capability set\n", un); 7458 } else { 7459 SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_unit_attach: " 7460 "un:0x%p lun-reset capability not set\n", un); 7461 } 7462 } 7463 7464 /* 7465 * Adjust the maximum transfer size. This is to fix 7466 * the problem of partial DMA support on SPARC. Some 7467 * HBA drivers, like aac, have a very small dma_attr_maxxfer 7468 * size, which requires partial DMA support on SPARC. 7469 * In the future the SPARC pci nexus driver may solve 7470 * the problem instead of this fix. 7471 */ 7472 #if defined(__sparc) 7473 max_xfer_size = scsi_ifgetcap(SD_ADDRESS(un), "dma-max", 1); 7474 if ((max_xfer_size > 0) && (max_xfer_size < un->un_max_xfer_size)) { 7475 un->un_max_xfer_size = max_xfer_size; 7476 un->un_partial_dma_supported = 1; 7477 } 7478 #endif 7479 7480 /* 7481 * Set PKT_DMA_PARTIAL flag. 7482 */ 7483 if (un->un_partial_dma_supported == 1) { 7484 un->un_pkt_flags = PKT_DMA_PARTIAL; 7485 } else { 7486 un->un_pkt_flags = 0; 7487 } 7488 7489 /* Initialize sd_ssc_t for internal uscsi commands */ 7490 ssc = sd_ssc_init(un); 7491 scsi_fm_init(devp); 7492 7493 /* 7494 * Allocate memory for the SCSI FMA structures. 7495 */ 7496 un->un_fm_private = 7497 kmem_zalloc(sizeof (struct sd_fm_internal), KM_SLEEP); 7498 sfip = (struct sd_fm_internal *)un->un_fm_private; 7499 sfip->fm_ssc.ssc_uscsi_cmd = &sfip->fm_ucmd; 7500 sfip->fm_ssc.ssc_uscsi_info = &sfip->fm_uinfo; 7501 sfip->fm_ssc.ssc_un = un; 7502 7503 /* 7504 * At this point in the attach, we have enough info in the 7505 * soft state to be able to issue commands to the target. 7506 * 7507 * All command paths used below MUST issue their commands as 7508 * SD_PATH_DIRECT. This is important as intermediate layers 7509 * are not all initialized yet (such as PM). 7510 */ 7511 7512 /* 7513 * Send a TEST UNIT READY command to the device. This should clear 7514 * any outstanding UNIT ATTENTION that may be present. 7515 * 7516 * Note: Don't check for success; just track if there is a reservation. 7517 * This is a throw-away command to clear any unit attentions. 7518 * 7519 * Note: This MUST be the first command issued to the target during 7520 * attach to ensure power on UNIT ATTENTIONS are cleared. 7521 * Pass in flag SD_DONT_RETRY_TUR to prevent the long delays associated 7522 * with attempts at spinning up a device with no media. 7523 */ 7524 status = sd_send_scsi_TEST_UNIT_READY(ssc, SD_DONT_RETRY_TUR); 7525 if (status != 0) { 7526 if (status == EACCES) 7527 reservation_flag = SD_TARGET_IS_RESERVED; 7528 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 7529 } 7530 7531 /* 7532 * If the device is NOT a removable media device, attempt to spin 7533 * it up (using the START_STOP_UNIT command) and read its capacity 7534 * (using the READ CAPACITY command). Note, however, that either 7535 * of these could fail and in some cases we would continue with 7536 * the attach despite the failure (see below). 7537 */ 7538 if (un->un_f_descr_format_supported) { 7539 7540 switch (sd_spin_up_unit(ssc)) { 7541 case 0: 7542 /* 7543 * Spin-up was successful; now try to read the 7544 * capacity. If successful then save the results 7545 * and mark the capacity & lbasize as valid.
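 *
 * As a concrete example: a 1 TB disk with 512-byte blocks reports a
 * capacity of 2^31 blocks. Assuming DK_MAX_BLOCKS is the signed
 * 32-bit block limit (2^31 - 1), such a disk already exceeds it and
 * takes the large-capacity handling in the code below.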
7546 */ 7547 SD_TRACE(SD_LOG_ATTACH_DETACH, un, 7548 "sd_unit_attach: un:0x%p spin-up successful\n", un); 7549 7550 status = sd_send_scsi_READ_CAPACITY(ssc, &capacity, 7551 &lbasize, SD_PATH_DIRECT); 7552 7553 switch (status) { 7554 case 0: { 7555 if (capacity > DK_MAX_BLOCKS) { 7556 #ifdef _LP64 7557 if ((capacity + 1) > 7558 SD_GROUP1_MAX_ADDRESS) { 7559 /* 7560 * Enable descriptor format 7561 * sense data so that we can 7562 * get 64 bit sense data 7563 * fields. 7564 */ 7565 sd_enable_descr_sense(ssc); 7566 } 7567 #else 7568 /* 32-bit kernels can't handle this */ 7569 scsi_log(SD_DEVINFO(un), 7570 sd_label, CE_WARN, 7571 "disk has %llu blocks, which " 7572 "is too large for a 32-bit " 7573 "kernel", capacity); 7574 7575 #if defined(__i386) || defined(__amd64) 7576 /* 7577 * A 1TB disk was treated as (1T - 512)B 7578 * in the past, so it might have a 7579 * valid VTOC and Solaris partitions; 7580 * we have to allow it to continue to 7581 * work. 7582 */ 7583 if (capacity - 1 > DK_MAX_BLOCKS) 7584 #endif 7585 goto spinup_failed; 7586 #endif 7587 } 7588 7589 /* 7590 * Here it's not necessary to check whether 7591 * the capacity of the device is bigger than 7592 * what the max hba cdb can support, because 7593 * sd_send_scsi_READ_CAPACITY retrieves 7594 * the capacity by sending a USCSI command, which 7595 * is constrained by the max hba cdb. In fact, 7596 * sd_send_scsi_READ_CAPACITY returns 7597 * EINVAL when a bigger cdb than the required 7598 * cdb length is used; that case is handled 7599 * below in "case EINVAL". 7600 */ 7601 7602 /* 7603 * The following relies on 7604 * sd_send_scsi_READ_CAPACITY never 7605 * returning 0 for capacity and/or lbasize. 7606 */ 7607 sd_update_block_info(un, lbasize, capacity); 7608 7609 SD_INFO(SD_LOG_ATTACH_DETACH, un, 7610 "sd_unit_attach: un:0x%p capacity = %ld " 7611 "blocks; lbasize= %ld.\n", un, 7612 un->un_blockcount, un->un_tgt_blocksize); 7613 7614 break; 7615 } 7616 case EINVAL: 7617 /* 7618 * In the case where the max-cdb-length property 7619 * is smaller than the required CDB length for 7620 * a SCSI device, a target driver can fail to 7621 * attach to that device. 7622 */ 7623 scsi_log(SD_DEVINFO(un), 7624 sd_label, CE_WARN, 7625 "disk capacity is too large " 7626 "for current cdb length"); 7627 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 7628 7629 goto spinup_failed; 7630 case EACCES: 7631 /* 7632 * Should never get here if the spin-up 7633 * succeeded, but code it in anyway. 7634 * From here, just continue with the attach... 7635 */ 7636 SD_INFO(SD_LOG_ATTACH_DETACH, un, 7637 "sd_unit_attach: un:0x%p " 7638 "sd_send_scsi_READ_CAPACITY " 7639 "returned reservation conflict\n", un); 7640 reservation_flag = SD_TARGET_IS_RESERVED; 7641 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 7642 break; 7643 default: 7644 /* 7645 * Likewise, should never get here if the 7646 * spin-up succeeded. Just continue with 7647 * the attach... 7648 */ 7649 if (status == EIO) 7650 sd_ssc_assessment(ssc, 7651 SD_FMT_STATUS_CHECK); 7652 else 7653 sd_ssc_assessment(ssc, 7654 SD_FMT_IGNORE); 7655 break; 7656 } 7657 break; 7658 case EACCES: 7659 /* 7660 * Device is reserved by another host. In this case 7661 * we could not spin it up or read the capacity, but 7662 * we continue with the attach anyway. 7663 */ 7664 SD_INFO(SD_LOG_ATTACH_DETACH, un, 7665 "sd_unit_attach: un:0x%p spin-up reservation " 7666 "conflict.\n", un); 7667 reservation_flag = SD_TARGET_IS_RESERVED; 7668 break; 7669 default: 7670 /* Fail the attach if the spin-up failed.
*/ 7671 SD_INFO(SD_LOG_ATTACH_DETACH, un, 7672 "sd_unit_attach: un:0x%p spin-up failed.", un); 7673 goto spinup_failed; 7674 } 7675 7676 } 7677 7678 /* 7679 * Check to see if this is a MMC drive 7680 */ 7681 if (ISCD(un)) { 7682 sd_set_mmc_caps(ssc); 7683 } 7684 7685 7686 /* 7687 * Add a zero-length attribute to tell the world we support 7688 * kernel ioctls (for layered drivers) 7689 */ 7690 (void) ddi_prop_create(DDI_DEV_T_NONE, devi, DDI_PROP_CANSLEEP, 7691 DDI_KERNEL_IOCTL, NULL, 0); 7692 7693 /* 7694 * Add a boolean property to tell the world we support 7695 * the B_FAILFAST flag (for layered drivers) 7696 */ 7697 (void) ddi_prop_create(DDI_DEV_T_NONE, devi, DDI_PROP_CANSLEEP, 7698 "ddi-failfast-supported", NULL, 0); 7699 7700 /* 7701 * Initialize power management 7702 */ 7703 mutex_init(&un->un_pm_mutex, NULL, MUTEX_DRIVER, NULL); 7704 cv_init(&un->un_pm_busy_cv, NULL, CV_DRIVER, NULL); 7705 sd_setup_pm(ssc, devi); 7706 if (un->un_f_pm_is_enabled == FALSE) { 7707 /* 7708 * For performance, point to a jump table that does 7709 * not include pm. 7710 * The direct and priority chains don't change with PM. 7711 * 7712 * Note: this is currently done based on individual device 7713 * capabilities. When an interface for determining system 7714 * power enabled state becomes available, or when additional 7715 * layers are added to the command chain, these values will 7716 * have to be re-evaluated for correctness. 7717 */ 7718 if (un->un_f_non_devbsize_supported) { 7719 un->un_buf_chain_type = SD_CHAIN_INFO_RMMEDIA_NO_PM; 7720 } else { 7721 un->un_buf_chain_type = SD_CHAIN_INFO_DISK_NO_PM; 7722 } 7723 un->un_uscsi_chain_type = SD_CHAIN_INFO_USCSI_CMD_NO_PM; 7724 } 7725 7726 /* 7727 * This property is set to 0 by HA software to avoid retries 7728 * on a reserved disk. (The preferred property name is 7729 * "retry-on-reservation-conflict") (1189689) 7730 * 7731 * Note: The use of a global here can have unintended consequences. A 7732 * per instance variable is preferable to match the capabilities of 7733 * different underlying hba's (4402600) 7734 */ 7735 sd_retry_on_reservation_conflict = ddi_getprop(DDI_DEV_T_ANY, devi, 7736 DDI_PROP_DONTPASS, "retry-on-reservation-conflict", 7737 sd_retry_on_reservation_conflict); 7738 if (sd_retry_on_reservation_conflict != 0) { 7739 sd_retry_on_reservation_conflict = ddi_getprop(DDI_DEV_T_ANY, 7740 devi, DDI_PROP_DONTPASS, sd_resv_conflict_name, 7741 sd_retry_on_reservation_conflict); 7742 } 7743 7744 /* Set up options for QFULL handling. */ 7745 if ((rval = ddi_getprop(DDI_DEV_T_ANY, devi, 0, 7746 "qfull-retries", -1)) != -1) { 7747 (void) scsi_ifsetcap(SD_ADDRESS(un), "qfull-retries", 7748 rval, 1); 7749 } 7750 if ((rval = ddi_getprop(DDI_DEV_T_ANY, devi, 0, 7751 "qfull-retry-interval", -1)) != -1) { 7752 (void) scsi_ifsetcap(SD_ADDRESS(un), "qfull-retry-interval", 7753 rval, 1); 7754 } 7755 7756 /* 7757 * This just prints a message that announces the existence of the 7758 * device. The message is always printed in the system logfile, but 7759 * only appears on the console if the system is booted with the 7760 * -v (verbose) argument. 
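 *
 * The announcement is typically of the form (example only; the
 * parent nexus name and addressing vary with the HBA):
 *
 *	sd1 at mpt0: target 1 lun 0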
7761 */ 7762 ddi_report_dev(devi); 7763 7764 un->un_mediastate = DKIO_NONE; 7765 7766 cmlb_alloc_handle(&un->un_cmlbhandle); 7767 7768 #if defined(__i386) || defined(__amd64) 7769 /* 7770 * On x86, compensate for off-by-1 legacy error 7771 */ 7772 if (!un->un_f_has_removable_media && !un->un_f_is_hotpluggable && 7773 (lbasize == un->un_sys_blocksize)) 7774 offbyone = CMLB_OFF_BY_ONE; 7775 #endif 7776 7777 if (cmlb_attach(devi, &sd_tgops, (int)devp->sd_inq->inq_dtype, 7778 un->un_f_has_removable_media, un->un_f_is_hotpluggable, 7779 un->un_node_type, offbyone, un->un_cmlbhandle, 7780 (void *)SD_PATH_DIRECT) != 0) { 7781 goto cmlb_attach_failed; 7782 } 7783 7784 7785 /* 7786 * Read and validate the device's geometry (ie, disk label) 7787 * A new unformatted drive will not have a valid geometry, but 7788 * the driver needs to successfully attach to this device so 7789 * the drive can be formatted via ioctls. 7790 */ 7791 geom_label_valid = (cmlb_validate(un->un_cmlbhandle, 0, 7792 (void *)SD_PATH_DIRECT) == 0) ? 1: 0; 7793 7794 mutex_enter(SD_MUTEX(un)); 7795 7796 /* 7797 * Read and initialize the devid for the unit. 7798 */ 7799 if (un->un_f_devid_supported) { 7800 sd_register_devid(ssc, devi, reservation_flag); 7801 } 7802 mutex_exit(SD_MUTEX(un)); 7803 7804 #if (defined(__fibre)) 7805 /* 7806 * Register callbacks for fibre only. You can't do this solely 7807 * on the basis of the devid_type because this is hba specific. 7808 * We need to query our hba capabilities to find out whether to 7809 * register or not. 7810 */ 7811 if (un->un_f_is_fibre) { 7812 if (strcmp(un->un_node_type, DDI_NT_BLOCK_CHAN)) { 7813 sd_init_event_callbacks(un); 7814 SD_TRACE(SD_LOG_ATTACH_DETACH, un, 7815 "sd_unit_attach: un:0x%p event callbacks inserted", 7816 un); 7817 } 7818 } 7819 #endif 7820 7821 if (un->un_f_opt_disable_cache == TRUE) { 7822 /* 7823 * Disable both read cache and write cache. This is 7824 * the historic behavior of the keywords in the config file. 7825 */ 7826 if (sd_cache_control(ssc, SD_CACHE_DISABLE, SD_CACHE_DISABLE) != 7827 0) { 7828 SD_ERROR(SD_LOG_ATTACH_DETACH, un, 7829 "sd_unit_attach: un:0x%p Could not disable " 7830 "caching", un); 7831 goto devid_failed; 7832 } 7833 } 7834 7835 /* 7836 * Check the value of the WCE bit now and 7837 * set un_f_write_cache_enabled accordingly. 7838 */ 7839 (void) sd_get_write_cache_enabled(ssc, &wc_enabled); 7840 mutex_enter(SD_MUTEX(un)); 7841 un->un_f_write_cache_enabled = (wc_enabled != 0); 7842 mutex_exit(SD_MUTEX(un)); 7843 7844 /* 7845 * Check the value of the NV_SUP bit and set 7846 * un_f_suppress_cache_flush accordingly. 7847 */ 7848 sd_get_nv_sup(ssc); 7849 7850 /* 7851 * Find out what type of reservation this disk supports. 7852 */ 7853 status = sd_send_scsi_PERSISTENT_RESERVE_IN(ssc, SD_READ_KEYS, 0, NULL); 7854 7855 switch (status) { 7856 case 0: 7857 /* 7858 * SCSI-3 reservations are supported. 7859 */ 7860 un->un_reservation_type = SD_SCSI3_RESERVATION; 7861 SD_INFO(SD_LOG_ATTACH_DETACH, un, 7862 "sd_unit_attach: un:0x%p SCSI-3 reservations\n", un); 7863 break; 7864 case ENOTSUP: 7865 /* 7866 * The PERSISTENT RESERVE IN command would not be recognized by 7867 * a SCSI-2 device, so assume the reservation type is SCSI-2. 
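 *
 * (PERSISTENT RESERVE IN is a SCSI-3 command, opcode 0x5E; a SCSI-2
 * device is expected to reject it with CHECK CONDITION and an
 * ILLEGAL REQUEST sense key, which is surfaced here as ENOTSUP.)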
7868 */ 7869 SD_INFO(SD_LOG_ATTACH_DETACH, un, 7870 "sd_unit_attach: un:0x%p SCSI-2 reservations\n", un); 7871 un->un_reservation_type = SD_SCSI2_RESERVATION; 7872 7873 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 7874 break; 7875 default: 7876 /* 7877 * default to SCSI-3 reservations 7878 */ 7879 SD_INFO(SD_LOG_ATTACH_DETACH, un, 7880 "sd_unit_attach: un:0x%p default SCSI3 reservations\n", un); 7881 un->un_reservation_type = SD_SCSI3_RESERVATION; 7882 7883 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 7884 break; 7885 } 7886 7887 /* 7888 * Set the pstat and error stat values here, so data obtained during the 7889 * previous attach-time routines is available. 7890 * 7891 * Note: This is a critical sequence that needs to be maintained: 7892 * 1) Instantiate the kstats before any routines using the iopath 7893 * (i.e. sd_send_scsi_cmd). 7894 * 2) Initialize the error stats (sd_set_errstats) and partition 7895 * stats (sd_set_pstats) here, following 7896 * cmlb_validate_geometry(), sd_register_devid(), and 7897 * sd_cache_control(). 7898 */ 7899 7900 if (un->un_f_pkstats_enabled && geom_label_valid) { 7901 sd_set_pstats(un); 7902 SD_TRACE(SD_LOG_IO_PARTITION, un, 7903 "sd_unit_attach: un:0x%p pstats created and set\n", un); 7904 } 7905 7906 sd_set_errstats(un); 7907 SD_TRACE(SD_LOG_ATTACH_DETACH, un, 7908 "sd_unit_attach: un:0x%p errstats set\n", un); 7909 7910 7911 /* 7912 * After successfully attaching an instance, we record how many luns 7913 * have been attached on the corresponding target and controller for 7914 * parallel SCSI. This information is used when sd tries 7915 * to set the tagged queuing capability in the HBA. 7916 */ 7917 if (SD_IS_PARALLEL_SCSI(un) && (tgt >= 0) && (tgt < NTARGETS_WIDE)) { 7918 sd_scsi_update_lun_on_target(pdip, tgt, SD_SCSI_LUN_ATTACH); 7919 } 7920 7921 SD_TRACE(SD_LOG_ATTACH_DETACH, un, 7922 "sd_unit_attach: un:0x%p exit success\n", un); 7923 7924 /* Uninitialize sd_ssc_t pointer */ 7925 sd_ssc_fini(ssc); 7926 7927 return (DDI_SUCCESS); 7928 7929 /* 7930 * An error occurred during the attach; clean up & return failure. 7931 */ 7932 7933 devid_failed: 7934 7935 setup_pm_failed: 7936 ddi_remove_minor_node(devi, NULL); 7937 7938 cmlb_attach_failed: 7939 /* 7940 * Cleanup from the scsi_ifsetcap() calls (437868) 7941 */ 7942 (void) scsi_ifsetcap(SD_ADDRESS(un), "lun-reset", 0, 1); 7943 (void) scsi_ifsetcap(SD_ADDRESS(un), "wide-xfer", 0, 1); 7944 7945 /* 7946 * Refer to the comments on setting tagged-qing at the beginning of 7947 * sd_unit_attach. We can only disable tagged queuing when there is 7948 * no lun attached on the target. 7949 */ 7950 if (sd_scsi_get_target_lun_count(pdip, tgt) < 1) { 7951 (void) scsi_ifsetcap(SD_ADDRESS(un), "tagged-qing", 0, 1); 7952 } 7953 7954 if (un->un_f_is_fibre == FALSE) { 7955 (void) scsi_ifsetcap(SD_ADDRESS(un), "auto-rqsense", 0, 1); 7956 } 7957 7958 spinup_failed: 7959 7960 /* Uninitialize sd_ssc_t pointer */ 7961 sd_ssc_fini(ssc); 7962 7963 mutex_enter(SD_MUTEX(un)); 7964 7965 /* Deallocate SCSI FMA memory spaces */ 7966 kmem_free(un->un_fm_private, sizeof (struct sd_fm_internal)); 7967 7968 /* Cancel callback for SD_PATH_DIRECT_PRIORITY cmd.
restart */ 7969 if (un->un_direct_priority_timeid != NULL) { 7970 timeout_id_t temp_id = un->un_direct_priority_timeid; 7971 un->un_direct_priority_timeid = NULL; 7972 mutex_exit(SD_MUTEX(un)); 7973 (void) untimeout(temp_id); 7974 mutex_enter(SD_MUTEX(un)); 7975 } 7976 7977 /* Cancel any pending start/stop timeouts */ 7978 if (un->un_startstop_timeid != NULL) { 7979 timeout_id_t temp_id = un->un_startstop_timeid; 7980 un->un_startstop_timeid = NULL; 7981 mutex_exit(SD_MUTEX(un)); 7982 (void) untimeout(temp_id); 7983 mutex_enter(SD_MUTEX(un)); 7984 } 7985 7986 /* Cancel any pending reset-throttle timeouts */ 7987 if (un->un_reset_throttle_timeid != NULL) { 7988 timeout_id_t temp_id = un->un_reset_throttle_timeid; 7989 un->un_reset_throttle_timeid = NULL; 7990 mutex_exit(SD_MUTEX(un)); 7991 (void) untimeout(temp_id); 7992 mutex_enter(SD_MUTEX(un)); 7993 } 7994 7995 /* Cancel any pending retry timeouts */ 7996 if (un->un_retry_timeid != NULL) { 7997 timeout_id_t temp_id = un->un_retry_timeid; 7998 un->un_retry_timeid = NULL; 7999 mutex_exit(SD_MUTEX(un)); 8000 (void) untimeout(temp_id); 8001 mutex_enter(SD_MUTEX(un)); 8002 } 8003 8004 /* Cancel any pending delayed cv broadcast timeouts */ 8005 if (un->un_dcvb_timeid != NULL) { 8006 timeout_id_t temp_id = un->un_dcvb_timeid; 8007 un->un_dcvb_timeid = NULL; 8008 mutex_exit(SD_MUTEX(un)); 8009 (void) untimeout(temp_id); 8010 mutex_enter(SD_MUTEX(un)); 8011 } 8012 8013 mutex_exit(SD_MUTEX(un)); 8014 8015 /* There should not be any in-progress I/O so ASSERT this check */ 8016 ASSERT(un->un_ncmds_in_transport == 0); 8017 ASSERT(un->un_ncmds_in_driver == 0); 8018 8019 /* Do not free the softstate if the callback routine is active */ 8020 sd_sync_with_callback(un); 8021 8022 /* 8023 * Partition stats apparently are not used with removables. These would 8024 * not have been created during attach, so no need to clean them up... 8025 */ 8026 if (un->un_errstats != NULL) { 8027 kstat_delete(un->un_errstats); 8028 un->un_errstats = NULL; 8029 } 8030 8031 create_errstats_failed: 8032 8033 if (un->un_stats != NULL) { 8034 kstat_delete(un->un_stats); 8035 un->un_stats = NULL; 8036 } 8037 8038 ddi_xbuf_attr_unregister_devinfo(un->un_xbuf_attr, devi); 8039 ddi_xbuf_attr_destroy(un->un_xbuf_attr); 8040 8041 ddi_prop_remove_all(devi); 8042 sema_destroy(&un->un_semoclose); 8043 cv_destroy(&un->un_state_cv); 8044 8045 getrbuf_failed: 8046 8047 sd_free_rqs(un); 8048 8049 alloc_rqs_failed: 8050 8051 devp->sd_private = NULL; 8052 bzero(un, sizeof (struct sd_lun)); /* Clear any stale data! */ 8053 8054 get_softstate_failed: 8055 /* 8056 * Note: the man pages are unclear as to whether or not doing a 8057 * ddi_soft_state_free(sd_state, instance) is the right way to 8058 * clean up after the ddi_soft_state_zalloc() if the subsequent 8059 * ddi_get_soft_state() fails. The implication seems to be 8060 * that the get_soft_state cannot fail if the zalloc succeeds. 8061 */ 8062 ddi_soft_state_free(sd_state, instance); 8063 8064 probe_failed: 8065 scsi_unprobe(devp); 8066 8067 return (DDI_FAILURE); 8068 } 8069 8070 8071 /* 8072 * Function: sd_unit_detach 8073 * 8074 * Description: Performs DDI_DETACH processing for sddetach(). 
8075 * 8076 * Return Code: DDI_SUCCESS 8077 * DDI_FAILURE 8078 * 8079 * Context: Kernel thread context 8080 */ 8081 8082 static int 8083 sd_unit_detach(dev_info_t *devi) 8084 { 8085 struct scsi_device *devp; 8086 struct sd_lun *un; 8087 int i; 8088 int tgt; 8089 dev_t dev; 8090 dev_info_t *pdip = ddi_get_parent(devi); 8091 int instance = ddi_get_instance(devi); 8092 8093 mutex_enter(&sd_detach_mutex); 8094 8095 /* 8096 * Fail the detach for any of the following: 8097 * - Unable to get the sd_lun struct for the instance 8098 * - A layered driver has an outstanding open on the instance 8099 * - Another thread is already detaching this instance 8100 * - Another thread is currently performing an open 8101 */ 8102 devp = ddi_get_driver_private(devi); 8103 if ((devp == NULL) || 8104 ((un = (struct sd_lun *)devp->sd_private) == NULL) || 8105 (un->un_ncmds_in_driver != 0) || (un->un_layer_count != 0) || 8106 (un->un_detach_count != 0) || (un->un_opens_in_progress != 0)) { 8107 mutex_exit(&sd_detach_mutex); 8108 return (DDI_FAILURE); 8109 } 8110 8111 SD_TRACE(SD_LOG_ATTACH_DETACH, un, "sd_unit_detach: entry 0x%p\n", un); 8112 8113 /* 8114 * Mark this instance as currently in a detach, to inhibit any 8115 * opens from a layered driver. 8116 */ 8117 un->un_detach_count++; 8118 mutex_exit(&sd_detach_mutex); 8119 8120 tgt = ddi_prop_get_int(DDI_DEV_T_ANY, devi, DDI_PROP_DONTPASS, 8121 SCSI_ADDR_PROP_TARGET, -1); 8122 8123 dev = sd_make_device(SD_DEVINFO(un)); 8124 8125 #ifndef lint 8126 _NOTE(COMPETING_THREADS_NOW); 8127 #endif 8128 8129 mutex_enter(SD_MUTEX(un)); 8130 8131 /* 8132 * Fail the detach if there are any outstanding layered 8133 * opens on this device. 8134 */ 8135 for (i = 0; i < NDKMAP; i++) { 8136 if (un->un_ocmap.lyropen[i] != 0) { 8137 goto err_notclosed; 8138 } 8139 } 8140 8141 /* 8142 * Verify there are NO outstanding commands issued to this device. 8143 * ie, un_ncmds_in_transport == 0. 8144 * It's possible to have outstanding commands through the physio 8145 * code path, even though everything's closed. 8146 */ 8147 if ((un->un_ncmds_in_transport != 0) || (un->un_retry_timeid != NULL) || 8148 (un->un_direct_priority_timeid != NULL) || 8149 (un->un_state == SD_STATE_RWAIT)) { 8150 mutex_exit(SD_MUTEX(un)); 8151 SD_ERROR(SD_LOG_ATTACH_DETACH, un, 8152 "sd_dr_detach: Detach failure due to outstanding cmds\n"); 8153 goto err_stillbusy; 8154 } 8155 8156 /* 8157 * If we have the device reserved, release the reservation. 8158 */ 8159 if ((un->un_resvd_status & SD_RESERVE) && 8160 !(un->un_resvd_status & SD_LOST_RESERVE)) { 8161 mutex_exit(SD_MUTEX(un)); 8162 /* 8163 * Note: sd_reserve_release sends a command to the device 8164 * via the sd_ioctlcmd() path, and can sleep. 8165 */ 8166 if (sd_reserve_release(dev, SD_RELEASE) != 0) { 8167 SD_ERROR(SD_LOG_ATTACH_DETACH, un, 8168 "sd_dr_detach: Cannot release reservation \n"); 8169 } 8170 } else { 8171 mutex_exit(SD_MUTEX(un)); 8172 } 8173 8174 /* 8175 * Untimeout any reserve recover, throttle reset, restart unit 8176 * and delayed broadcast timeout threads. Protect the timeout pointer 8177 * from getting nulled by their callback functions. 
8178 */ 8179 mutex_enter(SD_MUTEX(un)); 8180 if (un->un_resvd_timeid != NULL) { 8181 timeout_id_t temp_id = un->un_resvd_timeid; 8182 un->un_resvd_timeid = NULL; 8183 mutex_exit(SD_MUTEX(un)); 8184 (void) untimeout(temp_id); 8185 mutex_enter(SD_MUTEX(un)); 8186 } 8187 8188 if (un->un_reset_throttle_timeid != NULL) { 8189 timeout_id_t temp_id = un->un_reset_throttle_timeid; 8190 un->un_reset_throttle_timeid = NULL; 8191 mutex_exit(SD_MUTEX(un)); 8192 (void) untimeout(temp_id); 8193 mutex_enter(SD_MUTEX(un)); 8194 } 8195 8196 if (un->un_startstop_timeid != NULL) { 8197 timeout_id_t temp_id = un->un_startstop_timeid; 8198 un->un_startstop_timeid = NULL; 8199 mutex_exit(SD_MUTEX(un)); 8200 (void) untimeout(temp_id); 8201 mutex_enter(SD_MUTEX(un)); 8202 } 8203 8204 if (un->un_dcvb_timeid != NULL) { 8205 timeout_id_t temp_id = un->un_dcvb_timeid; 8206 un->un_dcvb_timeid = NULL; 8207 mutex_exit(SD_MUTEX(un)); 8208 (void) untimeout(temp_id); 8209 } else { 8210 mutex_exit(SD_MUTEX(un)); 8211 } 8212 8213 /* Remove any pending reservation reclaim requests for this device */ 8214 sd_rmv_resv_reclaim_req(dev); 8215 8216 mutex_enter(SD_MUTEX(un)); 8217 8218 /* Cancel any pending callbacks for SD_PATH_DIRECT_PRIORITY cmd. */ 8219 if (un->un_direct_priority_timeid != NULL) { 8220 timeout_id_t temp_id = un->un_direct_priority_timeid; 8221 un->un_direct_priority_timeid = NULL; 8222 mutex_exit(SD_MUTEX(un)); 8223 (void) untimeout(temp_id); 8224 mutex_enter(SD_MUTEX(un)); 8225 } 8226 8227 /* Cancel any active multi-host disk watch thread requests */ 8228 if (un->un_mhd_token != NULL) { 8229 mutex_exit(SD_MUTEX(un)); 8230 _NOTE(DATA_READABLE_WITHOUT_LOCK(sd_lun::un_mhd_token)); 8231 if (scsi_watch_request_terminate(un->un_mhd_token, 8232 SCSI_WATCH_TERMINATE_NOWAIT)) { 8233 SD_ERROR(SD_LOG_ATTACH_DETACH, un, 8234 "sd_dr_detach: Cannot cancel mhd watch request\n"); 8235 /* 8236 * Note: We are returning here after having removed 8237 * some driver timeouts above. This is consistent with 8238 * the legacy implementation but perhaps the watch 8239 * terminate call should be made with the wait flag set. 8240 */ 8241 goto err_stillbusy; 8242 } 8243 mutex_enter(SD_MUTEX(un)); 8244 un->un_mhd_token = NULL; 8245 } 8246 8247 if (un->un_swr_token != NULL) { 8248 mutex_exit(SD_MUTEX(un)); 8249 _NOTE(DATA_READABLE_WITHOUT_LOCK(sd_lun::un_swr_token)); 8250 if (scsi_watch_request_terminate(un->un_swr_token, 8251 SCSI_WATCH_TERMINATE_NOWAIT)) { 8252 SD_ERROR(SD_LOG_ATTACH_DETACH, un, 8253 "sd_dr_detach: Cannot cancel swr watch request\n"); 8254 /* 8255 * Note: We are returning here after having removed 8256 * some driver timeouts above. This is consistent with 8257 * the legacy implementation but perhaps the watch 8258 * terminate call should be made with the wait flag set. 8259 */ 8260 goto err_stillbusy; 8261 } 8262 mutex_enter(SD_MUTEX(un)); 8263 un->un_swr_token = NULL; 8264 } 8265 8266 mutex_exit(SD_MUTEX(un)); 8267 8268 /* 8269 * Clear any scsi_reset_notifies. We clear the reset notifies 8270 * if we have not registered one. 8271 * Note: The sd_mhd_reset_notify_cb() fn tries to acquire SD_MUTEX! 8272 */ 8273 (void) scsi_reset_notify(SD_ADDRESS(un), SCSI_RESET_CANCEL, 8274 sd_mhd_reset_notify_cb, (caddr_t)un); 8275 8276 /* 8277 * protect the timeout pointers from getting nulled by 8278 * their callback functions during the cancellation process. 8279 * In such a scenario untimeout can be invoked with a null value. 
8280 */ 8281 _NOTE(NO_COMPETING_THREADS_NOW); 8282 8283 mutex_enter(&un->un_pm_mutex); 8284 if (un->un_pm_idle_timeid != NULL) { 8285 timeout_id_t temp_id = un->un_pm_idle_timeid; 8286 un->un_pm_idle_timeid = NULL; 8287 mutex_exit(&un->un_pm_mutex); 8288 8289 /* 8290 * Timeout is active; cancel it. 8291 * Note that it'll never be active on a device 8292 * that does not support PM, therefore we don't 8293 * have to check before calling pm_idle_component. 8294 */ 8295 (void) untimeout(temp_id); 8296 (void) pm_idle_component(SD_DEVINFO(un), 0); 8297 mutex_enter(&un->un_pm_mutex); 8298 } 8299 8300 /* 8301 * Check whether there is already a timeout scheduled for power 8302 * management. If so, don't lower the power here; that's 8303 * the timeout handler's job. 8304 */ 8305 if (un->un_pm_timeid != NULL) { 8306 timeout_id_t temp_id = un->un_pm_timeid; 8307 un->un_pm_timeid = NULL; 8308 mutex_exit(&un->un_pm_mutex); 8309 /* 8310 * Timeout is active; cancel it. 8311 * Note that it'll never be active on a device 8312 * that does not support PM, therefore we don't 8313 * have to check before calling pm_idle_component. 8314 */ 8315 (void) untimeout(temp_id); 8316 (void) pm_idle_component(SD_DEVINFO(un), 0); 8317 8318 } else { 8319 mutex_exit(&un->un_pm_mutex); 8320 if ((un->un_f_pm_is_enabled == TRUE) && 8321 (pm_lower_power(SD_DEVINFO(un), 0, SD_SPINDLE_OFF) != 8322 DDI_SUCCESS)) { 8323 SD_ERROR(SD_LOG_ATTACH_DETACH, un, 8324 "sd_dr_detach: Lower power request failed, ignoring.\n"); 8325 /* 8326 * Fix for bug: 4297749, item # 13 8327 * The above test now includes a check to see if PM is 8328 * supported by this device before calling 8329 * pm_lower_power(). 8330 * Note, the following is not dead code. The call to 8331 * pm_lower_power above will generate a call back into 8332 * our sdpower routine which might result in a timeout 8333 * handler getting activated. Therefore the following 8334 * code is valid and necessary. 8335 */ 8336 mutex_enter(&un->un_pm_mutex); 8337 if (un->un_pm_timeid != NULL) { 8338 timeout_id_t temp_id = un->un_pm_timeid; 8339 un->un_pm_timeid = NULL; 8340 mutex_exit(&un->un_pm_mutex); 8341 (void) untimeout(temp_id); 8342 (void) pm_idle_component(SD_DEVINFO(un), 0); 8343 } else { 8344 mutex_exit(&un->un_pm_mutex); 8345 } 8346 } 8347 } 8348 8349 /* 8350 * Cleanup from the scsi_ifsetcap() calls (437868) 8351 * Relocated here from above to be after the call to 8352 * pm_lower_power, which was getting errors. 8353 */ 8354 (void) scsi_ifsetcap(SD_ADDRESS(un), "lun-reset", 0, 1); 8355 (void) scsi_ifsetcap(SD_ADDRESS(un), "wide-xfer", 0, 1); 8356 8357 /* 8358 * Currently, tagged queuing is supported per target by the HBA. 8359 * Setting this per lun instance actually sets the capability of this 8360 * target in the HBA, which affects those luns already attached on the 8361 * same target. So during detach, we can disable this capability 8362 * only when this is the only lun left on this target. By doing 8363 * this, we assume a target has the same tagged queuing capability 8364 * for every lun. The condition can be removed when the HBA is changed 8365 * to support a per-lun tagged queuing capability.
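 *
 * For example: if luns 0 and 1 of the same target are attached and
 * lun 1 detaches first, tagged queuing must be left enabled for the
 * sake of lun 0; only when the last remaining lun detaches (the lun
 * count check below) is the capability actually cleared.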
8366 */ 8367 if (sd_scsi_get_target_lun_count(pdip, tgt) <= 1) { 8368 (void) scsi_ifsetcap(SD_ADDRESS(un), "tagged-qing", 0, 1); 8369 } 8370 8371 if (un->un_f_is_fibre == FALSE) { 8372 (void) scsi_ifsetcap(SD_ADDRESS(un), "auto-rqsense", 0, 1); 8373 } 8374 8375 /* 8376 * Remove any event callbacks, fibre only 8377 */ 8378 if (un->un_f_is_fibre == TRUE) { 8379 if ((un->un_insert_event != NULL) && 8380 (ddi_remove_event_handler(un->un_insert_cb_id) != 8381 DDI_SUCCESS)) { 8382 /* 8383 * Note: We are returning here after having done 8384 * substantial cleanup above. This is consistent 8385 * with the legacy implementation but this may not 8386 * be the right thing to do. 8387 */ 8388 SD_ERROR(SD_LOG_ATTACH_DETACH, un, 8389 "sd_dr_detach: Cannot cancel insert event\n"); 8390 goto err_remove_event; 8391 } 8392 un->un_insert_event = NULL; 8393 8394 if ((un->un_remove_event != NULL) && 8395 (ddi_remove_event_handler(un->un_remove_cb_id) != 8396 DDI_SUCCESS)) { 8397 /* 8398 * Note: We are returning here after having done 8399 * substantial cleanup above. This is consistent 8400 * with the legacy implementation but this may not 8401 * be the right thing to do. 8402 */ 8403 SD_ERROR(SD_LOG_ATTACH_DETACH, un, 8404 "sd_dr_detach: Cannot cancel remove event\n"); 8405 goto err_remove_event; 8406 } 8407 un->un_remove_event = NULL; 8408 } 8409 8410 /* Do not free the softstate if the callback routine is active */ 8411 sd_sync_with_callback(un); 8412 8413 cmlb_detach(un->un_cmlbhandle, (void *)SD_PATH_DIRECT); 8414 cmlb_free_handle(&un->un_cmlbhandle); 8415 8416 /* 8417 * Hold the detach mutex here, to make sure that no other threads ever 8418 * can access a (partially) freed soft state structure. 8419 */ 8420 mutex_enter(&sd_detach_mutex); 8421 8422 /* 8423 * Clean up the soft state struct. 8424 * Cleanup is done in reverse order of allocs/inits. 8425 * At this point there should be no competing threads anymore. 8426 */ 8427 8428 scsi_fm_fini(devp); 8429 8430 /* 8431 * Deallocate memory for SCSI FMA. 8432 */ 8433 kmem_free(un->un_fm_private, sizeof (struct sd_fm_internal)); 8434 8435 /* Unregister and free device id. */ 8436 ddi_devid_unregister(devi); 8437 if (un->un_devid) { 8438 ddi_devid_free(un->un_devid); 8439 un->un_devid = NULL; 8440 } 8441 8442 /* 8443 * Destroy wmap cache if it exists. 8444 */ 8445 if (un->un_wm_cache != NULL) { 8446 kmem_cache_destroy(un->un_wm_cache); 8447 un->un_wm_cache = NULL; 8448 } 8449 8450 /* 8451 * kstat cleanup is done in detach for all device types (4363169). 8452 * We do not want to fail detach if the device kstats are not deleted 8453 * since there is a confusion about the devo_refcnt for the device. 8454 * We just delete the kstats and let detach complete successfully. 
8455 */ 8456 if (un->un_stats != NULL) { 8457 kstat_delete(un->un_stats); 8458 un->un_stats = NULL; 8459 } 8460 if (un->un_errstats != NULL) { 8461 kstat_delete(un->un_errstats); 8462 un->un_errstats = NULL; 8463 } 8464 8465 /* Remove partition stats */ 8466 if (un->un_f_pkstats_enabled) { 8467 for (i = 0; i < NSDMAP; i++) { 8468 if (un->un_pstats[i] != NULL) { 8469 kstat_delete(un->un_pstats[i]); 8470 un->un_pstats[i] = NULL; 8471 } 8472 } 8473 } 8474 8475 /* Remove xbuf registration */ 8476 ddi_xbuf_attr_unregister_devinfo(un->un_xbuf_attr, devi); 8477 ddi_xbuf_attr_destroy(un->un_xbuf_attr); 8478 8479 /* Remove driver properties */ 8480 ddi_prop_remove_all(devi); 8481 8482 mutex_destroy(&un->un_pm_mutex); 8483 cv_destroy(&un->un_pm_busy_cv); 8484 8485 cv_destroy(&un->un_wcc_cv); 8486 8487 /* Open/close semaphore */ 8488 sema_destroy(&un->un_semoclose); 8489 8490 /* Removable media condvar. */ 8491 cv_destroy(&un->un_state_cv); 8492 8493 /* Suspend/resume condvar. */ 8494 cv_destroy(&un->un_suspend_cv); 8495 cv_destroy(&un->un_disk_busy_cv); 8496 8497 sd_free_rqs(un); 8498 8499 /* Free up soft state */ 8500 devp->sd_private = NULL; 8501 8502 bzero(un, sizeof (struct sd_lun)); 8503 ddi_soft_state_free(sd_state, instance); 8504 8505 mutex_exit(&sd_detach_mutex); 8506 8507 /* This frees up the INQUIRY data associated with the device. */ 8508 scsi_unprobe(devp); 8509 8510 /* 8511 * After successfully detaching an instance, we update the information 8512 * of how many luns have been attached in the relative target and 8513 * controller for parallel SCSI. This information is used when sd tries 8514 * to set the tagged queuing capability in HBA. 8515 * Since un has been released, we can't use SD_IS_PARALLEL_SCSI(un) to 8516 * check if the device is parallel SCSI. However, we don't need to 8517 * check here because we've already checked during attach. No device 8518 * that is not parallel SCSI is in the chain. 8519 */ 8520 if ((tgt >= 0) && (tgt < NTARGETS_WIDE)) { 8521 sd_scsi_update_lun_on_target(pdip, tgt, SD_SCSI_LUN_DETACH); 8522 } 8523 8524 return (DDI_SUCCESS); 8525 8526 err_notclosed: 8527 mutex_exit(SD_MUTEX(un)); 8528 8529 err_stillbusy: 8530 _NOTE(NO_COMPETING_THREADS_NOW); 8531 8532 err_remove_event: 8533 mutex_enter(&sd_detach_mutex); 8534 un->un_detach_count--; 8535 mutex_exit(&sd_detach_mutex); 8536 8537 SD_TRACE(SD_LOG_ATTACH_DETACH, un, "sd_unit_detach: exit failure\n"); 8538 return (DDI_FAILURE); 8539 } 8540 8541 8542 /* 8543 * Function: sd_create_errstats 8544 * 8545 * Description: This routine instantiates the device error stats. 8546 * 8547 * Note: During attach the stats are instantiated first so they are 8548 * available for attach-time routines that utilize the driver 8549 * iopath to send commands to the device. The stats are initialized 8550 * separately so data obtained during some attach-time routines is 8551 * available. 
(4362483) 8552 * 8553 * Arguments: un - driver soft state (unit) structure 8554 * instance - driver instance 8555 * 8556 * Context: Kernel thread context 8557 */ 8558 8559 static void 8560 sd_create_errstats(struct sd_lun *un, int instance) 8561 { 8562 struct sd_errstats *stp; 8563 char kstatmodule_err[KSTAT_STRLEN]; 8564 char kstatname[KSTAT_STRLEN]; 8565 int ndata = (sizeof (struct sd_errstats) / sizeof (kstat_named_t)); 8566 8567 ASSERT(un != NULL); 8568 8569 if (un->un_errstats != NULL) { 8570 return; 8571 } 8572 8573 (void) snprintf(kstatmodule_err, sizeof (kstatmodule_err), 8574 "%serr", sd_label); 8575 (void) snprintf(kstatname, sizeof (kstatname), 8576 "%s%d,err", sd_label, instance); 8577 8578 un->un_errstats = kstat_create(kstatmodule_err, instance, kstatname, 8579 "device_error", KSTAT_TYPE_NAMED, ndata, KSTAT_FLAG_PERSISTENT); 8580 8581 if (un->un_errstats == NULL) { 8582 SD_ERROR(SD_LOG_ATTACH_DETACH, un, 8583 "sd_create_errstats: Failed kstat_create\n"); 8584 return; 8585 } 8586 8587 stp = (struct sd_errstats *)un->un_errstats->ks_data; 8588 kstat_named_init(&stp->sd_softerrs, "Soft Errors", 8589 KSTAT_DATA_UINT32); 8590 kstat_named_init(&stp->sd_harderrs, "Hard Errors", 8591 KSTAT_DATA_UINT32); 8592 kstat_named_init(&stp->sd_transerrs, "Transport Errors", 8593 KSTAT_DATA_UINT32); 8594 kstat_named_init(&stp->sd_vid, "Vendor", 8595 KSTAT_DATA_CHAR); 8596 kstat_named_init(&stp->sd_pid, "Product", 8597 KSTAT_DATA_CHAR); 8598 kstat_named_init(&stp->sd_revision, "Revision", 8599 KSTAT_DATA_CHAR); 8600 kstat_named_init(&stp->sd_serial, "Serial No", 8601 KSTAT_DATA_CHAR); 8602 kstat_named_init(&stp->sd_capacity, "Size", 8603 KSTAT_DATA_ULONGLONG); 8604 kstat_named_init(&stp->sd_rq_media_err, "Media Error", 8605 KSTAT_DATA_UINT32); 8606 kstat_named_init(&stp->sd_rq_ntrdy_err, "Device Not Ready", 8607 KSTAT_DATA_UINT32); 8608 kstat_named_init(&stp->sd_rq_nodev_err, "No Device", 8609 KSTAT_DATA_UINT32); 8610 kstat_named_init(&stp->sd_rq_recov_err, "Recoverable", 8611 KSTAT_DATA_UINT32); 8612 kstat_named_init(&stp->sd_rq_illrq_err, "Illegal Request", 8613 KSTAT_DATA_UINT32); 8614 kstat_named_init(&stp->sd_rq_pfa_err, "Predictive Failure Analysis", 8615 KSTAT_DATA_UINT32); 8616 8617 un->un_errstats->ks_private = un; 8618 un->un_errstats->ks_update = nulldev; 8619 8620 kstat_install(un->un_errstats); 8621 } 8622 8623 8624 /* 8625 * Function: sd_set_errstats 8626 * 8627 * Description: This routine sets the value of the vendor id, product id, 8628 * revision, serial number, and capacity device error stats. 8629 * 8630 * Note: During attach the stats are instantiated first so they are 8631 * available for attach-time routines that utilize the driver 8632 * iopath to send commands to the device. The stats are initialized 8633 * separately so data obtained during some attach-time routines is 8634 * available. 
(4362483) 8635 * 8636 * Arguments: un - driver soft state (unit) structure 8637 * 8638 * Context: Kernel thread context 8639 */ 8640 8641 static void 8642 sd_set_errstats(struct sd_lun *un) 8643 { 8644 struct sd_errstats *stp; 8645 8646 ASSERT(un != NULL); 8647 ASSERT(un->un_errstats != NULL); 8648 stp = (struct sd_errstats *)un->un_errstats->ks_data; 8649 ASSERT(stp != NULL); 8650 (void) strncpy(stp->sd_vid.value.c, un->un_sd->sd_inq->inq_vid, 8); 8651 (void) strncpy(stp->sd_pid.value.c, un->un_sd->sd_inq->inq_pid, 16); 8652 (void) strncpy(stp->sd_revision.value.c, 8653 un->un_sd->sd_inq->inq_revision, 4); 8654 8655 /* 8656 * All the errstats are persistent across detach/attach, 8657 * so reset all the errstats here in case of the hot 8658 * replacement of disk drives, except for unchanged 8659 * Sun qualified drives. 8660 */ 8661 if ((bcmp(&SD_INQUIRY(un)->inq_pid[9], "SUN", 3) != 0) || 8662 (bcmp(&SD_INQUIRY(un)->inq_serial, stp->sd_serial.value.c, 8663 sizeof (SD_INQUIRY(un)->inq_serial)) != 0)) { 8664 stp->sd_softerrs.value.ui32 = 0; 8665 stp->sd_harderrs.value.ui32 = 0; 8666 stp->sd_transerrs.value.ui32 = 0; 8667 stp->sd_rq_media_err.value.ui32 = 0; 8668 stp->sd_rq_ntrdy_err.value.ui32 = 0; 8669 stp->sd_rq_nodev_err.value.ui32 = 0; 8670 stp->sd_rq_recov_err.value.ui32 = 0; 8671 stp->sd_rq_illrq_err.value.ui32 = 0; 8672 stp->sd_rq_pfa_err.value.ui32 = 0; 8673 } 8674 8675 /* 8676 * Set the "Serial No" kstat for Sun qualified drives (indicated by 8677 * "SUN" in bytes 25-27 of the inquiry data (bytes 9-11 of the pid) 8678 * (4376302)) 8679 */ 8680 if (bcmp(&SD_INQUIRY(un)->inq_pid[9], "SUN", 3) == 0) { 8681 bcopy(&SD_INQUIRY(un)->inq_serial, stp->sd_serial.value.c, 8682 sizeof (SD_INQUIRY(un)->inq_serial)); 8683 } 8684 8685 if (un->un_f_blockcount_is_valid != TRUE) { 8686 /* 8687 * Set capacity error stat to 0 for no media. This ensures 8688 * a valid capacity is displayed in response to 'iostat -E' 8689 * when no media is present in the device. 8690 */ 8691 stp->sd_capacity.value.ui64 = 0; 8692 } else { 8693 /* 8694 * Multiply un_blockcount by un->un_sys_blocksize to get 8695 * capacity. 8696 * 8697 * Note: for non-512 blocksize devices "un_blockcount" has been 8698 * "scaled" in sd_send_scsi_READ_CAPACITY by multiplying by 8699 * (un_tgt_blocksize / un->un_sys_blocksize). 8700 */ 8701 stp->sd_capacity.value.ui64 = (uint64_t) 8702 ((uint64_t)un->un_blockcount * un->un_sys_blocksize); 8703 } 8704 } 8705 8706 8707 /* 8708 * Function: sd_set_pstats 8709 * 8710 * Description: This routine instantiates and initializes the partition 8711 * stats for each partition with more than zero blocks. 8712 * (4363169) 8713 * 8714 * Arguments: un - driver soft state (unit) structure 8715 * 8716 * Context: Kernel thread context 8717 */ 8718 8719 static void 8720 sd_set_pstats(struct sd_lun *un) 8721 { 8722 char kstatname[KSTAT_STRLEN]; 8723 int instance; 8724 int i; 8725 diskaddr_t nblks = 0; 8726 char *partname = NULL; 8727 8728 ASSERT(un != NULL); 8729 8730 instance = ddi_get_instance(SD_DEVINFO(un)); 8731 8732 /* Note:x86: is this a VTOC8/VTOC16 difference?
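 *
 * The kstats created below are named "<label><instance>,<partition>";
 * for example, instance 0, partition "a" yields a partition kstat
 * named "sd0,a".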
*/ 8733 for (i = 0; i < NSDMAP; i++) { 8734 8735 if (cmlb_partinfo(un->un_cmlbhandle, i, 8736 &nblks, NULL, &partname, NULL, (void *)SD_PATH_DIRECT) != 0) 8737 continue; 8738 mutex_enter(SD_MUTEX(un)); 8739 8740 if ((un->un_pstats[i] == NULL) && 8741 (nblks != 0)) { 8742 8743 (void) snprintf(kstatname, sizeof (kstatname), 8744 "%s%d,%s", sd_label, instance, 8745 partname); 8746 8747 un->un_pstats[i] = kstat_create(sd_label, 8748 instance, kstatname, "partition", KSTAT_TYPE_IO, 8749 1, KSTAT_FLAG_PERSISTENT); 8750 if (un->un_pstats[i] != NULL) { 8751 un->un_pstats[i]->ks_lock = SD_MUTEX(un); 8752 kstat_install(un->un_pstats[i]); 8753 } 8754 } 8755 mutex_exit(SD_MUTEX(un)); 8756 } 8757 } 8758 8759 8760 #if (defined(__fibre)) 8761 /* 8762 * Function: sd_init_event_callbacks 8763 * 8764 * Description: This routine initializes the insertion and removal event 8765 * callbacks. (fibre only) 8766 * 8767 * Arguments: un - driver soft state (unit) structure 8768 * 8769 * Context: Kernel thread context 8770 */ 8771 8772 static void 8773 sd_init_event_callbacks(struct sd_lun *un) 8774 { 8775 ASSERT(un != NULL); 8776 8777 if ((un->un_insert_event == NULL) && 8778 (ddi_get_eventcookie(SD_DEVINFO(un), FCAL_INSERT_EVENT, 8779 &un->un_insert_event) == DDI_SUCCESS)) { 8780 /* 8781 * Add the callback for an insertion event 8782 */ 8783 (void) ddi_add_event_handler(SD_DEVINFO(un), 8784 un->un_insert_event, sd_event_callback, (void *)un, 8785 &(un->un_insert_cb_id)); 8786 } 8787 8788 if ((un->un_remove_event == NULL) && 8789 (ddi_get_eventcookie(SD_DEVINFO(un), FCAL_REMOVE_EVENT, 8790 &un->un_remove_event) == DDI_SUCCESS)) { 8791 /* 8792 * Add the callback for a removal event 8793 */ 8794 (void) ddi_add_event_handler(SD_DEVINFO(un), 8795 un->un_remove_event, sd_event_callback, (void *)un, 8796 &(un->un_remove_cb_id)); 8797 } 8798 } 8799 8800 8801 /* 8802 * Function: sd_event_callback 8803 * 8804 * Description: This routine handles insert/remove events (photon). The 8805 * state is changed to OFFLINE, which can be used to suppress 8806 * error msgs. (fibre only) 8807 * 8808 * Arguments: un - driver soft state (unit) structure 8809 * 8810 * Context: Callout thread context 8811 */ 8812 /* ARGSUSED */ 8813 static void 8814 sd_event_callback(dev_info_t *dip, ddi_eventcookie_t event, void *arg, 8815 void *bus_impldata) 8816 { 8817 struct sd_lun *un = (struct sd_lun *)arg; 8818 8819 _NOTE(DATA_READABLE_WITHOUT_LOCK(sd_lun::un_insert_event)); 8820 if (event == un->un_insert_event) { 8821 SD_TRACE(SD_LOG_COMMON, un, "sd_event_callback: insert event"); 8822 mutex_enter(SD_MUTEX(un)); 8823 if (un->un_state == SD_STATE_OFFLINE) { 8824 if (un->un_last_state != SD_STATE_SUSPENDED) { 8825 un->un_state = un->un_last_state; 8826 } else { 8827 /* 8828 * We have gone through SUSPEND/RESUME while 8829 * we were offline. Restore the last state 8830 */ 8831 un->un_state = un->un_save_state; 8832 } 8833 } 8834 mutex_exit(SD_MUTEX(un)); 8835 8836 _NOTE(DATA_READABLE_WITHOUT_LOCK(sd_lun::un_remove_event)); 8837 } else if (event == un->un_remove_event) { 8838 SD_TRACE(SD_LOG_COMMON, un, "sd_event_callback: remove event"); 8839 mutex_enter(SD_MUTEX(un)); 8840 /* 8841 * We need to handle an event callback that occurs during 8842 * the suspend operation, since we don't prevent it.
8843 */ 8844 if (un->un_state != SD_STATE_OFFLINE) { 8845 if (un->un_state != SD_STATE_SUSPENDED) { 8846 New_state(un, SD_STATE_OFFLINE); 8847 } else { 8848 un->un_last_state = SD_STATE_OFFLINE; 8849 } 8850 } 8851 mutex_exit(SD_MUTEX(un)); 8852 } else { 8853 scsi_log(SD_DEVINFO(un), sd_label, CE_NOTE, 8854 "!Unknown event\n"); 8855 } 8856 8857 } 8858 #endif 8859 8860 /* 8861 * Function: sd_cache_control() 8862 * 8863 * Description: This routine is the driver entry point for setting 8864 * read and write caching by modifying the WCE (write cache 8865 * enable) and RCD (read cache disable) bits of mode 8866 * page 8 (MODEPAGE_CACHING). 8867 * 8868 * Arguments: ssc - ssc contains pointer to driver soft state (unit) 8869 * structure for this target. 8870 * rcd_flag - flag for controlling the read cache 8871 * wce_flag - flag for controlling the write cache 8872 * 8873 * Return Code: EIO 8874 * code returned by sd_send_scsi_MODE_SENSE and 8875 * sd_send_scsi_MODE_SELECT 8876 * 8877 * Context: Kernel Thread 8878 */ 8879 8880 static int 8881 sd_cache_control(sd_ssc_t *ssc, int rcd_flag, int wce_flag) 8882 { 8883 struct mode_caching *mode_caching_page; 8884 uchar_t *header; 8885 size_t buflen; 8886 int hdrlen; 8887 int bd_len; 8888 int rval = 0; 8889 struct mode_header_grp2 *mhp; 8890 struct sd_lun *un; 8891 int status; 8892 8893 ASSERT(ssc != NULL); 8894 un = ssc->ssc_un; 8895 ASSERT(un != NULL); 8896 8897 /* 8898 * Do a test unit ready, otherwise a mode sense may not work if this 8899 * is the first command sent to the device after boot. 8900 */ 8901 status = sd_send_scsi_TEST_UNIT_READY(ssc, 0); 8902 if (status != 0) 8903 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 8904 8905 if (un->un_f_cfg_is_atapi == TRUE) { 8906 hdrlen = MODE_HEADER_LENGTH_GRP2; 8907 } else { 8908 hdrlen = MODE_HEADER_LENGTH; 8909 } 8910 8911 /* 8912 * Allocate memory for the retrieved mode page and its headers. Set 8913 * a pointer to the page itself. Use mode_cache_scsi3 to insure 8914 * we get all of the mode sense data otherwise, the mode select 8915 * will fail. mode_cache_scsi3 is a superset of mode_caching. 8916 */ 8917 buflen = hdrlen + MODE_BLK_DESC_LENGTH + 8918 sizeof (struct mode_cache_scsi3); 8919 8920 header = kmem_zalloc(buflen, KM_SLEEP); 8921 8922 /* Get the information from the device. */ 8923 if (un->un_f_cfg_is_atapi == TRUE) { 8924 rval = sd_send_scsi_MODE_SENSE(ssc, CDB_GROUP1, header, buflen, 8925 MODEPAGE_CACHING, SD_PATH_DIRECT); 8926 } else { 8927 rval = sd_send_scsi_MODE_SENSE(ssc, CDB_GROUP0, header, buflen, 8928 MODEPAGE_CACHING, SD_PATH_DIRECT); 8929 } 8930 8931 if (rval != 0) { 8932 SD_ERROR(SD_LOG_IOCTL_RMMEDIA, un, 8933 "sd_cache_control: Mode Sense Failed\n"); 8934 goto mode_sense_failed; 8935 } 8936 8937 /* 8938 * Determine size of Block Descriptors in order to locate 8939 * the mode page data. ATAPI devices return 0, SCSI devices 8940 * should return MODE_BLK_DESC_LENGTH. 
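 *
 * As an illustrative sketch, the parameter data parsed here is
 * laid out as:
 *
 *	+--------------+----------------------+--------------------+
 *	| mode header  | block descriptor(s)  | caching mode page  |
 *	| (hdrlen)     | (bd_len, may be 0)   | (MODEPAGE_CACHING) |
 *	+--------------+----------------------+--------------------+
 *
 * which is why the page pointer is computed below as
 * header + hdrlen + bd_len.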
8941 */ 8942 if (un->un_f_cfg_is_atapi == TRUE) { 8943 mhp = (struct mode_header_grp2 *)header; 8944 bd_len = (mhp->bdesc_length_hi << 8) | mhp->bdesc_length_lo; 8945 } else { 8946 bd_len = ((struct mode_header *)header)->bdesc_length; 8947 } 8948 8949 if (bd_len > MODE_BLK_DESC_LENGTH) { 8950 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 8951 "sd_cache_control: Mode Sense returned invalid " 8952 "block descriptor length\n"); 8953 sd_ssc_set_info(ssc, SSC_FLAGS_INVALID_DATA, 8954 "sd_cache_control: Mode Sense returned invalid " 8955 "block descriptor length"); 8956 rval = EIO; 8957 goto mode_sense_failed; 8958 } 8959 8960 mode_caching_page = (struct mode_caching *)(header + hdrlen + bd_len); 8961 if (mode_caching_page->mode_page.code != MODEPAGE_CACHING) { 8962 SD_ERROR(SD_LOG_COMMON, un, "sd_cache_control: Mode Sense" 8963 " caching page code mismatch %d\n", 8964 mode_caching_page->mode_page.code); 8965 sd_ssc_set_info(ssc, SSC_FLAGS_INVALID_DATA, 8966 "sd_cache_control: Mode Sense caching page code " 8967 "mismatch %d", mode_caching_page->mode_page.code); 8968 rval = EIO; 8969 goto mode_sense_failed; 8970 } 8971 8972 /* Check the relevant bits on successful mode sense. */ 8973 if ((mode_caching_page->rcd && rcd_flag == SD_CACHE_ENABLE) || 8974 (!mode_caching_page->rcd && rcd_flag == SD_CACHE_DISABLE) || 8975 (mode_caching_page->wce && wce_flag == SD_CACHE_DISABLE) || 8976 (!mode_caching_page->wce && wce_flag == SD_CACHE_ENABLE)) { 8977 8978 size_t sbuflen; 8979 uchar_t save_pg; 8980 8981 /* 8982 * Construct select buffer length based on the 8983 * length of the sense data returned. 8984 */ 8985 sbuflen = hdrlen + MODE_BLK_DESC_LENGTH + 8986 sizeof (struct mode_page) + 8987 (int)mode_caching_page->mode_page.length; 8988 8989 /* 8990 * Set the caching bits as requested. 8991 */ 8992 if (rcd_flag == SD_CACHE_ENABLE) 8993 mode_caching_page->rcd = 0; 8994 else if (rcd_flag == SD_CACHE_DISABLE) 8995 mode_caching_page->rcd = 1; 8996 8997 if (wce_flag == SD_CACHE_ENABLE) 8998 mode_caching_page->wce = 1; 8999 else if (wce_flag == SD_CACHE_DISABLE) 9000 mode_caching_page->wce = 0; 9001 9002 /* 9003 * Save the page if the mode sense says the 9004 * drive supports it. 9005 */ 9006 save_pg = mode_caching_page->mode_page.ps ? 9007 SD_SAVE_PAGE : SD_DONTSAVE_PAGE; 9008 9009 /* Clear reserved bits before mode select. */ 9010 mode_caching_page->mode_page.ps = 0; 9011 9012 /* 9013 * Clear out mode header for mode select. 9014 * The rest of the retrieved page will be reused. 
9015 */ 9016 bzero(header, hdrlen); 9017 9018 if (un->un_f_cfg_is_atapi == TRUE) { 9019 mhp = (struct mode_header_grp2 *)header; 9020 mhp->bdesc_length_hi = bd_len >> 8; 9021 mhp->bdesc_length_lo = (uchar_t)bd_len & 0xff; 9022 } else { 9023 ((struct mode_header *)header)->bdesc_length = bd_len; 9024 } 9025 9026 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 9027 9028 /* Issue mode select to change the cache settings */ 9029 if (un->un_f_cfg_is_atapi == TRUE) { 9030 rval = sd_send_scsi_MODE_SELECT(ssc, CDB_GROUP1, header, 9031 sbuflen, save_pg, SD_PATH_DIRECT); 9032 } else { 9033 rval = sd_send_scsi_MODE_SELECT(ssc, CDB_GROUP0, header, 9034 sbuflen, save_pg, SD_PATH_DIRECT); 9035 } 9036 9037 } 9038 9039 9040 mode_sense_failed: 9041 9042 kmem_free(header, buflen); 9043 9044 if (rval != 0) { 9045 if (rval == EIO) 9046 sd_ssc_assessment(ssc, SD_FMT_STATUS_CHECK); 9047 else 9048 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 9049 } 9050 return (rval); 9051 } 9052 9053 9054 /* 9055 * Function: sd_get_write_cache_enabled() 9056 * 9057 * Description: This routine is the driver entry point for determining if 9058 * write caching is enabled. It examines the WCE (write cache 9059 * enable) bits of mode page 8 (MODEPAGE_CACHING). 9060 * 9061 * Arguments: ssc - ssc contains pointer to driver soft state (unit) 9062 * structure for this target. 9063 * is_enabled - pointer to int where write cache enabled state 9064 * is returned (non-zero -> write cache enabled) 9065 * 9066 * 9067 * Return Code: EIO 9068 * code returned by sd_send_scsi_MODE_SENSE 9069 * 9070 * Context: Kernel Thread 9071 * 9072 * NOTE: If ioctl is added to disable write cache, this sequence should 9073 * be followed so that no locking is required for accesses to 9074 * un->un_f_write_cache_enabled: 9075 * do mode select to clear wce 9076 * do synchronize cache to flush cache 9077 * set un->un_f_write_cache_enabled = FALSE 9078 * 9079 * Conversely, an ioctl to enable the write cache should be done 9080 * in this order: 9081 * set un->un_f_write_cache_enabled = TRUE 9082 * do mode select to set wce 9083 */ 9084 9085 static int 9086 sd_get_write_cache_enabled(sd_ssc_t *ssc, int *is_enabled) 9087 { 9088 struct mode_caching *mode_caching_page; 9089 uchar_t *header; 9090 size_t buflen; 9091 int hdrlen; 9092 int bd_len; 9093 int rval = 0; 9094 struct sd_lun *un; 9095 int status; 9096 9097 ASSERT(ssc != NULL); 9098 un = ssc->ssc_un; 9099 ASSERT(un != NULL); 9100 ASSERT(is_enabled != NULL); 9101 9102 /* in case of error, flag as enabled */ 9103 *is_enabled = TRUE; 9104 9105 /* 9106 * Do a test unit ready, otherwise a mode sense may not work if this 9107 * is the first command sent to the device after boot. 9108 */ 9109 status = sd_send_scsi_TEST_UNIT_READY(ssc, 0); 9110 9111 if (status != 0) 9112 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 9113 9114 if (un->un_f_cfg_is_atapi == TRUE) { 9115 hdrlen = MODE_HEADER_LENGTH_GRP2; 9116 } else { 9117 hdrlen = MODE_HEADER_LENGTH; 9118 } 9119 9120 /* 9121 * Allocate memory for the retrieved mode page and its headers. Set 9122 * a pointer to the page itself. 9123 */ 9124 buflen = hdrlen + MODE_BLK_DESC_LENGTH + sizeof (struct mode_caching); 9125 header = kmem_zalloc(buflen, KM_SLEEP); 9126 9127 /* Get the information from the device. 
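 * (CDB_GROUP1 selects the 10-byte MODE SENSE used for the ATAPI
 * case; CDB_GROUP0 selects the 6-byte MODE SENSE.)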
*/ 9128 if (un->un_f_cfg_is_atapi == TRUE) { 9129 rval = sd_send_scsi_MODE_SENSE(ssc, CDB_GROUP1, header, buflen, 9130 MODEPAGE_CACHING, SD_PATH_DIRECT); 9131 } else { 9132 rval = sd_send_scsi_MODE_SENSE(ssc, CDB_GROUP0, header, buflen, 9133 MODEPAGE_CACHING, SD_PATH_DIRECT); 9134 } 9135 9136 if (rval != 0) { 9137 SD_ERROR(SD_LOG_IOCTL_RMMEDIA, un, 9138 "sd_get_write_cache_enabled: Mode Sense Failed\n"); 9139 goto mode_sense_failed; 9140 } 9141 9142 /* 9143 * Determine size of Block Descriptors in order to locate 9144 * the mode page data. ATAPI devices return 0, SCSI devices 9145 * should return MODE_BLK_DESC_LENGTH. 9146 */ 9147 if (un->un_f_cfg_is_atapi == TRUE) { 9148 struct mode_header_grp2 *mhp; 9149 mhp = (struct mode_header_grp2 *)header; 9150 bd_len = (mhp->bdesc_length_hi << 8) | mhp->bdesc_length_lo; 9151 } else { 9152 bd_len = ((struct mode_header *)header)->bdesc_length; 9153 } 9154 9155 if (bd_len > MODE_BLK_DESC_LENGTH) { 9156 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 9157 "sd_get_write_cache_enabled: Mode Sense returned invalid " 9158 "block descriptor length\n"); 9159 /* FMA should make upset complain here */ 9160 sd_ssc_set_info(ssc, SSC_FLAGS_INVALID_DATA, 9161 "sd_get_write_cache_enabled: Mode Sense returned invalid " 9162 "block descriptor length %d", bd_len); 9163 rval = EIO; 9164 goto mode_sense_failed; 9165 } 9166 9167 mode_caching_page = (struct mode_caching *)(header + hdrlen + bd_len); 9168 if (mode_caching_page->mode_page.code != MODEPAGE_CACHING) { 9169 SD_ERROR(SD_LOG_COMMON, un, "sd_get_write_cache_enabled: Mode Sense" 9170 " caching page code mismatch %d\n", 9171 mode_caching_page->mode_page.code); 9172 /* FMA could make upset complain here */ 9173 sd_ssc_set_info(ssc, SSC_FLAGS_INVALID_DATA, 9174 "sd_get_write_cache_enabled: Mode Sense caching page code " 9175 "mismatch %d", mode_caching_page->mode_page.code); 9176 rval = EIO; 9177 goto mode_sense_failed; 9178 } 9179 *is_enabled = mode_caching_page->wce; 9180 9181 mode_sense_failed: 9182 if (rval == 0) { 9183 sd_ssc_assessment(ssc, SD_FMT_STANDARD); 9184 } else if (rval == EIO) { 9185 /* 9186 * Some disks do not support MODE SENSE(6); we 9187 * should ignore this kind of error (sense key is 9188 * 0x5, ILLEGAL REQUEST). 9189 */ 9190 uint8_t *sensep; 9191 int senlen; 9192 9193 sensep = (uint8_t *)ssc->ssc_uscsi_cmd->uscsi_rqbuf; 9194 senlen = (int)(ssc->ssc_uscsi_cmd->uscsi_rqlen - 9195 ssc->ssc_uscsi_cmd->uscsi_rqresid); 9196 9197 if (senlen > 0 && 9198 scsi_sense_key(sensep) == KEY_ILLEGAL_REQUEST) { 9199 sd_ssc_assessment(ssc, SD_FMT_IGNORE_COMPROMISE); 9200 } else { 9201 sd_ssc_assessment(ssc, SD_FMT_STATUS_CHECK); 9202 } 9203 } else { 9204 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 9205 } 9206 kmem_free(header, buflen); 9207 return (rval); 9208 } 9209 9210 /* 9211 * Function: sd_get_nv_sup() 9212 * 9213 * Description: This routine is the driver entry point for 9214 * determining whether non-volatile cache is supported. This 9215 * determination process works as follows: 9216 * 9217 * 1. sd first queries sd.conf on whether 9218 * suppress_cache_flush bit is set for this device. 9219 * 9220 * 2. if it is not set there, sd then queries the internal disk table. 9221 * 9222 * 3. if either sd.conf or the internal disk table specifies that 9223 * cache flush be suppressed, we don't bother checking the 9224 * NV_SUP bit. 9225 * 9226 * If SUPPRESS_CACHE_FLUSH bit is not set to 1, sd queries 9227 * the optional INQUIRY VPD page 0x86.
If the device 9228 * supports VPD page 0x86, sd examines the NV_SUP 9229 * (non-volatile cache support) bit in the INQUIRY VPD page 9230 * 0x86: 9231 * o If NV_SUP bit is set, sd assumes the device has a 9232 * non-volatile cache and set the 9233 * un_f_sync_nv_supported to TRUE. 9234 * o Otherwise cache is not non-volatile, 9235 * un_f_sync_nv_supported is set to FALSE. 9236 * 9237 * Arguments: un - driver soft state (unit) structure 9238 * 9239 * Return Code: 9240 * 9241 * Context: Kernel Thread 9242 */ 9243 9244 static void 9245 sd_get_nv_sup(sd_ssc_t *ssc) 9246 { 9247 int rval = 0; 9248 uchar_t *inq86 = NULL; 9249 size_t inq86_len = MAX_INQUIRY_SIZE; 9250 size_t inq86_resid = 0; 9251 struct dk_callback *dkc; 9252 struct sd_lun *un; 9253 9254 ASSERT(ssc != NULL); 9255 un = ssc->ssc_un; 9256 ASSERT(un != NULL); 9257 9258 mutex_enter(SD_MUTEX(un)); 9259 9260 /* 9261 * Be conservative on the device's support of 9262 * SYNC_NV bit: un_f_sync_nv_supported is 9263 * initialized to be false. 9264 */ 9265 un->un_f_sync_nv_supported = FALSE; 9266 9267 /* 9268 * If either sd.conf or internal disk table 9269 * specifies cache flush be suppressed, then 9270 * we don't bother checking NV_SUP bit. 9271 */ 9272 if (un->un_f_suppress_cache_flush == TRUE) { 9273 mutex_exit(SD_MUTEX(un)); 9274 return; 9275 } 9276 9277 if (sd_check_vpd_page_support(ssc) == 0 && 9278 un->un_vpd_page_mask & SD_VPD_EXTENDED_DATA_PG) { 9279 mutex_exit(SD_MUTEX(un)); 9280 /* collect page 86 data if available */ 9281 inq86 = kmem_zalloc(inq86_len, KM_SLEEP); 9282 9283 rval = sd_send_scsi_INQUIRY(ssc, inq86, inq86_len, 9284 0x01, 0x86, &inq86_resid); 9285 9286 if (rval == 0 && (inq86_len - inq86_resid > 6)) { 9287 SD_TRACE(SD_LOG_COMMON, un, 9288 "sd_get_nv_sup: \ 9289 successfully get VPD page: %x \ 9290 PAGE LENGTH: %x BYTE 6: %x\n", 9291 inq86[1], inq86[3], inq86[6]); 9292 9293 mutex_enter(SD_MUTEX(un)); 9294 /* 9295 * check the value of NV_SUP bit: only if the device 9296 * reports NV_SUP bit to be 1, the 9297 * un_f_sync_nv_supported bit will be set to true. 9298 */ 9299 if (inq86[6] & SD_VPD_NV_SUP) { 9300 un->un_f_sync_nv_supported = TRUE; 9301 } 9302 mutex_exit(SD_MUTEX(un)); 9303 } else if (rval != 0) { 9304 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 9305 } 9306 9307 kmem_free(inq86, inq86_len); 9308 } else { 9309 mutex_exit(SD_MUTEX(un)); 9310 } 9311 9312 /* 9313 * Send a SYNC CACHE command to check whether 9314 * SYNC_NV bit is supported. This command should have 9315 * un_f_sync_nv_supported set to correct value. 9316 */ 9317 mutex_enter(SD_MUTEX(un)); 9318 if (un->un_f_sync_nv_supported) { 9319 mutex_exit(SD_MUTEX(un)); 9320 dkc = kmem_zalloc(sizeof (struct dk_callback), KM_SLEEP); 9321 dkc->dkc_flag = FLUSH_VOLATILE; 9322 (void) sd_send_scsi_SYNCHRONIZE_CACHE(un, dkc); 9323 9324 /* 9325 * Send a TEST UNIT READY command to the device. This should 9326 * clear any outstanding UNIT ATTENTION that may be present. 9327 */ 9328 rval = sd_send_scsi_TEST_UNIT_READY(ssc, SD_DONT_RETRY_TUR); 9329 if (rval != 0) 9330 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 9331 9332 kmem_free(dkc, sizeof (struct dk_callback)); 9333 } else { 9334 mutex_exit(SD_MUTEX(un)); 9335 } 9336 9337 SD_TRACE(SD_LOG_COMMON, un, "sd_get_nv_sup: \ 9338 un_f_suppress_cache_flush is set to %d\n", 9339 un->un_f_suppress_cache_flush); 9340 } 9341 9342 /* 9343 * Function: sd_make_device 9344 * 9345 * Description: Utility routine to return the Solaris device number from 9346 * the data in the device's dev_info structure. 
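 * For example (illustrative only, the actual shift is defined in
 * sddef.h): with an SDUNIT_SHIFT of 3, instance 2 would yield
 * minor number 16, and SDPART() would recover the partition from
 * the low-order bits.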
9347 * 9348 * Return Code: The Solaris device number 9349 * 9350 * Context: Any 9351 */ 9352 9353 static dev_t 9354 sd_make_device(dev_info_t *devi) 9355 { 9356 return (makedevice(ddi_name_to_major(ddi_get_name(devi)), 9357 ddi_get_instance(devi) << SDUNIT_SHIFT)); 9358 } 9359 9360 9361 /* 9362 * Function: sd_pm_entry 9363 * 9364 * Description: Called at the start of a new command to manage power 9365 * and busy status of a device. This includes determining whether 9366 * the current power state of the device is sufficient for 9367 * performing the command or whether it must be changed. 9368 * The PM framework is notified appropriately. 9369 * Only with a return status of DDI_SUCCESS will the 9370 * component be marked busy to the framework. 9371 * 9372 * All callers of sd_pm_entry must check the return status 9373 * and only call sd_pm_exit if it was DDI_SUCCESS. A status 9374 * of DDI_FAILURE indicates the device failed to power up. 9375 * In this case un_pm_count has been adjusted so the result 9376 * on exit is still powered down, i.e. count is less than 0. 9377 * Calling sd_pm_exit with this count value hits an ASSERT. 9378 * 9379 * Return Code: DDI_SUCCESS or DDI_FAILURE 9380 * 9381 * Context: Kernel thread context. 9382 */ 9383 9384 static int 9385 sd_pm_entry(struct sd_lun *un) 9386 { 9387 int return_status = DDI_SUCCESS; 9388 9389 ASSERT(!mutex_owned(SD_MUTEX(un))); 9390 ASSERT(!mutex_owned(&un->un_pm_mutex)); 9391 9392 SD_TRACE(SD_LOG_IO_PM, un, "sd_pm_entry: entry\n"); 9393 9394 if (un->un_f_pm_is_enabled == FALSE) { 9395 SD_TRACE(SD_LOG_IO_PM, un, 9396 "sd_pm_entry: exiting, PM not enabled\n"); 9397 return (return_status); 9398 } 9399 9400 /* 9401 * Just increment a counter if PM is enabled. On the transition from 9402 * 0 ==> 1, mark the device as busy. The iodone side will decrement 9403 * the count with each IO and mark the device as idle when the count 9404 * hits 0. 9405 * 9406 * If the count is less than 0 the device is powered down. If a powered 9407 * down device is successfully powered up then the count must be 9408 * incremented to reflect the power up. Note that it'll get incremented 9409 * a second time to become busy. 9410 * 9411 * Because the following has the potential to change the device state 9412 * and must release the un_pm_mutex to do so, only one thread can be 9413 * allowed through at a time. 9414 */ 9415 9416 mutex_enter(&un->un_pm_mutex); 9417 while (un->un_pm_busy == TRUE) { 9418 cv_wait(&un->un_pm_busy_cv, &un->un_pm_mutex); 9419 } 9420 un->un_pm_busy = TRUE; 9421 9422 if (un->un_pm_count < 1) { 9423 9424 SD_TRACE(SD_LOG_IO_PM, un, "sd_pm_entry: busy component\n"); 9425 9426 /* 9427 * Indicate we are now busy so the framework won't attempt to 9428 * power down the device. This call will only fail if either 9429 * we passed a bad component number or the device has no 9430 * components. Neither of these should ever happen. 9431 */ 9432 mutex_exit(&un->un_pm_mutex); 9433 return_status = pm_busy_component(SD_DEVINFO(un), 0); 9434 ASSERT(return_status == DDI_SUCCESS); 9435 9436 mutex_enter(&un->un_pm_mutex); 9437 9438 if (un->un_pm_count < 0) { 9439 mutex_exit(&un->un_pm_mutex); 9440 9441 SD_TRACE(SD_LOG_IO_PM, un, 9442 "sd_pm_entry: power up component\n"); 9443 9444 /* 9445 * pm_raise_power will cause sdpower to be called 9446 * which brings the device power level to the 9447 * desired state, ON in this case. If successful, 9448 * un_pm_count and un_power_level will be updated 9449 * appropriately.
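 * (pm_raise_power(9F) blocks until the transition completes,
 * which is why un_pm_mutex is dropped before the call and
 * re-acquired afterward.)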
9450 */ 9451 return_status = pm_raise_power(SD_DEVINFO(un), 0, 9452 SD_SPINDLE_ON); 9453 9454 mutex_enter(&un->un_pm_mutex); 9455 9456 if (return_status != DDI_SUCCESS) { 9457 /* 9458 * Power up failed. 9459 * Idle the device and adjust the count 9460 * so the result on exit is that we're 9461 * still powered down, ie. count is less than 0. 9462 */ 9463 SD_TRACE(SD_LOG_IO_PM, un, 9464 "sd_pm_entry: power up failed," 9465 " idle the component\n"); 9466 9467 (void) pm_idle_component(SD_DEVINFO(un), 0); 9468 un->un_pm_count--; 9469 } else { 9470 /* 9471 * Device is powered up, verify the 9472 * count is non-negative. 9473 * This is debug only. 9474 */ 9475 ASSERT(un->un_pm_count == 0); 9476 } 9477 } 9478 9479 if (return_status == DDI_SUCCESS) { 9480 /* 9481 * For performance, now that the device has been tagged 9482 * as busy, and it's known to be powered up, update the 9483 * chain types to use jump tables that do not include 9484 * pm. This significantly lowers the overhead and 9485 * therefore improves performance. 9486 */ 9487 9488 mutex_exit(&un->un_pm_mutex); 9489 mutex_enter(SD_MUTEX(un)); 9490 SD_TRACE(SD_LOG_IO_PM, un, 9491 "sd_pm_entry: changing uscsi_chain_type from %d\n", 9492 un->un_uscsi_chain_type); 9493 9494 if (un->un_f_non_devbsize_supported) { 9495 un->un_buf_chain_type = 9496 SD_CHAIN_INFO_RMMEDIA_NO_PM; 9497 } else { 9498 un->un_buf_chain_type = 9499 SD_CHAIN_INFO_DISK_NO_PM; 9500 } 9501 un->un_uscsi_chain_type = SD_CHAIN_INFO_USCSI_CMD_NO_PM; 9502 9503 SD_TRACE(SD_LOG_IO_PM, un, 9504 " changed uscsi_chain_type to %d\n", 9505 un->un_uscsi_chain_type); 9506 mutex_exit(SD_MUTEX(un)); 9507 mutex_enter(&un->un_pm_mutex); 9508 9509 if (un->un_pm_idle_timeid == NULL) { 9510 /* 300 ms. */ 9511 un->un_pm_idle_timeid = 9512 timeout(sd_pm_idletimeout_handler, un, 9513 (drv_usectohz((clock_t)300000))); 9514 /* 9515 * Include an extra call to busy which keeps the 9516 * device busy with-respect-to the PM layer 9517 * until the timer fires, at which time it'll 9518 * get the extra idle call. 9519 */ 9520 (void) pm_busy_component(SD_DEVINFO(un), 0); 9521 } 9522 } 9523 } 9524 un->un_pm_busy = FALSE; 9525 /* Next... */ 9526 cv_signal(&un->un_pm_busy_cv); 9527 9528 un->un_pm_count++; 9529 9530 SD_TRACE(SD_LOG_IO_PM, un, 9531 "sd_pm_entry: exiting, un_pm_count = %d\n", un->un_pm_count); 9532 9533 mutex_exit(&un->un_pm_mutex); 9534 9535 return (return_status); 9536 } 9537 9538 9539 /* 9540 * Function: sd_pm_exit 9541 * 9542 * Description: Called at the completion of a command to manage busy 9543 * status for the device. If the device becomes idle the 9544 * PM framework is notified. 9545 * 9546 * Context: Kernel thread context 9547 */ 9548 9549 static void 9550 sd_pm_exit(struct sd_lun *un) 9551 { 9552 ASSERT(!mutex_owned(SD_MUTEX(un))); 9553 ASSERT(!mutex_owned(&un->un_pm_mutex)); 9554 9555 SD_TRACE(SD_LOG_IO_PM, un, "sd_pm_exit: entry\n"); 9556 9557 /* 9558 * After attach the following flag is only read, so don't 9559 * take the penalty of acquiring a mutex for it. 
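 *
 * A minimal usage sketch of the entry/exit pairing (illustrative;
 * sdclose() uses this same pattern):
 *
 *	if (sd_pm_entry(un) == DDI_SUCCESS) {
 *		(void) sd_send_scsi_SYNCHRONIZE_CACHE(un, NULL);
 *		sd_pm_exit(un);
 *	}
 *
 * Exactly one sd_pm_exit() per successful sd_pm_entry() keeps
 * un_pm_count balanced.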
9560 */ 9561 if (un->un_f_pm_is_enabled == TRUE) { 9562 9563 mutex_enter(&un->un_pm_mutex); 9564 un->un_pm_count--; 9565 9566 SD_TRACE(SD_LOG_IO_PM, un, 9567 "sd_pm_exit: un_pm_count = %d\n", un->un_pm_count); 9568 9569 ASSERT(un->un_pm_count >= 0); 9570 if (un->un_pm_count == 0) { 9571 mutex_exit(&un->un_pm_mutex); 9572 9573 SD_TRACE(SD_LOG_IO_PM, un, 9574 "sd_pm_exit: idle component\n"); 9575 9576 (void) pm_idle_component(SD_DEVINFO(un), 0); 9577 9578 } else { 9579 mutex_exit(&un->un_pm_mutex); 9580 } 9581 } 9582 9583 SD_TRACE(SD_LOG_IO_PM, un, "sd_pm_exit: exiting\n"); 9584 } 9585 9586 9587 /* 9588 * Function: sdopen 9589 * 9590 * Description: Driver's open(9e) entry point function. 9591 * 9592 * Arguments: dev_i - pointer to device number 9593 * flag - how to open file (FEXCL, FNDELAY, FREAD, FWRITE) 9594 * otyp - open type (OTYP_BLK, OTYP_CHR, OTYP_LYR) 9595 * cred_p - user credential pointer 9596 * 9597 * Return Code: EINVAL 9598 * ENXIO 9599 * EIO 9600 * EROFS 9601 * EBUSY 9602 * 9603 * Context: Kernel thread context 9604 */ 9605 /* ARGSUSED */ 9606 static int 9607 sdopen(dev_t *dev_p, int flag, int otyp, cred_t *cred_p) 9608 { 9609 struct sd_lun *un; 9610 int nodelay; 9611 int part; 9612 uint64_t partmask; 9613 int instance; 9614 dev_t dev; 9615 int rval = EIO; 9616 diskaddr_t nblks = 0; 9617 diskaddr_t label_cap; 9618 9619 /* Validate the open type */ 9620 if (otyp >= OTYPCNT) { 9621 return (EINVAL); 9622 } 9623 9624 dev = *dev_p; 9625 instance = SDUNIT(dev); 9626 mutex_enter(&sd_detach_mutex); 9627 9628 /* 9629 * Fail the open if there is no softstate for the instance, or 9630 * if another thread somewhere is trying to detach the instance. 9631 */ 9632 if (((un = ddi_get_soft_state(sd_state, instance)) == NULL) || 9633 (un->un_detach_count != 0)) { 9634 mutex_exit(&sd_detach_mutex); 9635 /* 9636 * The probe cache only needs to be cleared when open (9e) fails 9637 * with ENXIO (4238046). 9638 */ 9639 /* 9640 * un-conditionally clearing probe cache is ok with 9641 * separate sd/ssd binaries 9642 * x86 platform can be an issue with both parallel 9643 * and fibre in 1 binary 9644 */ 9645 sd_scsi_clear_probe_cache(); 9646 return (ENXIO); 9647 } 9648 9649 /* 9650 * The un_layer_count is to prevent another thread in specfs from 9651 * trying to detach the instance, which can happen when we are 9652 * called from a higher-layer driver instead of thru specfs. 9653 * This will not be needed when DDI provides a layered driver 9654 * interface that allows specfs to know that an instance is in 9655 * use by a layered driver & should not be detached. 9656 * 9657 * Note: the semantics for layered driver opens are exactly one 9658 * close for every open. 9659 */ 9660 if (otyp == OTYP_LYR) { 9661 un->un_layer_count++; 9662 } 9663 9664 /* 9665 * Keep a count of the current # of opens in progress. This is because 9666 * some layered drivers try to call us as a regular open. This can 9667 * cause problems that we cannot prevent, however by keeping this count 9668 * we can at least keep our open and detach routines from racing against 9669 * each other under such conditions. 9670 */ 9671 un->un_opens_in_progress++; 9672 mutex_exit(&sd_detach_mutex); 9673 9674 nodelay = (flag & (FNDELAY | FNONBLOCK)); 9675 part = SDPART(dev); 9676 partmask = 1 << part; 9677 9678 /* 9679 * We use a semaphore here in order to serialize 9680 * open and close requests on the device. 
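 * (A semaphore is used rather than a mutex because the holder
 * may block, e.g. in cv_wait() or while issuing device I/O,
 * for the duration of the open.)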
9681 */ 9682 sema_p(&un->un_semoclose); 9683 9684 mutex_enter(SD_MUTEX(un)); 9685 9686 /* 9687 * All device accesses go thru sdstrategy() where we check 9688 * on suspend status but there could be a scsi_poll command, 9689 * which bypasses sdstrategy(), so we need to check pm 9690 * status. 9691 */ 9692 9693 if (!nodelay) { 9694 while ((un->un_state == SD_STATE_SUSPENDED) || 9695 (un->un_state == SD_STATE_PM_CHANGING)) { 9696 cv_wait(&un->un_suspend_cv, SD_MUTEX(un)); 9697 } 9698 9699 mutex_exit(SD_MUTEX(un)); 9700 if (sd_pm_entry(un) != DDI_SUCCESS) { 9701 rval = EIO; 9702 SD_ERROR(SD_LOG_OPEN_CLOSE, un, 9703 "sdopen: sd_pm_entry failed\n"); 9704 goto open_failed_with_pm; 9705 } 9706 mutex_enter(SD_MUTEX(un)); 9707 } 9708 9709 /* check for previous exclusive open */ 9710 SD_TRACE(SD_LOG_OPEN_CLOSE, un, "sdopen: un=%p\n", (void *)un); 9711 SD_TRACE(SD_LOG_OPEN_CLOSE, un, 9712 "sdopen: exclopen=%x, flag=%x, regopen=%x\n", 9713 un->un_exclopen, flag, un->un_ocmap.regopen[otyp]); 9714 9715 if (un->un_exclopen & (partmask)) { 9716 goto excl_open_fail; 9717 } 9718 9719 if (flag & FEXCL) { 9720 int i; 9721 if (un->un_ocmap.lyropen[part]) { 9722 goto excl_open_fail; 9723 } 9724 for (i = 0; i < (OTYPCNT - 1); i++) { 9725 if (un->un_ocmap.regopen[i] & (partmask)) { 9726 goto excl_open_fail; 9727 } 9728 } 9729 } 9730 9731 /* 9732 * Check the write permission if this is a removable media device, 9733 * NDELAY has not been set, and writable permission is requested. 9734 * 9735 * Note: If NDELAY was set and this is write-protected media the WRITE 9736 * attempt will fail with EIO as part of the I/O processing. This is a 9737 * more permissive implementation that allows the open to succeed and 9738 * WRITE attempts to fail when appropriate. 9739 */ 9740 if (un->un_f_chk_wp_open) { 9741 if ((flag & FWRITE) && (!nodelay)) { 9742 mutex_exit(SD_MUTEX(un)); 9743 /* 9744 * Defer the check for write permission on writable 9745 * DVD drive till sdstrategy and will not fail open even 9746 * if FWRITE is set as the device can be writable 9747 * depending upon the media and the media can change 9748 * after the call to open(). 9749 */ 9750 if (un->un_f_dvdram_writable_device == FALSE) { 9751 if (ISCD(un) || sr_check_wp(dev)) { 9752 rval = EROFS; 9753 mutex_enter(SD_MUTEX(un)); 9754 SD_ERROR(SD_LOG_OPEN_CLOSE, un, "sdopen: " 9755 "write to cd or write protected media\n"); 9756 goto open_fail; 9757 } 9758 } 9759 mutex_enter(SD_MUTEX(un)); 9760 } 9761 } 9762 9763 /* 9764 * If opening in NDELAY/NONBLOCK mode, just return. 9765 * Check if disk is ready and has a valid geometry later. 9766 */ 9767 if (!nodelay) { 9768 sd_ssc_t *ssc; 9769 9770 mutex_exit(SD_MUTEX(un)); 9771 ssc = sd_ssc_init(un); 9772 rval = sd_ready_and_valid(ssc, part); 9773 sd_ssc_fini(ssc); 9774 mutex_enter(SD_MUTEX(un)); 9775 /* 9776 * Fail if device is not ready or if the number of disk 9777 * blocks is zero or negative for non CD devices. 9778 */ 9779 9780 nblks = 0; 9781 9782 if (rval == SD_READY_VALID && (!ISCD(un))) { 9783 /* if cmlb_partinfo fails, nblks remains 0 */ 9784 mutex_exit(SD_MUTEX(un)); 9785 (void) cmlb_partinfo(un->un_cmlbhandle, part, &nblks, 9786 NULL, NULL, NULL, (void *)SD_PATH_DIRECT); 9787 mutex_enter(SD_MUTEX(un)); 9788 } 9789 9790 if ((rval != SD_READY_VALID) || 9791 (!ISCD(un) && nblks <= 0)) { 9792 rval = un->un_f_has_removable_media ? 
ENXIO : EIO; 9793 SD_ERROR(SD_LOG_OPEN_CLOSE, un, "sdopen: " 9794 "device not ready or invalid disk block value\n"); 9795 goto open_fail; 9796 } 9797 #if defined(__i386) || defined(__amd64) 9798 } else { 9799 uchar_t *cp; 9800 /* 9801 * x86 requires special nodelay handling, so that p0 is 9802 * always defined and accessible. 9803 * Invalidate geometry only if device is not already open. 9804 */ 9805 cp = &un->un_ocmap.chkd[0]; 9806 while (cp < &un->un_ocmap.chkd[OCSIZE]) { 9807 if (*cp != (uchar_t)0) { 9808 break; 9809 } 9810 cp++; 9811 } 9812 if (cp == &un->un_ocmap.chkd[OCSIZE]) { 9813 mutex_exit(SD_MUTEX(un)); 9814 cmlb_invalidate(un->un_cmlbhandle, 9815 (void *)SD_PATH_DIRECT); 9816 mutex_enter(SD_MUTEX(un)); 9817 } 9818 9819 #endif 9820 } 9821 9822 if (otyp == OTYP_LYR) { 9823 un->un_ocmap.lyropen[part]++; 9824 } else { 9825 un->un_ocmap.regopen[otyp] |= partmask; 9826 } 9827 9828 /* Set up open and exclusive open flags */ 9829 if (flag & FEXCL) { 9830 un->un_exclopen |= (partmask); 9831 } 9832 9833 /* 9834 * If the lun is EFI labeled and lun capacity is greater than the 9835 * capacity contained in the label, log a sys-event to notify the 9836 * interested module. 9837 * To avoid an infinite loop of logging sys-event, we only log the 9838 * event when the lun is not opened in NDELAY mode. The event handler 9839 * should open the lun in NDELAY mode. 9840 */ 9841 if (!(flag & FNDELAY)) { 9842 mutex_exit(SD_MUTEX(un)); 9843 if (cmlb_efi_label_capacity(un->un_cmlbhandle, &label_cap, 9844 (void*)SD_PATH_DIRECT) == 0) { 9845 mutex_enter(SD_MUTEX(un)); 9846 if (un->un_f_blockcount_is_valid && 9847 un->un_blockcount > label_cap) { 9848 mutex_exit(SD_MUTEX(un)); 9849 sd_log_lun_expansion_event(un, 9850 (nodelay ? KM_NOSLEEP : KM_SLEEP)); 9851 mutex_enter(SD_MUTEX(un)); 9852 } 9853 } else { 9854 mutex_enter(SD_MUTEX(un)); 9855 } 9856 } 9857 9858 SD_TRACE(SD_LOG_OPEN_CLOSE, un, "sdopen: " 9859 "open of part %d type %d\n", part, otyp); 9860 9861 mutex_exit(SD_MUTEX(un)); 9862 if (!nodelay) { 9863 sd_pm_exit(un); 9864 } 9865 9866 sema_v(&un->un_semoclose); 9867 9868 mutex_enter(&sd_detach_mutex); 9869 un->un_opens_in_progress--; 9870 mutex_exit(&sd_detach_mutex); 9871 9872 SD_TRACE(SD_LOG_OPEN_CLOSE, un, "sdopen: exit success\n"); 9873 return (DDI_SUCCESS); 9874 9875 excl_open_fail: 9876 SD_ERROR(SD_LOG_OPEN_CLOSE, un, "sdopen: fail exclusive open\n"); 9877 rval = EBUSY; 9878 9879 open_fail: 9880 mutex_exit(SD_MUTEX(un)); 9881 9882 /* 9883 * On a failed open we must exit the pm management. 9884 */ 9885 if (!nodelay) { 9886 sd_pm_exit(un); 9887 } 9888 open_failed_with_pm: 9889 sema_v(&un->un_semoclose); 9890 9891 mutex_enter(&sd_detach_mutex); 9892 un->un_opens_in_progress--; 9893 if (otyp == OTYP_LYR) { 9894 un->un_layer_count--; 9895 } 9896 mutex_exit(&sd_detach_mutex); 9897 9898 return (rval); 9899 } 9900 9901 9902 /* 9903 * Function: sdclose 9904 * 9905 * Description: Driver's close(9e) entry point function. 
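 * (For OTYP_LYR the expected semantics are exactly one close
 * per open; see the layered-open accounting in sdopen.)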
9906 * 9907 * Arguments: dev - device number 9908 * flag - file status flag, informational only 9909 * otyp - close type (OTYP_BLK, OTYP_CHR, OTYP_LYR) 9910 * cred_p - user credential pointer 9911 * 9912 * Return Code: ENXIO 9913 * 9914 * Context: Kernel thread context 9915 */ 9916 /* ARGSUSED */ 9917 static int 9918 sdclose(dev_t dev, int flag, int otyp, cred_t *cred_p) 9919 { 9920 struct sd_lun *un; 9921 uchar_t *cp; 9922 int part; 9923 int nodelay; 9924 int rval = 0; 9925 9926 /* Validate the open type */ 9927 if (otyp >= OTYPCNT) { 9928 return (ENXIO); 9929 } 9930 9931 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 9932 return (ENXIO); 9933 } 9934 9935 part = SDPART(dev); 9936 nodelay = flag & (FNDELAY | FNONBLOCK); 9937 9938 SD_TRACE(SD_LOG_OPEN_CLOSE, un, 9939 "sdclose: close of part %d type %d\n", part, otyp); 9940 9941 /* 9942 * We use a semaphore here in order to serialize 9943 * open and close requests on the device. 9944 */ 9945 sema_p(&un->un_semoclose); 9946 9947 mutex_enter(SD_MUTEX(un)); 9948 9949 /* Don't proceed if power is being changed. */ 9950 while (un->un_state == SD_STATE_PM_CHANGING) { 9951 cv_wait(&un->un_suspend_cv, SD_MUTEX(un)); 9952 } 9953 9954 if (un->un_exclopen & (1 << part)) { 9955 un->un_exclopen &= ~(1 << part); 9956 } 9957 9958 /* Update the open partition map */ 9959 if (otyp == OTYP_LYR) { 9960 un->un_ocmap.lyropen[part] -= 1; 9961 } else { 9962 un->un_ocmap.regopen[otyp] &= ~(1 << part); 9963 } 9964 9965 cp = &un->un_ocmap.chkd[0]; 9966 while (cp < &un->un_ocmap.chkd[OCSIZE]) { 9967 if (*cp != (uchar_t)0) { 9968 break; 9969 } 9970 cp++; 9971 } 9972 9973 if (cp == &un->un_ocmap.chkd[OCSIZE]) { 9974 SD_TRACE(SD_LOG_OPEN_CLOSE, un, "sdclose: last close\n"); 9975 9976 /* 9977 * We avoid persistence upon the last close, and set 9978 * the throttle back to the maximum. 9979 */ 9980 un->un_throttle = un->un_saved_throttle; 9981 9982 if (un->un_state == SD_STATE_OFFLINE) { 9983 if (un->un_f_is_fibre == FALSE) { 9984 scsi_log(SD_DEVINFO(un), sd_label, 9985 CE_WARN, "offline\n"); 9986 } 9987 mutex_exit(SD_MUTEX(un)); 9988 cmlb_invalidate(un->un_cmlbhandle, 9989 (void *)SD_PATH_DIRECT); 9990 mutex_enter(SD_MUTEX(un)); 9991 9992 } else { 9993 /* 9994 * Flush any outstanding writes in NVRAM cache. 9995 * Note: SYNCHRONIZE CACHE is an optional SCSI-2 9996 * cmd; it may not work for non-Pluto devices. 9997 * SYNCHRONIZE CACHE is not required for removables, 9998 * except DVD-RAM drives. 9999 * 10000 * Also note: because SYNCHRONIZE CACHE is currently 10001 * the only command issued here that requires the 10002 * drive be powered up, only do the power up before 10003 * sending the Sync Cache command. If additional 10004 * commands are added which require a powered up 10005 * drive, the following sequence may have to change. 10006 * 10007 * And finally, note that parallel SCSI on SPARC 10008 * only issues a Sync Cache to DVD-RAM, a newly 10009 * supported device.
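 *
 * (On x86 the flush is also issued when the device both supports
 * and requires it: un_f_sync_cache_required is set in
 * sdstrategy() whenever a B_WRITE buf passes through, so the
 * close-time flush is only sent when writes may be outstanding.)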
10010 */ 10011 #if defined(__i386) || defined(__amd64) 10012 if ((un->un_f_sync_cache_supported && 10013 un->un_f_sync_cache_required) || 10014 un->un_f_dvdram_writable_device == TRUE) { 10015 #else 10016 if (un->un_f_dvdram_writable_device == TRUE) { 10017 #endif 10018 mutex_exit(SD_MUTEX(un)); 10019 if (sd_pm_entry(un) == DDI_SUCCESS) { 10020 rval = 10021 sd_send_scsi_SYNCHRONIZE_CACHE(un, 10022 NULL); 10023 /* ignore error if not supported */ 10024 if (rval == ENOTSUP) { 10025 rval = 0; 10026 } else if (rval != 0) { 10027 rval = EIO; 10028 } 10029 sd_pm_exit(un); 10030 } else { 10031 rval = EIO; 10032 } 10033 mutex_enter(SD_MUTEX(un)); 10034 } 10035 10036 /* 10037 * For devices which supports DOOR_LOCK, send an ALLOW 10038 * MEDIA REMOVAL command, but don't get upset if it 10039 * fails. We need to raise the power of the drive before 10040 * we can call sd_send_scsi_DOORLOCK() 10041 */ 10042 if (un->un_f_doorlock_supported) { 10043 mutex_exit(SD_MUTEX(un)); 10044 if (sd_pm_entry(un) == DDI_SUCCESS) { 10045 sd_ssc_t *ssc; 10046 10047 ssc = sd_ssc_init(un); 10048 rval = sd_send_scsi_DOORLOCK(ssc, 10049 SD_REMOVAL_ALLOW, SD_PATH_DIRECT); 10050 if (rval != 0) 10051 sd_ssc_assessment(ssc, 10052 SD_FMT_IGNORE); 10053 sd_ssc_fini(ssc); 10054 10055 sd_pm_exit(un); 10056 if (ISCD(un) && (rval != 0) && 10057 (nodelay != 0)) { 10058 rval = ENXIO; 10059 } 10060 } else { 10061 rval = EIO; 10062 } 10063 mutex_enter(SD_MUTEX(un)); 10064 } 10065 10066 /* 10067 * If a device has removable media, invalidate all 10068 * parameters related to media, such as geometry, 10069 * blocksize, and blockcount. 10070 */ 10071 if (un->un_f_has_removable_media) { 10072 sr_ejected(un); 10073 } 10074 10075 /* 10076 * Destroy the cache (if it exists) which was 10077 * allocated for the write maps since this is 10078 * the last close for this media. 10079 */ 10080 if (un->un_wm_cache) { 10081 /* 10082 * Check if there are pending commands. 10083 * and if there are give a warning and 10084 * do not destroy the cache. 10085 */ 10086 if (un->un_ncmds_in_driver > 0) { 10087 scsi_log(SD_DEVINFO(un), 10088 sd_label, CE_WARN, 10089 "Unable to clean up memory " 10090 "because of pending I/O\n"); 10091 } else { 10092 kmem_cache_destroy( 10093 un->un_wm_cache); 10094 un->un_wm_cache = NULL; 10095 } 10096 } 10097 } 10098 } 10099 10100 mutex_exit(SD_MUTEX(un)); 10101 sema_v(&un->un_semoclose); 10102 10103 if (otyp == OTYP_LYR) { 10104 mutex_enter(&sd_detach_mutex); 10105 /* 10106 * The detach routine may run when the layer count 10107 * drops to zero. 10108 */ 10109 un->un_layer_count--; 10110 mutex_exit(&sd_detach_mutex); 10111 } 10112 10113 return (rval); 10114 } 10115 10116 10117 /* 10118 * Function: sd_ready_and_valid 10119 * 10120 * Description: Test if device is ready and has a valid geometry. 10121 * 10122 * Arguments: ssc - sd_ssc_t will contain un 10123 * un - driver soft state (unit) structure 10124 * 10125 * Return Code: SD_READY_VALID ready and valid label 10126 * SD_NOT_READY_VALID not ready, no label 10127 * SD_RESERVED_BY_OTHERS reservation conflict 10128 * 10129 * Context: Never called at interrupt context. 
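 *
 * (The routine can also return ENOMEM if the write-map cache
 * needed for non-512-byte-block devices cannot be allocated;
 * see the kmem_cache_create() call below.)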
10130 */ 10131 10132 static int 10133 sd_ready_and_valid(sd_ssc_t *ssc, int part) 10134 { 10135 struct sd_errstats *stp; 10136 uint64_t capacity; 10137 uint_t lbasize; 10138 int rval = SD_READY_VALID; 10139 char name_str[48]; 10140 int is_valid; 10141 struct sd_lun *un; 10142 int status; 10143 10144 ASSERT(ssc != NULL); 10145 un = ssc->ssc_un; 10146 ASSERT(un != NULL); 10147 ASSERT(!mutex_owned(SD_MUTEX(un))); 10148 10149 mutex_enter(SD_MUTEX(un)); 10150 /* 10151 * If a device has removable media, we must check if media is 10152 * ready when checking if this device is ready and valid. 10153 */ 10154 if (un->un_f_has_removable_media) { 10155 mutex_exit(SD_MUTEX(un)); 10156 status = sd_send_scsi_TEST_UNIT_READY(ssc, 0); 10157 10158 if (status != 0) { 10159 rval = SD_NOT_READY_VALID; 10160 mutex_enter(SD_MUTEX(un)); 10161 10162 /* Ignore all failed status for removable media */ 10163 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 10164 10165 goto done; 10166 } 10167 10168 is_valid = SD_IS_VALID_LABEL(un); 10169 mutex_enter(SD_MUTEX(un)); 10170 if (!is_valid || 10171 (un->un_f_blockcount_is_valid == FALSE) || 10172 (un->un_f_tgt_blocksize_is_valid == FALSE)) { 10173 10174 /* capacity has to be read every open. */ 10175 mutex_exit(SD_MUTEX(un)); 10176 status = sd_send_scsi_READ_CAPACITY(ssc, &capacity, 10177 &lbasize, SD_PATH_DIRECT); 10178 10179 if (status != 0) { 10180 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 10181 10182 cmlb_invalidate(un->un_cmlbhandle, 10183 (void *)SD_PATH_DIRECT); 10184 mutex_enter(SD_MUTEX(un)); 10185 rval = SD_NOT_READY_VALID; 10186 10187 goto done; 10188 } else { 10189 mutex_enter(SD_MUTEX(un)); 10190 sd_update_block_info(un, lbasize, capacity); 10191 } 10192 } 10193 10194 /* 10195 * Check if the media in the device is writable or not. 10196 */ 10197 if (!is_valid && ISCD(un)) { 10198 sd_check_for_writable_cd(ssc, SD_PATH_DIRECT); 10199 } 10200 10201 } else { 10202 /* 10203 * Do a test unit ready to clear any unit attention from non-CD 10204 * devices. 10205 */ 10206 mutex_exit(SD_MUTEX(un)); 10207 10208 status = sd_send_scsi_TEST_UNIT_READY(ssc, 0); 10209 if (status != 0) { 10210 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 10211 } 10212 10213 mutex_enter(SD_MUTEX(un)); 10214 } 10215 10216 10217 /* 10218 * If this is a non-512 block device, allocate space for 10219 * the wmap cache. This is being done here since every time 10220 * the media is changed this routine will be called and the 10221 * block size is a function of media rather than device. 10222 */ 10223 if (un->un_f_non_devbsize_supported && NOT_DEVBSIZE(un)) { 10224 if (!(un->un_wm_cache)) { 10225 (void) snprintf(name_str, sizeof (name_str), 10226 "%s%d_cache", 10227 ddi_driver_name(SD_DEVINFO(un)), 10228 ddi_get_instance(SD_DEVINFO(un))); 10229 un->un_wm_cache = kmem_cache_create( 10230 name_str, sizeof (struct sd_w_map), 10231 8, sd_wm_cache_constructor, 10232 sd_wm_cache_destructor, NULL, 10233 (void *)un, NULL, 0); 10234 if (!(un->un_wm_cache)) { 10235 rval = ENOMEM; 10236 goto done; 10237 } 10238 } 10239 } 10240 10241 if (un->un_state == SD_STATE_NORMAL) { 10242 /* 10243 * If the target is not yet ready here (defined by a TUR 10244 * failure), invalidate the geometry and print an 'offline' 10245 * message. This is a legacy message, as the state of the 10246 * target is not actually changed to SD_STATE_OFFLINE. 10247 * 10248 * If the TUR fails for EACCES (Reservation Conflict), 10249 * SD_RESERVED_BY_OTHERS will be returned to indicate 10250 * reservation conflict.
If the TUR fails for other 10251 * reasons, SD_NOT_READY_VALID will be returned. 10252 */ 10253 int err; 10254 10255 mutex_exit(SD_MUTEX(un)); 10256 err = sd_send_scsi_TEST_UNIT_READY(ssc, 0); 10257 mutex_enter(SD_MUTEX(un)); 10258 10259 if (err != 0) { 10260 mutex_exit(SD_MUTEX(un)); 10261 cmlb_invalidate(un->un_cmlbhandle, 10262 (void *)SD_PATH_DIRECT); 10263 mutex_enter(SD_MUTEX(un)); 10264 if (err == EACCES) { 10265 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 10266 "reservation conflict\n"); 10267 rval = SD_RESERVED_BY_OTHERS; 10268 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 10269 } else { 10270 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 10271 "drive offline\n"); 10272 rval = SD_NOT_READY_VALID; 10273 sd_ssc_assessment(ssc, SD_FMT_STATUS_CHECK); 10274 } 10275 goto done; 10276 } 10277 } 10278 10279 if (un->un_f_format_in_progress == FALSE) { 10280 mutex_exit(SD_MUTEX(un)); 10281 10282 if (cmlb_partinfo(un->un_cmlbhandle, part, NULL, NULL, NULL, 10283 NULL, (void *) SD_PATH_DIRECT) != 0) { 10284 rval = SD_NOT_READY_VALID; 10285 mutex_enter(SD_MUTEX(un)); 10286 10287 goto done; 10288 } 10289 if (un->un_f_pkstats_enabled) { 10290 sd_set_pstats(un); 10291 SD_TRACE(SD_LOG_IO_PARTITION, un, 10292 "sd_ready_and_valid: un:0x%p pstats created and " 10293 "set\n", un); 10294 } 10295 mutex_enter(SD_MUTEX(un)); 10296 } 10297 10298 /* 10299 * If this device supports DOOR_LOCK command, try and send 10300 * this command to PREVENT MEDIA REMOVAL, but don't get upset 10301 * if it fails. For a CD, however, it is an error 10302 */ 10303 if (un->un_f_doorlock_supported) { 10304 mutex_exit(SD_MUTEX(un)); 10305 status = sd_send_scsi_DOORLOCK(ssc, SD_REMOVAL_PREVENT, 10306 SD_PATH_DIRECT); 10307 10308 if ((status != 0) && ISCD(un)) { 10309 rval = SD_NOT_READY_VALID; 10310 mutex_enter(SD_MUTEX(un)); 10311 10312 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 10313 10314 goto done; 10315 } else if (status != 0) 10316 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 10317 mutex_enter(SD_MUTEX(un)); 10318 } 10319 10320 /* The state has changed, inform the media watch routines */ 10321 un->un_mediastate = DKIO_INSERTED; 10322 cv_broadcast(&un->un_state_cv); 10323 rval = SD_READY_VALID; 10324 10325 done: 10326 10327 /* 10328 * Initialize the capacity kstat value, if no media previously 10329 * (capacity kstat is 0) and a media has been inserted 10330 * (un_blockcount > 0). 10331 */ 10332 if (un->un_errstats != NULL) { 10333 stp = (struct sd_errstats *)un->un_errstats->ks_data; 10334 if ((stp->sd_capacity.value.ui64 == 0) && 10335 (un->un_f_blockcount_is_valid == TRUE)) { 10336 stp->sd_capacity.value.ui64 = 10337 (uint64_t)((uint64_t)un->un_blockcount * 10338 un->un_sys_blocksize); 10339 } 10340 } 10341 10342 mutex_exit(SD_MUTEX(un)); 10343 return (rval); 10344 } 10345 10346 10347 /* 10348 * Function: sdmin 10349 * 10350 * Description: Routine to limit the size of a data transfer. Used in 10351 * conjunction with physio(9F). 10352 * 10353 * Arguments: bp - pointer to the indicated buf(9S) struct. 10354 * 10355 * Context: Kernel thread context. 10356 */ 10357 10358 static void 10359 sdmin(struct buf *bp) 10360 { 10361 struct sd_lun *un; 10362 int instance; 10363 10364 instance = SDUNIT(bp->b_edev); 10365 10366 un = ddi_get_soft_state(sd_state, instance); 10367 ASSERT(un != NULL); 10368 10369 if (bp->b_bcount > un->un_max_xfer_size) { 10370 bp->b_bcount = un->un_max_xfer_size; 10371 } 10372 } 10373 10374 10375 /* 10376 * Function: sdread 10377 * 10378 * Description: Driver's read(9e) entry point function. 
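 * (The transfer itself is staged through physio(9F), with
 * sdmin() bounding each request to un_max_xfer_size.)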
10379 * 10380 * Arguments: dev - device number 10381 * uio - structure pointer describing where data is to be stored 10382 * in user's space 10383 * cred_p - user credential pointer 10384 * 10385 * Return Code: ENXIO 10386 * EIO 10387 * EINVAL 10388 * value returned by physio 10389 * 10390 * Context: Kernel thread context. 10391 */ 10392 /* ARGSUSED */ 10393 static int 10394 sdread(dev_t dev, struct uio *uio, cred_t *cred_p) 10395 { 10396 struct sd_lun *un = NULL; 10397 int secmask; 10398 int err = 0; 10399 sd_ssc_t *ssc; 10400 10401 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 10402 return (ENXIO); 10403 } 10404 10405 ASSERT(!mutex_owned(SD_MUTEX(un))); 10406 10407 10408 if (!SD_IS_VALID_LABEL(un) && !ISCD(un)) { 10409 mutex_enter(SD_MUTEX(un)); 10410 /* 10411 * Because the call to sd_ready_and_valid will issue I/O we 10412 * must wait here if either the device is suspended or 10413 * if it's power level is changing. 10414 */ 10415 while ((un->un_state == SD_STATE_SUSPENDED) || 10416 (un->un_state == SD_STATE_PM_CHANGING)) { 10417 cv_wait(&un->un_suspend_cv, SD_MUTEX(un)); 10418 } 10419 un->un_ncmds_in_driver++; 10420 mutex_exit(SD_MUTEX(un)); 10421 10422 /* Initialize sd_ssc_t for internal uscsi commands */ 10423 ssc = sd_ssc_init(un); 10424 if ((sd_ready_and_valid(ssc, SDPART(dev))) != SD_READY_VALID) { 10425 err = EIO; 10426 } else { 10427 err = 0; 10428 } 10429 sd_ssc_fini(ssc); 10430 10431 mutex_enter(SD_MUTEX(un)); 10432 un->un_ncmds_in_driver--; 10433 ASSERT(un->un_ncmds_in_driver >= 0); 10434 mutex_exit(SD_MUTEX(un)); 10435 if (err != 0) 10436 return (err); 10437 } 10438 10439 /* 10440 * Read requests are restricted to multiples of the system block size. 10441 */ 10442 secmask = un->un_sys_blocksize - 1; 10443 10444 if (uio->uio_loffset & ((offset_t)(secmask))) { 10445 SD_ERROR(SD_LOG_READ_WRITE, un, 10446 "sdread: file offset not modulo %d\n", 10447 un->un_sys_blocksize); 10448 err = EINVAL; 10449 } else if (uio->uio_iov->iov_len & (secmask)) { 10450 SD_ERROR(SD_LOG_READ_WRITE, un, 10451 "sdread: transfer length not modulo %d\n", 10452 un->un_sys_blocksize); 10453 err = EINVAL; 10454 } else { 10455 err = physio(sdstrategy, NULL, dev, B_READ, sdmin, uio); 10456 } 10457 10458 return (err); 10459 } 10460 10461 10462 /* 10463 * Function: sdwrite 10464 * 10465 * Description: Driver's write(9e) entry point function. 10466 * 10467 * Arguments: dev - device number 10468 * uio - structure pointer describing where data is stored in 10469 * user's space 10470 * cred_p - user credential pointer 10471 * 10472 * Return Code: ENXIO 10473 * EIO 10474 * EINVAL 10475 * value returned by physio 10476 * 10477 * Context: Kernel thread context. 10478 */ 10479 /* ARGSUSED */ 10480 static int 10481 sdwrite(dev_t dev, struct uio *uio, cred_t *cred_p) 10482 { 10483 struct sd_lun *un = NULL; 10484 int secmask; 10485 int err = 0; 10486 sd_ssc_t *ssc; 10487 10488 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 10489 return (ENXIO); 10490 } 10491 10492 ASSERT(!mutex_owned(SD_MUTEX(un))); 10493 10494 if (!SD_IS_VALID_LABEL(un) && !ISCD(un)) { 10495 mutex_enter(SD_MUTEX(un)); 10496 /* 10497 * Because the call to sd_ready_and_valid will issue I/O we 10498 * must wait here if either the device is suspended or 10499 * if it's power level is changing. 
10500 */ 10501 while ((un->un_state == SD_STATE_SUSPENDED) || 10502 (un->un_state == SD_STATE_PM_CHANGING)) { 10503 cv_wait(&un->un_suspend_cv, SD_MUTEX(un)); 10504 } 10505 un->un_ncmds_in_driver++; 10506 mutex_exit(SD_MUTEX(un)); 10507 10508 /* Initialize sd_ssc_t for internal uscsi commands */ 10509 ssc = sd_ssc_init(un); 10510 if ((sd_ready_and_valid(ssc, SDPART(dev))) != SD_READY_VALID) { 10511 err = EIO; 10512 } else { 10513 err = 0; 10514 } 10515 sd_ssc_fini(ssc); 10516 10517 mutex_enter(SD_MUTEX(un)); 10518 un->un_ncmds_in_driver--; 10519 ASSERT(un->un_ncmds_in_driver >= 0); 10520 mutex_exit(SD_MUTEX(un)); 10521 if (err != 0) 10522 return (err); 10523 } 10524 10525 /* 10526 * Write requests are restricted to multiples of the system block size. 10527 */ 10528 secmask = un->un_sys_blocksize - 1; 10529 10530 if (uio->uio_loffset & ((offset_t)(secmask))) { 10531 SD_ERROR(SD_LOG_READ_WRITE, un, 10532 "sdwrite: file offset not modulo %d\n", 10533 un->un_sys_blocksize); 10534 err = EINVAL; 10535 } else if (uio->uio_iov->iov_len & (secmask)) { 10536 SD_ERROR(SD_LOG_READ_WRITE, un, 10537 "sdwrite: transfer length not modulo %d\n", 10538 un->un_sys_blocksize); 10539 err = EINVAL; 10540 } else { 10541 err = physio(sdstrategy, NULL, dev, B_WRITE, sdmin, uio); 10542 } 10543 10544 return (err); 10545 } 10546 10547 10548 /* 10549 * Function: sdaread 10550 * 10551 * Description: Driver's aread(9e) entry point function. 10552 * 10553 * Arguments: dev - device number 10554 * aio - structure pointer describing where data is to be stored 10555 * cred_p - user credential pointer 10556 * 10557 * Return Code: ENXIO 10558 * EIO 10559 * EINVAL 10560 * value returned by aphysio 10561 * 10562 * Context: Kernel thread context. 10563 */ 10564 /* ARGSUSED */ 10565 static int 10566 sdaread(dev_t dev, struct aio_req *aio, cred_t *cred_p) 10567 { 10568 struct sd_lun *un = NULL; 10569 struct uio *uio = aio->aio_uio; 10570 int secmask; 10571 int err = 0; 10572 sd_ssc_t *ssc; 10573 10574 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 10575 return (ENXIO); 10576 } 10577 10578 ASSERT(!mutex_owned(SD_MUTEX(un))); 10579 10580 if (!SD_IS_VALID_LABEL(un) && !ISCD(un)) { 10581 mutex_enter(SD_MUTEX(un)); 10582 /* 10583 * Because the call to sd_ready_and_valid will issue I/O we 10584 * must wait here if either the device is suspended or 10585 * if it's power level is changing. 10586 */ 10587 while ((un->un_state == SD_STATE_SUSPENDED) || 10588 (un->un_state == SD_STATE_PM_CHANGING)) { 10589 cv_wait(&un->un_suspend_cv, SD_MUTEX(un)); 10590 } 10591 un->un_ncmds_in_driver++; 10592 mutex_exit(SD_MUTEX(un)); 10593 10594 /* Initialize sd_ssc_t for internal uscsi commands */ 10595 ssc = sd_ssc_init(un); 10596 if ((sd_ready_and_valid(ssc, SDPART(dev))) != SD_READY_VALID) { 10597 err = EIO; 10598 } else { 10599 err = 0; 10600 } 10601 sd_ssc_fini(ssc); 10602 10603 mutex_enter(SD_MUTEX(un)); 10604 un->un_ncmds_in_driver--; 10605 ASSERT(un->un_ncmds_in_driver >= 0); 10606 mutex_exit(SD_MUTEX(un)); 10607 if (err != 0) 10608 return (err); 10609 } 10610 10611 /* 10612 * Read requests are restricted to multiples of the system block size. 
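 * For example, with a 512-byte system block size, secmask below
 * is 0x1FF; any file offset or iov_len with one of those
 * low-order bits set fails the checks with EINVAL.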
10613 */ 10614 secmask = un->un_sys_blocksize - 1; 10615 10616 if (uio->uio_loffset & ((offset_t)(secmask))) { 10617 SD_ERROR(SD_LOG_READ_WRITE, un, 10618 "sdaread: file offset not modulo %d\n", 10619 un->un_sys_blocksize); 10620 err = EINVAL; 10621 } else if (uio->uio_iov->iov_len & (secmask)) { 10622 SD_ERROR(SD_LOG_READ_WRITE, un, 10623 "sdaread: transfer length not modulo %d\n", 10624 un->un_sys_blocksize); 10625 err = EINVAL; 10626 } else { 10627 err = aphysio(sdstrategy, anocancel, dev, B_READ, sdmin, aio); 10628 } 10629 10630 return (err); 10631 } 10632 10633 10634 /* 10635 * Function: sdawrite 10636 * 10637 * Description: Driver's awrite(9e) entry point function. 10638 * 10639 * Arguments: dev - device number 10640 * aio - structure pointer describing where data is stored 10641 * cred_p - user credential pointer 10642 * 10643 * Return Code: ENXIO 10644 * EIO 10645 * EINVAL 10646 * value returned by aphysio 10647 * 10648 * Context: Kernel thread context. 10649 */ 10650 /* ARGSUSED */ 10651 static int 10652 sdawrite(dev_t dev, struct aio_req *aio, cred_t *cred_p) 10653 { 10654 struct sd_lun *un = NULL; 10655 struct uio *uio = aio->aio_uio; 10656 int secmask; 10657 int err = 0; 10658 sd_ssc_t *ssc; 10659 10660 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 10661 return (ENXIO); 10662 } 10663 10664 ASSERT(!mutex_owned(SD_MUTEX(un))); 10665 10666 if (!SD_IS_VALID_LABEL(un) && !ISCD(un)) { 10667 mutex_enter(SD_MUTEX(un)); 10668 /* 10669 * Because the call to sd_ready_and_valid will issue I/O we 10670 * must wait here if either the device is suspended or 10671 * if it's power level is changing. 10672 */ 10673 while ((un->un_state == SD_STATE_SUSPENDED) || 10674 (un->un_state == SD_STATE_PM_CHANGING)) { 10675 cv_wait(&un->un_suspend_cv, SD_MUTEX(un)); 10676 } 10677 un->un_ncmds_in_driver++; 10678 mutex_exit(SD_MUTEX(un)); 10679 10680 /* Initialize sd_ssc_t for internal uscsi commands */ 10681 ssc = sd_ssc_init(un); 10682 if ((sd_ready_and_valid(ssc, SDPART(dev))) != SD_READY_VALID) { 10683 err = EIO; 10684 } else { 10685 err = 0; 10686 } 10687 sd_ssc_fini(ssc); 10688 10689 mutex_enter(SD_MUTEX(un)); 10690 un->un_ncmds_in_driver--; 10691 ASSERT(un->un_ncmds_in_driver >= 0); 10692 mutex_exit(SD_MUTEX(un)); 10693 if (err != 0) 10694 return (err); 10695 } 10696 10697 /* 10698 * Write requests are restricted to multiples of the system block size. 
10699 */ 10700 secmask = un->un_sys_blocksize - 1; 10701 10702 if (uio->uio_loffset & ((offset_t)(secmask))) { 10703 SD_ERROR(SD_LOG_READ_WRITE, un, 10704 "sdawrite: file offset not modulo %d\n", 10705 un->un_sys_blocksize); 10706 err = EINVAL; 10707 } else if (uio->uio_iov->iov_len & (secmask)) { 10708 SD_ERROR(SD_LOG_READ_WRITE, un, 10709 "sdawrite: transfer length not modulo %d\n", 10710 un->un_sys_blocksize); 10711 err = EINVAL; 10712 } else { 10713 err = aphysio(sdstrategy, anocancel, dev, B_WRITE, sdmin, aio); 10714 } 10715 10716 return (err); 10717 } 10718 10719 10720 10721 10722 10723 /* 10724 * Driver IO processing follows the following sequence: 10725 * 10726 * sdioctl(9E) sdstrategy(9E) biodone(9F) 10727 * | | ^ 10728 * v v | 10729 * sd_send_scsi_cmd() ddi_xbuf_qstrategy() +-------------------+ 10730 * | | | | 10731 * v | | | 10732 * sd_uscsi_strategy() sd_xbuf_strategy() sd_buf_iodone() sd_uscsi_iodone() 10733 * | | ^ ^ 10734 * v v | | 10735 * SD_BEGIN_IOSTART() SD_BEGIN_IOSTART() | | 10736 * | | | | 10737 * +---+ | +------------+ +-------+ 10738 * | | | | 10739 * | SD_NEXT_IOSTART()| SD_NEXT_IODONE()| | 10740 * | v | | 10741 * | sd_mapblockaddr_iostart() sd_mapblockaddr_iodone() | 10742 * | | ^ | 10743 * | SD_NEXT_IOSTART()| SD_NEXT_IODONE()| | 10744 * | v | | 10745 * | sd_mapblocksize_iostart() sd_mapblocksize_iodone() | 10746 * | | ^ | 10747 * | SD_NEXT_IOSTART()| SD_NEXT_IODONE()| | 10748 * | v | | 10749 * | sd_checksum_iostart() sd_checksum_iodone() | 10750 * | | ^ | 10751 * +-> SD_NEXT_IOSTART()| SD_NEXT_IODONE()+------------->+ 10752 * | v | | 10753 * | sd_pm_iostart() sd_pm_iodone() | 10754 * | | ^ | 10755 * | | | | 10756 * +-> SD_NEXT_IOSTART()| SD_BEGIN_IODONE()--+--------------+ 10757 * | ^ 10758 * v | 10759 * sd_core_iostart() | 10760 * | | 10761 * | +------>(*destroypkt)() 10762 * +-> sd_start_cmds() <-+ | | 10763 * | | | v 10764 * | | | scsi_destroy_pkt(9F) 10765 * | | | 10766 * +->(*initpkt)() +- sdintr() 10767 * | | | | 10768 * | +-> scsi_init_pkt(9F) | +-> sd_handle_xxx() 10769 * | +-> scsi_setup_cdb(9F) | 10770 * | | 10771 * +--> scsi_transport(9F) | 10772 * | | 10773 * +----> SCSA ---->+ 10774 * 10775 * 10776 * This code is based upon the following presumptions: 10777 * 10778 * - iostart and iodone functions operate on buf(9S) structures. These 10779 * functions perform the necessary operations on the buf(9S) and pass 10780 * them along to the next function in the chain by using the macros 10781 * SD_NEXT_IOSTART() (for iostart side functions) and SD_NEXT_IODONE() 10782 * (for iodone side functions). 10783 * 10784 * - The iostart side functions may sleep. The iodone side functions 10785 * are called under interrupt context and may NOT sleep. Therefore 10786 * iodone side functions also may not call iostart side functions. 10787 * (NOTE: iostart side functions should NOT sleep for memory, as 10788 * this could result in deadlock.) 10789 * 10790 * - An iostart side function may call its corresponding iodone side 10791 * function directly (if necessary). 10792 * 10793 * - In the event of an error, an iostart side function can return a buf(9S) 10794 * to its caller by calling SD_BEGIN_IODONE() (after setting B_ERROR and 10795 * b_error in the usual way of course). 10796 * 10797 * - The taskq mechanism may be used by the iodone side functions to dispatch 10798 * requests to the iostart side functions. The iostart side functions in 10799 * this case would be called under the context of a taskq thread, so it's 10800 * OK for them to block/sleep/spin in this case. 
10801 * 10802 * - iostart side functions may allocate "shadow" buf(9S) structs and 10803 * pass them along to the next function in the chain. The corresponding 10804 * iodone side functions must coalesce the "shadow" bufs and return 10805 * the "original" buf to the next higher layer. 10806 * 10807 * - The b_private field of the buf(9S) struct holds a pointer to 10808 * an sd_xbuf struct, which contains information needed to 10809 * construct the scsi_pkt for the command. 10810 * 10811 * - The SD_MUTEX(un) is NOT held across calls to the next layer. Each 10812 * layer must acquire & release the SD_MUTEX(un) as needed. 10813 */ 10814 10815 10816 /* 10817 * Create taskq for all targets in the system. This is created at 10818 * _init(9E) and destroyed at _fini(9E). 10819 * 10820 * Note: here we set the minalloc to a reasonably high number to ensure that 10821 * we will have an adequate supply of task entries available at interrupt time. 10822 * This is used in conjunction with the TASKQ_PREPOPULATE flag in 10823 * sd_create_taskq(). Since we do not want to sleep for allocations at 10824 * interrupt time, set maxalloc equal to minalloc. That way we will just fail 10825 * the command if we ever try to dispatch more than SD_TASKQ_MAXALLOC taskq 10826 * requests any one instant in time. 10827 */ 10828 #define SD_TASKQ_NUMTHREADS 8 10829 #define SD_TASKQ_MINALLOC 256 10830 #define SD_TASKQ_MAXALLOC 256 10831 10832 static taskq_t *sd_tq = NULL; 10833 _NOTE(SCHEME_PROTECTS_DATA("stable data", sd_tq)) 10834 10835 static int sd_taskq_minalloc = SD_TASKQ_MINALLOC; 10836 static int sd_taskq_maxalloc = SD_TASKQ_MAXALLOC; 10837 10838 /* 10839 * The following task queue is being created for the write part of 10840 * read-modify-write of non-512 block size devices. 10841 * Limit the number of threads to 1 for now. This number has been chosen 10842 * considering the fact that it applies only to dvd ram drives/MO drives 10843 * currently. Performance for which is not main criteria at this stage. 10844 * Note: It needs to be explored if we can use a single taskq in future 10845 */ 10846 #define SD_WMR_TASKQ_NUMTHREADS 1 10847 static taskq_t *sd_wmr_tq = NULL; 10848 _NOTE(SCHEME_PROTECTS_DATA("stable data", sd_wmr_tq)) 10849 10850 /* 10851 * Function: sd_taskq_create 10852 * 10853 * Description: Create taskq thread(s) and preallocate task entries 10854 * 10855 * Return Code: Returns a pointer to the allocated taskq_t. 10856 * 10857 * Context: Can sleep. Requires blockable context. 10858 * 10859 * Notes: - The taskq() facility currently is NOT part of the DDI. 10860 * (definitely NOT recommeded for 3rd-party drivers!) :-) 10861 * - taskq_create() will block for memory, also it will panic 10862 * if it cannot create the requested number of threads. 10863 * - Currently taskq_create() creates threads that cannot be 10864 * swapped. 
* - We use TASKQ_PREPOPULATE to ensure we have an adequate 10866 * supply of taskq entries at interrupt time (ie, so that we 10867 * do not have to sleep for memory) 10868 */ 10869 10870 static void 10871 sd_taskq_create(void) 10872 { 10873 char taskq_name[TASKQ_NAMELEN]; 10874 10875 ASSERT(sd_tq == NULL); 10876 ASSERT(sd_wmr_tq == NULL); 10877 10878 (void) snprintf(taskq_name, sizeof (taskq_name), 10879 "%s_drv_taskq", sd_label); 10880 sd_tq = (taskq_create(taskq_name, SD_TASKQ_NUMTHREADS, 10881 (v.v_maxsyspri - 2), sd_taskq_minalloc, sd_taskq_maxalloc, 10882 TASKQ_PREPOPULATE)); 10883 10884 (void) snprintf(taskq_name, sizeof (taskq_name), 10885 "%s_rmw_taskq", sd_label); 10886 sd_wmr_tq = (taskq_create(taskq_name, SD_WMR_TASKQ_NUMTHREADS, 10887 (v.v_maxsyspri - 2), sd_taskq_minalloc, sd_taskq_maxalloc, 10888 TASKQ_PREPOPULATE)); 10889 } 10890 10891 10892 /* 10893 * Function: sd_taskq_delete 10894 * 10895 * Description: Complementary cleanup routine for sd_taskq_create(). 10896 * 10897 * Context: Kernel thread context. 10898 */ 10899 10900 static void 10901 sd_taskq_delete(void) 10902 { 10903 ASSERT(sd_tq != NULL); 10904 ASSERT(sd_wmr_tq != NULL); 10905 taskq_destroy(sd_tq); 10906 taskq_destroy(sd_wmr_tq); 10907 sd_tq = NULL; 10908 sd_wmr_tq = NULL; 10909 } 10910 10911 10912 /* 10913 * Function: sdstrategy 10914 * 10915 * Description: Driver's strategy (9E) entry point function. 10916 * 10917 * Arguments: bp - pointer to buf(9S) 10918 * 10919 * Return Code: Always returns zero 10920 * 10921 * Context: Kernel thread context. 10922 */ 10923 10924 static int 10925 sdstrategy(struct buf *bp) 10926 { 10927 struct sd_lun *un; 10928 10929 un = ddi_get_soft_state(sd_state, SD_GET_INSTANCE_FROM_BUF(bp)); 10930 if (un == NULL) { 10931 bioerror(bp, EIO); 10932 bp->b_resid = bp->b_bcount; 10933 biodone(bp); 10934 return (0); 10935 } 10936 /* As was done in the past, fail new commands if the state is dumping. */ 10937 if (un->un_state == SD_STATE_DUMPING) { 10938 bioerror(bp, ENXIO); 10939 bp->b_resid = bp->b_bcount; 10940 biodone(bp); 10941 return (0); 10942 } 10943 10944 ASSERT(!mutex_owned(SD_MUTEX(un))); 10945 10946 /* 10947 * Commands may sneak in while we have released the mutex in 10948 * DDI_SUSPEND; we should block new commands. However, old 10949 * commands that are still in the driver at this point should 10950 * still be allowed to drain. 10951 */ 10952 mutex_enter(SD_MUTEX(un)); 10953 /* 10954 * Must wait here if either the device is suspended or 10955 * if its power level is changing. 10956 */ 10957 while ((un->un_state == SD_STATE_SUSPENDED) || 10958 (un->un_state == SD_STATE_PM_CHANGING)) { 10959 cv_wait(&un->un_suspend_cv, SD_MUTEX(un)); 10960 } 10961 10962 un->un_ncmds_in_driver++; 10963 10964 /* 10965 * atapi: Since we are running the CD for now in PIO mode we need to 10966 * call bp_mapin here to avoid bp_mapin being called in interrupt 10967 * context under the HBA's init_pkt routine. 10968 */ 10969 if (un->un_f_cfg_is_atapi == TRUE) { 10970 mutex_exit(SD_MUTEX(un)); 10971 bp_mapin(bp); 10972 mutex_enter(SD_MUTEX(un)); 10973 } 10974 SD_INFO(SD_LOG_IO, un, "sdstrategy: un_ncmds_in_driver = %ld\n", 10975 un->un_ncmds_in_driver); 10976 10977 if (bp->b_flags & B_WRITE) 10978 un->un_f_sync_cache_required = TRUE; 10979 10980 mutex_exit(SD_MUTEX(un)); 10981 10982 /* 10983 * This will (eventually) allocate the sd_xbuf area and 10984 * call sd_xbuf_strategy(). We just want to return the 10985 * result of ddi_xbuf_qstrategy so that we have an 10986 * optimized tail call which saves us a stack frame. 10987
10987 */ 10988 return (ddi_xbuf_qstrategy(bp, un->un_xbuf_attr)); 10989 } 10990 10991 10992 /* 10993 * Function: sd_xbuf_strategy 10994 * 10995 * Description: Function for initiating IO operations via the 10996 * ddi_xbuf_qstrategy() mechanism. 10997 * 10998 * Context: Kernel thread context. 10999 */ 11000 11001 static void 11002 sd_xbuf_strategy(struct buf *bp, ddi_xbuf_t xp, void *arg) 11003 { 11004 struct sd_lun *un = arg; 11005 11006 ASSERT(bp != NULL); 11007 ASSERT(xp != NULL); 11008 ASSERT(un != NULL); 11009 ASSERT(!mutex_owned(SD_MUTEX(un))); 11010 11011 /* 11012 * Initialize the fields in the xbuf and save a pointer to the 11013 * xbuf in bp->b_private. 11014 */ 11015 sd_xbuf_init(un, bp, xp, SD_CHAIN_BUFIO, NULL); 11016 11017 /* Send the buf down the iostart chain */ 11018 SD_BEGIN_IOSTART(((struct sd_xbuf *)xp)->xb_chain_iostart, un, bp); 11019 } 11020 11021 11022 /* 11023 * Function: sd_xbuf_init 11024 * 11025 * Description: Prepare the given sd_xbuf struct for use. 11026 * 11027 * Arguments: un - ptr to softstate 11028 * bp - ptr to associated buf(9S) 11029 * xp - ptr to associated sd_xbuf 11030 * chain_type - IO chain type to use: 11031 * SD_CHAIN_NULL 11032 * SD_CHAIN_BUFIO 11033 * SD_CHAIN_USCSI 11034 * SD_CHAIN_DIRECT 11035 * SD_CHAIN_DIRECT_PRIORITY 11036 * pktinfop - ptr to private data struct for scsi_pkt(9S) 11037 * initialization; may be NULL if none. 11038 * 11039 * Context: Kernel thread context 11040 */ 11041 11042 static void 11043 sd_xbuf_init(struct sd_lun *un, struct buf *bp, struct sd_xbuf *xp, 11044 uchar_t chain_type, void *pktinfop) 11045 { 11046 int index; 11047 11048 ASSERT(un != NULL); 11049 ASSERT(bp != NULL); 11050 ASSERT(xp != NULL); 11051 11052 SD_INFO(SD_LOG_IO, un, "sd_xbuf_init: buf:0x%p chain type:0x%x\n", 11053 bp, chain_type); 11054 11055 xp->xb_un = un; 11056 xp->xb_pktp = NULL; 11057 xp->xb_pktinfo = pktinfop; 11058 xp->xb_private = bp->b_private; 11059 xp->xb_blkno = (daddr_t)bp->b_blkno; 11060 11061 /* 11062 * Set up the iostart and iodone chain indexes in the xbuf, based 11063 * upon the specified chain type to use. 11064 */ 11065 switch (chain_type) { 11066 case SD_CHAIN_NULL: 11067 /* 11068 * Fall thru to just use the values for the buf type, even 11069 * tho for the NULL chain these values will never be used. 11070 */ 11071 /* FALLTHRU */ 11072 case SD_CHAIN_BUFIO: 11073 index = un->un_buf_chain_type; 11074 break; 11075 case SD_CHAIN_USCSI: 11076 index = un->un_uscsi_chain_type; 11077 break; 11078 case SD_CHAIN_DIRECT: 11079 index = un->un_direct_chain_type; 11080 break; 11081 case SD_CHAIN_DIRECT_PRIORITY: 11082 index = un->un_priority_chain_type; 11083 break; 11084 default: 11085 /* We're really broken if we ever get here... */ 11086 panic("sd_xbuf_init: illegal chain type!"); 11087 /*NOTREACHED*/ 11088 } 11089 11090 xp->xb_chain_iostart = sd_chain_index_map[index].sci_iostart_index; 11091 xp->xb_chain_iodone = sd_chain_index_map[index].sci_iodone_index; 11092 11093 /* 11094 * It might be a bit easier to simply bzero the entire xbuf above, 11095 * but it turns out that since we init a fair number of members anyway, 11096 * we save a fair number cycles by doing explicit assignment of zero. 
11097 */ 11098 xp->xb_pkt_flags = 0; 11099 xp->xb_dma_resid = 0; 11100 xp->xb_retry_count = 0; 11101 xp->xb_victim_retry_count = 0; 11102 xp->xb_ua_retry_count = 0; 11103 xp->xb_nr_retry_count = 0; 11104 xp->xb_sense_bp = NULL; 11105 xp->xb_sense_status = 0; 11106 xp->xb_sense_state = 0; 11107 xp->xb_sense_resid = 0; 11108 xp->xb_ena = 0; 11109 11110 bp->b_private = xp; 11111 bp->b_flags &= ~(B_DONE | B_ERROR); 11112 bp->b_resid = 0; 11113 bp->av_forw = NULL; 11114 bp->av_back = NULL; 11115 bioerror(bp, 0); 11116 11117 SD_INFO(SD_LOG_IO, un, "sd_xbuf_init: done.\n"); 11118 } 11119 11120 11121 /* 11122 * Function: sd_uscsi_strategy 11123 * 11124 * Description: Wrapper for calling into the USCSI chain via physio(9F) 11125 * 11126 * Arguments: bp - buf struct ptr 11127 * 11128 * Return Code: Always returns 0 11129 * 11130 * Context: Kernel thread context 11131 */ 11132 11133 static int 11134 sd_uscsi_strategy(struct buf *bp) 11135 { 11136 struct sd_lun *un; 11137 struct sd_uscsi_info *uip; 11138 struct sd_xbuf *xp; 11139 uchar_t chain_type; 11140 uchar_t cmd; 11141 11142 ASSERT(bp != NULL); 11143 11144 un = ddi_get_soft_state(sd_state, SD_GET_INSTANCE_FROM_BUF(bp)); 11145 if (un == NULL) { 11146 bioerror(bp, EIO); 11147 bp->b_resid = bp->b_bcount; 11148 biodone(bp); 11149 return (0); 11150 } 11151 11152 ASSERT(!mutex_owned(SD_MUTEX(un))); 11153 11154 SD_TRACE(SD_LOG_IO, un, "sd_uscsi_strategy: entry: buf:0x%p\n", bp); 11155 11156 /* 11157 * A pointer to a struct sd_uscsi_info is expected in bp->b_private 11158 */ 11159 ASSERT(bp->b_private != NULL); 11160 uip = (struct sd_uscsi_info *)bp->b_private; 11161 cmd = ((struct uscsi_cmd *)(uip->ui_cmdp))->uscsi_cdb[0]; 11162 11163 mutex_enter(SD_MUTEX(un)); 11164 /* 11165 * atapi: Since we are running the CD for now in PIO mode we need to 11166 * call bp_mapin here to avoid bp_mapin called interrupt context under 11167 * the HBA's init_pkt routine. 11168 */ 11169 if (un->un_f_cfg_is_atapi == TRUE) { 11170 mutex_exit(SD_MUTEX(un)); 11171 bp_mapin(bp); 11172 mutex_enter(SD_MUTEX(un)); 11173 } 11174 un->un_ncmds_in_driver++; 11175 SD_INFO(SD_LOG_IO, un, "sd_uscsi_strategy: un_ncmds_in_driver = %ld\n", 11176 un->un_ncmds_in_driver); 11177 11178 if ((bp->b_flags & B_WRITE) && (bp->b_bcount != 0) && 11179 (cmd != SCMD_MODE_SELECT) && (cmd != SCMD_MODE_SELECT_G1)) 11180 un->un_f_sync_cache_required = TRUE; 11181 11182 mutex_exit(SD_MUTEX(un)); 11183 11184 switch (uip->ui_flags) { 11185 case SD_PATH_DIRECT: 11186 chain_type = SD_CHAIN_DIRECT; 11187 break; 11188 case SD_PATH_DIRECT_PRIORITY: 11189 chain_type = SD_CHAIN_DIRECT_PRIORITY; 11190 break; 11191 default: 11192 chain_type = SD_CHAIN_USCSI; 11193 break; 11194 } 11195 11196 /* 11197 * We may allocate extra buf for external USCSI commands. If the 11198 * application asks for bigger than 20-byte sense data via USCSI, 11199 * SCSA layer will allocate 252 bytes sense buf for that command. 
11200 */ 11201 if (((struct uscsi_cmd *)(uip->ui_cmdp))->uscsi_rqlen > 11202 SENSE_LENGTH) { 11203 xp = kmem_zalloc(sizeof (struct sd_xbuf) - SENSE_LENGTH + 11204 MAX_SENSE_LENGTH, KM_SLEEP); 11205 } else { 11206 xp = kmem_zalloc(sizeof (struct sd_xbuf), KM_SLEEP); 11207 } 11208 11209 sd_xbuf_init(un, bp, xp, chain_type, uip->ui_cmdp); 11210 11211 /* Use the index obtained within xbuf_init */ 11212 SD_BEGIN_IOSTART(xp->xb_chain_iostart, un, bp); 11213 11214 SD_TRACE(SD_LOG_IO, un, "sd_uscsi_strategy: exit: buf:0x%p\n", bp); 11215 11216 return (0); 11217 } 11218 11219 /* 11220 * Function: sd_send_scsi_cmd 11221 * 11222 * Description: Runs a USCSI command for user (when called thru sdioctl), 11223 * or for the driver 11224 * 11225 * Arguments: dev - the dev_t for the device 11226 * incmd - ptr to a valid uscsi_cmd struct 11227 * flag - bit flag, indicating open settings, 32/64 bit type 11228 * dataspace - UIO_USERSPACE or UIO_SYSSPACE 11229 * path_flag - SD_PATH_DIRECT to use the USCSI "direct" chain and 11230 * the normal command waitq, or SD_PATH_DIRECT_PRIORITY 11231 * to use the USCSI "direct" chain and bypass the normal 11232 * command waitq. 11233 * 11234 * Return Code: 0 - successful completion of the given command 11235 * EIO - scsi_uscsi_handle_command() failed 11236 * ENXIO - soft state not found for specified dev 11237 * EINVAL 11238 * EFAULT - copyin/copyout error 11239 * return code of scsi_uscsi_handle_command(): 11240 * EIO 11241 * ENXIO 11242 * EACCES 11243 * 11244 * Context: Waits for command to complete. Can sleep. 11245 */ 11246 11247 static int 11248 sd_send_scsi_cmd(dev_t dev, struct uscsi_cmd *incmd, int flag, 11249 enum uio_seg dataspace, int path_flag) 11250 { 11251 struct sd_lun *un; 11252 sd_ssc_t *ssc; 11253 int rval; 11254 11255 un = ddi_get_soft_state(sd_state, SDUNIT(dev)); 11256 if (un == NULL) { 11257 return (ENXIO); 11258 } 11259 11260 /* 11261 * Using sd_ssc_send to handle uscsi cmd 11262 */ 11263 ssc = sd_ssc_init(un); 11264 rval = sd_ssc_send(ssc, incmd, flag, dataspace, path_flag); 11265 sd_ssc_fini(ssc); 11266 11267 return (rval); 11268 } 11269 11270 /* 11271 * Function: sd_ssc_init 11272 * 11273 * Description: Uscsi end-user call this function to initialize necessary 11274 * fields, such as uscsi_cmd and sd_uscsi_info struct. 11275 * 11276 * The return value of sd_send_scsi_cmd will be treated as a 11277 * fault in various conditions. Even it is not Zero, some 11278 * callers may ignore the return value. That is to say, we can 11279 * not make an accurate assessment in sdintr, since if a 11280 * command is failed in sdintr it does not mean the caller of 11281 * sd_send_scsi_cmd will treat it as a real failure. 11282 * 11283 * To avoid printing too many error logs for a failed uscsi 11284 * packet that the caller may not treat it as a failure, the 11285 * sd will keep silent for handling all uscsi commands. 11286 * 11287 * During detach->attach and attach-open, for some types of 11288 * problems, the driver should be providing information about 11289 * the problem encountered. Device use USCSI_SILENT, which 11290 * suppresses all driver information. The result is that no 11291 * information about the problem is available. Being 11292 * completely silent during this time is inappropriate. The 11293 * driver needs a more selective filter than USCSI_SILENT, so 11294 * that information related to faults is provided. 
11295 * 11296 * To make the accurate accessment, the caller of 11297 * sd_send_scsi_USCSI_CMD should take the ownership and 11298 * get necessary information to print error messages. 11299 * 11300 * If we want to print necessary info of uscsi command, we need to 11301 * keep the uscsi_cmd and sd_uscsi_info till we can make the 11302 * assessment. We use sd_ssc_init to alloc necessary 11303 * structs for sending an uscsi command and we are also 11304 * responsible for free the memory by calling 11305 * sd_ssc_fini. 11306 * 11307 * The calling secquences will look like: 11308 * sd_ssc_init-> 11309 * 11310 * ... 11311 * 11312 * sd_send_scsi_USCSI_CMD-> 11313 * sd_ssc_send-> - - - sdintr 11314 * ... 11315 * 11316 * if we think the return value should be treated as a 11317 * failure, we make the accessment here and print out 11318 * necessary by retrieving uscsi_cmd and sd_uscsi_info' 11319 * 11320 * ... 11321 * 11322 * sd_ssc_fini 11323 * 11324 * 11325 * Arguments: un - pointer to driver soft state (unit) structure for this 11326 * target. 11327 * 11328 * Return code: sd_ssc_t - pointer to allocated sd_ssc_t struct, it contains 11329 * uscsi_cmd and sd_uscsi_info. 11330 * NULL - if can not alloc memory for sd_ssc_t struct 11331 * 11332 * Context: Kernel Thread. 11333 */ 11334 static sd_ssc_t * 11335 sd_ssc_init(struct sd_lun *un) 11336 { 11337 sd_ssc_t *ssc; 11338 struct uscsi_cmd *ucmdp; 11339 struct sd_uscsi_info *uip; 11340 11341 ASSERT(un != NULL); 11342 ASSERT(!mutex_owned(SD_MUTEX(un))); 11343 11344 /* 11345 * Allocate sd_ssc_t structure 11346 */ 11347 ssc = kmem_zalloc(sizeof (sd_ssc_t), KM_SLEEP); 11348 11349 /* 11350 * Allocate uscsi_cmd by calling scsi_uscsi_alloc common routine 11351 */ 11352 ucmdp = scsi_uscsi_alloc(); 11353 11354 /* 11355 * Allocate sd_uscsi_info structure 11356 */ 11357 uip = kmem_zalloc(sizeof (struct sd_uscsi_info), KM_SLEEP); 11358 11359 ssc->ssc_uscsi_cmd = ucmdp; 11360 ssc->ssc_uscsi_info = uip; 11361 ssc->ssc_un = un; 11362 11363 return (ssc); 11364 } 11365 11366 /* 11367 * Function: sd_ssc_fini 11368 * 11369 * Description: To free sd_ssc_t and it's hanging off 11370 * 11371 * Arguments: ssc - struct pointer of sd_ssc_t. 11372 */ 11373 static void 11374 sd_ssc_fini(sd_ssc_t *ssc) 11375 { 11376 scsi_uscsi_free(ssc->ssc_uscsi_cmd); 11377 11378 if (ssc->ssc_uscsi_info != NULL) { 11379 kmem_free(ssc->ssc_uscsi_info, sizeof (struct sd_uscsi_info)); 11380 ssc->ssc_uscsi_info = NULL; 11381 } 11382 11383 kmem_free(ssc, sizeof (sd_ssc_t)); 11384 ssc = NULL; 11385 } 11386 11387 /* 11388 * Function: sd_ssc_send 11389 * 11390 * Description: Runs a USCSI command for user when called through sdioctl, 11391 * or for the driver. 11392 * 11393 * Arguments: ssc - the struct of sd_ssc_t will bring uscsi_cmd and 11394 * sd_uscsi_info in. 11395 * incmd - ptr to a valid uscsi_cmd struct 11396 * flag - bit flag, indicating open settings, 32/64 bit type 11397 * dataspace - UIO_USERSPACE or UIO_SYSSPACE 11398 * path_flag - SD_PATH_DIRECT to use the USCSI "direct" chain and 11399 * the normal command waitq, or SD_PATH_DIRECT_PRIORITY 11400 * to use the USCSI "direct" chain and bypass the normal 11401 * command waitq. 
11402 * 11403 * Return Code: 0 - successful completion of the given command 11404 * EIO - scsi_uscsi_handle_command() failed 11405 * ENXIO - soft state not found for specified dev 11406 * EINVAL 11407 * EFAULT - copyin/copyout error 11408 * return code of scsi_uscsi_handle_command(): 11409 * EIO 11410 * ENXIO 11411 * EACCES 11412 * 11413 * Context: Kernel Thread; 11414 * Waits for command to complete. Can sleep. 11415 */ 11416 static int 11417 sd_ssc_send(sd_ssc_t *ssc, struct uscsi_cmd *incmd, int flag, 11418 enum uio_seg dataspace, int path_flag) 11419 { 11420 struct sd_uscsi_info *uip; 11421 struct uscsi_cmd *uscmd = ssc->ssc_uscsi_cmd; 11422 struct sd_lun *un; 11423 dev_t dev; 11424 11425 int format = 0; 11426 int rval; 11427 11428 11429 ASSERT(ssc != NULL); 11430 un = ssc->ssc_un; 11431 ASSERT(un != NULL); 11432 ASSERT(!mutex_owned(SD_MUTEX(un))); 11433 ASSERT(!(ssc->ssc_flags & SSC_FLAGS_NEED_ASSESSMENT)); 11434 /* 11435 * We need to make sure sd_ssc_send will have sd_ssc_assessment 11436 * followed to avoid missing any point of telemetry. 11437 */ 11438 ssc->ssc_flags |= SSC_FLAGS_NEED_ASSESSMENT; 11439 11440 if (uscmd == NULL) { 11441 return (ENXIO); 11442 } 11443 11444 11445 #ifdef SDDEBUG 11446 switch (dataspace) { 11447 case UIO_USERSPACE: 11448 SD_TRACE(SD_LOG_IO, un, 11449 "sd_ssc_send: entry: un:0x%p UIO_USERSPACE\n", un); 11450 break; 11451 case UIO_SYSSPACE: 11452 SD_TRACE(SD_LOG_IO, un, 11453 "sd_ssc_send: entry: un:0x%p UIO_SYSSPACE\n", un); 11454 break; 11455 default: 11456 SD_TRACE(SD_LOG_IO, un, 11457 "sd_ssc_send: entry: un:0x%p UNEXPECTED SPACE\n", un); 11458 break; 11459 } 11460 #endif 11461 11462 rval = scsi_uscsi_copyin((intptr_t)incmd, flag, 11463 SD_ADDRESS(un), &uscmd); 11464 if (rval != 0) { 11465 SD_TRACE(SD_LOG_IO, un, "sd_sense_scsi_cmd: " 11466 "scsi_uscsi_alloc_and_copyin failed\n", un); 11467 return (rval); 11468 } 11469 11470 if ((uscmd->uscsi_cdb != NULL) && 11471 (uscmd->uscsi_cdb[0] == SCMD_FORMAT)) { 11472 mutex_enter(SD_MUTEX(un)); 11473 un->un_f_format_in_progress = TRUE; 11474 mutex_exit(SD_MUTEX(un)); 11475 format = 1; 11476 } 11477 11478 /* 11479 * Allocate an sd_uscsi_info struct and fill it with the info 11480 * needed by sd_initpkt_for_uscsi(). Then put the pointer into 11481 * b_private in the buf for sd_initpkt_for_uscsi(). Note that 11482 * since we allocate the buf here in this function, we do not 11483 * need to preserve the prior contents of b_private. 11484 * The sd_uscsi_info struct is also used by sd_uscsi_strategy() 11485 */ 11486 uip = ssc->ssc_uscsi_info; 11487 uip->ui_flags = path_flag; 11488 uip->ui_cmdp = uscmd; 11489 11490 /* 11491 * Commands sent with priority are intended for error recovery 11492 * situations, and do not have retries performed. 
11493 */ 11494 if (path_flag == SD_PATH_DIRECT_PRIORITY) { 11495 uscmd->uscsi_flags |= USCSI_DIAGNOSE; 11496 } 11497 uscmd->uscsi_flags &= ~USCSI_NOINTR; 11498 11499 dev = SD_GET_DEV(un); 11500 rval = scsi_uscsi_handle_cmd(dev, dataspace, uscmd, 11501 sd_uscsi_strategy, NULL, uip); 11502 11503 /* 11504 * mark ssc_flags right after handle_cmd to make sure 11505 * the uscsi has been sent 11506 */ 11507 ssc->ssc_flags |= SSC_FLAGS_CMD_ISSUED; 11508 11509 #ifdef SDDEBUG 11510 SD_INFO(SD_LOG_IO, un, "sd_ssc_send: " 11511 "uscsi_status: 0x%02x uscsi_resid:0x%x\n", 11512 uscmd->uscsi_status, uscmd->uscsi_resid); 11513 if (uscmd->uscsi_bufaddr != NULL) { 11514 SD_INFO(SD_LOG_IO, un, "sd_ssc_send: " 11515 "uscmd->uscsi_bufaddr: 0x%p uscmd->uscsi_buflen:%d\n", 11516 uscmd->uscsi_bufaddr, uscmd->uscsi_buflen); 11517 if (dataspace == UIO_SYSSPACE) { 11518 SD_DUMP_MEMORY(un, SD_LOG_IO, 11519 "data", (uchar_t *)uscmd->uscsi_bufaddr, 11520 uscmd->uscsi_buflen, SD_LOG_HEX); 11521 } 11522 } 11523 #endif 11524 11525 if (format == 1) { 11526 mutex_enter(SD_MUTEX(un)); 11527 un->un_f_format_in_progress = FALSE; 11528 mutex_exit(SD_MUTEX(un)); 11529 } 11530 11531 (void) scsi_uscsi_copyout((intptr_t)incmd, uscmd); 11532 11533 return (rval); 11534 } 11535 11536 /* 11537 * Function: sd_ssc_print 11538 * 11539 * Description: Print information available to the console. 11540 * 11541 * Arguments: ssc - the struct of sd_ssc_t will bring uscsi_cmd and 11542 * sd_uscsi_info in. 11543 * sd_severity - log level. 11544 * Context: Kernel thread or interrupt context. 11545 */ 11546 static void 11547 sd_ssc_print(sd_ssc_t *ssc, int sd_severity) 11548 { 11549 struct uscsi_cmd *ucmdp; 11550 struct scsi_device *devp; 11551 dev_info_t *devinfo; 11552 uchar_t *sensep; 11553 int senlen; 11554 union scsi_cdb *cdbp; 11555 uchar_t com; 11556 extern struct scsi_key_strings scsi_cmds[]; 11557 11558 ASSERT(ssc != NULL); 11559 11560 ucmdp = ssc->ssc_uscsi_cmd; 11561 devp = SD_SCSI_DEVP(ssc->ssc_un); 11562 devinfo = SD_DEVINFO(ssc->ssc_un); 11563 ASSERT(ucmdp != NULL); 11564 ASSERT(devp != NULL); 11565 ASSERT(devinfo != NULL); 11566 sensep = (uint8_t *)ucmdp->uscsi_rqbuf; 11567 senlen = ucmdp->uscsi_rqlen - ucmdp->uscsi_rqresid; 11568 cdbp = (union scsi_cdb *)ucmdp->uscsi_cdb; 11569 11570 /* In certain case (like DOORLOCK), the cdb could be NULL. */ 11571 if (cdbp == NULL) 11572 return; 11573 /* We don't print log if no sense data available. */ 11574 if (senlen == 0) 11575 sensep = NULL; 11576 com = cdbp->scc_cmd; 11577 scsi_generic_errmsg(devp, sd_label, sd_severity, 0, 0, com, 11578 scsi_cmds, sensep, ssc->ssc_un->un_additional_codes, NULL); 11579 } 11580 11581 /* 11582 * Function: sd_ssc_assessment 11583 * 11584 * Description: We use this function to make an assessment at the point 11585 * where SD driver may encounter a potential error. 11586 * 11587 * Arguments: ssc - the struct of sd_ssc_t will bring uscsi_cmd and 11588 * sd_uscsi_info in. 11589 * tp_assess - a hint of strategy for ereport posting. 11590 * Possible values of tp_assess include: 11591 * SD_FMT_IGNORE - we don't post any ereport because we're 11592 * sure that it is ok to ignore the underlying problems. 11593 * SD_FMT_IGNORE_COMPROMISE - we don't post any ereport for now 11594 * but it might be not correct to ignore the underlying hardware 11595 * error. 11596 * SD_FMT_STATUS_CHECK - we will post an ereport with the 11597 * payload driver-assessment of value "fail" or 11598 * "fatal"(depending on what information we have here). 
This 11599 * assessment value is usually set when the SD driver thinks a 11600 * potential error has occurred (typically, when the return value 11601 * of the SCSI command is EIO). 11602 * SD_FMT_STANDARD - we will post an ereport with the payload 11603 * driver-assessment of value "info". This assessment value is 11604 * set when the SCSI command returned successfully and with 11605 * sense data sent back. 11606 * 11607 * Context: Kernel thread. 11608 */ 11609 static void 11610 sd_ssc_assessment(sd_ssc_t *ssc, enum sd_type_assessment tp_assess) 11611 { 11612 int senlen = 0; 11613 struct uscsi_cmd *ucmdp = NULL; 11614 struct sd_lun *un; 11615 11616 ASSERT(ssc != NULL); 11617 ASSERT(ssc->ssc_flags & SSC_FLAGS_NEED_ASSESSMENT); 11618 11619 ssc->ssc_flags &= ~SSC_FLAGS_NEED_ASSESSMENT; 11620 un = ssc->ssc_un; 11621 ASSERT(un != NULL); 11622 11623 /* 11624 * We don't handle CD-ROMs or removable media 11625 */ 11626 if (ISCD(un) || un->un_f_has_removable_media) { 11627 ssc->ssc_flags &= ~SSC_FLAGS_CMD_ISSUED; 11628 ssc->ssc_flags &= ~SSC_FLAGS_INVALID_DATA; 11629 return; 11630 } 11631 11632 /* 11633 * Only handle an issued command that is waiting for assessment. 11634 */ 11635 if (!(ssc->ssc_flags & SSC_FLAGS_CMD_ISSUED)) { 11636 sd_ssc_print(ssc, SCSI_ERR_INFO); 11637 return; 11638 } else 11639 ssc->ssc_flags &= ~SSC_FLAGS_CMD_ISSUED; 11640 11641 ucmdp = ssc->ssc_uscsi_cmd; 11642 ASSERT(ucmdp != NULL); 11643 11644 /* 11645 * We will not deal with non-retryable commands here. 11646 */ 11647 if (ucmdp->uscsi_flags & USCSI_DIAGNOSE) { 11648 ssc->ssc_flags &= ~SSC_FLAGS_INVALID_DATA; 11649 return; 11650 } 11651 11652 switch (tp_assess) { 11653 case SD_FMT_IGNORE: 11654 case SD_FMT_IGNORE_COMPROMISE: 11655 ssc->ssc_flags &= ~SSC_FLAGS_INVALID_DATA; 11656 break; 11657 case SD_FMT_STATUS_CHECK: 11658 /* 11659 * For a failed command (including a succeeded command 11660 * with invalid data sent back). 11661 */ 11662 sd_ssc_post(ssc, SD_FM_DRV_FATAL); 11663 break; 11664 case SD_FMT_STANDARD: 11665 /* 11666 * Always for succeeded commands, possibly with sense 11667 * data sent back. 11668 * Limitation: 11669 * We can only handle a succeeded command with sense 11670 * data sent back when auto-request-sense is enabled. 11671 */ 11672 senlen = ssc->ssc_uscsi_cmd->uscsi_rqlen - 11673 ssc->ssc_uscsi_cmd->uscsi_rqresid; 11674 if ((ssc->ssc_uscsi_info->ui_pkt_state & STATE_ARQ_DONE) && 11675 (un->un_f_arq_enabled == TRUE) && 11676 senlen > 0 && 11677 ssc->ssc_uscsi_cmd->uscsi_rqbuf != NULL) { 11678 sd_ssc_post(ssc, SD_FM_DRV_NOTICE); 11679 } 11680 break; 11681 default: 11682 /* 11683 * This should be a software error. 11684 */ 11685 scsi_log(SD_DEVINFO(un), sd_label, CE_CONT, 11686 "sd_ssc_assessment got wrong " 11687 "sd_type_assessment %d\n", tp_assess); 11688 break; 11689 } 11690 } 11691 11692 /* 11693 * Function: sd_ssc_post 11694 * 11695 * Description: 1. read the driver property to get the fm-scsi-log flag. 11696 * 2. print a log message if fm_scsi_log is non-zero. 11697 * 3. call sd_ssc_ereport_post to post an ereport if possible. 11698 * 11699 * Context: May be called from kernel thread or interrupt context. 11700
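 *
 * Note: "fm-scsi-log" is read with a default of zero, so the console
 * logging below is normally disabled; the ereport, by contrast, is
 * always posted.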
11700 */ 11701 static void 11702 sd_ssc_post(sd_ssc_t *ssc, enum sd_driver_assessment sd_assess) 11703 { 11704 struct sd_lun *un; 11705 int fm_scsi_log = 0; 11706 int sd_severity; 11707 11708 ASSERT(ssc != NULL); 11709 un = ssc->ssc_un; 11710 ASSERT(un != NULL); 11711 11712 fm_scsi_log = ddi_prop_get_int(DDI_DEV_T_ANY, SD_DEVINFO(un), 11713 DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, "fm-scsi-log", 0); 11714 11715 if (fm_scsi_log != 0) { 11716 switch (sd_assess) { 11717 case SD_FM_DRV_FATAL: 11718 sd_severity = SCSI_ERR_FATAL; 11719 break; 11720 case SD_FM_DRV_RECOVERY: 11721 sd_severity = SCSI_ERR_RECOVERED; 11722 break; 11723 case SD_FM_DRV_RETRY: 11724 sd_severity = SCSI_ERR_RETRYABLE; 11725 break; 11726 case SD_FM_DRV_NOTICE: 11727 sd_severity = SCSI_ERR_INFO; 11728 break; 11729 default: 11730 sd_severity = SCSI_ERR_UNKNOWN; 11731 } 11732 /* print log */ 11733 sd_ssc_print(ssc, sd_severity); 11734 } 11735 11736 /* always post ereport */ 11737 sd_ssc_ereport_post(ssc, sd_assess); 11738 } 11739 11740 /* 11741 * Function: sd_ssc_set_info 11742 * 11743 * Description: Mark ssc_flags and set ssc_info which would be the 11744 * payload of uderr ereport. This function will cause 11745 * sd_ssc_ereport_post to post uderr ereport only. 11746 * 11747 * Context: Kernel thread or interrupt context 11748 */ 11749 static void 11750 sd_ssc_set_info(sd_ssc_t *ssc, int ssc_flags, const char *fmt, ...) 11751 { 11752 va_list ap; 11753 11754 ASSERT(ssc != NULL); 11755 11756 ssc->ssc_flags |= ssc_flags; 11757 va_start(ap, fmt); 11758 (void) vsnprintf(ssc->ssc_info, sizeof (ssc->ssc_info), fmt, ap); 11759 va_end(ap); 11760 } 11761 11762 /* 11763 * Function: sd_buf_iodone 11764 * 11765 * Description: Frees the sd_xbuf & returns the buf to its originator. 11766 * 11767 * Context: May be called from interrupt context. 11768 */ 11769 /* ARGSUSED */ 11770 static void 11771 sd_buf_iodone(int index, struct sd_lun *un, struct buf *bp) 11772 { 11773 struct sd_xbuf *xp; 11774 11775 ASSERT(un != NULL); 11776 ASSERT(bp != NULL); 11777 ASSERT(!mutex_owned(SD_MUTEX(un))); 11778 11779 SD_TRACE(SD_LOG_IO_CORE, un, "sd_buf_iodone: entry.\n"); 11780 11781 xp = SD_GET_XBUF(bp); 11782 ASSERT(xp != NULL); 11783 11784 mutex_enter(SD_MUTEX(un)); 11785 11786 /* 11787 * Grab time when the cmd completed. 11788 * This is used for determining if the system has been 11789 * idle long enough to make it idle to the PM framework. 11790 * This is for lowering the overhead, and therefore improving 11791 * performance per I/O operation. 11792 */ 11793 un->un_pm_idle_time = ddi_get_time(); 11794 11795 un->un_ncmds_in_driver--; 11796 ASSERT(un->un_ncmds_in_driver >= 0); 11797 SD_INFO(SD_LOG_IO, un, "sd_buf_iodone: un_ncmds_in_driver = %ld\n", 11798 un->un_ncmds_in_driver); 11799 11800 mutex_exit(SD_MUTEX(un)); 11801 11802 ddi_xbuf_done(bp, un->un_xbuf_attr); /* xbuf is gone after this */ 11803 biodone(bp); /* bp is gone after this */ 11804 11805 SD_TRACE(SD_LOG_IO_CORE, un, "sd_buf_iodone: exit.\n"); 11806 } 11807 11808 11809 /* 11810 * Function: sd_uscsi_iodone 11811 * 11812 * Description: Frees the sd_xbuf & returns the buf to its originator. 11813 * 11814 * Context: May be called from interrupt context. 
11815 */ 11816 /* ARGSUSED */ 11817 static void 11818 sd_uscsi_iodone(int index, struct sd_lun *un, struct buf *bp) 11819 { 11820 struct sd_xbuf *xp; 11821 11822 ASSERT(un != NULL); 11823 ASSERT(bp != NULL); 11824 11825 xp = SD_GET_XBUF(bp); 11826 ASSERT(xp != NULL); 11827 ASSERT(!mutex_owned(SD_MUTEX(un))); 11828 11829 SD_INFO(SD_LOG_IO, un, "sd_uscsi_iodone: entry.\n"); 11830 11831 bp->b_private = xp->xb_private; 11832 11833 mutex_enter(SD_MUTEX(un)); 11834 11835 /* 11836 * Grab time when the cmd completed. 11837 * This is used for determining if the system has been 11838 * idle long enough to make it idle to the PM framework. 11839 * This is for lowering the overhead, and therefore improving 11840 * performance per I/O operation. 11841 */ 11842 un->un_pm_idle_time = ddi_get_time(); 11843 11844 un->un_ncmds_in_driver--; 11845 ASSERT(un->un_ncmds_in_driver >= 0); 11846 SD_INFO(SD_LOG_IO, un, "sd_uscsi_iodone: un_ncmds_in_driver = %ld\n", 11847 un->un_ncmds_in_driver); 11848 11849 mutex_exit(SD_MUTEX(un)); 11850 11851 if (((struct uscsi_cmd *)(xp->xb_pktinfo))->uscsi_rqlen > 11852 SENSE_LENGTH) { 11853 kmem_free(xp, sizeof (struct sd_xbuf) - SENSE_LENGTH + 11854 MAX_SENSE_LENGTH); 11855 } else { 11856 kmem_free(xp, sizeof (struct sd_xbuf)); 11857 } 11858 11859 biodone(bp); 11860 11861 SD_INFO(SD_LOG_IO, un, "sd_uscsi_iodone: exit.\n"); 11862 } 11863 11864 11865 /* 11866 * Function: sd_mapblockaddr_iostart 11867 * 11868 * Description: Verify request lies within the partition limits for 11869 * the indicated minor device. Issue "overrun" buf if 11870 * request would exceed partition range. Converts 11871 * partition-relative block address to absolute. 11872 * 11873 * Context: Can sleep 11874 * 11875 * Issues: This follows what the old code did, in terms of accessing 11876 * some of the partition info in the unit struct without holding 11877 * the mutext. This is a general issue, if the partition info 11878 * can be altered while IO is in progress... as soon as we send 11879 * a buf, its partitioning can be invalid before it gets to the 11880 * device. Probably the right fix is to move partitioning out 11881 * of the driver entirely. 11882 */ 11883 11884 static void 11885 sd_mapblockaddr_iostart(int index, struct sd_lun *un, struct buf *bp) 11886 { 11887 diskaddr_t nblocks; /* #blocks in the given partition */ 11888 daddr_t blocknum; /* Block number specified by the buf */ 11889 size_t requested_nblocks; 11890 size_t available_nblocks; 11891 int partition; 11892 diskaddr_t partition_offset; 11893 struct sd_xbuf *xp; 11894 11895 ASSERT(un != NULL); 11896 ASSERT(bp != NULL); 11897 ASSERT(!mutex_owned(SD_MUTEX(un))); 11898 11899 SD_TRACE(SD_LOG_IO_PARTITION, un, 11900 "sd_mapblockaddr_iostart: entry: buf:0x%p\n", bp); 11901 11902 xp = SD_GET_XBUF(bp); 11903 ASSERT(xp != NULL); 11904 11905 /* 11906 * If the geometry is not indicated as valid, attempt to access 11907 * the unit & verify the geometry/label. This can be the case for 11908 * removable-media devices, of if the device was opened in 11909 * NDELAY/NONBLOCK mode. 
11910 */ 11911 partition = SDPART(bp->b_edev); 11912 11913 if (!SD_IS_VALID_LABEL(un)) { 11914 sd_ssc_t *ssc; 11915 /* 11916 * Initialize sd_ssc_t for internal uscsi commands 11917 * In case of potential porformance issue, we need 11918 * to alloc memory only if there is invalid label 11919 */ 11920 ssc = sd_ssc_init(un); 11921 11922 if (sd_ready_and_valid(ssc, partition) != SD_READY_VALID) { 11923 /* 11924 * For removable devices it is possible to start an 11925 * I/O without a media by opening the device in nodelay 11926 * mode. Also for writable CDs there can be many 11927 * scenarios where there is no geometry yet but volume 11928 * manager is trying to issue a read() just because 11929 * it can see TOC on the CD. So do not print a message 11930 * for removables. 11931 */ 11932 if (!un->un_f_has_removable_media) { 11933 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 11934 "i/o to invalid geometry\n"); 11935 } 11936 bioerror(bp, EIO); 11937 bp->b_resid = bp->b_bcount; 11938 SD_BEGIN_IODONE(index, un, bp); 11939 11940 sd_ssc_fini(ssc); 11941 return; 11942 } 11943 sd_ssc_fini(ssc); 11944 } 11945 11946 nblocks = 0; 11947 (void) cmlb_partinfo(un->un_cmlbhandle, partition, 11948 &nblocks, &partition_offset, NULL, NULL, (void *)SD_PATH_DIRECT); 11949 11950 /* 11951 * blocknum is the starting block number of the request. At this 11952 * point it is still relative to the start of the minor device. 11953 */ 11954 blocknum = xp->xb_blkno; 11955 11956 /* 11957 * Legacy: If the starting block number is one past the last block 11958 * in the partition, do not set B_ERROR in the buf. 11959 */ 11960 if (blocknum == nblocks) { 11961 goto error_exit; 11962 } 11963 11964 /* 11965 * Confirm that the first block of the request lies within the 11966 * partition limits. Also the requested number of bytes must be 11967 * a multiple of the system block size. 11968 */ 11969 if ((blocknum < 0) || (blocknum >= nblocks) || 11970 ((bp->b_bcount & (un->un_sys_blocksize - 1)) != 0)) { 11971 bp->b_flags |= B_ERROR; 11972 goto error_exit; 11973 } 11974 11975 /* 11976 * If the requsted # blocks exceeds the available # blocks, that 11977 * is an overrun of the partition. 11978 */ 11979 requested_nblocks = SD_BYTES2SYSBLOCKS(un, bp->b_bcount); 11980 available_nblocks = (size_t)(nblocks - blocknum); 11981 ASSERT(nblocks >= blocknum); 11982 11983 if (requested_nblocks > available_nblocks) { 11984 /* 11985 * Allocate an "overrun" buf to allow the request to proceed 11986 * for the amount of space available in the partition. The 11987 * amount not transferred will be added into the b_resid 11988 * when the operation is complete. The overrun buf 11989 * replaces the original buf here, and the original buf 11990 * is saved inside the overrun buf, for later use. 11991 */ 11992 size_t resid = SD_SYSBLOCKS2BYTES(un, 11993 (offset_t)(requested_nblocks - available_nblocks)); 11994 size_t count = bp->b_bcount - resid; 11995 /* 11996 * Note: count is an unsigned entity thus it'll NEVER 11997 * be less than 0 so ASSERT the original values are 11998 * correct. 11999 */ 12000 ASSERT(bp->b_bcount >= resid); 12001 12002 bp = sd_bioclone_alloc(bp, count, blocknum, 12003 (int (*)(struct buf *)) sd_mapblockaddr_iodone); 12004 xp = SD_GET_XBUF(bp); /* Update for 'new' bp! */ 12005 ASSERT(xp != NULL); 12006 } 12007 12008 /* At this point there should be no residual for this buf. */ 12009 ASSERT(bp->b_resid == 0); 12010 12011 /* Convert the block number to an absolute address. 
*/ 12012 xp->xb_blkno += partition_offset; 12013 12014 SD_NEXT_IOSTART(index, un, bp); 12015 12016 SD_TRACE(SD_LOG_IO_PARTITION, un, 12017 "sd_mapblockaddr_iostart: exit 0: buf:0x%p\n", bp); 12018 12019 return; 12020 12021 error_exit: 12022 bp->b_resid = bp->b_bcount; 12023 SD_BEGIN_IODONE(index, un, bp); 12024 SD_TRACE(SD_LOG_IO_PARTITION, un, 12025 "sd_mapblockaddr_iostart: exit 1: buf:0x%p\n", bp); 12026 } 12027 12028 12029 /* 12030 * Function: sd_mapblockaddr_iodone 12031 * 12032 * Description: Completion-side processing for partition management. 12033 * 12034 * Context: May be called under interrupt context 12035 */ 12036 12037 static void 12038 sd_mapblockaddr_iodone(int index, struct sd_lun *un, struct buf *bp) 12039 { 12040 /* int partition; */ /* Not used, see below. */ 12041 ASSERT(un != NULL); 12042 ASSERT(bp != NULL); 12043 ASSERT(!mutex_owned(SD_MUTEX(un))); 12044 12045 SD_TRACE(SD_LOG_IO_PARTITION, un, 12046 "sd_mapblockaddr_iodone: entry: buf:0x%p\n", bp); 12047 12048 if (bp->b_iodone == (int (*)(struct buf *)) sd_mapblockaddr_iodone) { 12049 /* 12050 * We have an "overrun" buf to deal with... 12051 */ 12052 struct sd_xbuf *xp; 12053 struct buf *obp; /* ptr to the original buf */ 12054 12055 xp = SD_GET_XBUF(bp); 12056 ASSERT(xp != NULL); 12057 12058 /* Retrieve the pointer to the original buf */ 12059 obp = (struct buf *)xp->xb_private; 12060 ASSERT(obp != NULL); 12061 12062 obp->b_resid = obp->b_bcount - (bp->b_bcount - bp->b_resid); 12063 bioerror(obp, bp->b_error); 12064 12065 sd_bioclone_free(bp); 12066 12067 /* 12068 * Get back the original buf. 12069 * Note that since the restoration of xb_blkno below 12070 * was removed, the sd_xbuf is not needed. 12071 */ 12072 bp = obp; 12073 /* 12074 * xp = SD_GET_XBUF(bp); 12075 * ASSERT(xp != NULL); 12076 */ 12077 } 12078 12079 /* 12080 * Convert xp->xb_blkno back to a minor-device relative value. 12081 * Note: this has been commented out, as it is not needed in the 12082 * current implementation of the driver (ie, since this function 12083 * is at the top of the layering chains, the info will be 12084 * discarded) and it is in the "hot" IO path. 12085 * 12086 * partition = getminor(bp->b_edev) & SDPART_MASK; 12087 * xp->xb_blkno -= un->un_offset[partition]; 12088 */ 12089 12090 SD_NEXT_IODONE(index, un, bp); 12091 12092 SD_TRACE(SD_LOG_IO_PARTITION, un, 12093 "sd_mapblockaddr_iodone: exit: buf:0x%p\n", bp); 12094 } 12095 12096 12097 /* 12098 * Function: sd_mapblocksize_iostart 12099 * 12100 * Description: Convert between system block size (un->un_sys_blocksize) 12101 * and target block size (un->un_tgt_blocksize). 12102 * 12103 * Context: Can sleep to allocate resources. 12104 * 12105 * Assumptions: A higher layer has already performed any partition validation, 12106 * and converted the xp->xb_blkno to an absolute value relative 12107 * to the start of the device. 12108 * 12109 * It is also assumed that the higher layer has implemented 12110 * an "overrun" mechanism for the case where the request would 12111 * read/write beyond the end of a partition. In this case we 12112 * assume (and ASSERT) that bp->b_resid == 0. 12113 * 12114 * Note: The implementation for this routine assumes the target 12115 * block size remains constant between allocation and transport. 12116
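 *
 * Example (illustrative numbers only): with un_sys_blocksize == 512
 * and un_tgt_blocksize == 2048, a 1024-byte request at system block 7
 * maps to first_byte 3584, target start_block 1, end_block 3, and
 * request_bytes 4096. Since 3584 is not a multiple of 2048 the
 * request is unaligned, so a shadow buf is allocated and
 * mbs_copy_offset is set to 1536.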
12116 */ 12117 12118 static void 12119 sd_mapblocksize_iostart(int index, struct sd_lun *un, struct buf *bp) 12120 { 12121 struct sd_mapblocksize_info *bsp; 12122 struct sd_xbuf *xp; 12123 offset_t first_byte; 12124 daddr_t start_block, end_block; 12125 daddr_t request_bytes; 12126 ushort_t is_aligned = FALSE; 12127 12128 ASSERT(un != NULL); 12129 ASSERT(bp != NULL); 12130 ASSERT(!mutex_owned(SD_MUTEX(un))); 12131 ASSERT(bp->b_resid == 0); 12132 12133 SD_TRACE(SD_LOG_IO_RMMEDIA, un, 12134 "sd_mapblocksize_iostart: entry: buf:0x%p\n", bp); 12135 12136 /* 12137 * For a non-writable CD, a write request is an error 12138 */ 12139 if (ISCD(un) && ((bp->b_flags & B_READ) == 0) && 12140 (un->un_f_mmc_writable_media == FALSE)) { 12141 bioerror(bp, EIO); 12142 bp->b_resid = bp->b_bcount; 12143 SD_BEGIN_IODONE(index, un, bp); 12144 return; 12145 } 12146 12147 /* 12148 * We do not need a shadow buf if the device is using 12149 * un->un_sys_blocksize as its block size or if bcount == 0. 12150 * In this case there is no layer-private data block allocated. 12151 */ 12152 if ((un->un_tgt_blocksize == un->un_sys_blocksize) || 12153 (bp->b_bcount == 0)) { 12154 goto done; 12155 } 12156 12157 #if defined(__i386) || defined(__amd64) 12158 /* We do not support non-block-aligned transfers for ROD devices */ 12159 ASSERT(!ISROD(un)); 12160 #endif 12161 12162 xp = SD_GET_XBUF(bp); 12163 ASSERT(xp != NULL); 12164 12165 SD_INFO(SD_LOG_IO_RMMEDIA, un, "sd_mapblocksize_iostart: " 12166 "tgt_blocksize:0x%x sys_blocksize: 0x%x\n", 12167 un->un_tgt_blocksize, un->un_sys_blocksize); 12168 SD_INFO(SD_LOG_IO_RMMEDIA, un, "sd_mapblocksize_iostart: " 12169 "request start block:0x%x\n", xp->xb_blkno); 12170 SD_INFO(SD_LOG_IO_RMMEDIA, un, "sd_mapblocksize_iostart: " 12171 "request len:0x%x\n", bp->b_bcount); 12172 12173 /* 12174 * Allocate the layer-private data area for the mapblocksize layer. 12175 * Layers are allowed to use the xp_private member of the sd_xbuf 12176 * struct to store the pointer to their layer-private data block, but 12177 * each layer also has the responsibility of restoring the prior 12178 * contents of xb_private before returning the buf/xbuf to the 12179 * higher layer that sent it. 12180 * 12181 * Here we save the prior contents of xp->xb_private into the 12182 * bsp->mbs_oprivate field of our layer-private data area. This value 12183 * is restored by sd_mapblocksize_iodone() just prior to freeing up 12184 * the layer-private area and returning the buf/xbuf to the layer 12185 * that sent it. 12186 * 12187 * Note that here we use kmem_zalloc for the allocation as there are 12188 * parts of the mapblocksize code that expect certain fields to be 12189 * zero unless explicitly set to a required value. 12190 */ 12191 bsp = kmem_zalloc(sizeof (struct sd_mapblocksize_info), KM_SLEEP); 12192 bsp->mbs_oprivate = xp->xb_private; 12193 xp->xb_private = bsp; 12194 12195 /* 12196 * This treats the data on the disk (target) as an array of bytes. 12197 * first_byte is the byte offset, from the beginning of the device, 12198 * to the location of the request. This is converted from a 12199 * un->un_sys_blocksize block address to a byte offset, and then back 12200 * to a block address based upon a un->un_tgt_blocksize block size. 12201 * 12202 * xp->xb_blkno should be absolute upon entry into this function, 12203 * but, but it is based upon partitions that use the "system" 12204 * block size. It must be adjusted to reflect the block size of 12205 * the target. 
12206 * 12207 * Note that end_block is actually the block that follows the last 12208 * block of the request, but that's what is needed for the computation. 12209 */ 12210 first_byte = SD_SYSBLOCKS2BYTES(un, (offset_t)xp->xb_blkno); 12211 start_block = xp->xb_blkno = first_byte / un->un_tgt_blocksize; 12212 end_block = (first_byte + bp->b_bcount + un->un_tgt_blocksize - 1) / 12213 un->un_tgt_blocksize; 12214 12215 /* request_bytes is rounded up to a multiple of the target block size */ 12216 request_bytes = (end_block - start_block) * un->un_tgt_blocksize; 12217 12218 /* 12219 * See if the starting address of the request and the request 12220 * length are aligned on a un->un_tgt_blocksize boundary. If aligned 12221 * then we do not need to allocate a shadow buf to handle the request. 12222 */ 12223 if (((first_byte % un->un_tgt_blocksize) == 0) && 12224 ((bp->b_bcount % un->un_tgt_blocksize) == 0)) { 12225 is_aligned = TRUE; 12226 } 12227 12228 if ((bp->b_flags & B_READ) == 0) { 12229 /* 12230 * Lock the range for a write operation. An aligned request is 12231 * considered a simple write; otherwise the request must be a 12232 * read-modify-write. 12233 */ 12234 bsp->mbs_wmp = sd_range_lock(un, start_block, end_block - 1, 12235 (is_aligned == TRUE) ? SD_WTYPE_SIMPLE : SD_WTYPE_RMW); 12236 } 12237 12238 /* 12239 * Alloc a shadow buf if the request is not aligned. Also, this is 12240 * where the READ command is generated for a read-modify-write. (The 12241 * write phase is deferred until after the read completes.) 12242 */ 12243 if (is_aligned == FALSE) { 12244 12245 struct sd_mapblocksize_info *shadow_bsp; 12246 struct sd_xbuf *shadow_xp; 12247 struct buf *shadow_bp; 12248 12249 /* 12250 * Allocate the shadow buf and it associated xbuf. Note that 12251 * after this call the xb_blkno value in both the original 12252 * buf's sd_xbuf _and_ the shadow buf's sd_xbuf will be the 12253 * same: absolute relative to the start of the device, and 12254 * adjusted for the target block size. The b_blkno in the 12255 * shadow buf will also be set to this value. We should never 12256 * change b_blkno in the original bp however. 12257 * 12258 * Note also that the shadow buf will always need to be a 12259 * READ command, regardless of whether the incoming command 12260 * is a READ or a WRITE. 12261 */ 12262 shadow_bp = sd_shadow_buf_alloc(bp, request_bytes, B_READ, 12263 xp->xb_blkno, 12264 (int (*)(struct buf *)) sd_mapblocksize_iodone); 12265 12266 shadow_xp = SD_GET_XBUF(shadow_bp); 12267 12268 /* 12269 * Allocate the layer-private data for the shadow buf. 12270 * (No need to preserve xb_private in the shadow xbuf.) 12271 */ 12272 shadow_xp->xb_private = shadow_bsp = 12273 kmem_zalloc(sizeof (struct sd_mapblocksize_info), KM_SLEEP); 12274 12275 /* 12276 * bsp->mbs_copy_offset is used later by sd_mapblocksize_iodone 12277 * to figure out where the start of the user data is (based upon 12278 * the system block size) in the data returned by the READ 12279 * command (which will be based upon the target blocksize). Note 12280 * that this is only really used if the request is unaligned. 
12281 */ 12282 bsp->mbs_copy_offset = (ssize_t)(first_byte - 12283 ((offset_t)xp->xb_blkno * un->un_tgt_blocksize)); 12284 ASSERT((bsp->mbs_copy_offset >= 0) && 12285 (bsp->mbs_copy_offset < un->un_tgt_blocksize)); 12286 12287 shadow_bsp->mbs_copy_offset = bsp->mbs_copy_offset; 12288 12289 shadow_bsp->mbs_layer_index = bsp->mbs_layer_index = index; 12290 12291 /* Transfer the wmap (if any) to the shadow buf */ 12292 shadow_bsp->mbs_wmp = bsp->mbs_wmp; 12293 bsp->mbs_wmp = NULL; 12294 12295 /* 12296 * The shadow buf goes on from here in place of the 12297 * original buf. 12298 */ 12299 shadow_bsp->mbs_orig_bp = bp; 12300 bp = shadow_bp; 12301 } 12302 12303 SD_INFO(SD_LOG_IO_RMMEDIA, un, 12304 "sd_mapblocksize_iostart: tgt start block:0x%x\n", xp->xb_blkno); 12305 SD_INFO(SD_LOG_IO_RMMEDIA, un, 12306 "sd_mapblocksize_iostart: tgt request len:0x%x\n", 12307 request_bytes); 12308 SD_INFO(SD_LOG_IO_RMMEDIA, un, 12309 "sd_mapblocksize_iostart: shadow buf:0x%x\n", bp); 12310 12311 done: 12312 SD_NEXT_IOSTART(index, un, bp); 12313 12314 SD_TRACE(SD_LOG_IO_RMMEDIA, un, 12315 "sd_mapblocksize_iostart: exit: buf:0x%p\n", bp); 12316 } 12317 12318 12319 /* 12320 * Function: sd_mapblocksize_iodone 12321 * 12322 * Description: Completion side processing for block-size mapping. 12323 * 12324 * Context: May be called under interrupt context 12325 */ 12326 12327 static void 12328 sd_mapblocksize_iodone(int index, struct sd_lun *un, struct buf *bp) 12329 { 12330 struct sd_mapblocksize_info *bsp; 12331 struct sd_xbuf *xp; 12332 struct sd_xbuf *orig_xp; /* sd_xbuf for the original buf */ 12333 struct buf *orig_bp; /* ptr to the original buf */ 12334 offset_t shadow_end; 12335 offset_t request_end; 12336 offset_t shadow_start; 12337 ssize_t copy_offset; 12338 size_t copy_length; 12339 size_t shortfall; 12340 uint_t is_write; /* TRUE if this bp is a WRITE */ 12341 uint_t has_wmap; /* TRUE is this bp has a wmap */ 12342 12343 ASSERT(un != NULL); 12344 ASSERT(bp != NULL); 12345 12346 SD_TRACE(SD_LOG_IO_RMMEDIA, un, 12347 "sd_mapblocksize_iodone: entry: buf:0x%p\n", bp); 12348 12349 /* 12350 * There is no shadow buf or layer-private data if the target is 12351 * using un->un_sys_blocksize as its block size or if bcount == 0. 12352 */ 12353 if ((un->un_tgt_blocksize == un->un_sys_blocksize) || 12354 (bp->b_bcount == 0)) { 12355 goto exit; 12356 } 12357 12358 xp = SD_GET_XBUF(bp); 12359 ASSERT(xp != NULL); 12360 12361 /* Retrieve the pointer to the layer-private data area from the xbuf. */ 12362 bsp = xp->xb_private; 12363 12364 is_write = ((bp->b_flags & B_READ) == 0) ? TRUE : FALSE; 12365 has_wmap = (bsp->mbs_wmp != NULL) ? TRUE : FALSE; 12366 12367 if (is_write) { 12368 /* 12369 * For a WRITE request we must free up the block range that 12370 * we have locked up. This holds regardless of whether this is 12371 * an aligned write request or a read-modify-write request. 12372 */ 12373 sd_range_unlock(un, bsp->mbs_wmp); 12374 bsp->mbs_wmp = NULL; 12375 } 12376 12377 if ((bp->b_iodone != (int(*)(struct buf *))sd_mapblocksize_iodone)) { 12378 /* 12379 * An aligned read or write command will have no shadow buf; 12380 * there is not much else to do with it. 12381 */ 12382 goto done; 12383 } 12384 12385 orig_bp = bsp->mbs_orig_bp; 12386 ASSERT(orig_bp != NULL); 12387 orig_xp = SD_GET_XBUF(orig_bp); 12388 ASSERT(orig_xp != NULL); 12389 ASSERT(!mutex_owned(SD_MUTEX(un))); 12390 12391 if (!is_write && has_wmap) { 12392 /* 12393 * A READ with a wmap means this is the READ phase of a 12394 * read-modify-write. 
If an error occurred on the READ then 12395 * we do not proceed with the WRITE phase or copy any data. 12396 * Just release the write maps and return with an error. 12397 */ 12398 if ((bp->b_resid != 0) || (bp->b_error != 0)) { 12399 orig_bp->b_resid = orig_bp->b_bcount; 12400 bioerror(orig_bp, bp->b_error); 12401 sd_range_unlock(un, bsp->mbs_wmp); 12402 goto freebuf_done; 12403 } 12404 } 12405 12406 /* 12407 * Here is where we set up to copy the data from the shadow buf 12408 * into the space associated with the original buf. 12409 * 12410 * To deal with the conversion between block sizes, these 12411 * computations treat the data as an array of bytes, with the 12412 * first byte (byte 0) corresponding to the first byte in the 12413 * first block on the disk. 12414 */ 12415 12416 /* 12417 * shadow_start and shadow_len indicate the location and size of 12418 * the data returned with the shadow IO request. 12419 */ 12420 shadow_start = SD_TGTBLOCKS2BYTES(un, (offset_t)xp->xb_blkno); 12421 shadow_end = shadow_start + bp->b_bcount - bp->b_resid; 12422 12423 /* 12424 * copy_offset gives the offset (in bytes) from the start of the first 12425 * block of the READ request to the beginning of the data. We retrieve 12426 * this value from the layer-private data of the shadow xbuf, where it 12427 * was saved by sd_mapblocksize_iostart(). copy_length gives the amount of 12428 * data to be copied (in bytes). 12429 */ 12430 copy_offset = bsp->mbs_copy_offset; 12431 ASSERT((copy_offset >= 0) && (copy_offset < un->un_tgt_blocksize)); 12432 copy_length = orig_bp->b_bcount; 12433 request_end = shadow_start + copy_offset + orig_bp->b_bcount; 12434 12435 /* 12436 * Set up the resid and error fields of orig_bp as appropriate. 12437 */ 12438 if (shadow_end >= request_end) { 12439 /* We got all the requested data; set resid to zero */ 12440 orig_bp->b_resid = 0; 12441 } else { 12442 /* 12443 * We failed to get enough data to fully satisfy the original 12444 * request. Just copy back whatever data we got and set 12445 * up the residual and error code as required. 12446 * 12447 * 'shortfall' is the amount by which the data received with the 12448 * shadow buf has "fallen short" of the requested amount. 12449 */ 12450 shortfall = (size_t)(request_end - shadow_end); 12451 12452 if (shortfall > orig_bp->b_bcount) { 12453 /* 12454 * We did not get enough data to even partially 12455 * fulfill the original request. The residual is 12456 * equal to the amount requested. 12457 */ 12458 orig_bp->b_resid = orig_bp->b_bcount; 12459 } else { 12460 /* 12461 * We did not get all the data that we requested 12462 * from the device, but we will try to return what 12463 * portion we did get. 12464 */ 12465 orig_bp->b_resid = shortfall; 12466 } 12467 ASSERT(copy_length >= orig_bp->b_resid); 12468 copy_length -= orig_bp->b_resid; 12469 } 12470 12471 /* Propagate the error code from the shadow buf to the original buf */ 12472 bioerror(orig_bp, bp->b_error); 12473 12474 if (is_write) { 12475 goto freebuf_done; /* No data copying for a WRITE */ 12476 } 12477 12478 if (has_wmap) { 12479 /* 12480 * This is a READ command from the READ phase of a 12481 * read-modify-write request. We have to copy the data given 12482 * by the user OVER the data returned by the READ command, 12483 * then convert the command from a READ to a WRITE and send 12484 * it back to the target. 12485
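 * Continuing the illustrative example in the
 * sd_mapblocksize_iostart() header comment, the 1024 user bytes
 * would overwrite bytes 1536..2559 of the 4096-byte shadow buffer
 * before it is reissued as a WRITE.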
12485 */ 12486 bcopy(orig_bp->b_un.b_addr, bp->b_un.b_addr + copy_offset, 12487 copy_length); 12488 12489 bp->b_flags &= ~((int)B_READ); /* Convert to a WRITE */ 12490 12491 /* 12492 * Dispatch the WRITE command to the taskq thread, which 12493 * will in turn send the command to the target. When the 12494 * WRITE command completes, we (sd_mapblocksize_iodone()) 12495 * will get called again as part of the iodone chain 12496 * processing for it. Note that we will still be dealing 12497 * with the shadow buf at that point. 12498 */ 12499 if (taskq_dispatch(sd_wmr_tq, sd_read_modify_write_task, bp, 12500 KM_NOSLEEP) != 0) { 12501 /* 12502 * Dispatch was successful so we are done. Return 12503 * without going any higher up the iodone chain. Do 12504 * not free up any layer-private data until after the 12505 * WRITE completes. 12506 */ 12507 return; 12508 } 12509 12510 /* 12511 * Dispatch of the WRITE command failed; set up the error 12512 * condition and send this IO back up the iodone chain. 12513 */ 12514 bioerror(orig_bp, EIO); 12515 orig_bp->b_resid = orig_bp->b_bcount; 12516 12517 } else { 12518 /* 12519 * This is a regular READ request (ie, not a RMW). Copy the 12520 * data from the shadow buf into the original buf. The 12521 * copy_offset compensates for any "misalignment" between the 12522 * shadow buf (with its un->un_tgt_blocksize blocks) and the 12523 * original buf (with its un->un_sys_blocksize blocks). 12524 */ 12525 bcopy(bp->b_un.b_addr + copy_offset, orig_bp->b_un.b_addr, 12526 copy_length); 12527 } 12528 12529 freebuf_done: 12530 12531 /* 12532 * At this point we still have both the shadow buf AND the original 12533 * buf to deal with, as well as the layer-private data area in each. 12534 * Local variables are as follows: 12535 * 12536 * bp -- points to shadow buf 12537 * xp -- points to xbuf of shadow buf 12538 * bsp -- points to layer-private data area of shadow buf 12539 * orig_bp -- points to original buf 12540 * 12541 * First free the shadow buf and its associated xbuf, then free the 12542 * layer-private data area from the shadow buf. There is no need to 12543 * restore xb_private in the shadow xbuf. 12544 */ 12545 sd_shadow_buf_free(bp); 12546 kmem_free(bsp, sizeof (struct sd_mapblocksize_info)); 12547 12548 /* 12549 * Now update the local variables to point to the original buf, xbuf, 12550 * and layer-private area. 12551 */ 12552 bp = orig_bp; 12553 xp = SD_GET_XBUF(bp); 12554 ASSERT(xp != NULL); 12555 ASSERT(xp == orig_xp); 12556 bsp = xp->xb_private; 12557 ASSERT(bsp != NULL); 12558 12559 done: 12560 /* 12561 * Restore xb_private to whatever it was set to by the next higher 12562 * layer in the chain, then free the layer-private data area. 12563 */ 12564 xp->xb_private = bsp->mbs_oprivate; 12565 kmem_free(bsp, sizeof (struct sd_mapblocksize_info)); 12566 12567 exit: 12568 SD_TRACE(SD_LOG_IO_RMMEDIA, SD_GET_UN(bp), 12569 "sd_mapblocksize_iodone: calling SD_NEXT_IODONE: buf:0x%p\n", bp); 12570 12571 SD_NEXT_IODONE(index, un, bp); 12572 } 12573 12574 12575 /* 12576 * Function: sd_checksum_iostart 12577 * 12578 * Description: A stub function for a layer that's currently not used. 12579 * For now just a placeholder. 
12580 * 12581 * Context: Kernel thread context 12582 */ 12583 12584 static void 12585 sd_checksum_iostart(int index, struct sd_lun *un, struct buf *bp) 12586 { 12587 ASSERT(un != NULL); 12588 ASSERT(bp != NULL); 12589 ASSERT(!mutex_owned(SD_MUTEX(un))); 12590 SD_NEXT_IOSTART(index, un, bp); 12591 } 12592 12593 12594 /* 12595 * Function: sd_checksum_iodone 12596 * 12597 * Description: A stub function for a layer that's currently not used. 12598 * For now just a placeholder. 12599 * 12600 * Context: May be called under interrupt context 12601 */ 12602 12603 static void 12604 sd_checksum_iodone(int index, struct sd_lun *un, struct buf *bp) 12605 { 12606 ASSERT(un != NULL); 12607 ASSERT(bp != NULL); 12608 ASSERT(!mutex_owned(SD_MUTEX(un))); 12609 SD_NEXT_IODONE(index, un, bp); 12610 } 12611 12612 12613 /* 12614 * Function: sd_checksum_uscsi_iostart 12615 * 12616 * Description: A stub function for a layer that's currently not used. 12617 * For now just a placeholder. 12618 * 12619 * Context: Kernel thread context 12620 */ 12621 12622 static void 12623 sd_checksum_uscsi_iostart(int index, struct sd_lun *un, struct buf *bp) 12624 { 12625 ASSERT(un != NULL); 12626 ASSERT(bp != NULL); 12627 ASSERT(!mutex_owned(SD_MUTEX(un))); 12628 SD_NEXT_IOSTART(index, un, bp); 12629 } 12630 12631 12632 /* 12633 * Function: sd_checksum_uscsi_iodone 12634 * 12635 * Description: A stub function for a layer that's currently not used. 12636 * For now just a placeholder. 12637 * 12638 * Context: May be called under interrupt context 12639 */ 12640 12641 static void 12642 sd_checksum_uscsi_iodone(int index, struct sd_lun *un, struct buf *bp) 12643 { 12644 ASSERT(un != NULL); 12645 ASSERT(bp != NULL); 12646 ASSERT(!mutex_owned(SD_MUTEX(un))); 12647 SD_NEXT_IODONE(index, un, bp); 12648 } 12649 12650 12651 /* 12652 * Function: sd_pm_iostart 12653 * 12654 * Description: iostart-side routine for Power mangement. 12655 * 12656 * Context: Kernel thread context 12657 */ 12658 12659 static void 12660 sd_pm_iostart(int index, struct sd_lun *un, struct buf *bp) 12661 { 12662 ASSERT(un != NULL); 12663 ASSERT(bp != NULL); 12664 ASSERT(!mutex_owned(SD_MUTEX(un))); 12665 ASSERT(!mutex_owned(&un->un_pm_mutex)); 12666 12667 SD_TRACE(SD_LOG_IO_PM, un, "sd_pm_iostart: entry\n"); 12668 12669 if (sd_pm_entry(un) != DDI_SUCCESS) { 12670 /* 12671 * Set up to return the failed buf back up the 'iodone' 12672 * side of the calling chain. 12673 */ 12674 bioerror(bp, EIO); 12675 bp->b_resid = bp->b_bcount; 12676 12677 SD_BEGIN_IODONE(index, un, bp); 12678 12679 SD_TRACE(SD_LOG_IO_PM, un, "sd_pm_iostart: exit\n"); 12680 return; 12681 } 12682 12683 SD_NEXT_IOSTART(index, un, bp); 12684 12685 SD_TRACE(SD_LOG_IO_PM, un, "sd_pm_iostart: exit\n"); 12686 } 12687 12688 12689 /* 12690 * Function: sd_pm_iodone 12691 * 12692 * Description: iodone-side routine for power mangement. 12693 * 12694 * Context: may be called from interrupt context 12695 */ 12696 12697 static void 12698 sd_pm_iodone(int index, struct sd_lun *un, struct buf *bp) 12699 { 12700 ASSERT(un != NULL); 12701 ASSERT(bp != NULL); 12702 ASSERT(!mutex_owned(&un->un_pm_mutex)); 12703 12704 SD_TRACE(SD_LOG_IO_PM, un, "sd_pm_iodone: entry\n"); 12705 12706 /* 12707 * After attach the following flag is only read, so don't 12708 * take the penalty of acquiring a mutex for it. 
12709 */ 12710 if (un->un_f_pm_is_enabled == TRUE) { 12711 sd_pm_exit(un); 12712 } 12713 12714 SD_NEXT_IODONE(index, un, bp); 12715 12716 SD_TRACE(SD_LOG_IO_PM, un, "sd_pm_iodone: exit\n"); 12717 } 12718 12719 12720 /* 12721 * Function: sd_core_iostart 12722 * 12723 * Description: Primary driver function for enqueuing buf(9S) structs from 12724 * the system and initiating IO to the target device 12725 * 12726 * Context: Kernel thread context. Can sleep. 12727 * 12728 * Assumptions: - The given xp->xb_blkno is absolute 12729 * (ie, relative to the start of the device). 12730 * - The IO is to be done using the native blocksize of 12731 * the device, as specified in un->un_tgt_blocksize. 12732 */ 12733 /* ARGSUSED */ 12734 static void 12735 sd_core_iostart(int index, struct sd_lun *un, struct buf *bp) 12736 { 12737 struct sd_xbuf *xp; 12738 12739 ASSERT(un != NULL); 12740 ASSERT(bp != NULL); 12741 ASSERT(!mutex_owned(SD_MUTEX(un))); 12742 ASSERT(bp->b_resid == 0); 12743 12744 SD_TRACE(SD_LOG_IO_CORE, un, "sd_core_iostart: entry: bp:0x%p\n", bp); 12745 12746 xp = SD_GET_XBUF(bp); 12747 ASSERT(xp != NULL); 12748 12749 mutex_enter(SD_MUTEX(un)); 12750 12751 /* 12752 * If we are currently in the failfast state, fail any new IO 12753 * that has B_FAILFAST set, then return. 12754 */ 12755 if ((bp->b_flags & B_FAILFAST) && 12756 (un->un_failfast_state == SD_FAILFAST_ACTIVE)) { 12757 mutex_exit(SD_MUTEX(un)); 12758 bioerror(bp, EIO); 12759 bp->b_resid = bp->b_bcount; 12760 SD_BEGIN_IODONE(index, un, bp); 12761 return; 12762 } 12763 12764 if (SD_IS_DIRECT_PRIORITY(xp)) { 12765 /* 12766 * Priority command -- transport it immediately. 12767 * 12768 * Note: We may want to assert that USCSI_DIAGNOSE is set, 12769 * because all direct priority commands should be associated 12770 * with error recovery actions which we don't want to retry. 12771 */ 12772 sd_start_cmds(un, bp); 12773 } else { 12774 /* 12775 * Normal command -- add it to the wait queue, then start 12776 * transporting commands from the wait queue. 12777 */ 12778 sd_add_buf_to_waitq(un, bp); 12779 SD_UPDATE_KSTATS(un, kstat_waitq_enter, bp); 12780 sd_start_cmds(un, NULL); 12781 } 12782 12783 mutex_exit(SD_MUTEX(un)); 12784 12785 SD_TRACE(SD_LOG_IO_CORE, un, "sd_core_iostart: exit: bp:0x%p\n", bp); 12786 } 12787 12788 12789 /* 12790 * Function: sd_init_cdb_limits 12791 * 12792 * Description: This is to handle scsi_pkt initialization differences 12793 * between the driver platforms. 12794 * 12795 * Legacy behaviors: 12796 * 12797 * If the block number or the sector count exceeds the 12798 * capabilities of a Group 0 command, shift over to a 12799 * Group 1 command. We don't blindly use Group 1 12800 * commands because a) some drives (CDC Wren IVs) get a 12801 * bit confused, and b) there is probably a fair amount 12802 * of speed difference for a target to receive and decode 12803 * a 10 byte command instead of a 6 byte command. 12804 * 12805 * The xfer time difference of 6 vs 10 byte CDBs is 12806 * still significant so this code is still worthwhile. 12807 * 10 byte CDBs are very inefficient with the fas HBA driver 12808 * and older disks. Each CDB byte took 1 usec with some 12809 * popular disks. 
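 *
 * For illustration (standard SCSI CDB formats, not driver-private
 * values): a Group 0 READ(6) carries only a 21-bit LBA and an
 * 8-bit count, so a request beyond LBA 0x1FFFFF or longer than
 * 256 blocks must use at least a Group 1 READ(10), which carries
 * a 32-bit LBA and a 16-bit count.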
12810 * 12811 * Context: Must be called at attach time 12812 */ 12813 12814 static void 12815 sd_init_cdb_limits(struct sd_lun *un) 12816 { 12817 int hba_cdb_limit; 12818 12819 /* 12820 * Use CDB_GROUP1 commands for most devices except for 12821 * parallel SCSI fixed drives in which case we get better 12822 * performance using CDB_GROUP0 commands (where applicable). 12823 */ 12824 un->un_mincdb = SD_CDB_GROUP1; 12825 #if !defined(__fibre) 12826 if (!un->un_f_is_fibre && !un->un_f_cfg_is_atapi && !ISROD(un) && 12827 !un->un_f_has_removable_media) { 12828 un->un_mincdb = SD_CDB_GROUP0; 12829 } 12830 #endif 12831 12832 /* 12833 * Try to read the max-cdb-length supported by HBA. 12834 */ 12835 un->un_max_hba_cdb = scsi_ifgetcap(SD_ADDRESS(un), "max-cdb-length", 1); 12836 if (0 >= un->un_max_hba_cdb) { 12837 un->un_max_hba_cdb = CDB_GROUP4; 12838 hba_cdb_limit = SD_CDB_GROUP4; 12839 } else if (0 < un->un_max_hba_cdb && 12840 un->un_max_hba_cdb < CDB_GROUP1) { 12841 hba_cdb_limit = SD_CDB_GROUP0; 12842 } else if (CDB_GROUP1 <= un->un_max_hba_cdb && 12843 un->un_max_hba_cdb < CDB_GROUP5) { 12844 hba_cdb_limit = SD_CDB_GROUP1; 12845 } else if (CDB_GROUP5 <= un->un_max_hba_cdb && 12846 un->un_max_hba_cdb < CDB_GROUP4) { 12847 hba_cdb_limit = SD_CDB_GROUP5; 12848 } else { 12849 hba_cdb_limit = SD_CDB_GROUP4; 12850 } 12851 12852 /* 12853 * Use CDB_GROUP5 commands for removable devices. Use CDB_GROUP4 12854 * commands for fixed disks unless we are building for a 32 bit 12855 * kernel. 12856 */ 12857 #ifdef _LP64 12858 un->un_maxcdb = (un->un_f_has_removable_media) ? SD_CDB_GROUP5 : 12859 min(hba_cdb_limit, SD_CDB_GROUP4); 12860 #else 12861 un->un_maxcdb = (un->un_f_has_removable_media) ? SD_CDB_GROUP5 : 12862 min(hba_cdb_limit, SD_CDB_GROUP1); 12863 #endif 12864 12865 un->un_status_len = (int)((un->un_f_arq_enabled == TRUE) 12866 ? sizeof (struct scsi_arq_status) : 1); 12867 un->un_cmd_timeout = (ushort_t)sd_io_time; 12868 un->un_uscsi_timeout = ((ISCD(un)) ? 2 : 1) * un->un_cmd_timeout; 12869 } 12870 12871 12872 /* 12873 * Function: sd_initpkt_for_buf 12874 * 12875 * Description: Allocate and initialize for transport a scsi_pkt struct, 12876 * based upon the info specified in the given buf struct. 12877 * 12878 * Assumes the xb_blkno in the request is absolute (ie, 12879 * relative to the start of the device, NOT the partition!). 12880 * Also assumes that the request is using the native block 12881 * size of the device (as returned by the READ CAPACITY 12882 * command). 12883 * 12884 * Return Code: SD_PKT_ALLOC_SUCCESS 12885 * SD_PKT_ALLOC_FAILURE 12886 * SD_PKT_ALLOC_FAILURE_NO_DMA 12887 * SD_PKT_ALLOC_FAILURE_CDB_TOO_SMALL 12888 * 12889 * Context: Kernel thread and may be called from software interrupt context 12890 * as part of a sdrunout callback.
This function may not block or 12891 * call routines that block 12892 */ 12893 12894 static int 12895 sd_initpkt_for_buf(struct buf *bp, struct scsi_pkt **pktpp) 12896 { 12897 struct sd_xbuf *xp; 12898 struct scsi_pkt *pktp = NULL; 12899 struct sd_lun *un; 12900 size_t blockcount; 12901 daddr_t startblock; 12902 int rval; 12903 int cmd_flags; 12904 12905 ASSERT(bp != NULL); 12906 ASSERT(pktpp != NULL); 12907 xp = SD_GET_XBUF(bp); 12908 ASSERT(xp != NULL); 12909 un = SD_GET_UN(bp); 12910 ASSERT(un != NULL); 12911 ASSERT(mutex_owned(SD_MUTEX(un))); 12912 ASSERT(bp->b_resid == 0); 12913 12914 SD_TRACE(SD_LOG_IO_CORE, un, 12915 "sd_initpkt_for_buf: entry: buf:0x%p\n", bp); 12916 12917 mutex_exit(SD_MUTEX(un)); 12918 12919 #if defined(__i386) || defined(__amd64) /* DMAFREE for x86 only */ 12920 if (xp->xb_pkt_flags & SD_XB_DMA_FREED) { 12921 /* 12922 * Already have a scsi_pkt -- just need DMA resources. 12923 * We must recompute the CDB in case the mapping returns 12924 * a nonzero pkt_resid. 12925 * Note: if this is a portion of a PKT_DMA_PARTIAL transfer 12926 * that is being retried, the unmap/remap of the DMA resources 12927 * will result in the entire transfer starting over again 12928 * from the very first block. 12929 */ 12930 ASSERT(xp->xb_pktp != NULL); 12931 pktp = xp->xb_pktp; 12932 } else { 12933 pktp = NULL; 12934 } 12935 #endif /* __i386 || __amd64 */ 12936 12937 startblock = xp->xb_blkno; /* Absolute block num. */ 12938 blockcount = SD_BYTES2TGTBLOCKS(un, bp->b_bcount); 12939 12940 cmd_flags = un->un_pkt_flags | (xp->xb_pkt_flags & SD_XB_INITPKT_MASK); 12941 12942 /* 12943 * sd_setup_rw_pkt will determine the appropriate CDB group to use, 12944 * call scsi_init_pkt, and build the CDB. 12945 */ 12946 rval = sd_setup_rw_pkt(un, &pktp, bp, 12947 cmd_flags, sdrunout, (caddr_t)un, 12948 startblock, blockcount); 12949 12950 if (rval == 0) { 12951 /* 12952 * Success. 12953 * 12954 * If partial DMA is being used and required for this transfer, 12955 * set it up here. 12956 */ 12957 if ((un->un_pkt_flags & PKT_DMA_PARTIAL) != 0 && 12958 (pktp->pkt_resid != 0)) { 12959 12960 /* 12961 * Save the CDB length and pkt_resid for the 12962 * next xfer 12963 */ 12964 xp->xb_dma_resid = pktp->pkt_resid; 12965 12966 /* rezero resid */ 12967 pktp->pkt_resid = 0; 12968 12969 } else { 12970 xp->xb_dma_resid = 0; 12971 } 12972 12973 pktp->pkt_flags = un->un_tagflags; 12974 pktp->pkt_time = un->un_cmd_timeout; 12975 pktp->pkt_comp = sdintr; 12976 12977 pktp->pkt_private = bp; 12978 *pktpp = pktp; 12979 12980 SD_TRACE(SD_LOG_IO_CORE, un, 12981 "sd_initpkt_for_buf: exit: buf:0x%p\n", bp); 12982 12983 #if defined(__i386) || defined(__amd64) /* DMAFREE for x86 only */ 12984 xp->xb_pkt_flags &= ~SD_XB_DMA_FREED; 12985 #endif 12986 12987 mutex_enter(SD_MUTEX(un)); 12988 return (SD_PKT_ALLOC_SUCCESS); 12989 12990 } 12991 12992 /* 12993 * SD_PKT_ALLOC_FAILURE is the only expected failure code 12994 * from sd_setup_rw_pkt. 12995 */ 12996 ASSERT(rval == SD_PKT_ALLOC_FAILURE); 12997 12998 if (rval == SD_PKT_ALLOC_FAILURE) { 12999 *pktpp = NULL; 13000 /* 13001 * Set the driver state to RWAIT to indicate the driver 13002 * is waiting on resource allocations. The driver will not 13003 * suspend, pm_suspend, or detach while the state is RWAIT. 13004 */ 13005 mutex_enter(SD_MUTEX(un)); 13006 New_state(un, SD_STATE_RWAIT); 13007 13008 SD_ERROR(SD_LOG_IO_CORE, un, 13009 "sd_initpkt_for_buf: No pktp.
exit bp:0x%p\n", bp); 13010 13011 if ((bp->b_flags & B_ERROR) != 0) { 13012 return (SD_PKT_ALLOC_FAILURE_NO_DMA); 13013 } 13014 return (SD_PKT_ALLOC_FAILURE); 13015 } else { 13016 /* 13017 * PKT_ALLOC_FAILURE_CDB_TOO_SMALL 13018 * 13019 * This should never happen. Maybe someone messed with the 13020 * kernel's minphys? 13021 */ 13022 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 13023 "Request rejected: too large for CDB: " 13024 "lba:0x%08lx len:0x%08lx\n", startblock, blockcount); 13025 SD_ERROR(SD_LOG_IO_CORE, un, 13026 "sd_initpkt_for_buf: No cp. exit bp:0x%p\n", bp); 13027 mutex_enter(SD_MUTEX(un)); 13028 return (SD_PKT_ALLOC_FAILURE_CDB_TOO_SMALL); 13029 13030 } 13031 } 13032 13033 13034 /* 13035 * Function: sd_destroypkt_for_buf 13036 * 13037 * Description: Free the scsi_pkt(9S) for the given bp (buf IO processing). 13038 * 13039 * Context: Kernel thread or interrupt context 13040 */ 13041 13042 static void 13043 sd_destroypkt_for_buf(struct buf *bp) 13044 { 13045 ASSERT(bp != NULL); 13046 ASSERT(SD_GET_UN(bp) != NULL); 13047 13048 SD_TRACE(SD_LOG_IO_CORE, SD_GET_UN(bp), 13049 "sd_destroypkt_for_buf: entry: buf:0x%p\n", bp); 13050 13051 ASSERT(SD_GET_PKTP(bp) != NULL); 13052 scsi_destroy_pkt(SD_GET_PKTP(bp)); 13053 13054 SD_TRACE(SD_LOG_IO_CORE, SD_GET_UN(bp), 13055 "sd_destroypkt_for_buf: exit: buf:0x%p\n", bp); 13056 } 13057 13058 /* 13059 * Function: sd_setup_rw_pkt 13060 * 13061 * Description: Determines appropriate CDB group for the requested LBA 13062 * and transfer length, calls scsi_init_pkt, and builds 13063 * the CDB. Do not use for partial DMA transfers except 13064 * for the initial transfer since the CDB size must 13065 * remain constant. 13066 * 13067 * Context: Kernel thread and may be called from software interrupt 13068 * context as part of a sdrunout callback. This function may not 13069 * block or call routines that block 13070 */ 13071 13072 13073 int 13074 sd_setup_rw_pkt(struct sd_lun *un, 13075 struct scsi_pkt **pktpp, struct buf *bp, int flags, 13076 int (*callback)(caddr_t), caddr_t callback_arg, 13077 diskaddr_t lba, uint32_t blockcount) 13078 { 13079 struct scsi_pkt *return_pktp; 13080 union scsi_cdb *cdbp; 13081 struct sd_cdbinfo *cp = NULL; 13082 int i; 13083 13084 /* 13085 * See which size CDB to use, based upon the request. 13086 */ 13087 for (i = un->un_mincdb; i <= un->un_maxcdb; i++) { 13088 13089 /* 13090 * Check lba and block count against sd_cdbtab limits. 13091 * In the partial DMA case, we have to use the same size 13092 * CDB for all the transfers. Check lba + blockcount 13093 * against the max LBA so we know that segment of the 13094 * transfer can use the CDB we select. 13095 */ 13096 if ((lba + blockcount - 1 <= sd_cdbtab[i].sc_maxlba) && 13097 (blockcount <= sd_cdbtab[i].sc_maxlen)) { 13098 13099 /* 13100 * The command will fit into the CDB type 13101 * specified by sd_cdbtab[i]. 13102 */ 13103 cp = sd_cdbtab + i; 13104 13105 /* 13106 * Call scsi_init_pkt so we can fill in the 13107 * CDB. 13108 */ 13109 return_pktp = scsi_init_pkt(SD_ADDRESS(un), *pktpp, 13110 bp, cp->sc_grpcode, un->un_status_len, 0, 13111 flags, callback, callback_arg); 13112 13113 if (return_pktp != NULL) { 13114 13115 /* 13116 * Return new value of pkt 13117 */ 13118 *pktpp = return_pktp; 13119 13120 /* 13121 * To be safe, zero the CDB insuring there is 13122 * no leftover data from a previous command. 
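 *
 * (Descriptive note: cp->sc_grpcode doubles as the CDB length in
 * bytes -- 6, 10, 12 or 16 -- so the bzero below clears exactly
 * the bytes that the FORMG*() macros then fill in.)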
13123 */ 13124 bzero(return_pktp->pkt_cdbp, cp->sc_grpcode); 13125 13126 /* 13127 * Handle partial DMA mapping 13128 */ 13129 if (return_pktp->pkt_resid != 0) { 13130 13131 /* 13132 * Not going to xfer as many blocks as 13133 * originally expected 13134 */ 13135 blockcount -= 13136 SD_BYTES2TGTBLOCKS(un, 13137 return_pktp->pkt_resid); 13138 } 13139 13140 cdbp = (union scsi_cdb *)return_pktp->pkt_cdbp; 13141 13142 /* 13143 * Set command byte based on the CDB 13144 * type we matched. 13145 */ 13146 cdbp->scc_cmd = cp->sc_grpmask | 13147 ((bp->b_flags & B_READ) ? 13148 SCMD_READ : SCMD_WRITE); 13149 13150 SD_FILL_SCSI1_LUN(un, return_pktp); 13151 13152 /* 13153 * Fill in LBA and length 13154 */ 13155 ASSERT((cp->sc_grpcode == CDB_GROUP1) || 13156 (cp->sc_grpcode == CDB_GROUP4) || 13157 (cp->sc_grpcode == CDB_GROUP0) || 13158 (cp->sc_grpcode == CDB_GROUP5)); 13159 13160 if (cp->sc_grpcode == CDB_GROUP1) { 13161 FORMG1ADDR(cdbp, lba); 13162 FORMG1COUNT(cdbp, blockcount); 13163 return (0); 13164 } else if (cp->sc_grpcode == CDB_GROUP4) { 13165 FORMG4LONGADDR(cdbp, lba); 13166 FORMG4COUNT(cdbp, blockcount); 13167 return (0); 13168 } else if (cp->sc_grpcode == CDB_GROUP0) { 13169 FORMG0ADDR(cdbp, lba); 13170 FORMG0COUNT(cdbp, blockcount); 13171 return (0); 13172 } else if (cp->sc_grpcode == CDB_GROUP5) { 13173 FORMG5ADDR(cdbp, lba); 13174 FORMG5COUNT(cdbp, blockcount); 13175 return (0); 13176 } 13177 13178 /* 13179 * It should be impossible to not match one 13180 * of the CDB types above, so we should never 13181 * reach this point. Set the CDB command byte 13182 * to test-unit-ready to avoid writing 13183 * to somewhere we don't intend. 13184 */ 13185 cdbp->scc_cmd = SCMD_TEST_UNIT_READY; 13186 return (SD_PKT_ALLOC_FAILURE_CDB_TOO_SMALL); 13187 } else { 13188 /* 13189 * Couldn't get scsi_pkt 13190 */ 13191 return (SD_PKT_ALLOC_FAILURE); 13192 } 13193 } 13194 } 13195 13196 /* 13197 * None of the available CDB types were suitable. This really 13198 * should never happen: on a 64 bit system we support 13199 * READ16/WRITE16 which will hold an entire 64 bit disk address 13200 * and on a 32 bit system we will refuse to bind to a device 13201 * larger than 2TB so addresses will never be larger than 32 bits. 13202 */ 13203 return (SD_PKT_ALLOC_FAILURE_CDB_TOO_SMALL); 13204 } 13205 13206 /* 13207 * Function: sd_setup_next_rw_pkt 13208 * 13209 * Description: Setup packet for partial DMA transfers, except for the 13210 * initial transfer. sd_setup_rw_pkt should be used for 13211 * the initial transfer. 13212 * 13213 * Context: Kernel thread and may be called from interrupt context. 13214 */ 13215 13216 int 13217 sd_setup_next_rw_pkt(struct sd_lun *un, 13218 struct scsi_pkt *pktp, struct buf *bp, 13219 diskaddr_t lba, uint32_t blockcount) 13220 { 13221 uchar_t com; 13222 union scsi_cdb *cdbp; 13223 uchar_t cdb_group_id; 13224 13225 ASSERT(pktp != NULL); 13226 ASSERT(pktp->pkt_cdbp != NULL); 13227 13228 cdbp = (union scsi_cdb *)pktp->pkt_cdbp; 13229 com = cdbp->scc_cmd; 13230 cdb_group_id = CDB_GROUPID(com); 13231 13232 ASSERT((cdb_group_id == CDB_GROUPID_0) || 13233 (cdb_group_id == CDB_GROUPID_1) || 13234 (cdb_group_id == CDB_GROUPID_4) || 13235 (cdb_group_id == CDB_GROUPID_5)); 13236 13237 /* 13238 * Move pkt to the next portion of the xfer. 13239 * func is NULL_FUNC so we do not have to release 13240 * the disk mutex here. 13241 */ 13242 if (scsi_init_pkt(SD_ADDRESS(un), pktp, bp, 0, 0, 0, 0, 13243 NULL_FUNC, NULL) == pktp) { 13244 /* Success. 
Handle partial DMA */ 13245 if (pktp->pkt_resid != 0) { 13246 blockcount -= 13247 SD_BYTES2TGTBLOCKS(un, pktp->pkt_resid); 13248 } 13249 13250 cdbp->scc_cmd = com; 13251 SD_FILL_SCSI1_LUN(un, pktp); 13252 if (cdb_group_id == CDB_GROUPID_1) { 13253 FORMG1ADDR(cdbp, lba); 13254 FORMG1COUNT(cdbp, blockcount); 13255 return (0); 13256 } else if (cdb_group_id == CDB_GROUPID_4) { 13257 FORMG4LONGADDR(cdbp, lba); 13258 FORMG4COUNT(cdbp, blockcount); 13259 return (0); 13260 } else if (cdb_group_id == CDB_GROUPID_0) { 13261 FORMG0ADDR(cdbp, lba); 13262 FORMG0COUNT(cdbp, blockcount); 13263 return (0); 13264 } else if (cdb_group_id == CDB_GROUPID_5) { 13265 FORMG5ADDR(cdbp, lba); 13266 FORMG5COUNT(cdbp, blockcount); 13267 return (0); 13268 } 13269 13270 /* Unreachable */ 13271 return (SD_PKT_ALLOC_FAILURE_CDB_TOO_SMALL); 13272 } 13273 13274 /* 13275 * Error setting up next portion of cmd transfer. 13276 * Something is definitely very wrong and this 13277 * should not happen. 13278 */ 13279 return (SD_PKT_ALLOC_FAILURE); 13280 } 13281 13282 /* 13283 * Function: sd_initpkt_for_uscsi 13284 * 13285 * Description: Allocate and initialize for transport a scsi_pkt struct, 13286 * based upon the info specified in the given uscsi_cmd struct. 13287 * 13288 * Return Code: SD_PKT_ALLOC_SUCCESS 13289 * SD_PKT_ALLOC_FAILURE 13290 * SD_PKT_ALLOC_FAILURE_NO_DMA 13291 * SD_PKT_ALLOC_FAILURE_CDB_TOO_SMALL 13292 * 13293 * Context: Kernel thread and may be called from software interrupt context 13294 * as part of a sdrunout callback. This function may not block or 13295 * call routines that block 13296 */ 13297 13298 static int 13299 sd_initpkt_for_uscsi(struct buf *bp, struct scsi_pkt **pktpp) 13300 { 13301 struct uscsi_cmd *uscmd; 13302 struct sd_xbuf *xp; 13303 struct scsi_pkt *pktp; 13304 struct sd_lun *un; 13305 uint32_t flags = 0; 13306 13307 ASSERT(bp != NULL); 13308 ASSERT(pktpp != NULL); 13309 xp = SD_GET_XBUF(bp); 13310 ASSERT(xp != NULL); 13311 un = SD_GET_UN(bp); 13312 ASSERT(un != NULL); 13313 ASSERT(mutex_owned(SD_MUTEX(un))); 13314 13315 /* The pointer to the uscsi_cmd struct is expected in xb_pktinfo */ 13316 uscmd = (struct uscsi_cmd *)xp->xb_pktinfo; 13317 ASSERT(uscmd != NULL); 13318 13319 SD_TRACE(SD_LOG_IO_CORE, un, 13320 "sd_initpkt_for_uscsi: entry: buf:0x%p\n", bp); 13321 13322 /* 13323 * Allocate the scsi_pkt for the command. 13324 * Note: If PKT_DMA_PARTIAL flag is set, scsi_vhci binds a path 13325 * during scsi_init_pkt time and will continue to use the 13326 * same path as long as the same scsi_pkt is used without 13327 * intervening scsi_dmafree(). Since the uscsi command does 13328 * not call scsi_dmafree() before retrying a failed command, it 13329 * is necessary to make sure PKT_DMA_PARTIAL flag is NOT 13330 * set such that scsi_vhci can use another available path for 13331 * retry. Besides, uscsi commands do not allow DMA breakup, 13332 * so there is no need to set PKT_DMA_PARTIAL flag. 13333 */ 13334 if (uscmd->uscsi_rqlen > SENSE_LENGTH) { 13335 pktp = scsi_init_pkt(SD_ADDRESS(un), NULL, 13336 ((bp->b_bcount != 0) ? bp : NULL), uscmd->uscsi_cdblen, 13337 ((int)(uscmd->uscsi_rqlen) + sizeof (struct scsi_arq_status) 13338 - sizeof (struct scsi_extended_sense)), 0, 13339 (un->un_pkt_flags & ~PKT_DMA_PARTIAL) | PKT_XARQ, 13340 sdrunout, (caddr_t)un); 13341 } else { 13342 pktp = scsi_init_pkt(SD_ADDRESS(un), NULL, 13343 ((bp->b_bcount != 0) ?
bp : NULL), uscmd->uscsi_cdblen, 13344 sizeof (struct scsi_arq_status), 0, 13345 (un->un_pkt_flags & ~PKT_DMA_PARTIAL), 13346 sdrunout, (caddr_t)un); 13347 } 13348 13349 if (pktp == NULL) { 13350 *pktpp = NULL; 13351 /* 13352 * Set the driver state to RWAIT to indicate the driver 13353 * is waiting on resource allocations. The driver will not 13354 * suspend, pm_suspend, or detach while the state is RWAIT. 13355 */ 13356 New_state(un, SD_STATE_RWAIT); 13357 13358 SD_ERROR(SD_LOG_IO_CORE, un, 13359 "sd_initpkt_for_uscsi: No pktp. exit bp:0x%p\n", bp); 13360 13361 if ((bp->b_flags & B_ERROR) != 0) { 13362 return (SD_PKT_ALLOC_FAILURE_NO_DMA); 13363 } 13364 return (SD_PKT_ALLOC_FAILURE); 13365 } 13366 13367 /* 13368 * We do not do DMA breakup for USCSI commands, so return failure 13369 * here if all the needed DMA resources were not allocated. 13370 */ 13371 if ((un->un_pkt_flags & PKT_DMA_PARTIAL) && 13372 (bp->b_bcount != 0) && (pktp->pkt_resid != 0)) { 13373 scsi_destroy_pkt(pktp); 13374 SD_ERROR(SD_LOG_IO_CORE, un, "sd_initpkt_for_uscsi: " 13375 "No partial DMA for USCSI. exit: buf:0x%p\n", bp); 13376 return (SD_PKT_ALLOC_FAILURE_PKT_TOO_SMALL); 13377 } 13378 13379 /* Init the cdb from the given uscsi struct */ 13380 (void) scsi_setup_cdb((union scsi_cdb *)pktp->pkt_cdbp, 13381 uscmd->uscsi_cdb[0], 0, 0, 0); 13382 13383 SD_FILL_SCSI1_LUN(un, pktp); 13384 13385 /* 13386 * Set up the optional USCSI flags. See the uscsi (7I) man page 13387 * for a listing of the supported flags. 13388 */ 13389 13390 if (uscmd->uscsi_flags & USCSI_SILENT) { 13391 flags |= FLAG_SILENT; 13392 } 13393 13394 if (uscmd->uscsi_flags & USCSI_DIAGNOSE) { 13395 flags |= FLAG_DIAGNOSE; 13396 } 13397 13398 if (uscmd->uscsi_flags & USCSI_ISOLATE) { 13399 flags |= FLAG_ISOLATE; 13400 } 13401 13402 if (un->un_f_is_fibre == FALSE) { 13403 if (uscmd->uscsi_flags & USCSI_RENEGOT) { 13404 flags |= FLAG_RENEGOTIATE_WIDE_SYNC; 13405 } 13406 } 13407 13408 /* 13409 * Set the pkt flags here so we save time later. 13410 * Note: These flags are NOT in the uscsi man page!!! 13411 */ 13412 if (uscmd->uscsi_flags & USCSI_HEAD) { 13413 flags |= FLAG_HEAD; 13414 } 13415 13416 if (uscmd->uscsi_flags & USCSI_NOINTR) { 13417 flags |= FLAG_NOINTR; 13418 } 13419 13420 /* 13421 * For tagged queueing, things get a bit complicated. 13422 * Check first for head of queue and last for ordered queue. 13423 * If neither head nor order, use the default driver tag flags. 13424 */ 13425 if ((uscmd->uscsi_flags & USCSI_NOTAG) == 0) { 13426 if (uscmd->uscsi_flags & USCSI_HTAG) { 13427 flags |= FLAG_HTAG; 13428 } else if (uscmd->uscsi_flags & USCSI_OTAG) { 13429 flags |= FLAG_OTAG; 13430 } else { 13431 flags |= un->un_tagflags & FLAG_TAGMASK; 13432 } 13433 } 13434 13435 if (uscmd->uscsi_flags & USCSI_NODISCON) { 13436 flags = (flags & ~FLAG_TAGMASK) | FLAG_NODISCON; 13437 } 13438 13439 pktp->pkt_flags = flags; 13440 13441 /* Transfer uscsi information to scsi_pkt */ 13442 (void) scsi_uscsi_pktinit(uscmd, pktp); 13443 13444 /* Copy the caller's CDB into the pkt...
*/ 13445 bcopy(uscmd->uscsi_cdb, pktp->pkt_cdbp, uscmd->uscsi_cdblen); 13446 13447 if (uscmd->uscsi_timeout == 0) { 13448 pktp->pkt_time = un->un_uscsi_timeout; 13449 } else { 13450 pktp->pkt_time = uscmd->uscsi_timeout; 13451 } 13452 13453 /* need it later to identify USCSI request in sdintr */ 13454 xp->xb_pkt_flags |= SD_XB_USCSICMD; 13455 13456 xp->xb_sense_resid = uscmd->uscsi_rqresid; 13457 13458 pktp->pkt_private = bp; 13459 pktp->pkt_comp = sdintr; 13460 *pktpp = pktp; 13461 13462 SD_TRACE(SD_LOG_IO_CORE, un, 13463 "sd_initpkt_for_uscsi: exit: buf:0x%p\n", bp); 13464 13465 return (SD_PKT_ALLOC_SUCCESS); 13466 } 13467 13468 13469 /* 13470 * Function: sd_destroypkt_for_uscsi 13471 * 13472 * Description: Free the scsi_pkt(9S) struct for the given bp, for uscsi 13473 * IOs.. Also saves relevant info into the associated uscsi_cmd 13474 * struct. 13475 * 13476 * Context: May be called under interrupt context 13477 */ 13478 13479 static void 13480 sd_destroypkt_for_uscsi(struct buf *bp) 13481 { 13482 struct uscsi_cmd *uscmd; 13483 struct sd_xbuf *xp; 13484 struct scsi_pkt *pktp; 13485 struct sd_lun *un; 13486 struct sd_uscsi_info *suip; 13487 13488 ASSERT(bp != NULL); 13489 xp = SD_GET_XBUF(bp); 13490 ASSERT(xp != NULL); 13491 un = SD_GET_UN(bp); 13492 ASSERT(un != NULL); 13493 ASSERT(!mutex_owned(SD_MUTEX(un))); 13494 pktp = SD_GET_PKTP(bp); 13495 ASSERT(pktp != NULL); 13496 13497 SD_TRACE(SD_LOG_IO_CORE, un, 13498 "sd_destroypkt_for_uscsi: entry: buf:0x%p\n", bp); 13499 13500 /* The pointer to the uscsi_cmd struct is expected in xb_pktinfo */ 13501 uscmd = (struct uscsi_cmd *)xp->xb_pktinfo; 13502 ASSERT(uscmd != NULL); 13503 13504 /* Save the status and the residual into the uscsi_cmd struct */ 13505 uscmd->uscsi_status = ((*(pktp)->pkt_scbp) & STATUS_MASK); 13506 uscmd->uscsi_resid = bp->b_resid; 13507 13508 /* Transfer scsi_pkt information to uscsi */ 13509 (void) scsi_uscsi_pktfini(pktp, uscmd); 13510 13511 /* 13512 * If enabled, copy any saved sense data into the area specified 13513 * by the uscsi command. 13514 */ 13515 if (((uscmd->uscsi_flags & USCSI_RQENABLE) != 0) && 13516 (uscmd->uscsi_rqlen != 0) && (uscmd->uscsi_rqbuf != NULL)) { 13517 /* 13518 * Note: uscmd->uscsi_rqbuf should always point to a buffer 13519 * at least SENSE_LENGTH bytes in size (see sd_send_scsi_cmd()) 13520 */ 13521 uscmd->uscsi_rqstatus = xp->xb_sense_status; 13522 uscmd->uscsi_rqresid = xp->xb_sense_resid; 13523 if (uscmd->uscsi_rqlen > SENSE_LENGTH) { 13524 bcopy(xp->xb_sense_data, uscmd->uscsi_rqbuf, 13525 MAX_SENSE_LENGTH); 13526 } else { 13527 bcopy(xp->xb_sense_data, uscmd->uscsi_rqbuf, 13528 SENSE_LENGTH); 13529 } 13530 } 13531 /* 13532 * The following assignments are for SCSI FMA. 13533 */ 13534 ASSERT(xp->xb_private != NULL); 13535 suip = (struct sd_uscsi_info *)xp->xb_private; 13536 suip->ui_pkt_reason = pktp->pkt_reason; 13537 suip->ui_pkt_state = pktp->pkt_state; 13538 suip->ui_pkt_statistics = pktp->pkt_statistics; 13539 suip->ui_lba = (uint64_t)SD_GET_BLKNO(bp); 13540 13541 /* We are done with the scsi_pkt; free it now */ 13542 ASSERT(SD_GET_PKTP(bp) != NULL); 13543 scsi_destroy_pkt(SD_GET_PKTP(bp)); 13544 13545 SD_TRACE(SD_LOG_IO_CORE, un, 13546 "sd_destroypkt_for_uscsi: exit: buf:0x%p\n", bp); 13547 } 13548 13549 13550 /* 13551 * Function: sd_bioclone_alloc 13552 * 13553 * Description: Allocate a buf(9S) and init it as per the given buf 13554 * and the various arguments. The associated sd_xbuf 13555 * struct is (nearly) duplicated. 
The struct buf *bp 13556 * argument is saved in new_xp->xb_private. 13557 * 13558 * Arguments: bp - ptr to the buf(9S) to be "shadowed" 13559 * datalen - size of data area for the shadow bp 13560 * blkno - starting LBA 13561 * func - function pointer for b_iodone in the shadow buf. (May 13562 * be NULL if none.) 13563 * 13564 * Return Code: Pointer to the allocated buf(9S) struct 13565 * 13566 * Context: Can sleep. 13567 */ 13568 13569 static struct buf * 13570 sd_bioclone_alloc(struct buf *bp, size_t datalen, 13571 daddr_t blkno, int (*func)(struct buf *)) 13572 { 13573 struct sd_lun *un; 13574 struct sd_xbuf *xp; 13575 struct sd_xbuf *new_xp; 13576 struct buf *new_bp; 13577 13578 ASSERT(bp != NULL); 13579 xp = SD_GET_XBUF(bp); 13580 ASSERT(xp != NULL); 13581 un = SD_GET_UN(bp); 13582 ASSERT(un != NULL); 13583 ASSERT(!mutex_owned(SD_MUTEX(un))); 13584 13585 new_bp = bioclone(bp, 0, datalen, SD_GET_DEV(un), blkno, func, 13586 NULL, KM_SLEEP); 13587 13588 new_bp->b_lblkno = blkno; 13589 13590 /* 13591 * Allocate an xbuf for the shadow bp and copy the contents of the 13592 * original xbuf into it. 13593 */ 13594 new_xp = kmem_alloc(sizeof (struct sd_xbuf), KM_SLEEP); 13595 bcopy(xp, new_xp, sizeof (struct sd_xbuf)); 13596 13597 /* 13598 * The given bp is automatically saved in the xb_private member 13599 * of the new xbuf. Callers are allowed to depend on this. 13600 */ 13601 new_xp->xb_private = bp; 13602 13603 new_bp->b_private = new_xp; 13604 13605 return (new_bp); 13606 } 13607 13608 /* 13609 * Function: sd_shadow_buf_alloc 13610 * 13611 * Description: Allocate a buf(9S) and init it as per the given buf 13612 * and the various arguments. The associated sd_xbuf 13613 * struct is (nearly) duplicated. The struct buf *bp 13614 * argument is saved in new_xp->xb_private. 13615 * 13616 * Arguments: bp - ptr to the buf(9S) to be "shadowed" 13617 * datalen - size of data area for the shadow bp 13618 * bflags - B_READ or B_WRITE (pseudo flag) 13619 * blkno - starting LBA 13620 * func - function pointer for b_iodone in the shadow buf. (May 13621 * be NULL if none.) 13622 * 13623 * Return Code: Pointer to the allocated buf(9S) struct 13624 * 13625 * Context: Can sleep.
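 *
 * A hedged usage sketch (tgt_byte_count and tgt_blkno are
 * hypothetical names; a real caller derives them from the
 * sys/target block-size conversion):
 *
 *	new_bp = sd_shadow_buf_alloc(bp, tgt_byte_count, B_READ,
 *	    tgt_blkno, NULL);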
13626 */ 13627 13628 static struct buf * 13629 sd_shadow_buf_alloc(struct buf *bp, size_t datalen, uint_t bflags, 13630 daddr_t blkno, int (*func)(struct buf *)) 13631 { 13632 struct sd_lun *un; 13633 struct sd_xbuf *xp; 13634 struct sd_xbuf *new_xp; 13635 struct buf *new_bp; 13636 13637 ASSERT(bp != NULL); 13638 xp = SD_GET_XBUF(bp); 13639 ASSERT(xp != NULL); 13640 un = SD_GET_UN(bp); 13641 ASSERT(un != NULL); 13642 ASSERT(!mutex_owned(SD_MUTEX(un))); 13643 13644 if (bp->b_flags & (B_PAGEIO | B_PHYS)) { 13645 bp_mapin(bp); 13646 } 13647 13648 bflags &= (B_READ | B_WRITE); 13649 #if defined(__i386) || defined(__amd64) 13650 new_bp = getrbuf(KM_SLEEP); 13651 new_bp->b_un.b_addr = kmem_zalloc(datalen, KM_SLEEP); 13652 new_bp->b_bcount = datalen; 13653 new_bp->b_flags = bflags | 13654 (bp->b_flags & ~(B_PAGEIO | B_PHYS | B_REMAPPED | B_SHADOW)); 13655 #else 13656 new_bp = scsi_alloc_consistent_buf(SD_ADDRESS(un), NULL, 13657 datalen, bflags, SLEEP_FUNC, NULL); 13658 #endif 13659 new_bp->av_forw = NULL; 13660 new_bp->av_back = NULL; 13661 new_bp->b_dev = bp->b_dev; 13662 new_bp->b_blkno = blkno; 13663 new_bp->b_iodone = func; 13664 new_bp->b_edev = bp->b_edev; 13665 new_bp->b_resid = 0; 13666 13667 /* We need to preserve the B_FAILFAST flag */ 13668 if (bp->b_flags & B_FAILFAST) { 13669 new_bp->b_flags |= B_FAILFAST; 13670 } 13671 13672 /* 13673 * Allocate an xbuf for the shadow bp and copy the contents of the 13674 * original xbuf into it. 13675 */ 13676 new_xp = kmem_alloc(sizeof (struct sd_xbuf), KM_SLEEP); 13677 bcopy(xp, new_xp, sizeof (struct sd_xbuf)); 13678 13679 /* Need later to copy data between the shadow buf & original buf! */ 13680 new_xp->xb_pkt_flags |= PKT_CONSISTENT; 13681 13682 /* 13683 * The given bp is automatically saved in the xb_private member 13684 * of the new xbuf. Callers are allowed to depend on this. 13685 */ 13686 new_xp->xb_private = bp; 13687 13688 new_bp->b_private = new_xp; 13689 13690 return (new_bp); 13691 } 13692 13693 /* 13694 * Function: sd_bioclone_free 13695 * 13696 * Description: Deallocate a buf(9S) that was used for 'shadow' IO operations 13697 * in the larger than partition operation. 13698 * 13699 * Context: May be called under interrupt context 13700 */ 13701 13702 static void 13703 sd_bioclone_free(struct buf *bp) 13704 { 13705 struct sd_xbuf *xp; 13706 13707 ASSERT(bp != NULL); 13708 xp = SD_GET_XBUF(bp); 13709 ASSERT(xp != NULL); 13710 13711 /* 13712 * Call bp_mapout() before freeing the buf, in case a lower 13713 * layer or HBA had done a bp_mapin(). we must do this here 13714 * as we are the "originator" of the shadow buf. 13715 */ 13716 bp_mapout(bp); 13717 13718 /* 13719 * Null out b_iodone before freeing the bp, to ensure that the driver 13720 * never gets confused by a stale value in this field. (Just a little 13721 * extra defensiveness here.) 13722 */ 13723 bp->b_iodone = NULL; 13724 13725 freerbuf(bp); 13726 13727 kmem_free(xp, sizeof (struct sd_xbuf)); 13728 } 13729 13730 /* 13731 * Function: sd_shadow_buf_free 13732 * 13733 * Description: Deallocate a buf(9S) that was used for 'shadow' IO operations. 13734 * 13735 * Context: May be called under interrupt context 13736 */ 13737 13738 static void 13739 sd_shadow_buf_free(struct buf *bp) 13740 { 13741 struct sd_xbuf *xp; 13742 13743 ASSERT(bp != NULL); 13744 xp = SD_GET_XBUF(bp); 13745 ASSERT(xp != NULL); 13746 13747 #if defined(__sparc) 13748 /* 13749 * Call bp_mapout() before freeing the buf, in case a lower 13750 * layer or HBA had done a bp_mapin(). 
We must do this here 13751 * as we are the "originator" of the shadow buf. 13752 */ 13753 bp_mapout(bp); 13754 #endif 13755 13756 /* 13757 * Null out b_iodone before freeing the bp, to ensure that the driver 13758 * never gets confused by a stale value in this field. (Just a little 13759 * extra defensiveness here.) 13760 */ 13761 bp->b_iodone = NULL; 13762 13763 #if defined(__i386) || defined(__amd64) 13764 kmem_free(bp->b_un.b_addr, bp->b_bcount); 13765 freerbuf(bp); 13766 #else 13767 scsi_free_consistent_buf(bp); 13768 #endif 13769 13770 kmem_free(xp, sizeof (struct sd_xbuf)); 13771 } 13772 13773 13774 /* 13775 * Function: sd_print_transport_rejected_message 13776 * 13777 * Description: This implements the ludicrously complex rules for printing 13778 * a "transport rejected" message. This is to address the 13779 * specific problem of having a flood of this error message 13780 * produced when a failover occurs. 13781 * 13782 * Context: Any. 13783 */ 13784 13785 static void 13786 sd_print_transport_rejected_message(struct sd_lun *un, struct sd_xbuf *xp, 13787 int code) 13788 { 13789 ASSERT(un != NULL); 13790 ASSERT(mutex_owned(SD_MUTEX(un))); 13791 ASSERT(xp != NULL); 13792 13793 /* 13794 * Print the "transport rejected" message under the following 13795 * conditions: 13796 * 13797 * - Whenever the SD_LOGMASK_DIAG bit of sd_level_mask is set 13798 * - The error code from scsi_transport() is NOT a TRAN_FATAL_ERROR. 13799 * - If the error code IS a TRAN_FATAL_ERROR, then the message is 13800 * printed the FIRST time a TRAN_FATAL_ERROR is returned from 13801 * scsi_transport(9F) (which indicates that the target might have 13802 * gone off-line). This uses the un->un_tran_fatal_count 13803 * count, which is incremented whenever a TRAN_FATAL_ERROR is 13804 * received, and reset to zero whenever a TRAN_ACCEPT is returned 13805 * from scsi_transport(). 13806 * 13807 * The FLAG_SILENT in the scsi_pkt must be CLEARED in ALL of 13808 * the preceding cases in order for the message to be printed. 13809 */ 13810 if ((xp->xb_pktp->pkt_flags & FLAG_SILENT) == 0) { 13811 if ((sd_level_mask & SD_LOGMASK_DIAG) || 13812 (code != TRAN_FATAL_ERROR) || 13813 (un->un_tran_fatal_count == 1)) { 13814 switch (code) { 13815 case TRAN_BADPKT: 13816 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 13817 "transport rejected bad packet\n"); 13818 break; 13819 case TRAN_FATAL_ERROR: 13820 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 13821 "transport rejected fatal error\n"); 13822 break; 13823 default: 13824 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 13825 "transport rejected (%d)\n", code); 13826 break; 13827 } 13828 } 13829 } 13830 } 13831 13832 13833 /* 13834 * Function: sd_add_buf_to_waitq 13835 * 13836 * Description: Add the given buf(9S) struct to the wait queue for the 13837 * instance. If sorting is enabled, then the buf is added 13838 * to the queue via an elevator sort algorithm (a la 13839 * disksort(9F)). The SD_GET_BLKNO(bp) is used as the sort key. 13840 * If sorting is not enabled, then the buf is just added 13841 * to the end of the wait queue. 13842 * 13843 * Return Code: void 13844 * 13845 * Context: Does not sleep/block, therefore technically can be called 13846 * from any context. However if sorting is enabled then the 13847 * execution time is indeterminate, and may take a long time if 13848 * the wait queue grows large.
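 *
 * Typical caller pattern (as in sd_core_iostart() above, with
 * SD_MUTEX(un) held):
 *
 *	sd_add_buf_to_waitq(un, bp);
 *	SD_UPDATE_KSTATS(un, kstat_waitq_enter, bp);
 *	sd_start_cmds(un, NULL);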
13849 */ 13850 13851 static void 13852 sd_add_buf_to_waitq(struct sd_lun *un, struct buf *bp) 13853 { 13854 struct buf *ap; 13855 13856 ASSERT(bp != NULL); 13857 ASSERT(un != NULL); 13858 ASSERT(mutex_owned(SD_MUTEX(un))); 13859 13860 /* If the queue is empty, add the buf as the only entry & return. */ 13861 if (un->un_waitq_headp == NULL) { 13862 ASSERT(un->un_waitq_tailp == NULL); 13863 un->un_waitq_headp = un->un_waitq_tailp = bp; 13864 bp->av_forw = NULL; 13865 return; 13866 } 13867 13868 ASSERT(un->un_waitq_tailp != NULL); 13869 13870 /* 13871 * If sorting is disabled, just add the buf to the tail end of 13872 * the wait queue and return. 13873 */ 13874 if (un->un_f_disksort_disabled) { 13875 un->un_waitq_tailp->av_forw = bp; 13876 un->un_waitq_tailp = bp; 13877 bp->av_forw = NULL; 13878 return; 13879 } 13880 13881 /* 13882 * Sort thru the list of requests currently on the wait queue 13883 * and add the new buf request at the appropriate position. 13884 * 13885 * The un->un_waitq_headp is an activity chain pointer on which 13886 * we keep two queues, sorted in ascending SD_GET_BLKNO() order. The 13887 * first queue holds those requests which are positioned after 13888 * the current SD_GET_BLKNO() (in the first request); the second holds 13889 * requests which came in after their SD_GET_BLKNO() number was passed. 13890 * Thus we implement a one way scan, retracting after reaching 13891 * the end of the drive to the first request on the second 13892 * queue, at which time it becomes the first queue. 13893 * A one-way scan is natural because of the way UNIX read-ahead 13894 * blocks are allocated. 13895 * 13896 * If we lie after the first request, then we must locate the 13897 * second request list and add ourselves to it. 13898 */ 13899 ap = un->un_waitq_headp; 13900 if (SD_GET_BLKNO(bp) < SD_GET_BLKNO(ap)) { 13901 while (ap->av_forw != NULL) { 13902 /* 13903 * Look for an "inversion" in the (normally 13904 * ascending) block numbers. This indicates 13905 * the start of the second request list. 13906 */ 13907 if (SD_GET_BLKNO(ap->av_forw) < SD_GET_BLKNO(ap)) { 13908 /* 13909 * Search the second request list for the 13910 * first request at a larger block number. 13911 * We go before that; however if there is 13912 * no such request, we go at the end. 13913 */ 13914 do { 13915 if (SD_GET_BLKNO(bp) < 13916 SD_GET_BLKNO(ap->av_forw)) { 13917 goto insert; 13918 } 13919 ap = ap->av_forw; 13920 } while (ap->av_forw != NULL); 13921 goto insert; /* after last */ 13922 } 13923 ap = ap->av_forw; 13924 } 13925 13926 /* 13927 * No inversions... we will go after the last, and 13928 * be the first request in the second request list. 13929 */ 13930 goto insert; 13931 } 13932 13933 /* 13934 * Request is at/after the current request... 13935 * sort in the first request list. 13936 */ 13937 while (ap->av_forw != NULL) { 13938 /* 13939 * We want to go after the current request (1) if 13940 * there is an inversion after it (i.e. it is the end 13941 * of the first request list), or (2) if the next 13942 * request is a larger block no. than our request. 13943 */ 13944 if ((SD_GET_BLKNO(ap->av_forw) < SD_GET_BLKNO(ap)) || 13945 (SD_GET_BLKNO(bp) < SD_GET_BLKNO(ap->av_forw))) { 13946 goto insert; 13947 } 13948 ap = ap->av_forw; 13949 } 13950 13951 /* 13952 * Neither a second list nor a larger request, therefore 13953 * we go at the end of the first list (which is the same 13954 * as the end of the whole shebang).
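 *
 * A small worked example: with the head request at blkno 120 and
 * a waitq of 120 -> 400 -> 30 -> 80 (the inversion at 400 -> 30
 * starts the second, wrapped list), a new request at blkno 200 is
 * inserted between 120 and 400, while one at blkno 60 scans past
 * the inversion and lands between 30 and 80.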
13955 */ 13956 insert: 13957 bp->av_forw = ap->av_forw; 13958 ap->av_forw = bp; 13959 13960 /* 13961 * If we inserted onto the tail end of the waitq, make sure the 13962 * tail pointer is updated. 13963 */ 13964 if (ap == un->un_waitq_tailp) { 13965 un->un_waitq_tailp = bp; 13966 } 13967 } 13968 13969 13970 /* 13971 * Function: sd_start_cmds 13972 * 13973 * Description: Remove and transport cmds from the driver queues. 13974 * 13975 * Arguments: un - pointer to the unit (soft state) struct for the target. 13976 * 13977 * immed_bp - ptr to a buf to be transported immediately. Only 13978 * the immed_bp is transported; bufs on the waitq are not 13979 * processed and the un_retry_bp is not checked. If immed_bp is 13980 * NULL, then normal queue processing is performed. 13981 * 13982 * Context: May be called from kernel thread context, interrupt context, 13983 * or runout callback context. This function may not block or 13984 * call routines that block. 13985 */ 13986 13987 static void 13988 sd_start_cmds(struct sd_lun *un, struct buf *immed_bp) 13989 { 13990 struct sd_xbuf *xp; 13991 struct buf *bp; 13992 void (*statp)(kstat_io_t *); 13993 #if defined(__i386) || defined(__amd64) /* DMAFREE for x86 only */ 13994 void (*saved_statp)(kstat_io_t *); 13995 #endif 13996 int rval; 13997 struct sd_fm_internal *sfip = NULL; 13998 13999 ASSERT(un != NULL); 14000 ASSERT(mutex_owned(SD_MUTEX(un))); 14001 ASSERT(un->un_ncmds_in_transport >= 0); 14002 ASSERT(un->un_throttle >= 0); 14003 14004 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sd_start_cmds: entry\n"); 14005 14006 do { 14007 #if defined(__i386) || defined(__amd64) /* DMAFREE for x86 only */ 14008 saved_statp = NULL; 14009 #endif 14010 14011 /* 14012 * If we are syncing or dumping, fail the command to 14013 * avoid recursively calling back into scsi_transport(). 14014 * The dump I/O itself uses a separate code path so this 14015 * only prevents non-dump I/O from being sent while dumping. 14016 * File system sync takes place before dumping begins. 14017 * During panic, filesystem I/O is allowed provided 14018 * un_in_callback is <= 1. This is to prevent recursion 14019 * such as sd_start_cmds -> scsi_transport -> sdintr -> 14020 * sd_start_cmds and so on. See panic.c for more information 14021 * about the states the system can be in during panic. 14022 */ 14023 if ((un->un_state == SD_STATE_DUMPING) || 14024 (ddi_in_panic() && (un->un_in_callback > 1))) { 14025 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14026 "sd_start_cmds: panicking\n"); 14027 goto exit; 14028 } 14029 14030 if ((bp = immed_bp) != NULL) { 14031 /* 14032 * We have a bp that must be transported immediately. 14033 * It's OK to transport the immed_bp here without doing 14034 * the throttle limit check because the immed_bp is 14035 * always used in a retry/recovery case. This means 14036 * that we know we are not at the throttle limit by 14037 * virtue of the fact that to get here we must have 14038 * already gotten a command back via sdintr(). This also 14039 * relies on (1) the command on un_retry_bp preventing 14040 * further commands from the waitq from being issued; 14041 * and (2) the code in sd_retry_command checking the 14042 * throttle limit before issuing a delayed or immediate 14043 * retry. This holds even if the throttle limit is 14044 * currently ratcheted down from its maximum value. 
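 *
 * For reference, the kstat_io(9S) queue transitions used in this
 * function (standard two-queue wait/run model):
 *
 *	kstat_waitq_enter	  buf queued on the waitq
 *	kstat_waitq_to_runq	  buf dispatched from the waitq
 *	kstat_runq_enter	  buf dispatched without queueing
 *	kstat_runq_exit		  transport finished (or failed)
 *	kstat_runq_back_to_waitq  buf requeued after TRAN_BUSY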
14045 */ 14046 statp = kstat_runq_enter; 14047 if (bp == un->un_retry_bp) { 14048 ASSERT((un->un_retry_statp == NULL) || 14049 (un->un_retry_statp == kstat_waitq_enter) || 14050 (un->un_retry_statp == 14051 kstat_runq_back_to_waitq)); 14052 /* 14053 * If the waitq kstat was incremented when 14054 * sd_set_retry_bp() queued this bp for a retry, 14055 * then we must set up statp so that the waitq 14056 * count will get decremented correctly below. 14057 * Also we must clear un->un_retry_statp to 14058 * ensure that we do not act on a stale value 14059 * in this field. 14060 */ 14061 if ((un->un_retry_statp == kstat_waitq_enter) || 14062 (un->un_retry_statp == 14063 kstat_runq_back_to_waitq)) { 14064 statp = kstat_waitq_to_runq; 14065 } 14066 #if defined(__i386) || defined(__amd64) /* DMAFREE for x86 only */ 14067 saved_statp = un->un_retry_statp; 14068 #endif 14069 un->un_retry_statp = NULL; 14070 14071 SD_TRACE(SD_LOG_IO | SD_LOG_ERROR, un, 14072 "sd_start_cmds: un:0x%p: GOT retry_bp:0x%p " 14073 "un_throttle:%d un_ncmds_in_transport:%d\n", 14074 un, un->un_retry_bp, un->un_throttle, 14075 un->un_ncmds_in_transport); 14076 } else { 14077 SD_TRACE(SD_LOG_IO_CORE, un, "sd_start_cmds: " 14078 "processing priority bp:0x%p\n", bp); 14079 } 14080 14081 } else if ((bp = un->un_waitq_headp) != NULL) { 14082 /* 14083 * A command on the waitq is ready to go, but do not 14084 * send it if: 14085 * 14086 * (1) the throttle limit has been reached, or 14087 * (2) a retry is pending, or 14088 * (3) a START_STOP_UNIT callback is pending, or 14089 * (4) a callback for a SD_PATH_DIRECT_PRIORITY 14090 * command is pending. 14091 * 14092 * For all of these conditions, IO processing will 14093 * restart after the condition is cleared. 14094 */ 14095 if (un->un_ncmds_in_transport >= un->un_throttle) { 14096 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14097 "sd_start_cmds: exiting, " 14098 "throttle limit reached!\n"); 14099 goto exit; 14100 } 14101 if (un->un_retry_bp != NULL) { 14102 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14103 "sd_start_cmds: exiting, retry pending!\n"); 14104 goto exit; 14105 } 14106 if (un->un_startstop_timeid != NULL) { 14107 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14108 "sd_start_cmds: exiting, " 14109 "START_STOP pending!\n"); 14110 goto exit; 14111 } 14112 if (un->un_direct_priority_timeid != NULL) { 14113 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14114 "sd_start_cmds: exiting, " 14115 "SD_PATH_DIRECT_PRIORITY cmd. pending!\n"); 14116 goto exit; 14117 } 14118 14119 /* Dequeue the command */ 14120 un->un_waitq_headp = bp->av_forw; 14121 if (un->un_waitq_headp == NULL) { 14122 un->un_waitq_tailp = NULL; 14123 } 14124 bp->av_forw = NULL; 14125 statp = kstat_waitq_to_runq; 14126 SD_TRACE(SD_LOG_IO_CORE, un, 14127 "sd_start_cmds: processing waitq bp:0x%p\n", bp); 14128 14129 } else { 14130 /* No work to do so bail out now */ 14131 SD_TRACE(SD_LOG_IO_CORE, un, 14132 "sd_start_cmds: no more work, exiting!\n"); 14133 goto exit; 14134 } 14135 14136 /* 14137 * Reset the state to normal. This is the mechanism by which 14138 * the state transitions from either SD_STATE_RWAIT or 14139 * SD_STATE_OFFLINE to SD_STATE_NORMAL. 14140 * If state is SD_STATE_PM_CHANGING then this command is 14141 * part of the device power control and the state must 14142 * not be put back to normal. Doing so would 14143 * allow new commands to proceed when they shouldn't, 14144 * as the device may be going off.
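 *
 * A condensed sketch of the transition performed just below:
 *
 *	RWAIT / OFFLINE          --(command dispatched)-->  NORMAL
 *	SUSPENDED / PM_CHANGING  --(left unchanged here)--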
14145 */ 14146 if ((un->un_state != SD_STATE_SUSPENDED) && 14147 (un->un_state != SD_STATE_PM_CHANGING)) { 14148 New_state(un, SD_STATE_NORMAL); 14149 } 14150 14151 xp = SD_GET_XBUF(bp); 14152 ASSERT(xp != NULL); 14153 14154 #if defined(__i386) || defined(__amd64) /* DMAFREE for x86 only */ 14155 /* 14156 * Allocate the scsi_pkt if we need one, or attach DMA 14157 * resources if we have a scsi_pkt that needs them. The 14158 * latter should only occur for commands that are being 14159 * retried. 14160 */ 14161 if ((xp->xb_pktp == NULL) || 14162 ((xp->xb_pkt_flags & SD_XB_DMA_FREED) != 0)) { 14163 #else 14164 if (xp->xb_pktp == NULL) { 14165 #endif 14166 /* 14167 * There is no scsi_pkt allocated for this buf. Call 14168 * the initpkt function to allocate & init one. 14169 * 14170 * The scsi_init_pkt runout callback functionality is 14171 * implemented as follows: 14172 * 14173 * 1) The initpkt function always calls 14174 * scsi_init_pkt(9F) with sdrunout specified as the 14175 * callback routine. 14176 * 2) A successful packet allocation is initialized and 14177 * the I/O is transported. 14178 * 3) The I/O associated with an allocation resource 14179 * failure is left on its queue to be retried via 14180 * runout or the next I/O. 14181 * 4) The I/O associated with a DMA error is removed 14182 * from the queue and failed with EIO. Processing of 14183 * the transport queues is also halted to be 14184 * restarted via runout or the next I/O. 14185 * 5) The I/O associated with a CDB size or packet 14186 * size error is removed from the queue and failed 14187 * with EIO. Processing of the transport queues is 14188 * continued. 14189 * 14190 * Note: there is no interface for canceling a runout 14191 * callback. To prevent the driver from detaching or 14192 * suspending while a runout is pending the driver 14193 * state is set to SD_STATE_RWAIT 14194 * 14195 * Note: using the scsi_init_pkt callback facility can 14196 * result in an I/O request persisting at the head of 14197 * the list which cannot be satisfied even after 14198 * multiple retries. In the future the driver may 14199 * implement some kind of maximum runout count before 14200 * failing an I/O. 14201 * 14202 * Note: the use of funcp below may seem superfluous, 14203 * but it helps warlock figure out the correct 14204 * initpkt function calls (see [s]sd.wlcmd). 14205 */ 14206 struct scsi_pkt *pktp; 14207 int (*funcp)(struct buf *bp, struct scsi_pkt **pktp); 14208 14209 ASSERT(bp != un->un_rqs_bp); 14210 14211 funcp = sd_initpkt_map[xp->xb_chain_iostart]; 14212 switch ((*funcp)(bp, &pktp)) { 14213 case SD_PKT_ALLOC_SUCCESS: 14214 xp->xb_pktp = pktp; 14215 SD_TRACE(SD_LOG_IO_CORE, un, 14216 "sd_start_cmd: SD_PKT_ALLOC_SUCCESS 0x%p\n", 14217 pktp); 14218 goto got_pkt; 14219 14220 case SD_PKT_ALLOC_FAILURE: 14221 /* 14222 * Temporary (hopefully) resource depletion. 14223 * Since retries and RQS commands always have a 14224 * scsi_pkt allocated, these cases should never 14225 * get here. So the only cases this needs to 14226 * handle is a bp from the waitq (which we put 14227 * back onto the waitq for sdrunout), or a bp 14228 * sent as an immed_bp (which we just fail). 14229 */ 14230 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14231 "sd_start_cmds: SD_PKT_ALLOC_FAILURE\n"); 14232 14233 #if defined(__i386) || defined(__amd64) /* DMAFREE for x86 only */ 14234 14235 if (bp == immed_bp) { 14236 /* 14237 * If SD_XB_DMA_FREED is clear, then 14238 * this is a failure to allocate a 14239 * scsi_pkt, and we must fail the 14240 * command. 
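 *
 * (That is, on this path no scsi_pkt was ever allocated
 * for the buf, as opposed to the DMA-freed un_retry_bp
 * case that is handled just below.)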
14241 */ 14242 if ((xp->xb_pkt_flags & 14243 SD_XB_DMA_FREED) == 0) { 14244 break; 14245 } 14246 14247 /* 14248 * If this immediate command is NOT our 14249 * un_retry_bp, then we must fail it. 14250 */ 14251 if (bp != un->un_retry_bp) { 14252 break; 14253 } 14254 14255 /* 14256 * We get here if this cmd is our 14257 * un_retry_bp that was DMAFREED, but 14258 * scsi_init_pkt() failed to reallocate 14259 * DMA resources when we attempted to 14260 * retry it. This can happen when an 14261 * mpxio failover is in progress, but 14262 * we don't want to just fail the 14263 * command in this case. 14264 * 14265 * Use timeout(9F) to restart it after 14266 * a 100ms delay. We don't want to 14267 * let sdrunout() restart it, because 14268 * sdrunout() is just supposed to start 14269 * commands that are sitting on the 14270 * wait queue. The un_retry_bp stays 14271 * set until the command completes, but 14272 * sdrunout can be called many times 14273 * before that happens. Since sdrunout 14274 * cannot tell if the un_retry_bp is 14275 * already in the transport, it could 14276 * end up calling scsi_transport() for 14277 * the un_retry_bp multiple times. 14278 * 14279 * Also: don't schedule the callback 14280 * if some other callback is already 14281 * pending. 14282 */ 14283 if (un->un_retry_statp == NULL) { 14284 /* 14285 * restore the kstat pointer to 14286 * keep kstat counts coherent 14287 * when we do retry the command. 14288 */ 14289 un->un_retry_statp = 14290 saved_statp; 14291 } 14292 14293 if ((un->un_startstop_timeid == NULL) && 14294 (un->un_retry_timeid == NULL) && 14295 (un->un_direct_priority_timeid == 14296 NULL)) { 14297 14298 un->un_retry_timeid = 14299 timeout( 14300 sd_start_retry_command, 14301 un, SD_RESTART_TIMEOUT); 14302 } 14303 goto exit; 14304 } 14305 14306 #else 14307 if (bp == immed_bp) { 14308 break; /* Just fail the command */ 14309 } 14310 #endif 14311 14312 /* Add the buf back to the head of the waitq */ 14313 bp->av_forw = un->un_waitq_headp; 14314 un->un_waitq_headp = bp; 14315 if (un->un_waitq_tailp == NULL) { 14316 un->un_waitq_tailp = bp; 14317 } 14318 goto exit; 14319 14320 case SD_PKT_ALLOC_FAILURE_NO_DMA: 14321 /* 14322 * HBA DMA resource failure. Fail the command 14323 * and continue processing of the queues. 14324 */ 14325 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14326 "sd_start_cmds: " 14327 "SD_PKT_ALLOC_FAILURE_NO_DMA\n"); 14328 break; 14329 14330 case SD_PKT_ALLOC_FAILURE_PKT_TOO_SMALL: 14331 /* 14332 * Note:x86: Partial DMA mapping not supported 14333 * for USCSI commands, and all the needed DMA 14334 * resources were not allocated. 14335 */ 14336 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14337 "sd_start_cmds: " 14338 "SD_PKT_ALLOC_FAILURE_PKT_TOO_SMALL\n"); 14339 break; 14340 14341 case SD_PKT_ALLOC_FAILURE_CDB_TOO_SMALL: 14342 /* 14343 * Note:x86: Request cannot fit into CDB based 14344 * on lba and len. 14345 */ 14346 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14347 "sd_start_cmds: " 14348 "SD_PKT_ALLOC_FAILURE_CDB_TOO_SMALL\n"); 14349 break; 14350 14351 default: 14352 /* Should NEVER get here! */ 14353 panic("scsi_initpkt error"); 14354 /*NOTREACHED*/ 14355 } 14356 14357 /* 14358 * Fatal error in allocating a scsi_pkt for this buf. 14359 * Update kstats & return the buf with an error code. 14360 * We must use sd_return_failed_command_no_restart() to 14361 * avoid a recursive call back into sd_start_cmds(). 14362 * However this also means that we must keep processing 14363 * the waitq here in order to avoid stalling. 
14364 */ 14365 if (statp == kstat_waitq_to_runq) { 14366 SD_UPDATE_KSTATS(un, kstat_waitq_exit, bp); 14367 } 14368 sd_return_failed_command_no_restart(un, bp, EIO); 14369 if (bp == immed_bp) { 14370 /* immed_bp is gone by now, so clear this */ 14371 immed_bp = NULL; 14372 } 14373 continue; 14374 } 14375 got_pkt: 14376 if (bp == immed_bp) { 14377 /* goto the head of the class.... */ 14378 xp->xb_pktp->pkt_flags |= FLAG_HEAD; 14379 } 14380 14381 un->un_ncmds_in_transport++; 14382 SD_UPDATE_KSTATS(un, statp, bp); 14383 14384 /* 14385 * Call scsi_transport() to send the command to the target. 14386 * According to SCSA architecture, we must drop the mutex here 14387 * before calling scsi_transport() in order to avoid deadlock. 14388 * Note that the scsi_pkt's completion routine can be executed 14389 * (from interrupt context) even before the call to 14390 * scsi_transport() returns. 14391 */ 14392 SD_TRACE(SD_LOG_IO_CORE, un, 14393 "sd_start_cmds: calling scsi_transport()\n"); 14394 DTRACE_PROBE1(scsi__transport__dispatch, struct buf *, bp); 14395 14396 mutex_exit(SD_MUTEX(un)); 14397 rval = scsi_transport(xp->xb_pktp); 14398 mutex_enter(SD_MUTEX(un)); 14399 14400 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14401 "sd_start_cmds: scsi_transport() returned %d\n", rval); 14402 14403 switch (rval) { 14404 case TRAN_ACCEPT: 14405 /* Clear this with every pkt accepted by the HBA */ 14406 un->un_tran_fatal_count = 0; 14407 break; /* Success; try the next cmd (if any) */ 14408 14409 case TRAN_BUSY: 14410 un->un_ncmds_in_transport--; 14411 ASSERT(un->un_ncmds_in_transport >= 0); 14412 14413 /* 14414 * Don't retry request sense, the sense data 14415 * is lost when another request is sent. 14416 * Free up the rqs buf and retry 14417 * the original failed cmd. Update kstat. 14418 */ 14419 if (bp == un->un_rqs_bp) { 14420 SD_UPDATE_KSTATS(un, kstat_runq_exit, bp); 14421 bp = sd_mark_rqs_idle(un, xp); 14422 sd_retry_command(un, bp, SD_RETRIES_STANDARD, 14423 NULL, NULL, EIO, un->un_busy_timeout / 500, 14424 kstat_waitq_enter); 14425 goto exit; 14426 } 14427 14428 #if defined(__i386) || defined(__amd64) /* DMAFREE for x86 only */ 14429 /* 14430 * Free the DMA resources for the scsi_pkt. This will 14431 * allow mpxio to select another path the next time 14432 * we call scsi_transport() with this scsi_pkt. 14433 * See sdintr() for the rationalization behind this. 14434 */ 14435 if ((un->un_f_is_fibre == TRUE) && 14436 ((xp->xb_pkt_flags & SD_XB_USCSICMD) == 0) && 14437 ((xp->xb_pktp->pkt_flags & FLAG_SENSING) == 0)) { 14438 scsi_dmafree(xp->xb_pktp); 14439 xp->xb_pkt_flags |= SD_XB_DMA_FREED; 14440 } 14441 #endif 14442 14443 if (SD_IS_DIRECT_PRIORITY(SD_GET_XBUF(bp))) { 14444 /* 14445 * Commands that are SD_PATH_DIRECT_PRIORITY 14446 * are for error recovery situations. These do 14447 * not use the normal command waitq, so if they 14448 * get a TRAN_BUSY we cannot put them back onto 14449 * the waitq for later retry. One possible 14450 * problem is that there could already be some 14451 * other command on un_retry_bp that is waiting 14452 * for this one to complete, so we would be 14453 * deadlocked if we put this command back onto 14454 * the waitq for later retry (since un_retry_bp 14455 * must complete before the driver gets back to 14456 * commands on the waitq). 14457 * 14458 * To avoid deadlock we must schedule a callback 14459 * that will restart this command after a set 14460 * interval. 
				 * interval. This should keep retrying for as
				 * long as the underlying transport keeps
				 * returning TRAN_BUSY (just like for other
				 * commands). Use the same timeout interval as
				 * for the ordinary TRAN_BUSY retry.
				 */
				SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
				    "sd_start_cmds: scsi_transport() returned "
				    "TRAN_BUSY for DIRECT_PRIORITY cmd!\n");

				SD_UPDATE_KSTATS(un, kstat_runq_exit, bp);
				un->un_direct_priority_timeid =
				    timeout(sd_start_direct_priority_command,
				    bp, un->un_busy_timeout / 500);

				goto exit;
			}

			/*
			 * For TRAN_BUSY, we want to reduce the throttle value,
			 * unless we are retrying a command.
			 */
			if (bp != un->un_retry_bp) {
				sd_reduce_throttle(un, SD_THROTTLE_TRAN_BUSY);
			}

			/*
			 * Set up the bp to be tried again after a short delay
			 * (un_busy_timeout / 500).
			 * Note:x86: Is there a timeout value in the sd_lun
			 * for this condition?
			 */
			sd_set_retry_bp(un, bp, un->un_busy_timeout / 500,
			    kstat_runq_back_to_waitq);
			goto exit;

		case TRAN_FATAL_ERROR:
			un->un_tran_fatal_count++;
			/* FALLTHRU */

		case TRAN_BADPKT:
		default:
			un->un_ncmds_in_transport--;
			ASSERT(un->un_ncmds_in_transport >= 0);

			/*
			 * If this is our REQUEST SENSE command with a
			 * transport error, we must get back the pointers
			 * to the original buf, and mark the REQUEST
			 * SENSE command as "available".
			 */
			if (bp == un->un_rqs_bp) {
				bp = sd_mark_rqs_idle(un, xp);
				xp = SD_GET_XBUF(bp);
			} else {
				/*
				 * Legacy behavior: do not update transport
				 * error count for request sense commands.
				 */
				SD_UPDATE_ERRSTATS(un, sd_transerrs);
			}

			SD_UPDATE_KSTATS(un, kstat_runq_exit, bp);
			sd_print_transport_rejected_message(un, xp, rval);

			/*
			 * This command will be terminated by SD driver due
			 * to a fatal transport error. We should post
			 * ereport.io.scsi.cmd.disk.tran with driver-assessment
			 * of "fail" for any command to indicate this
			 * situation.
			 */
			if (xp->xb_ena > 0) {
				ASSERT(un->un_fm_private != NULL);
				sfip = un->un_fm_private;
				sfip->fm_ssc.ssc_flags |= SSC_FLAGS_TRAN_ABORT;
				sd_ssc_extract_info(&sfip->fm_ssc, un,
				    xp->xb_pktp, bp, xp);
				sd_ssc_post(&sfip->fm_ssc, SD_FM_DRV_FATAL);
			}

			/*
			 * We must use sd_return_failed_command_no_restart() to
			 * avoid a recursive call back into sd_start_cmds().
			 * However this also means that we must keep processing
			 * the waitq here in order to avoid stalling.
			 */
			sd_return_failed_command_no_restart(un, bp, EIO);

			/*
			 * Notify any threads waiting in sd_ddi_suspend() that
			 * a command completion has occurred.
			 */
			if (un->un_state == SD_STATE_SUSPENDED) {
				cv_broadcast(&un->un_disk_busy_cv);
			}

			if (bp == immed_bp) {
				/* immed_bp is gone by now, so clear this */
				immed_bp = NULL;
			}
			break;
		}

	} while (immed_bp == NULL);

exit:
	ASSERT(mutex_owned(SD_MUTEX(un)));
	SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sd_start_cmds: exit\n");
}


/*
 * Function: sd_return_command
 *
 * Description: Returns a command to its originator (with or without an
 *		error). Also starts commands waiting to be transported
 *		to the target.
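 *
 *		Callers hold SD_MUTEX(un) across this call (see the
 *		ASSERTs below); sdintr(), for example, uses
 *		sd_return_command(un, bp) to complete a command that
 *		finished successfully.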
 *
 * Context: May be called from interrupt, kernel, or timeout context
 */

static void
sd_return_command(struct sd_lun *un, struct buf *bp)
{
	struct sd_xbuf *xp;
	struct scsi_pkt *pktp;
	struct sd_fm_internal *sfip;

	ASSERT(bp != NULL);
	ASSERT(un != NULL);
	ASSERT(mutex_owned(SD_MUTEX(un)));
	ASSERT(bp != un->un_rqs_bp);
	xp = SD_GET_XBUF(bp);
	ASSERT(xp != NULL);

	pktp = SD_GET_PKTP(bp);
	sfip = (struct sd_fm_internal *)un->un_fm_private;
	ASSERT(sfip != NULL);

	SD_TRACE(SD_LOG_IO_CORE, un, "sd_return_command: entry\n");

	/*
	 * Note: check for the "sdrestart failed" case.
	 */
	if ((un->un_partial_dma_supported == 1) &&
	    ((xp->xb_pkt_flags & SD_XB_USCSICMD) != SD_XB_USCSICMD) &&
	    (geterror(bp) == 0) && (xp->xb_dma_resid != 0) &&
	    (xp->xb_pktp->pkt_resid == 0)) {

		if (sd_setup_next_xfer(un, bp, pktp, xp) != 0) {
			/*
			 * Successfully set up next portion of cmd
			 * transfer, try sending it
			 */
			sd_retry_command(un, bp, SD_RETRIES_NOCHECK,
			    NULL, NULL, 0, (clock_t)0, NULL);
			sd_start_cmds(un, NULL);
			return;	/* Note:x86: need a return here? */
		}
	}

	/*
	 * If this is the failfast bp, clear it from un_failfast_bp. This
	 * can happen if upon being re-tried the failfast bp either
	 * succeeded or encountered another error (possibly even a different
	 * error than the one that precipitated the failfast state, but in
	 * that case it would have had to exhaust retries as well). Either
	 * way, this should not occur while the instance is in the active
	 * failfast state.
	 */
	if (bp == un->un_failfast_bp) {
		ASSERT(un->un_failfast_state == SD_FAILFAST_INACTIVE);
		un->un_failfast_bp = NULL;
	}

	/*
	 * Clear the failfast state upon successful completion of ANY cmd.
	 */
	if (bp->b_error == 0) {
		un->un_failfast_state = SD_FAILFAST_INACTIVE;
		/*
		 * If this is a successful command that had been retried,
		 * treat it as a recovered command and post an ereport
		 * with a driver-assessment of "recovered".
		 */
		if (xp->xb_ena > 0) {
			sd_ssc_extract_info(&sfip->fm_ssc, un, pktp, bp, xp);
			sd_ssc_post(&sfip->fm_ssc, SD_FM_DRV_RECOVERY);
		}
	} else {
		/*
		 * If this is a failed non-USCSI command, post an ereport
		 * with the driver-assessment set accordingly ("fail" or
		 * "fatal").
		 */
		if (!(xp->xb_pkt_flags & SD_XB_USCSICMD)) {
			sd_ssc_extract_info(&sfip->fm_ssc, un, pktp, bp, xp);
			sd_ssc_post(&sfip->fm_ssc, SD_FM_DRV_FATAL);
		}
	}

	/*
	 * This is used if the command was retried one or more times. Show that
	 * we are done with it, and allow processing of the waitq to resume.
	 */
	if (bp == un->un_retry_bp) {
		SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
		    "sd_return_command: un:0x%p: "
		    "RETURNING retry_bp:0x%p\n", un, un->un_retry_bp);
		un->un_retry_bp = NULL;
		un->un_retry_statp = NULL;
	}

	SD_UPDATE_RDWR_STATS(un, bp);
	SD_UPDATE_PARTITION_STATS(un, bp);

	switch (un->un_state) {
	case SD_STATE_SUSPENDED:
		/*
		 * Notify any threads waiting in sd_ddi_suspend() that
		 * a command completion has occurred.
		 */
		cv_broadcast(&un->un_disk_busy_cv);
		break;
	default:
		sd_start_cmds(un, NULL);
		break;
	}

	/* Return this command up the iodone chain to its originator. */
	mutex_exit(SD_MUTEX(un));

	(*(sd_destroypkt_map[xp->xb_chain_iodone]))(bp);
	xp->xb_pktp = NULL;

	SD_BEGIN_IODONE(xp->xb_chain_iodone, un, bp);

	ASSERT(!mutex_owned(SD_MUTEX(un)));
	mutex_enter(SD_MUTEX(un));

	SD_TRACE(SD_LOG_IO_CORE, un, "sd_return_command: exit\n");
}


/*
 * Function: sd_return_failed_command
 *
 * Description: Command completion when an error occurred.
 *
 * Context: May be called from interrupt context
 */

static void
sd_return_failed_command(struct sd_lun *un, struct buf *bp, int errcode)
{
	ASSERT(bp != NULL);
	ASSERT(un != NULL);
	ASSERT(mutex_owned(SD_MUTEX(un)));

	SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
	    "sd_return_failed_command: entry\n");

	/*
	 * b_resid could already be nonzero due to a partial data
	 * transfer, so do not change it here.
	 */
	SD_BIOERROR(bp, errcode);

	sd_return_command(un, bp);
	SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
	    "sd_return_failed_command: exit\n");
}


/*
 * Function: sd_return_failed_command_no_restart
 *
 * Description: Same as sd_return_failed_command, but ensures that no
 *		call back into sd_start_cmds will be issued.
 *
 * Context: May be called from interrupt context
 */

static void
sd_return_failed_command_no_restart(struct sd_lun *un, struct buf *bp,
    int errcode)
{
	struct sd_xbuf *xp;

	ASSERT(bp != NULL);
	ASSERT(un != NULL);
	ASSERT(mutex_owned(SD_MUTEX(un)));
	xp = SD_GET_XBUF(bp);
	ASSERT(xp != NULL);
	ASSERT(errcode != 0);

	SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
	    "sd_return_failed_command_no_restart: entry\n");

	/*
	 * b_resid could already be nonzero due to a partial data
	 * transfer, so do not change it here.
	 */
	SD_BIOERROR(bp, errcode);

	/*
	 * If this is the failfast bp, clear it. This can happen if the
	 * failfast bp encountered a fatal error when we attempted to
	 * re-try it (such as a scsi_transport(9F) failure). However
	 * we should NOT be in an active failfast state if the failfast
	 * bp is not NULL.
	 */
	if (bp == un->un_failfast_bp) {
		ASSERT(un->un_failfast_state == SD_FAILFAST_INACTIVE);
		un->un_failfast_bp = NULL;
	}

	if (bp == un->un_retry_bp) {
		/*
		 * This command was retried one or more times. Show that we are
		 * done with it, and allow processing of the waitq to resume.
		 */
		SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
		    "sd_return_failed_command_no_restart: "
		    " un:0x%p: RETURNING retry_bp:0x%p\n", un, un->un_retry_bp);
		un->un_retry_bp = NULL;
		un->un_retry_statp = NULL;
	}

	SD_UPDATE_RDWR_STATS(un, bp);
	SD_UPDATE_PARTITION_STATS(un, bp);

	mutex_exit(SD_MUTEX(un));

	if (xp->xb_pktp != NULL) {
		(*(sd_destroypkt_map[xp->xb_chain_iodone]))(bp);
		xp->xb_pktp = NULL;
	}

	SD_BEGIN_IODONE(xp->xb_chain_iodone, un, bp);

	mutex_enter(SD_MUTEX(un));

	SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
	    "sd_return_failed_command_no_restart: exit\n");
}


/*
 * Function: sd_retry_command
 *
 * Description: Queue up a command for retry, or (optionally) fail it
 *		if retry counts are exhausted.
 *
 * Arguments: un - Pointer to the sd_lun struct for the target.
 *
 *	bp - Pointer to the buf for the command to be retried.
 *
 *	retry_check_flag - Flag to see which (if any) of the retry
 *	    counts should be decremented/checked. If the indicated
 *	    retry count is exhausted, then the command will not be
 *	    retried; it will be failed instead. This should use a
 *	    value equal to one of the following:
 *
 *		SD_RETRIES_NOCHECK
 *		SD_RETRIES_STANDARD
 *		SD_RETRIES_VICTIM
 *
 *	    Optionally may be bitwise-OR'ed with SD_RETRIES_ISOLATE
 *	    if the check should be made to see if FLAG_ISOLATE is set
 *	    in the pkt. If FLAG_ISOLATE is set, then the command is
 *	    not retried, it is simply failed.
 *
 *	user_funcp - Ptr to function to call before dispatching the
 *	    command. May be NULL if no action needs to be performed.
 *	    (Primarily intended for printing messages.)
 *
 *	user_arg - Optional argument to be passed along to
 *	    the user_funcp call.
 *
 *	failure_code - errno return code to set in the bp if the
 *	    command is going to be failed.
 *
 *	retry_delay - Retry delay interval in (clock_t) units. May
 *	    be zero which indicates that the command should be retried
 *	    immediately (ie, without an intervening delay).
 *
 *	statp - Ptr to kstat function to be updated if the command
 *	    is queued for a delayed retry. May be NULL if no kstat
 *	    update is desired.
 *
 * Context: May be called from interrupt context.
 */

static void
sd_retry_command(struct sd_lun *un, struct buf *bp, int retry_check_flag,
    void (*user_funcp)(struct sd_lun *un, struct buf *bp, void *argp, int
    code), void *user_arg, int failure_code, clock_t retry_delay,
    void (*statp)(kstat_io_t *))
{
	struct sd_xbuf *xp;
	struct scsi_pkt *pktp;
	struct sd_fm_internal *sfip;

	ASSERT(un != NULL);
	ASSERT(mutex_owned(SD_MUTEX(un)));
	ASSERT(bp != NULL);
	xp = SD_GET_XBUF(bp);
	ASSERT(xp != NULL);
	pktp = SD_GET_PKTP(bp);
	ASSERT(pktp != NULL);

	sfip = (struct sd_fm_internal *)un->un_fm_private;
	ASSERT(sfip != NULL);

	SD_TRACE(SD_LOG_IO | SD_LOG_ERROR, un,
	    "sd_retry_command: entry: bp:0x%p xp:0x%p\n", bp, xp);

	/*
	 * If we are syncing or dumping, fail the command to avoid
	 * recursively calling back into scsi_transport().
	 */
	if (ddi_in_panic()) {
		goto fail_command_no_log;
	}

	/*
	 * We should never be retrying a command with FLAG_DIAGNOSE set, so
	 * log an error and fail the command.
	 */
	if ((pktp->pkt_flags & FLAG_DIAGNOSE) != 0) {
		scsi_log(SD_DEVINFO(un), sd_label, CE_NOTE,
		    "ERROR, retrying FLAG_DIAGNOSE command.\n");
		sd_dump_memory(un, SD_LOG_IO, "CDB",
		    (uchar_t *)pktp->pkt_cdbp, CDB_SIZE, SD_LOG_HEX);
		sd_dump_memory(un, SD_LOG_IO, "Sense Data",
		    (uchar_t *)xp->xb_sense_data, SENSE_LENGTH, SD_LOG_HEX);
		goto fail_command;
	}

	/*
	 * If we are suspended, put the command onto the head of the
	 * wait queue since we don't want to start any more commands,
	 * and clear un_retry_bp. When we are resumed, the commands on
	 * the wait queue will be handled.
	 */
	switch (un->un_state) {
	case SD_STATE_SUSPENDED:
	case SD_STATE_DUMPING:
		bp->av_forw = un->un_waitq_headp;
		un->un_waitq_headp = bp;
		if (un->un_waitq_tailp == NULL) {
			un->un_waitq_tailp = bp;
		}
		if (bp == un->un_retry_bp) {
			un->un_retry_bp = NULL;
			un->un_retry_statp = NULL;
		}
		SD_UPDATE_KSTATS(un, kstat_waitq_enter, bp);
		SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sd_retry_command: "
		    "exiting; cmd bp:0x%p requeued for SUSPEND/DUMP\n", bp);
		return;
	default:
		break;
	}

	/*
	 * If the caller wants us to check FLAG_ISOLATE, then see if that
	 * is set; if it is then we do not want to retry the command.
	 * Normally, FLAG_ISOLATE is only used with USCSI cmds.
	 */
	if ((retry_check_flag & SD_RETRIES_ISOLATE) != 0) {
		if ((pktp->pkt_flags & FLAG_ISOLATE) != 0) {
			goto fail_command;
		}
	}


	/*
	 * If SD_RETRIES_FAILFAST is set, it indicates that either a
	 * command timeout or a selection timeout has occurred. This means
	 * that we were unable to establish any kind of communication with
	 * the target, and subsequent retries and/or commands are likely
	 * to encounter similar results and take a long time to complete.
	 *
	 * If this is a failfast error condition, we need to update the
	 * failfast state, even if this bp does not have B_FAILFAST set.
	 */
	if (retry_check_flag & SD_RETRIES_FAILFAST) {
		if (un->un_failfast_state == SD_FAILFAST_ACTIVE) {
			ASSERT(un->un_failfast_bp == NULL);
			/*
			 * If we are already in the active failfast state, and
			 * another failfast error condition has been detected,
			 * then fail this command if it has B_FAILFAST set.
			 * If B_FAILFAST is clear, then maintain the legacy
			 * behavior of retrying heroically, even though this
			 * will take a lot more time to fail the command.
			 */
			if (bp->b_flags & B_FAILFAST) {
				goto fail_command;
			}
		} else {
			/*
			 * We're not in the active failfast state, but we
			 * have a failfast error condition, so we must begin
			 * transition to the next state. We do this regardless
			 * of whether or not this bp has B_FAILFAST set.
			 */
			if (un->un_failfast_bp == NULL) {
				/*
				 * This is the first bp to meet a failfast
				 * condition so save it on un_failfast_bp &
				 * do normal retry processing. Do not enter
				 * active failfast state yet. This marks
				 * entry into the "failfast pending" state.
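				 *
				 * (The failfast machinery thus has three
				 * stages: inactive, "pending" once a first
				 * bp hits a failfast condition, and active
				 * once that same bp hits one again.)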
				 */
				un->un_failfast_bp = bp;

			} else if (un->un_failfast_bp == bp) {
				/*
				 * This is the second time *this* bp has
				 * encountered a failfast error condition,
				 * so enter active failfast state & flush
				 * queues as appropriate.
				 */
				un->un_failfast_state = SD_FAILFAST_ACTIVE;
				un->un_failfast_bp = NULL;
				sd_failfast_flushq(un);

				/*
				 * Fail this bp now if B_FAILFAST set;
				 * otherwise continue with retries. (It would
				 * be pretty ironic if this bp succeeded on a
				 * subsequent retry after we just flushed all
				 * the queues).
				 */
				if (bp->b_flags & B_FAILFAST) {
					goto fail_command;
				}

#if !defined(lint) && !defined(__lint)
			} else {
				/*
				 * If neither of the preceding conditionals
				 * was true, it means that there is some
				 * *other* bp that has met an initial failfast
				 * condition and is currently either being
				 * retried or is waiting to be retried. In
				 * that case we should perform normal retry
				 * processing on *this* bp, since there is a
				 * chance that the current failfast condition
				 * is transient and recoverable. If that does
				 * not turn out to be the case, then retries
				 * will be cleared when the wait queue is
				 * flushed anyway.
				 */
#endif
			}
		}
	} else {
		/*
		 * SD_RETRIES_FAILFAST is clear, which indicates that we
		 * likely were able to at least establish some level of
		 * communication with the target, and subsequent commands
		 * and/or retries are likely to get through to the target.
		 * In this case we want to be aggressive about clearing
		 * the failfast state. Note that this does not affect
		 * the "failfast pending" condition.
		 */
		un->un_failfast_state = SD_FAILFAST_INACTIVE;
	}


	/*
	 * Check the specified retry count to see if we can still do
	 * any retries with this pkt before we should fail it.
	 */
	switch (retry_check_flag & SD_RETRIES_MASK) {
	case SD_RETRIES_VICTIM:
		/*
		 * Check the victim retry count. If exhausted, then fall
		 * thru & check against the standard retry count.
		 */
		if (xp->xb_victim_retry_count < un->un_victim_retry_count) {
			/* Increment count & proceed with the retry */
			xp->xb_victim_retry_count++;
			break;
		}
		/* Victim retries exhausted, fall back to std. retries... */
		/* FALLTHRU */

	case SD_RETRIES_STANDARD:
		if (xp->xb_retry_count >= un->un_retry_count) {
			/* Retries exhausted, fail the command */
			SD_TRACE(SD_LOG_IO_CORE, un,
			    "sd_retry_command: retries exhausted!\n");
			/*
			 * update b_resid for failed SCMD_READ & SCMD_WRITE
			 * commands with nonzero pkt_resid.
			 */
			if ((pktp->pkt_reason == CMD_CMPLT) &&
			    (SD_GET_PKT_STATUS(pktp) == STATUS_GOOD) &&
			    (pktp->pkt_resid != 0)) {
				uchar_t op = SD_GET_PKT_OPCODE(pktp) & 0x1F;
				if ((op == SCMD_READ) || (op == SCMD_WRITE)) {
					SD_UPDATE_B_RESID(bp, pktp);
				}
			}
			goto fail_command;
		}
		xp->xb_retry_count++;
		SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
		    "sd_retry_command: retry count:%d\n", xp->xb_retry_count);
		break;

	case SD_RETRIES_UA:
		if (xp->xb_ua_retry_count >= sd_ua_retry_count) {
			/* Retries exhausted, fail the command */
			scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
			    "Unit Attention retries exhausted. "
			    "Check the target.\n");
			goto fail_command;
		}
		xp->xb_ua_retry_count++;
		SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
		    "sd_retry_command: retry count:%d\n",
		    xp->xb_ua_retry_count);
		break;

	case SD_RETRIES_BUSY:
		if (xp->xb_retry_count >= un->un_busy_retry_count) {
			/* Retries exhausted, fail the command */
			SD_TRACE(SD_LOG_IO_CORE, un,
			    "sd_retry_command: retries exhausted!\n");
			goto fail_command;
		}
		xp->xb_retry_count++;
		SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
		    "sd_retry_command: retry count:%d\n", xp->xb_retry_count);
		break;

	case SD_RETRIES_NOCHECK:
	default:
		/* No retry count to check. Just proceed with the retry */
		break;
	}

	xp->xb_pktp->pkt_flags |= FLAG_HEAD;

	/*
	 * If this is a non-USCSI command that is being retried, post an
	 * ereport with a driver-assessment of "retry". For partial DMA,
	 * request sense, and STATUS_QFULL there is no hardware error, so
	 * we bypass ereport posting.
	 */
	if (failure_code != 0) {
		if (!(xp->xb_pkt_flags & SD_XB_USCSICMD)) {
			sd_ssc_extract_info(&sfip->fm_ssc, un, pktp, bp, xp);
			sd_ssc_post(&sfip->fm_ssc, SD_FM_DRV_RETRY);
		}
	}

	/*
	 * If we were given a zero timeout, we must attempt to retry the
	 * command immediately (ie, without a delay).
	 */
	if (retry_delay == 0) {
		/*
		 * Check some limiting conditions to see if we can actually
		 * do the immediate retry. If we cannot, then we must
		 * fall back to queueing up a delayed retry.
		 */
		if (un->un_ncmds_in_transport >= un->un_throttle) {
			/*
			 * We are at the throttle limit for the target,
			 * fall back to delayed retry.
			 */
			retry_delay = un->un_busy_timeout;
			statp = kstat_waitq_enter;
			SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
			    "sd_retry_command: immed. retry hit "
			    "throttle!\n");
		} else {
			/*
			 * We're clear to proceed with the immediate retry.
			 * First call the user-provided function (if any)
			 */
			if (user_funcp != NULL) {
				(*user_funcp)(un, bp, user_arg,
				    SD_IMMEDIATE_RETRY_ISSUED);
#ifdef __lock_lint
				sd_print_incomplete_msg(un, bp, user_arg,
				    SD_IMMEDIATE_RETRY_ISSUED);
				sd_print_cmd_incomplete_msg(un, bp, user_arg,
				    SD_IMMEDIATE_RETRY_ISSUED);
				sd_print_sense_failed_msg(un, bp, user_arg,
				    SD_IMMEDIATE_RETRY_ISSUED);
#endif
			}

			SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
			    "sd_retry_command: issuing immediate retry\n");

			/*
			 * Call sd_start_cmds() to transport the command to
			 * the target.
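			 * Passing bp (rather than NULL) makes it the
			 * immed_bp, so sd_start_cmds() transports this
			 * command directly instead of scanning the
			 * waitq.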
			 */
			sd_start_cmds(un, bp);

			SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
			    "sd_retry_command exit\n");
			return;
		}
	}

	/*
	 * Set up to retry the command after a delay.
	 * First call the user-provided function (if any)
	 */
	if (user_funcp != NULL) {
		(*user_funcp)(un, bp, user_arg, SD_DELAYED_RETRY_ISSUED);
	}

	sd_set_retry_bp(un, bp, retry_delay, statp);

	SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sd_retry_command: exit\n");
	return;

fail_command:

	if (user_funcp != NULL) {
		(*user_funcp)(un, bp, user_arg, SD_NO_RETRY_ISSUED);
	}

fail_command_no_log:

	SD_INFO(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
	    "sd_retry_command: returning failed command\n");

	sd_return_failed_command(un, bp, failure_code);

	SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sd_retry_command: exit\n");
}


/*
 * Function: sd_set_retry_bp
 *
 * Description: Set up the given bp for retry.
 *
 * Arguments: un - ptr to associated softstate
 *	bp - ptr to buf(9S) for the command
 *	retry_delay - time interval before issuing retry (may be 0)
 *	statp - optional pointer to kstat function
 *
 * Context: May be called under interrupt context
 */

static void
sd_set_retry_bp(struct sd_lun *un, struct buf *bp, clock_t retry_delay,
    void (*statp)(kstat_io_t *))
{
	ASSERT(un != NULL);
	ASSERT(mutex_owned(SD_MUTEX(un)));
	ASSERT(bp != NULL);

	SD_TRACE(SD_LOG_IO | SD_LOG_ERROR, un,
	    "sd_set_retry_bp: entry: un:0x%p bp:0x%p\n", un, bp);

	/*
	 * Indicate that the command is being retried. This will not allow any
	 * other commands on the wait queue to be transported to the target
	 * until this command has been completed (success or failure). The
	 * "retry command" is not transported to the target until the given
	 * time delay expires, unless the user specified a 0 retry_delay.
	 *
	 * Note: the timeout(9F) callback routine is what actually calls
	 * sd_start_cmds() to transport the command, with the exception of a
	 * zero retry_delay. The only current implementor of a zero retry delay
	 * is the case where a START_STOP_UNIT is sent to spin-up a device.
	 */
	if (un->un_retry_bp == NULL) {
		ASSERT(un->un_retry_statp == NULL);
		un->un_retry_bp = bp;

		/*
		 * If the user has not specified a delay the command should
		 * be queued and no timeout should be scheduled.
		 */
		if (retry_delay == 0) {
			/*
			 * Save the kstat pointer that will be used in the
			 * call to SD_UPDATE_KSTATS() below, so that
			 * sd_start_cmds() can correctly decrement the waitq
			 * count when it is time to transport this command.
			 */
			un->un_retry_statp = statp;
			goto done;
		}
	}

	if (un->un_retry_bp == bp) {
		/*
		 * Save the kstat pointer that will be used in the call to
		 * SD_UPDATE_KSTATS() below, so that sd_start_cmds() can
		 * correctly decrement the waitq count when it is time to
		 * transport this command.
		 */
		un->un_retry_statp = statp;

		/*
		 * Schedule a timeout if:
		 *   1) The user has specified a delay.
		 *   2) There is not a START_STOP_UNIT callback pending.
		 *
		 * If no delay has been specified, then it is up to the caller
		 * to ensure that IO processing continues without stalling.
		 * Effectively, this means that the caller will issue the
		 * required call to sd_start_cmds(). The START_STOP_UNIT
		 * callback does this after the START STOP UNIT command has
		 * completed. In either of these cases we should not schedule
		 * a timeout callback here. Also don't schedule the timeout if
		 * an SD_PATH_DIRECT_PRIORITY command is waiting to restart.
		 */
		if ((retry_delay != 0) && (un->un_startstop_timeid == NULL) &&
		    (un->un_direct_priority_timeid == NULL)) {
			un->un_retry_timeid =
			    timeout(sd_start_retry_command, un, retry_delay);
			SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
			    "sd_set_retry_bp: setting timeout: un: 0x%p"
			    " bp:0x%p un_retry_timeid:0x%p\n",
			    un, bp, un->un_retry_timeid);
		}
	} else {
		/*
		 * We only get in here if there is already another command
		 * waiting to be retried. In this case, we just put the
		 * given command onto the wait queue, so it can be transported
		 * after the current retry command has completed.
		 *
		 * Also make sure that if the command at the head of the wait
		 * queue is the un_failfast_bp, we do not put any other
		 * commands to be retried ahead of it.
		 */
		if ((un->un_failfast_bp != NULL) &&
		    (un->un_failfast_bp == un->un_waitq_headp)) {
			/*
			 * Enqueue this command AFTER the first command on
			 * the wait queue (which is also un_failfast_bp).
			 */
			bp->av_forw = un->un_waitq_headp->av_forw;
			un->un_waitq_headp->av_forw = bp;
			if (un->un_waitq_headp == un->un_waitq_tailp) {
				un->un_waitq_tailp = bp;
			}
		} else {
			/* Enqueue this command at the head of the waitq. */
			bp->av_forw = un->un_waitq_headp;
			un->un_waitq_headp = bp;
			if (un->un_waitq_tailp == NULL) {
				un->un_waitq_tailp = bp;
			}
		}

		if (statp == NULL) {
			statp = kstat_waitq_enter;
		}
		SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
		    "sd_set_retry_bp: un:0x%p already delayed retry\n", un);
	}

done:
	if (statp != NULL) {
		SD_UPDATE_KSTATS(un, statp, bp);
	}

	SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
	    "sd_set_retry_bp: exit un:0x%p\n", un);
}


/*
 * Function: sd_start_retry_command
 *
 * Description: Start the command that has been waiting on the target's
 *		retry queue. Called from timeout(9F) context after the
 *		retry delay interval has expired.
 *
 * Arguments: arg - pointer to associated softstate for the device.
 *
 * Context: timeout(9F) thread context. May not sleep.
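 *
 * Note: Scheduled via timeout(sd_start_retry_command, un, delay) from
 *	sd_set_retry_bp() and from the pkt allocation recovery path in
 *	sd_start_cmds().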
 */

static void
sd_start_retry_command(void *arg)
{
	struct sd_lun *un = arg;

	ASSERT(un != NULL);
	ASSERT(!mutex_owned(SD_MUTEX(un)));

	SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
	    "sd_start_retry_command: entry\n");

	mutex_enter(SD_MUTEX(un));

	un->un_retry_timeid = NULL;

	if (un->un_retry_bp != NULL) {
		SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
		    "sd_start_retry_command: un:0x%p STARTING bp:0x%p\n",
		    un, un->un_retry_bp);
		sd_start_cmds(un, un->un_retry_bp);
	}

	mutex_exit(SD_MUTEX(un));

	SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
	    "sd_start_retry_command: exit\n");
}


/*
 * Function: sd_start_direct_priority_command
 *
 * Description: Used to re-start an SD_PATH_DIRECT_PRIORITY command that had
 *		received TRAN_BUSY when we called scsi_transport() to send it
 *		to the underlying HBA. This function is called from timeout(9F)
 *		context after the delay interval has expired.
 *
 * Arguments: arg - pointer to associated buf(9S) to be restarted.
 *
 * Context: timeout(9F) thread context. May not sleep.
 */

static void
sd_start_direct_priority_command(void *arg)
{
	struct buf *priority_bp = arg;
	struct sd_lun *un;

	ASSERT(priority_bp != NULL);
	un = SD_GET_UN(priority_bp);
	ASSERT(un != NULL);
	ASSERT(!mutex_owned(SD_MUTEX(un)));

	SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
	    "sd_start_direct_priority_command: entry\n");

	mutex_enter(SD_MUTEX(un));
	un->un_direct_priority_timeid = NULL;
	sd_start_cmds(un, priority_bp);
	mutex_exit(SD_MUTEX(un));

	SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
	    "sd_start_direct_priority_command: exit\n");
}


/*
 * Function: sd_send_request_sense_command
 *
 * Description: Sends a REQUEST SENSE command to the target
 *
 * Context: May be called from interrupt context.
 */

static void
sd_send_request_sense_command(struct sd_lun *un, struct buf *bp,
    struct scsi_pkt *pktp)
{
	ASSERT(bp != NULL);
	ASSERT(un != NULL);
	ASSERT(mutex_owned(SD_MUTEX(un)));

	SD_TRACE(SD_LOG_IO | SD_LOG_ERROR, un, "sd_send_request_sense_command: "
	    "entry: buf:0x%p\n", bp);

	/*
	 * If we are syncing or dumping, then fail the command to avoid a
	 * recursive callback into scsi_transport(). Also fail the command
	 * if we are suspended (legacy behavior).
	 */
	if (ddi_in_panic() || (un->un_state == SD_STATE_SUSPENDED) ||
	    (un->un_state == SD_STATE_DUMPING)) {
		sd_return_failed_command(un, bp, EIO);
		SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
		    "sd_send_request_sense_command: syncing/dumping, exit\n");
		return;
	}

	/*
	 * Retry the failed command and don't issue the request sense if:
	 *   1) the sense buf is busy
	 *   2) we have 1 or more outstanding commands on the target
	 *      (the sense data will be cleared or invalidated anyway)
	 *
	 * Note: There could be an issue with not checking a retry limit here;
	 * the problem is determining which retry limit to check.
	 */
	if ((un->un_sense_isbusy != 0) || (un->un_ncmds_in_transport > 0)) {
		/* Don't retry if the command is flagged as non-retryable */
		if ((pktp->pkt_flags & FLAG_DIAGNOSE) == 0) {
			sd_retry_command(un, bp, SD_RETRIES_NOCHECK,
			    NULL, NULL, 0, un->un_busy_timeout,
			    kstat_waitq_enter);
			SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
			    "sd_send_request_sense_command: "
			    "at full throttle, retrying exit\n");
		} else {
			sd_return_failed_command(un, bp, EIO);
			SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
			    "sd_send_request_sense_command: "
			    "at full throttle, non-retryable exit\n");
		}
		return;
	}

	sd_mark_rqs_busy(un, bp);
	sd_start_cmds(un, un->un_rqs_bp);

	SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
	    "sd_send_request_sense_command: exit\n");
}


/*
 * Function: sd_mark_rqs_busy
 *
 * Description: Indicate that the request sense bp for this instance is
 *		in use.
 *
 * Context: May be called under interrupt context
 */

static void
sd_mark_rqs_busy(struct sd_lun *un, struct buf *bp)
{
	struct sd_xbuf *sense_xp;

	ASSERT(un != NULL);
	ASSERT(bp != NULL);
	ASSERT(mutex_owned(SD_MUTEX(un)));
	ASSERT(un->un_sense_isbusy == 0);

	SD_TRACE(SD_LOG_IO_CORE, un, "sd_mark_rqs_busy: entry: "
	    "buf:0x%p xp:0x%p un:0x%p\n", bp, SD_GET_XBUF(bp), un);

	sense_xp = SD_GET_XBUF(un->un_rqs_bp);
	ASSERT(sense_xp != NULL);

	SD_INFO(SD_LOG_IO, un,
	    "sd_mark_rqs_busy: entry: sense_xp:0x%p\n", sense_xp);

	ASSERT(sense_xp->xb_pktp != NULL);
	ASSERT((sense_xp->xb_pktp->pkt_flags & (FLAG_SENSING | FLAG_HEAD))
	    == (FLAG_SENSING | FLAG_HEAD));

	un->un_sense_isbusy = 1;
	un->un_rqs_bp->b_resid = 0;
	sense_xp->xb_pktp->pkt_resid = 0;
	sense_xp->xb_pktp->pkt_reason = 0;

	/* So we can get back the bp at interrupt time! */
	sense_xp->xb_sense_bp = bp;

	bzero(un->un_rqs_bp->b_un.b_addr, SENSE_LENGTH);

	/*
	 * Mark this buf as awaiting sense data. (This is already set in
	 * the pkt_flags for the RQS packet.)
	 */
	((SD_GET_XBUF(bp))->xb_pktp)->pkt_flags |= FLAG_SENSING;

	/* Request sense down same path */
	if (scsi_pkt_allocated_correctly((SD_GET_XBUF(bp))->xb_pktp) &&
	    ((SD_GET_XBUF(bp))->xb_pktp)->pkt_path_instance)
		sense_xp->xb_pktp->pkt_path_instance =
		    ((SD_GET_XBUF(bp))->xb_pktp)->pkt_path_instance;

	sense_xp->xb_retry_count = 0;
	sense_xp->xb_victim_retry_count = 0;
	sense_xp->xb_ua_retry_count = 0;
	sense_xp->xb_nr_retry_count = 0;
	sense_xp->xb_dma_resid = 0;

	/* Clean up the fields for auto-request sense */
	sense_xp->xb_sense_status = 0;
	sense_xp->xb_sense_state = 0;
	sense_xp->xb_sense_resid = 0;
	bzero(sense_xp->xb_sense_data, sizeof (sense_xp->xb_sense_data));

	SD_TRACE(SD_LOG_IO_CORE, un, "sd_mark_rqs_busy: exit\n");
}


/*
 * Function: sd_mark_rqs_idle
 *
 * Description: SD_MUTEX must be held continuously through this routine
 *		to prevent reuse of the rqs struct before the caller can
 *		complete its processing.
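 *
 *		The typical caller pattern (see sd_start_cmds()) is:
 *
 *			bp = sd_mark_rqs_idle(un, xp);
 *
 *		which recovers the original buf that was waiting on the
 *		sense data.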
 *
 * Return Code: Pointer to the RQS buf
 *
 * Context: May be called under interrupt context
 */

static struct buf *
sd_mark_rqs_idle(struct sd_lun *un, struct sd_xbuf *sense_xp)
{
	struct buf *bp;
	ASSERT(un != NULL);
	ASSERT(sense_xp != NULL);
	ASSERT(mutex_owned(SD_MUTEX(un)));
	ASSERT(un->un_sense_isbusy != 0);

	un->un_sense_isbusy = 0;
	bp = sense_xp->xb_sense_bp;
	sense_xp->xb_sense_bp = NULL;

	/* This pkt is no longer interested in getting sense data */
	((SD_GET_XBUF(bp))->xb_pktp)->pkt_flags &= ~FLAG_SENSING;

	return (bp);
}



/*
 * Function: sd_alloc_rqs
 *
 * Description: Set up the unit to receive auto request sense data
 *
 * Return Code: DDI_SUCCESS or DDI_FAILURE
 *
 * Context: Called under attach(9E) context
 */

static int
sd_alloc_rqs(struct scsi_device *devp, struct sd_lun *un)
{
	struct sd_xbuf *xp;

	ASSERT(un != NULL);
	ASSERT(!mutex_owned(SD_MUTEX(un)));
	ASSERT(un->un_rqs_bp == NULL);
	ASSERT(un->un_rqs_pktp == NULL);

	/*
	 * First allocate the required buf and scsi_pkt structs, then set up
	 * the CDB in the scsi_pkt for a REQUEST SENSE command.
	 */
	un->un_rqs_bp = scsi_alloc_consistent_buf(&devp->sd_address, NULL,
	    MAX_SENSE_LENGTH, B_READ, SLEEP_FUNC, NULL);
	if (un->un_rqs_bp == NULL) {
		return (DDI_FAILURE);
	}

	un->un_rqs_pktp = scsi_init_pkt(&devp->sd_address, NULL, un->un_rqs_bp,
	    CDB_GROUP0, 1, 0, PKT_CONSISTENT, SLEEP_FUNC, NULL);

	if (un->un_rqs_pktp == NULL) {
		sd_free_rqs(un);
		return (DDI_FAILURE);
	}

	/* Set up the CDB in the scsi_pkt for a REQUEST SENSE command. */
	(void) scsi_setup_cdb((union scsi_cdb *)un->un_rqs_pktp->pkt_cdbp,
	    SCMD_REQUEST_SENSE, 0, MAX_SENSE_LENGTH, 0);

	SD_FILL_SCSI1_LUN(un, un->un_rqs_pktp);

	/* Set up the other needed members in the ARQ scsi_pkt. */
	un->un_rqs_pktp->pkt_comp = sdintr;
	un->un_rqs_pktp->pkt_time = sd_io_time;
	un->un_rqs_pktp->pkt_flags |=
	    (FLAG_SENSING | FLAG_HEAD);	/* (1222170) */

	/*
	 * Allocate & init the sd_xbuf struct for the RQS command. Do not
	 * provide any initpkt, destroypkt routines as we take care of
	 * scsi_pkt allocation/freeing here and in sd_free_rqs().
	 */
	xp = kmem_alloc(sizeof (struct sd_xbuf), KM_SLEEP);
	sd_xbuf_init(un, un->un_rqs_bp, xp, SD_CHAIN_NULL, NULL);
	xp->xb_pktp = un->un_rqs_pktp;
	SD_INFO(SD_LOG_ATTACH_DETACH, un,
	    "sd_alloc_rqs: un 0x%p, rqs xp 0x%p, pkt 0x%p, buf 0x%p\n",
	    un, xp, un->un_rqs_pktp, un->un_rqs_bp);

	/*
	 * Save the pointer to the request sense private bp so it can
	 * be retrieved in sdintr.
	 */
	un->un_rqs_pktp->pkt_private = un->un_rqs_bp;
	ASSERT(un->un_rqs_bp->b_private == xp);

	/*
	 * See if the HBA supports auto-request sense for the specified
	 * target/lun. If it does, then try to enable it (if not already
	 * enabled).
	 *
	 * Note: For some HBAs (ifp & sf), scsi_ifsetcap will always return
	 * failure, while for other HBAs (pln) scsi_ifsetcap will always
	 * return success. However, in both of these cases ARQ is always
	 * enabled and scsi_ifgetcap will always return true.
	 * The best approach is to issue the scsi_ifgetcap() first, then
	 * try the scsi_ifsetcap().
	 *
	 * The third case is an HBA (adp) that always returns enabled from
	 * scsi_ifgetcap even when ARQ is not enabled; the best approach
	 * there is to issue a scsi_ifsetcap and then a scsi_ifgetcap.
	 * Note: this case is to circumvent the Adaptec bug. (x86 only)
	 */

	if (un->un_f_is_fibre == TRUE) {
		un->un_f_arq_enabled = TRUE;
	} else {
#if defined(__i386) || defined(__amd64)
		/*
		 * Circumvent the Adaptec bug, remove this code when
		 * the bug is fixed
		 */
		(void) scsi_ifsetcap(SD_ADDRESS(un), "auto-rqsense", 1, 1);
#endif
		switch (scsi_ifgetcap(SD_ADDRESS(un), "auto-rqsense", 1)) {
		case 0:
			SD_INFO(SD_LOG_ATTACH_DETACH, un,
			    "sd_alloc_rqs: HBA supports ARQ\n");
			/*
			 * ARQ is supported by this HBA but currently is not
			 * enabled. Attempt to enable it and if successful then
			 * mark this instance as ARQ enabled.
			 */
			if (scsi_ifsetcap(SD_ADDRESS(un), "auto-rqsense", 1, 1)
			    == 1) {
				/* Successfully enabled ARQ in the HBA */
				SD_INFO(SD_LOG_ATTACH_DETACH, un,
				    "sd_alloc_rqs: ARQ enabled\n");
				un->un_f_arq_enabled = TRUE;
			} else {
				/* Could not enable ARQ in the HBA */
				SD_INFO(SD_LOG_ATTACH_DETACH, un,
				    "sd_alloc_rqs: failed ARQ enable\n");
				un->un_f_arq_enabled = FALSE;
			}
			break;
		case 1:
			/*
			 * ARQ is supported by this HBA and is already enabled.
			 * Just mark ARQ as enabled for this instance.
			 */
			SD_INFO(SD_LOG_ATTACH_DETACH, un,
			    "sd_alloc_rqs: ARQ already enabled\n");
			un->un_f_arq_enabled = TRUE;
			break;
		default:
			/*
			 * ARQ is not supported by this HBA; disable it for this
			 * instance.
			 */
			SD_INFO(SD_LOG_ATTACH_DETACH, un,
			    "sd_alloc_rqs: HBA does not support ARQ\n");
			un->un_f_arq_enabled = FALSE;
			break;
		}
	}

	return (DDI_SUCCESS);
}


/*
 * Function: sd_free_rqs
 *
 * Description: Cleanup for the per-instance RQS command.
 *
 * Context: Kernel thread context
 */

static void
sd_free_rqs(struct sd_lun *un)
{
	ASSERT(un != NULL);

	SD_TRACE(SD_LOG_IO_CORE, un, "sd_free_rqs: entry\n");

	/*
	 * If consistent memory is bound to a scsi_pkt, the pkt
	 * has to be destroyed *before* freeing the consistent memory.
	 * Don't change the sequence of these operations.
	 * scsi_destroy_pkt() might access memory, which isn't allowed,
	 * after it was freed in scsi_free_consistent_buf().
	 */
	if (un->un_rqs_pktp != NULL) {
		scsi_destroy_pkt(un->un_rqs_pktp);
		un->un_rqs_pktp = NULL;
	}

	if (un->un_rqs_bp != NULL) {
		struct sd_xbuf *xp = SD_GET_XBUF(un->un_rqs_bp);
		if (xp != NULL) {
			kmem_free(xp, sizeof (struct sd_xbuf));
		}
		scsi_free_consistent_buf(un->un_rqs_bp);
		un->un_rqs_bp = NULL;
	}
	SD_TRACE(SD_LOG_IO_CORE, un, "sd_free_rqs: exit\n");
}



/*
 * Function: sd_reduce_throttle
 *
 * Description: Reduces the maximum # of outstanding commands on a
 *		target to the current number of outstanding commands.
 *		Queues a timeout(9F) callback to restore the limit
 *		after a specified interval has elapsed.
 *		Typically used when we get a TRAN_BUSY return code
 *		back from scsi_transport().
 *
 * Arguments: un - ptr to the sd_lun softstate struct
 *	throttle_type: SD_THROTTLE_TRAN_BUSY or SD_THROTTLE_QFULL
 *
 * Context: May be called from interrupt context
 */

static void
sd_reduce_throttle(struct sd_lun *un, int throttle_type)
{
	ASSERT(un != NULL);
	ASSERT(mutex_owned(SD_MUTEX(un)));
	ASSERT(un->un_ncmds_in_transport >= 0);

	SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sd_reduce_throttle: "
	    "entry: un:0x%p un_throttle:%d un_ncmds_in_transport:%d\n",
	    un, un->un_throttle, un->un_ncmds_in_transport);

	if (un->un_throttle > 1) {
		if (un->un_f_use_adaptive_throttle == TRUE) {
			switch (throttle_type) {
			case SD_THROTTLE_TRAN_BUSY:
				if (un->un_busy_throttle == 0) {
					un->un_busy_throttle = un->un_throttle;
				}
				break;
			case SD_THROTTLE_QFULL:
				un->un_busy_throttle = 0;
				break;
			default:
				ASSERT(FALSE);
			}

			if (un->un_ncmds_in_transport > 0) {
				un->un_throttle = un->un_ncmds_in_transport;
			}

		} else {
			if (un->un_ncmds_in_transport == 0) {
				un->un_throttle = 1;
			} else {
				un->un_throttle = un->un_ncmds_in_transport;
			}
		}
	}

	/* Reschedule the timeout if none is currently active */
	if (un->un_reset_throttle_timeid == NULL) {
		un->un_reset_throttle_timeid = timeout(sd_restore_throttle,
		    un, SD_THROTTLE_RESET_INTERVAL);
		SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
		    "sd_reduce_throttle: timeout scheduled!\n");
	}

	SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sd_reduce_throttle: "
	    "exit: un:0x%p un_throttle:%d\n", un, un->un_throttle);
}



/*
 * Function: sd_restore_throttle
 *
 * Description: Callback function for timeout(9F). Resets the current
 *		value of un->un_throttle to its default.
 *
 * Arguments: arg - pointer to associated softstate for the device.
 *
 * Context: May be called from interrupt context
 */

static void
sd_restore_throttle(void *arg)
{
	struct sd_lun *un = arg;

	ASSERT(un != NULL);
	ASSERT(!mutex_owned(SD_MUTEX(un)));

	mutex_enter(SD_MUTEX(un));

	SD_TRACE(SD_LOG_IO | SD_LOG_ERROR, un, "sd_restore_throttle: "
	    "entry: un:0x%p un_throttle:%d\n", un, un->un_throttle);

	un->un_reset_throttle_timeid = NULL;

	if (un->un_f_use_adaptive_throttle == TRUE) {
		/*
		 * If un_busy_throttle is nonzero, then it contains the
		 * value that un_throttle was when we got a TRAN_BUSY back
		 * from scsi_transport(). We want to revert back to this
		 * value.
		 *
		 * In the QFULL case, the throttle limit will incrementally
		 * increase until it reaches max throttle.
		 */
		if (un->un_busy_throttle > 0) {
			un->un_throttle = un->un_busy_throttle;
			un->un_busy_throttle = 0;
		} else {
			/*
			 * Increase the throttle by 10% to open the gate
			 * slowly; schedule another restore callback if the
			 * saved throttle has not yet been reached.
			 */
			short throttle;
			if (sd_qfull_throttle_enable) {
				throttle = un->un_throttle +
				    max((un->un_throttle / 10), 1);
				un->un_throttle =
				    (throttle < un->un_saved_throttle) ?
				    throttle : un->un_saved_throttle;
				if (un->un_throttle < un->un_saved_throttle) {
					un->un_reset_throttle_timeid =
					    timeout(sd_restore_throttle,
					    un,
					    SD_QFULL_THROTTLE_RESET_INTERVAL);
				}
			}
		}

		/*
		 * If un_throttle has fallen below the low-water mark, we
		 * restore the maximum value here (and allow it to ratchet
		 * down again if necessary).
		 */
		if (un->un_throttle < un->un_min_throttle) {
			un->un_throttle = un->un_saved_throttle;
		}
	} else {
		SD_TRACE(SD_LOG_IO | SD_LOG_ERROR, un, "sd_restore_throttle: "
		    "restoring limit from 0x%x to 0x%x\n",
		    un->un_throttle, un->un_saved_throttle);
		un->un_throttle = un->un_saved_throttle;
	}

	SD_TRACE(SD_LOG_IO | SD_LOG_ERROR, un,
	    "sd_restore_throttle: calling sd_start_cmds!\n");

	sd_start_cmds(un, NULL);

	SD_TRACE(SD_LOG_IO | SD_LOG_ERROR, un,
	    "sd_restore_throttle: exit: un:0x%p un_throttle:%d\n",
	    un, un->un_throttle);

	mutex_exit(SD_MUTEX(un));

	SD_TRACE(SD_LOG_IO | SD_LOG_ERROR, un, "sd_restore_throttle: exit\n");
}

/*
 * Function: sdrunout
 *
 * Description: Callback routine for scsi_init_pkt when a resource allocation
 *		fails.
 *
 * Arguments: arg - a pointer to the sd_lun unit struct for the particular
 *	soft state instance.
 *
 * Return Code: The scsi_init_pkt routine allows for the callback function to
 *		return a 0 indicating the callback should be rescheduled or a 1
 *		indicating not to reschedule. This routine always returns 1
 *		because the driver always provides a callback function to
 *		scsi_init_pkt. This results in a callback always being scheduled
 *		(via the scsi_init_pkt callback implementation) if a resource
 *		failure occurs.
 *
 * Context: This callback function may not block or call routines that block
 *
 * Note: Using the scsi_init_pkt callback facility can result in an I/O
 *	request persisting at the head of the list which cannot be
 *	satisfied even after multiple retries. In the future the driver
 *	may implement some type of maximum runout count before failing
 *	an I/O.
 */

static int
sdrunout(caddr_t arg)
{
	struct sd_lun *un = (struct sd_lun *)arg;

	ASSERT(un != NULL);
	ASSERT(!mutex_owned(SD_MUTEX(un)));

	SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sdrunout: entry\n");

	mutex_enter(SD_MUTEX(un));
	sd_start_cmds(un, NULL);
	mutex_exit(SD_MUTEX(un));
	/*
	 * This callback routine always returns 1 (i.e. do not reschedule)
	 * because we always specify sdrunout as the callback handler for
	 * scsi_init_pkt inside the call to sd_start_cmds.
	 */
	SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sdrunout: exit\n");
	return (1);
}


/*
 * Function: sdintr
 *
 * Description: Completion callback routine for scsi_pkt(9S) structs
 *		sent to the HBA driver via scsi_transport(9F).
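 *
 *		This routine is installed as the pkt_comp handler when a
 *		scsi_pkt is set up (e.g. un_rqs_pktp->pkt_comp = sdintr
 *		in sd_alloc_rqs() above), so the HBA driver calls it when
 *		the command completes.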
 *
 * Context: Interrupt context
 */

static void
sdintr(struct scsi_pkt *pktp)
{
	struct buf *bp;
	struct sd_xbuf *xp;
	struct sd_lun *un;
	size_t actual_len;
	sd_ssc_t *sscp;

	ASSERT(pktp != NULL);
	bp = (struct buf *)pktp->pkt_private;
	ASSERT(bp != NULL);
	xp = SD_GET_XBUF(bp);
	ASSERT(xp != NULL);
	ASSERT(xp->xb_pktp != NULL);
	un = SD_GET_UN(bp);
	ASSERT(un != NULL);
	ASSERT(!mutex_owned(SD_MUTEX(un)));

#ifdef SD_FAULT_INJECTION

	SD_INFO(SD_LOG_IOERR, un, "sdintr: sdintr calling Fault injection\n");
	/* SD FaultInjection */
	sd_faultinjection(pktp);

#endif /* SD_FAULT_INJECTION */

	SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sdintr: entry: buf:0x%p,"
	    " xp:0x%p, un:0x%p\n", bp, xp, un);

	mutex_enter(SD_MUTEX(un));

	ASSERT(un->un_fm_private != NULL);
	sscp = &((struct sd_fm_internal *)(un->un_fm_private))->fm_ssc;
	ASSERT(sscp != NULL);

	/* Reduce the count of the #commands currently in transport */
	un->un_ncmds_in_transport--;
	ASSERT(un->un_ncmds_in_transport >= 0);

	/* Increment counter to indicate that the callback routine is active */
	un->un_in_callback++;

	SD_UPDATE_KSTATS(un, kstat_runq_exit, bp);

#ifdef SDDEBUG
	if (bp == un->un_retry_bp) {
		SD_TRACE(SD_LOG_IO | SD_LOG_ERROR, un, "sdintr: "
		    "un:0x%p: GOT retry_bp:0x%p un_ncmds_in_transport:%d\n",
		    un, un->un_retry_bp, un->un_ncmds_in_transport);
	}
#endif

	/*
	 * If pkt_reason is CMD_DEV_GONE, fail the command, and update the
	 * media state if needed.
	 */
	if (pktp->pkt_reason == CMD_DEV_GONE) {
		scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
		    "Command failed to complete...Device is gone\n");
		if (un->un_mediastate != DKIO_DEV_GONE) {
			un->un_mediastate = DKIO_DEV_GONE;
			cv_broadcast(&un->un_state_cv);
		}
		sd_return_failed_command(un, bp, EIO);
		goto exit;
	}

	if (pktp->pkt_state & STATE_XARQ_DONE) {
		SD_TRACE(SD_LOG_COMMON, un,
		    "sdintr: extra sense data received. pkt=%p\n", pktp);
	}

	/*
	 * First see if the pkt has auto-request sense data with it....
	 * Look at the packet state first so we don't take a performance
	 * hit looking at the arq enabled flag unless absolutely necessary.
	 */
	if ((pktp->pkt_state & STATE_ARQ_DONE) &&
	    (un->un_f_arq_enabled == TRUE)) {
		/*
		 * The HBA did an auto request sense for this command so check
		 * for FLAG_DIAGNOSE. If set this indicates a uscsi or internal
		 * driver command that should not be retried.
		 */
		if ((pktp->pkt_flags & FLAG_DIAGNOSE) != 0) {
			/*
			 * Save the relevant sense info into the xp for the
			 * original cmd.
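			 *
			 * The auto-request-sense status, state, resid,
			 * and sense bytes arrive piggybacked in the
			 * scsi_arq_status that overlays pkt_scbp.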
			 */
			struct scsi_arq_status *asp;
			asp = (struct scsi_arq_status *)(pktp->pkt_scbp);
			xp->xb_sense_status =
			    *((uchar_t *)(&(asp->sts_rqpkt_status)));
			xp->xb_sense_state = asp->sts_rqpkt_state;
			xp->xb_sense_resid = asp->sts_rqpkt_resid;
			if (pktp->pkt_state & STATE_XARQ_DONE) {
				actual_len = MAX_SENSE_LENGTH -
				    xp->xb_sense_resid;
				bcopy(&asp->sts_sensedata, xp->xb_sense_data,
				    MAX_SENSE_LENGTH);
			} else {
				if (xp->xb_sense_resid > SENSE_LENGTH) {
					actual_len = MAX_SENSE_LENGTH -
					    xp->xb_sense_resid;
				} else {
					actual_len = SENSE_LENGTH -
					    xp->xb_sense_resid;
				}
				if (xp->xb_pkt_flags & SD_XB_USCSICMD) {
					if ((((struct uscsi_cmd *)
					    (xp->xb_pktinfo))->uscsi_rqlen) >
					    actual_len) {
						xp->xb_sense_resid =
						    (((struct uscsi_cmd *)
						    (xp->xb_pktinfo))->
						    uscsi_rqlen) - actual_len;
					} else {
						xp->xb_sense_resid = 0;
					}
				}
				bcopy(&asp->sts_sensedata, xp->xb_sense_data,
				    SENSE_LENGTH);
			}

			/* fail the command */
			SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
			    "sdintr: arq done and FLAG_DIAGNOSE set\n");
			sd_return_failed_command(un, bp, EIO);
			goto exit;
		}

#if (defined(__i386) || defined(__amd64))	/* DMAFREE for x86 only */
		/*
		 * We want to either retry or fail this command, so free
		 * the DMA resources here. If we retry the command then
		 * the DMA resources will be reallocated in sd_start_cmds().
		 * Note that when PKT_DMA_PARTIAL is used, this reallocation
		 * causes the *entire* transfer to start over again from the
		 * beginning of the request, even for PARTIAL chunks that
		 * have already transferred successfully.
		 */
		if ((un->un_f_is_fibre == TRUE) &&
		    ((xp->xb_pkt_flags & SD_XB_USCSICMD) == 0) &&
		    ((pktp->pkt_flags & FLAG_SENSING) == 0)) {
			scsi_dmafree(pktp);
			xp->xb_pkt_flags |= SD_XB_DMA_FREED;
		}
#endif

		SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
		    "sdintr: arq done, sd_handle_auto_request_sense\n");

		sd_handle_auto_request_sense(un, bp, xp, pktp);
		goto exit;
	}

	/* Next see if this is the REQUEST SENSE pkt for the instance */
	if (pktp->pkt_flags & FLAG_SENSING) {
		/* This pktp is from the unit's REQUEST_SENSE command */
		SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
		    "sdintr: sd_handle_request_sense\n");
		sd_handle_request_sense(un, bp, xp, pktp);
		goto exit;
	}

	/*
	 * Check to see if the command successfully completed as requested;
	 * this is the most common case (and also the hot performance path).
	 *
	 * Requirements for successful completion are:
	 * pkt_reason is CMD_CMPLT and packet status is status good.
	 * In addition:
	 * - A residual of zero indicates successful completion no matter what
	 *   the command is.
	 * - If the residual is not zero and the command is not a read or
	 *   write, then it's still defined as successful completion. In other
	 *   words, if the command is a read or write the residual must be
	 *   zero for successful completion.
	 * - If the residual is not zero and the command is a read or
	 *   write, and it's a USCSICMD, then it's still defined as
	 *   successful completion.
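	 *
	 * In short, given CMD_CMPLT and STATUS_GOOD:
	 *
	 *	resid == 0			-> success
	 *	resid != 0, not READ/WRITE	-> success
	 *	resid != 0, READ/WRITE, USCSI	-> success
	 *	resid != 0, READ/WRITE, non-USCSI	-> not successful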
	 */
	if ((pktp->pkt_reason == CMD_CMPLT) &&
	    (SD_GET_PKT_STATUS(pktp) == STATUS_GOOD)) {

		/*
		 * Since this command is returned with a good status, we
		 * can reset the count for Sonoma failover.
		 */
		un->un_sonoma_failure_count = 0;

		/*
		 * Return all USCSI commands on good status
		 */
		if (pktp->pkt_resid == 0) {
			SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
			    "sdintr: returning command for resid == 0\n");
		} else if (((SD_GET_PKT_OPCODE(pktp) & 0x1F) != SCMD_READ) &&
		    ((SD_GET_PKT_OPCODE(pktp) & 0x1F) != SCMD_WRITE)) {
			SD_UPDATE_B_RESID(bp, pktp);
			SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
			    "sdintr: returning command for resid != 0\n");
		} else if (xp->xb_pkt_flags & SD_XB_USCSICMD) {
			SD_UPDATE_B_RESID(bp, pktp);
			SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
			    "sdintr: returning uscsi command\n");
		} else {
			goto not_successful;
		}
		sd_return_command(un, bp);

		/*
		 * Decrement counter to indicate that the callback routine
		 * is done.
		 */
		un->un_in_callback--;
		ASSERT(un->un_in_callback >= 0);
		mutex_exit(SD_MUTEX(un));

		return;
	}

not_successful:

#if (defined(__i386) || defined(__amd64))	/* DMAFREE for x86 only */
	/*
	 * The following is based upon knowledge of the underlying transport
	 * and its use of DMA resources. This code should be removed when
	 * PKT_DMA_PARTIAL support is taken out of the disk driver in favor
	 * of the new PKT_CMD_BREAKUP protocol. See also sd_initpkt_for_buf()
	 * and sd_start_cmds().
	 *
	 * Free any DMA resources associated with this command if there
	 * is a chance it could be retried or enqueued for later retry.
	 * If we keep the DMA binding then mpxio cannot reissue the
	 * command on another path whenever a path failure occurs.
	 *
	 * Note that when PKT_DMA_PARTIAL is used, free/reallocation
	 * causes the *entire* transfer to start over again from the
	 * beginning of the request, even for PARTIAL chunks that
	 * have already transferred successfully.
	 *
	 * This is only done for non-uscsi commands (and also skipped for the
	 * driver's internal RQS command). Also just do this for Fibre Channel
	 * devices as these are the only ones that support mpxio.
	 */
	if ((un->un_f_is_fibre == TRUE) &&
	    ((xp->xb_pkt_flags & SD_XB_USCSICMD) == 0) &&
	    ((pktp->pkt_flags & FLAG_SENSING) == 0)) {
		scsi_dmafree(pktp);
		xp->xb_pkt_flags |= SD_XB_DMA_FREED;
	}
#endif

	/*
	 * The command did not successfully complete as requested so check
	 * for FLAG_DIAGNOSE. If set this indicates a uscsi or internal
	 * driver command that should not be retried so just return. If
	 * FLAG_DIAGNOSE is not set the error will be processed below.
	 */
	if ((pktp->pkt_flags & FLAG_DIAGNOSE) != 0) {
		SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
		    "sdintr: FLAG_DIAGNOSE: sd_return_failed_command\n");
		/*
		 * Issue a request sense if a check condition caused the error
		 * (we handle the auto request sense case above), otherwise
		 * just fail the command.
		 */
		if ((pktp->pkt_reason == CMD_CMPLT) &&
		    (SD_GET_PKT_STATUS(pktp) == STATUS_CHECK)) {
			sd_send_request_sense_command(un, bp, pktp);
		} else {
			sd_return_failed_command(un, bp, EIO);
		}
		goto exit;
	}

	/*
	 * The command did not successfully complete as requested so process
	 * the error, retry, and/or attempt recovery.
	 */
	switch (pktp->pkt_reason) {
	case CMD_CMPLT:
		switch (SD_GET_PKT_STATUS(pktp)) {
		case STATUS_GOOD:
			/*
			 * The command completed successfully with a non-zero
			 * residual
			 */
			SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
			    "sdintr: STATUS_GOOD \n");
			sd_pkt_status_good(un, bp, xp, pktp);
			break;

		case STATUS_CHECK:
		case STATUS_TERMINATED:
			SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
			    "sdintr: STATUS_TERMINATED | STATUS_CHECK\n");
			sd_pkt_status_check_condition(un, bp, xp, pktp);
			break;

		case STATUS_BUSY:
			SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
			    "sdintr: STATUS_BUSY\n");
			sd_pkt_status_busy(un, bp, xp, pktp);
			break;

		case STATUS_RESERVATION_CONFLICT:
			SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
			    "sdintr: STATUS_RESERVATION_CONFLICT\n");
			sd_pkt_status_reservation_conflict(un, bp, xp, pktp);
			break;

		case STATUS_QFULL:
			SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
			    "sdintr: STATUS_QFULL\n");
			sd_pkt_status_qfull(un, bp, xp, pktp);
			break;

		case STATUS_MET:
		case STATUS_INTERMEDIATE:
		case STATUS_SCSI2:
		case STATUS_INTERMEDIATE_MET:
		case STATUS_ACA_ACTIVE:
			scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
			    "Unexpected SCSI status received: 0x%x\n",
			    SD_GET_PKT_STATUS(pktp));
			/*
			 * Mark the ssc_flags when an invalid status code is
			 * detected for a non-USCSI command.
			 */
			if (!(xp->xb_pkt_flags & SD_XB_USCSICMD)) {
				sd_ssc_set_info(sscp, SSC_FLAGS_INVALID_STATUS,
				    "stat-code");
			}
			sd_return_failed_command(un, bp, EIO);
			break;

		default:
			scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
			    "Invalid SCSI status received: 0x%x\n",
			    SD_GET_PKT_STATUS(pktp));
			if (!(xp->xb_pkt_flags & SD_XB_USCSICMD)) {
				sd_ssc_set_info(sscp, SSC_FLAGS_INVALID_STATUS,
				    "stat-code");
			}
			sd_return_failed_command(un, bp, EIO);
			break;

		}
		break;

	case CMD_INCOMPLETE:
		SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
		    "sdintr: CMD_INCOMPLETE\n");
		sd_pkt_reason_cmd_incomplete(un, bp, xp, pktp);
		break;
	case CMD_TRAN_ERR:
		SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
		    "sdintr: CMD_TRAN_ERR\n");
		sd_pkt_reason_cmd_tran_err(un, bp, xp, pktp);
		break;
	case CMD_RESET:
		SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
		    "sdintr: CMD_RESET \n");
		sd_pkt_reason_cmd_reset(un, bp, xp, pktp);
		break;
	case CMD_ABORTED:
		SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
		    "sdintr: CMD_ABORTED \n");
		sd_pkt_reason_cmd_aborted(un, bp, xp, pktp);
		break;
	case CMD_TIMEOUT:
		SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
		    "sdintr: CMD_TIMEOUT\n");
		sd_pkt_reason_cmd_timeout(un, bp, xp, pktp);
		break;
	case CMD_UNX_BUS_FREE:
		SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
		    "sdintr: CMD_UNX_BUS_FREE \n");
		sd_pkt_reason_cmd_unx_bus_free(un, bp, xp, pktp);
		break;
	case CMD_TAG_REJECT:
		SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
		    "sdintr: CMD_TAG_REJECT\n");
		sd_pkt_reason_cmd_tag_reject(un, bp, xp, pktp);
		break;
	default:
		SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
		    "sdintr: default\n");
		/*
		 * Mark the ssc_flags for detecting an invalid pkt_reason.
		 */
		if (!(xp->xb_pkt_flags & SD_XB_USCSICMD)) {
			sd_ssc_set_info(sscp, SSC_FLAGS_INVALID_PKT_REASON,
			    "pkt-reason");
		}
		sd_pkt_reason_default(un, bp, xp, pktp);
		break;
	}

exit:
	SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sdintr: exit\n");

	/* Decrement counter to indicate that the callback routine is done. */
	un->un_in_callback--;
	ASSERT(un->un_in_callback >= 0);

	/*
	 * At this point, the pkt has been dispatched, i.e., it is either
	 * being re-tried or has been returned to its caller and should
	 * not be referenced.
	 */

	mutex_exit(SD_MUTEX(un));
}


/*
 * Function: sd_print_incomplete_msg
 *
 * Description: Prints the error message for a CMD_INCOMPLETE error.
 *
 * Arguments: un - ptr to associated softstate for the device.
 *		bp - ptr to the buf(9S) for the command.
 *		arg - message string ptr
 *		code - SD_DELAYED_RETRY_ISSUED, SD_IMMEDIATE_RETRY_ISSUED,
 *		    or SD_NO_RETRY_ISSUED.
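 *
 * A sketch of how this callback is reached (mirroring the
 * sd_retry_command() call in sd_pkt_status_good() below):
 *
 *	sd_retry_command(un, bp, SD_RETRIES_STANDARD,
 *	    sd_print_incomplete_msg, "read", EIO, (clock_t)0, NULL);
 *
 * which, when no further retry can be issued, logs
 * "incomplete read- giving up".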
 *
 * Context: May be called under interrupt context
 */

static void
sd_print_incomplete_msg(struct sd_lun *un, struct buf *bp, void *arg, int code)
{
	struct scsi_pkt *pktp;
	char *msgp;
	char *cmdp = arg;

	ASSERT(un != NULL);
	ASSERT(mutex_owned(SD_MUTEX(un)));
	ASSERT(bp != NULL);
	ASSERT(arg != NULL);
	pktp = SD_GET_PKTP(bp);
	ASSERT(pktp != NULL);

	switch (code) {
	case SD_DELAYED_RETRY_ISSUED:
	case SD_IMMEDIATE_RETRY_ISSUED:
		msgp = "retrying";
		break;
	case SD_NO_RETRY_ISSUED:
	default:
		msgp = "giving up";
		break;
	}

	if ((pktp->pkt_flags & FLAG_SILENT) == 0) {
		scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
		    "incomplete %s- %s\n", cmdp, msgp);
	}
}



/*
 * Function: sd_pkt_status_good
 *
 * Description: Processing for a STATUS_GOOD code in pkt_status.
 *
 * Context: May be called under interrupt context
 */

static void
sd_pkt_status_good(struct sd_lun *un, struct buf *bp,
	struct sd_xbuf *xp, struct scsi_pkt *pktp)
{
	char *cmdp;

	ASSERT(un != NULL);
	ASSERT(mutex_owned(SD_MUTEX(un)));
	ASSERT(bp != NULL);
	ASSERT(xp != NULL);
	ASSERT(pktp != NULL);
	ASSERT(pktp->pkt_reason == CMD_CMPLT);
	ASSERT(SD_GET_PKT_STATUS(pktp) == STATUS_GOOD);
	ASSERT(pktp->pkt_resid != 0);

	SD_TRACE(SD_LOG_IO_CORE, un, "sd_pkt_status_good: entry\n");

	SD_UPDATE_ERRSTATS(un, sd_harderrs);
	switch (SD_GET_PKT_OPCODE(pktp) & 0x1F) {
	case SCMD_READ:
		cmdp = "read";
		break;
	case SCMD_WRITE:
		cmdp = "write";
		break;
	default:
		SD_UPDATE_B_RESID(bp, pktp);
		sd_return_command(un, bp);
		SD_TRACE(SD_LOG_IO_CORE, un, "sd_pkt_status_good: exit\n");
		return;
	}

	/*
	 * See if we can retry the read/write, preferably immediately.
	 * If retries are exhausted, then sd_retry_command() will update
	 * the b_resid count.
	 */
	sd_retry_command(un, bp, SD_RETRIES_STANDARD, sd_print_incomplete_msg,
	    cmdp, EIO, (clock_t)0, NULL);

	SD_TRACE(SD_LOG_IO_CORE, un, "sd_pkt_status_good: exit\n");
}




/*
 * Function: sd_handle_request_sense
 *
 * Description: Processing for non-auto Request Sense command.
 *
 * Arguments: un - ptr to associated softstate
 *		sense_bp - ptr to buf(9S) for the RQS command
 *		sense_xp - ptr to the sd_xbuf for the RQS command
 *		sense_pktp - ptr to the scsi_pkt(9S) for the RQS command
 *
 * Context: May be called under interrupt context
 */

static void
sd_handle_request_sense(struct sd_lun *un, struct buf *sense_bp,
	struct sd_xbuf *sense_xp, struct scsi_pkt *sense_pktp)
{
	struct buf *cmd_bp;		/* buf for the original command */
	struct sd_xbuf *cmd_xp;		/* sd_xbuf for the original command */
	struct scsi_pkt *cmd_pktp;	/* pkt for the original command */
	size_t actual_len;		/* actual sense data length */

	ASSERT(un != NULL);
	ASSERT(mutex_owned(SD_MUTEX(un)));
	ASSERT(sense_bp != NULL);
	ASSERT(sense_xp != NULL);
	ASSERT(sense_pktp != NULL);

	/*
	 * Note the sense_bp, sense_xp, and sense_pktp here are for the
	 * RQS command and not the original command.
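	 *
	 * Sketch of the linkage between the two commands (matching the
	 * assertions and assignments below):
	 *
	 *	sense_xp->xb_sense_bp == cmd_bp		(original buf)
	 *	SD_GET_XBUF(cmd_bp)->xb_pktp->pkt_flags	has FLAG_SENSING set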
16543 */ 16544 ASSERT(sense_pktp == un->un_rqs_pktp); 16545 ASSERT(sense_bp == un->un_rqs_bp); 16546 ASSERT((sense_pktp->pkt_flags & (FLAG_SENSING | FLAG_HEAD)) == 16547 (FLAG_SENSING | FLAG_HEAD)); 16548 ASSERT((((SD_GET_XBUF(sense_xp->xb_sense_bp))->xb_pktp->pkt_flags) & 16549 FLAG_SENSING) == FLAG_SENSING); 16550 16551 /* These are the bp, xp, and pktp for the original command */ 16552 cmd_bp = sense_xp->xb_sense_bp; 16553 cmd_xp = SD_GET_XBUF(cmd_bp); 16554 cmd_pktp = SD_GET_PKTP(cmd_bp); 16555 16556 if (sense_pktp->pkt_reason != CMD_CMPLT) { 16557 /* 16558 * The REQUEST SENSE command failed. Release the REQUEST 16559 * SENSE command for re-use, get back the bp for the original 16560 * command, and attempt to re-try the original command if 16561 * FLAG_DIAGNOSE is not set in the original packet. 16562 */ 16563 SD_UPDATE_ERRSTATS(un, sd_harderrs); 16564 if ((cmd_pktp->pkt_flags & FLAG_DIAGNOSE) == 0) { 16565 cmd_bp = sd_mark_rqs_idle(un, sense_xp); 16566 sd_retry_command(un, cmd_bp, SD_RETRIES_STANDARD, 16567 NULL, NULL, EIO, (clock_t)0, NULL); 16568 return; 16569 } 16570 } 16571 16572 /* 16573 * Save the relevant sense info into the xp for the original cmd. 16574 * 16575 * Note: if the request sense failed the state info will be zero 16576 * as set in sd_mark_rqs_busy() 16577 */ 16578 cmd_xp->xb_sense_status = *(sense_pktp->pkt_scbp); 16579 cmd_xp->xb_sense_state = sense_pktp->pkt_state; 16580 actual_len = MAX_SENSE_LENGTH - sense_pktp->pkt_resid; 16581 if ((cmd_xp->xb_pkt_flags & SD_XB_USCSICMD) && 16582 (((struct uscsi_cmd *)cmd_xp->xb_pktinfo)->uscsi_rqlen > 16583 SENSE_LENGTH)) { 16584 bcopy(sense_bp->b_un.b_addr, cmd_xp->xb_sense_data, 16585 MAX_SENSE_LENGTH); 16586 cmd_xp->xb_sense_resid = sense_pktp->pkt_resid; 16587 } else { 16588 bcopy(sense_bp->b_un.b_addr, cmd_xp->xb_sense_data, 16589 SENSE_LENGTH); 16590 if (actual_len < SENSE_LENGTH) { 16591 cmd_xp->xb_sense_resid = SENSE_LENGTH - actual_len; 16592 } else { 16593 cmd_xp->xb_sense_resid = 0; 16594 } 16595 } 16596 16597 /* 16598 * Free up the RQS command.... 16599 * NOTE: 16600 * Must do this BEFORE calling sd_validate_sense_data! 16601 * sd_validate_sense_data may return the original command in 16602 * which case the pkt will be freed and the flags can no 16603 * longer be touched. 16604 * SD_MUTEX is held through this process until the command 16605 * is dispatched based upon the sense data, so there are 16606 * no race conditions. 16607 */ 16608 (void) sd_mark_rqs_idle(un, sense_xp); 16609 16610 /* 16611 * For a retryable command see if we have valid sense data, if so then 16612 * turn it over to sd_decode_sense() to figure out the right course of 16613 * action. Just fail a non-retryable command. 16614 */ 16615 if ((cmd_pktp->pkt_flags & FLAG_DIAGNOSE) == 0) { 16616 if (sd_validate_sense_data(un, cmd_bp, cmd_xp, actual_len) == 16617 SD_SENSE_DATA_IS_VALID) { 16618 sd_decode_sense(un, cmd_bp, cmd_xp, cmd_pktp); 16619 } 16620 } else { 16621 SD_DUMP_MEMORY(un, SD_LOG_IO_CORE, "Failed CDB", 16622 (uchar_t *)cmd_pktp->pkt_cdbp, CDB_SIZE, SD_LOG_HEX); 16623 SD_DUMP_MEMORY(un, SD_LOG_IO_CORE, "Sense Data", 16624 (uchar_t *)cmd_xp->xb_sense_data, SENSE_LENGTH, SD_LOG_HEX); 16625 sd_return_failed_command(un, cmd_bp, EIO); 16626 } 16627 } 16628 16629 16630 16631 16632 /* 16633 * Function: sd_handle_auto_request_sense 16634 * 16635 * Description: Processing for auto-request sense information. 
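 *
 * (Sketch of the layout consumed here, mirroring the code below:
 *
 *	asp = (struct scsi_arq_status *)(pktp->pkt_scbp);
 *	bcopy(&asp->sts_sensedata, xp->xb_sense_data, SENSE_LENGTH);
 *
 * i.e. the HBA returns a scsi_arq_status(9S) in the status block, with
 * the sense bytes in its sts_sensedata member.)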
16636 * 16637 * Arguments: un - ptr to associated softstate 16638 * bp - ptr to buf(9S) for the command 16639 * xp - ptr to the sd_xbuf for the command 16640 * pktp - ptr to the scsi_pkt(9S) for the command 16641 * 16642 * Context: May be called under interrupt context 16643 */ 16644 16645 static void 16646 sd_handle_auto_request_sense(struct sd_lun *un, struct buf *bp, 16647 struct sd_xbuf *xp, struct scsi_pkt *pktp) 16648 { 16649 struct scsi_arq_status *asp; 16650 size_t actual_len; 16651 16652 ASSERT(un != NULL); 16653 ASSERT(mutex_owned(SD_MUTEX(un))); 16654 ASSERT(bp != NULL); 16655 ASSERT(xp != NULL); 16656 ASSERT(pktp != NULL); 16657 ASSERT(pktp != un->un_rqs_pktp); 16658 ASSERT(bp != un->un_rqs_bp); 16659 16660 /* 16661 * For auto-request sense, we get a scsi_arq_status back from 16662 * the HBA, with the sense data in the sts_sensedata member. 16663 * The pkt_scbp of the packet points to this scsi_arq_status. 16664 */ 16665 asp = (struct scsi_arq_status *)(pktp->pkt_scbp); 16666 16667 if (asp->sts_rqpkt_reason != CMD_CMPLT) { 16668 /* 16669 * The auto REQUEST SENSE failed; see if we can re-try 16670 * the original command. 16671 */ 16672 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 16673 "auto request sense failed (reason=%s)\n", 16674 scsi_rname(asp->sts_rqpkt_reason)); 16675 16676 sd_reset_target(un, pktp); 16677 16678 sd_retry_command(un, bp, SD_RETRIES_STANDARD, 16679 NULL, NULL, EIO, (clock_t)0, NULL); 16680 return; 16681 } 16682 16683 /* Save the relevant sense info into the xp for the original cmd. */ 16684 xp->xb_sense_status = *((uchar_t *)(&(asp->sts_rqpkt_status))); 16685 xp->xb_sense_state = asp->sts_rqpkt_state; 16686 xp->xb_sense_resid = asp->sts_rqpkt_resid; 16687 if (xp->xb_sense_state & STATE_XARQ_DONE) { 16688 actual_len = MAX_SENSE_LENGTH - xp->xb_sense_resid; 16689 bcopy(&asp->sts_sensedata, xp->xb_sense_data, 16690 MAX_SENSE_LENGTH); 16691 } else { 16692 if (xp->xb_sense_resid > SENSE_LENGTH) { 16693 actual_len = MAX_SENSE_LENGTH - xp->xb_sense_resid; 16694 } else { 16695 actual_len = SENSE_LENGTH - xp->xb_sense_resid; 16696 } 16697 if (xp->xb_pkt_flags & SD_XB_USCSICMD) { 16698 if ((((struct uscsi_cmd *) 16699 (xp->xb_pktinfo))->uscsi_rqlen) > actual_len) { 16700 xp->xb_sense_resid = (((struct uscsi_cmd *) 16701 (xp->xb_pktinfo))->uscsi_rqlen) - 16702 actual_len; 16703 } else { 16704 xp->xb_sense_resid = 0; 16705 } 16706 } 16707 bcopy(&asp->sts_sensedata, xp->xb_sense_data, SENSE_LENGTH); 16708 } 16709 16710 /* 16711 * See if we have valid sense data, if so then turn it over to 16712 * sd_decode_sense() to figure out the right course of action. 16713 */ 16714 if (sd_validate_sense_data(un, bp, xp, actual_len) == 16715 SD_SENSE_DATA_IS_VALID) { 16716 sd_decode_sense(un, bp, xp, pktp); 16717 } 16718 } 16719 16720 16721 /* 16722 * Function: sd_print_sense_failed_msg 16723 * 16724 * Description: Print log message when RQS has failed. 
16725 * 16726 * Arguments: un - ptr to associated softstate 16727 * bp - ptr to buf(9S) for the command 16728 * arg - generic message string ptr 16729 * code - SD_IMMEDIATE_RETRY_ISSUED, SD_DELAYED_RETRY_ISSUED, 16730 * or SD_NO_RETRY_ISSUED 16731 * 16732 * Context: May be called from interrupt context 16733 */ 16734 16735 static void 16736 sd_print_sense_failed_msg(struct sd_lun *un, struct buf *bp, void *arg, 16737 int code) 16738 { 16739 char *msgp = arg; 16740 16741 ASSERT(un != NULL); 16742 ASSERT(mutex_owned(SD_MUTEX(un))); 16743 ASSERT(bp != NULL); 16744 16745 if ((code == SD_NO_RETRY_ISSUED) && (msgp != NULL)) { 16746 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, msgp); 16747 } 16748 } 16749 16750 16751 /* 16752 * Function: sd_validate_sense_data 16753 * 16754 * Description: Check the given sense data for validity. 16755 * If the sense data is not valid, the command will 16756 * be either failed or retried! 16757 * 16758 * Return Code: SD_SENSE_DATA_IS_INVALID 16759 * SD_SENSE_DATA_IS_VALID 16760 * 16761 * Context: May be called from interrupt context 16762 */ 16763 16764 static int 16765 sd_validate_sense_data(struct sd_lun *un, struct buf *bp, struct sd_xbuf *xp, 16766 size_t actual_len) 16767 { 16768 struct scsi_extended_sense *esp; 16769 struct scsi_pkt *pktp; 16770 char *msgp = NULL; 16771 sd_ssc_t *sscp; 16772 16773 ASSERT(un != NULL); 16774 ASSERT(mutex_owned(SD_MUTEX(un))); 16775 ASSERT(bp != NULL); 16776 ASSERT(bp != un->un_rqs_bp); 16777 ASSERT(xp != NULL); 16778 ASSERT(un->un_fm_private != NULL); 16779 16780 pktp = SD_GET_PKTP(bp); 16781 ASSERT(pktp != NULL); 16782 16783 sscp = &((struct sd_fm_internal *)(un->un_fm_private))->fm_ssc; 16784 ASSERT(sscp != NULL); 16785 16786 /* 16787 * Check the status of the RQS command (auto or manual). 16788 */ 16789 switch (xp->xb_sense_status & STATUS_MASK) { 16790 case STATUS_GOOD: 16791 break; 16792 16793 case STATUS_RESERVATION_CONFLICT: 16794 sd_pkt_status_reservation_conflict(un, bp, xp, pktp); 16795 return (SD_SENSE_DATA_IS_INVALID); 16796 16797 case STATUS_BUSY: 16798 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 16799 "Busy Status on REQUEST SENSE\n"); 16800 sd_retry_command(un, bp, SD_RETRIES_BUSY, NULL, 16801 NULL, EIO, un->un_busy_timeout / 500, kstat_waitq_enter); 16802 return (SD_SENSE_DATA_IS_INVALID); 16803 16804 case STATUS_QFULL: 16805 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 16806 "QFULL Status on REQUEST SENSE\n"); 16807 sd_retry_command(un, bp, SD_RETRIES_STANDARD, NULL, 16808 NULL, EIO, un->un_busy_timeout / 500, kstat_waitq_enter); 16809 return (SD_SENSE_DATA_IS_INVALID); 16810 16811 case STATUS_CHECK: 16812 case STATUS_TERMINATED: 16813 msgp = "Check Condition on REQUEST SENSE\n"; 16814 goto sense_failed; 16815 16816 default: 16817 msgp = "Not STATUS_GOOD on REQUEST_SENSE\n"; 16818 goto sense_failed; 16819 } 16820 16821 /* 16822 * See if we got the minimum required amount of sense data. 16823 * Note: We are assuming the returned sense data is SENSE_LENGTH bytes 16824 * or less. 
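	 *
	 * Sketch of the acceptance checks applied from here on, in order:
	 *
	 *	(xb_sense_state & STATE_XFERRED_DATA) != 0 && actual_len != 0
	 *	actual_len >= SUN_MIN_SENSE_LENGTH
	 *	es_class == CLASS_EXTENDED_SENSE
	 *	es_code is one of the recognized CODE_FMT_* values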
	 */
	if (((xp->xb_sense_state & STATE_XFERRED_DATA) == 0) ||
	    (actual_len == 0)) {
		msgp = "Request Sense couldn't get sense data\n";
		goto sense_failed;
	}

	if (actual_len < SUN_MIN_SENSE_LENGTH) {
		msgp = "Not enough sense information\n";
		/* Mark the ssc_flags for detecting invalid sense data */
		if (!(xp->xb_pkt_flags & SD_XB_USCSICMD)) {
			sd_ssc_set_info(sscp, SSC_FLAGS_INVALID_SENSE,
			    "sense-data");
		}
		goto sense_failed;
	}

	/*
	 * We require the extended sense data
	 */
	esp = (struct scsi_extended_sense *)xp->xb_sense_data;
	if (esp->es_class != CLASS_EXTENDED_SENSE) {
		if ((pktp->pkt_flags & FLAG_SILENT) == 0) {
			static char tmp[8];
			static char buf[148];
			char *p = (char *)(xp->xb_sense_data);
			int i;

			mutex_enter(&sd_sense_mutex);
			(void) strcpy(buf, "undecodable sense information:");
			for (i = 0; i < actual_len; i++) {
				(void) sprintf(tmp, " 0x%x", *(p++)&0xff);
				(void) strcpy(&buf[strlen(buf)], tmp);
			}
			i = strlen(buf);
			(void) strcpy(&buf[i], "-(assumed fatal)\n");
			scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, buf);
			mutex_exit(&sd_sense_mutex);
		}

		/* Mark the ssc_flags for detecting invalid sense data */
		if (!(xp->xb_pkt_flags & SD_XB_USCSICMD)) {
			sd_ssc_set_info(sscp, SSC_FLAGS_INVALID_SENSE,
			    "sense-data");
		}

		/* Note: Legacy behavior, fail the command with no retry */
		sd_return_failed_command(un, bp, EIO);
		return (SD_SENSE_DATA_IS_INVALID);
	}

	/*
	 * Check that es_code is valid (es_class concatenated with es_code
	 * makes up the "response code" field). es_class will always be 7,
	 * so make sure es_code is 0, 1, 2, 3 or 0xf. es_code will indicate
	 * the format.
	 */
	if ((esp->es_code != CODE_FMT_FIXED_CURRENT) &&
	    (esp->es_code != CODE_FMT_FIXED_DEFERRED) &&
	    (esp->es_code != CODE_FMT_DESCR_CURRENT) &&
	    (esp->es_code != CODE_FMT_DESCR_DEFERRED) &&
	    (esp->es_code != CODE_FMT_VENDOR_SPECIFIC)) {
		/* Mark the ssc_flags for detecting invalid sense data */
		if (!(xp->xb_pkt_flags & SD_XB_USCSICMD)) {
			sd_ssc_set_info(sscp, SSC_FLAGS_INVALID_SENSE,
			    "sense-data");
		}
		goto sense_failed;
	}

	return (SD_SENSE_DATA_IS_VALID);

sense_failed:
	/*
	 * If the request sense failed (for whatever reason), attempt
	 * to retry the original command.
	 */
#if defined(__i386) || defined(__amd64)
	/*
	 * SD_RETRY_DELAY is conditionally compiled (#if fibre) in sddef.h
	 * for the SPARC platform, while x86 uses one binary for both SCSI
	 * and FC. The SD_RETRY_DELAY value therefore needs to be adjusted
	 * here whenever SD_RETRY_DELAY changes in sddef.h.
	 */
	sd_retry_command(un, bp, SD_RETRIES_STANDARD,
	    sd_print_sense_failed_msg, msgp, EIO,
	    un->un_f_is_fibre ? drv_usectohz(100000) : (clock_t)0, NULL);
#else
	sd_retry_command(un, bp, SD_RETRIES_STANDARD,
	    sd_print_sense_failed_msg, msgp, EIO, SD_RETRY_DELAY, NULL);
#endif

	return (SD_SENSE_DATA_IS_INVALID);
}

/*
 * Function: sd_decode_sense
 *
 * Description: Take recovery action(s) when SCSI Sense Data is received.
 *
 * Context: Interrupt context.
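 *
 * For example (illustrative routing, per the switch below): a CHECK
 * CONDITION carrying KEY_UNIT_ATTENTION is handed to
 * sd_sense_key_unit_attention(), while KEY_WRITE_PROTECT fails the
 * command via sd_sense_key_fail_command(). The key itself is read with
 *
 *	sense_key = scsi_sense_key(xp->xb_sense_data);
 *
 * which accepts both fixed and descriptor format sense data.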
 */

static void
sd_decode_sense(struct sd_lun *un, struct buf *bp, struct sd_xbuf *xp,
	struct scsi_pkt *pktp)
{
	uint8_t sense_key;

	ASSERT(un != NULL);
	ASSERT(mutex_owned(SD_MUTEX(un)));
	ASSERT(bp != NULL);
	ASSERT(bp != un->un_rqs_bp);
	ASSERT(xp != NULL);
	ASSERT(pktp != NULL);

	sense_key = scsi_sense_key(xp->xb_sense_data);

	switch (sense_key) {
	case KEY_NO_SENSE:
		sd_sense_key_no_sense(un, bp, xp, pktp);
		break;
	case KEY_RECOVERABLE_ERROR:
		sd_sense_key_recoverable_error(un, xp->xb_sense_data,
		    bp, xp, pktp);
		break;
	case KEY_NOT_READY:
		sd_sense_key_not_ready(un, xp->xb_sense_data,
		    bp, xp, pktp);
		break;
	case KEY_MEDIUM_ERROR:
	case KEY_HARDWARE_ERROR:
		sd_sense_key_medium_or_hardware_error(un,
		    xp->xb_sense_data, bp, xp, pktp);
		break;
	case KEY_ILLEGAL_REQUEST:
		sd_sense_key_illegal_request(un, bp, xp, pktp);
		break;
	case KEY_UNIT_ATTENTION:
		sd_sense_key_unit_attention(un, xp->xb_sense_data,
		    bp, xp, pktp);
		break;
	case KEY_WRITE_PROTECT:
	case KEY_VOLUME_OVERFLOW:
	case KEY_MISCOMPARE:
		sd_sense_key_fail_command(un, bp, xp, pktp);
		break;
	case KEY_BLANK_CHECK:
		sd_sense_key_blank_check(un, bp, xp, pktp);
		break;
	case KEY_ABORTED_COMMAND:
		sd_sense_key_aborted_command(un, bp, xp, pktp);
		break;
	case KEY_VENDOR_UNIQUE:
	case KEY_COPY_ABORTED:
	case KEY_EQUAL:
	case KEY_RESERVED:
	default:
		sd_sense_key_default(un, xp->xb_sense_data,
		    bp, xp, pktp);
		break;
	}
}


/*
 * Function: sd_dump_memory
 *
 * Description: Debug logging routine to print the contents of a user
 *		provided buffer. The output of the buffer is broken up
 *		into 256 byte segments due to a size constraint of the
 *		scsi_log implementation.
 *
 * Arguments: un - ptr to softstate
 *		comp - component mask
 *		title - "title" string to precede data when printed
 *		data - ptr to data block to be printed
 *		len - size of data block to be printed
 *		fmt - SD_LOG_HEX (use 0x%02x format) or SD_LOG_CHAR (use %c)
 *
 * Context: May be called from interrupt context
 */

#define	SD_DUMP_MEMORY_BUF_SIZE	256

static char *sd_dump_format_string[] = {
		" 0x%02x",
		" %c"
};

static void
sd_dump_memory(struct sd_lun *un, uint_t comp, char *title, uchar_t *data,
	int len, int fmt)
{
	int i, j;
	int avail_count;
	int start_offset;
	int end_offset;
	size_t entry_len;
	char *bufp;
	char *local_buf;
	char *format_string;

	ASSERT((fmt == SD_LOG_HEX) || (fmt == SD_LOG_CHAR));

	/*
	 * In the debug version of the driver, this function is called from a
	 * number of places which are NOPs in the release driver.
	 * The debug driver therefore has additional methods of filtering
	 * debug output.
	 */
#ifdef SDDEBUG
	/*
	 * In the debug version of the driver we can reduce the amount of
	 * debug messages by setting sd_error_level to something other than
	 * SCSI_ERR_ALL and clearing bits in sd_level_mask and
	 * sd_component_mask.
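	 *
	 * Sketch of the combined gate (a debug build emits the dump only
	 * when all of these hold):
	 *
	 *	sd_error_level == SCSI_ERR_ALL &&
	 *	(sd_level_mask & (SD_LOGMASK_DUMP_MEM | SD_LOGMASK_DIAG)) &&
	 *	(sd_component_mask & comp)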
	 */
	if (((sd_level_mask & (SD_LOGMASK_DUMP_MEM | SD_LOGMASK_DIAG)) == 0) ||
	    (sd_error_level != SCSI_ERR_ALL)) {
		return;
	}
	if (((sd_component_mask & comp) == 0) ||
	    (sd_error_level != SCSI_ERR_ALL)) {
		return;
	}
#else
	if (sd_error_level != SCSI_ERR_ALL) {
		return;
	}
#endif

	local_buf = kmem_zalloc(SD_DUMP_MEMORY_BUF_SIZE, KM_SLEEP);
	bufp = local_buf;
	/*
	 * Available length is the length of local_buf[], minus the
	 * length of the title string, minus one for the ":", minus
	 * one for the newline, minus one for the NULL terminator.
	 * This gives the #bytes available for holding the printed
	 * values from the given data buffer.
	 */
	if (fmt == SD_LOG_HEX) {
		format_string = sd_dump_format_string[0];
	} else /* SD_LOG_CHAR */ {
		format_string = sd_dump_format_string[1];
	}
	/*
	 * Available count is the number of elements from the given
	 * data buffer that we can fit into the available length.
	 * This is based upon the size of the format string used.
	 * Make one entry and find its size. (For example, in hex format
	 * each " 0x%02x" entry prints five characters, so an 8-character
	 * title leaves (256 - 8 - 3) / 5 = 49 entries per line.)
	 */
	(void) sprintf(bufp, format_string, data[0]);
	entry_len = strlen(bufp);
	avail_count = (SD_DUMP_MEMORY_BUF_SIZE - strlen(title) - 3) / entry_len;

	j = 0;
	while (j < len) {
		bufp = local_buf;
		bzero(bufp, SD_DUMP_MEMORY_BUF_SIZE);
		start_offset = j;

		end_offset = start_offset + avail_count;

		(void) sprintf(bufp, "%s:", title);
		bufp += strlen(bufp);
		for (i = start_offset; ((i < end_offset) && (j < len));
		    i++, j++) {
			(void) sprintf(bufp, format_string, data[i]);
			bufp += entry_len;
		}
		(void) sprintf(bufp, "\n");

		scsi_log(SD_DEVINFO(un), sd_label, CE_NOTE, "%s", local_buf);
	}
	kmem_free(local_buf, SD_DUMP_MEMORY_BUF_SIZE);
}

/*
 * Function: sd_print_sense_msg
 *
 * Description: Log a message based upon the given sense data.
 *
 * Arguments: un - ptr to associated softstate
 *		bp - ptr to buf(9S) for the command
 *		arg - ptr to associated sd_sense_info struct
 *		code - SD_IMMEDIATE_RETRY_ISSUED, SD_DELAYED_RETRY_ISSUED,
 *		    or SD_NO_RETRY_ISSUED
 *
 * Context: May be called from interrupt context
 */

static void
sd_print_sense_msg(struct sd_lun *un, struct buf *bp, void *arg, int code)
{
	struct sd_xbuf *xp;
	struct scsi_pkt *pktp;
	uint8_t *sensep;
	daddr_t request_blkno;
	diskaddr_t err_blkno;
	int severity;
	int pfa_flag;
	extern struct scsi_key_strings scsi_cmds[];

	ASSERT(un != NULL);
	ASSERT(mutex_owned(SD_MUTEX(un)));
	ASSERT(bp != NULL);
	xp = SD_GET_XBUF(bp);
	ASSERT(xp != NULL);
	pktp = SD_GET_PKTP(bp);
	ASSERT(pktp != NULL);
	ASSERT(arg != NULL);

	severity = ((struct sd_sense_info *)(arg))->ssi_severity;
	pfa_flag = ((struct sd_sense_info *)(arg))->ssi_pfa_flag;

	if ((code == SD_DELAYED_RETRY_ISSUED) ||
	    (code == SD_IMMEDIATE_RETRY_ISSUED)) {
		severity = SCSI_ERR_RETRYABLE;
	}

	/* Use absolute block number for the request block number */
	request_blkno = xp->xb_blkno;

	/*
	 * Now try to get the error block number from the sense data
	 */
	sensep = xp->xb_sense_data;

	if (scsi_sense_info_uint64(sensep, SENSE_LENGTH,
	    (uint64_t *)&err_blkno)) {
		/*
		 * We retrieved the error block number from the information
		 * portion of the sense data.
		 *
		 * For USCSI commands we are better off using the error
		 * block no. as the requested block no. (This is the best
		 * we can estimate.)
		 */
		if ((SD_IS_BUFIO(xp) == FALSE) &&
		    ((pktp->pkt_flags & FLAG_SILENT) == 0)) {
			request_blkno = err_blkno;
		}
	} else {
		/*
		 * Without the es_valid bit set (for fixed format) or an
		 * information descriptor (for descriptor format) we cannot
		 * be certain of the error blkno, so just use the
		 * request_blkno.
		 */
		err_blkno = (diskaddr_t)request_blkno;
	}

	/*
	 * The following will log the buffer contents for the release driver
	 * if the SD_LOGMASK_DIAG bit of sd_level_mask is set, or the error
	 * level is set to verbose.
17183 */ 17184 sd_dump_memory(un, SD_LOG_IO, "Failed CDB", 17185 (uchar_t *)pktp->pkt_cdbp, CDB_SIZE, SD_LOG_HEX); 17186 sd_dump_memory(un, SD_LOG_IO, "Sense Data", 17187 (uchar_t *)sensep, SENSE_LENGTH, SD_LOG_HEX); 17188 17189 if (pfa_flag == FALSE) { 17190 /* This is normally only set for USCSI */ 17191 if ((pktp->pkt_flags & FLAG_SILENT) != 0) { 17192 return; 17193 } 17194 17195 if ((SD_IS_BUFIO(xp) == TRUE) && 17196 (((sd_level_mask & SD_LOGMASK_DIAG) == 0) && 17197 (severity < sd_error_level))) { 17198 return; 17199 } 17200 } 17201 /* 17202 * Check for Sonoma Failover and keep a count of how many failed I/O's 17203 */ 17204 if ((SD_IS_LSI(un)) && 17205 (scsi_sense_key(sensep) == KEY_ILLEGAL_REQUEST) && 17206 (scsi_sense_asc(sensep) == 0x94) && 17207 (scsi_sense_ascq(sensep) == 0x01)) { 17208 un->un_sonoma_failure_count++; 17209 if (un->un_sonoma_failure_count > 1) { 17210 return; 17211 } 17212 } 17213 17214 scsi_vu_errmsg(SD_SCSI_DEVP(un), pktp, sd_label, severity, 17215 request_blkno, err_blkno, scsi_cmds, 17216 (struct scsi_extended_sense *)sensep, 17217 un->un_additional_codes, NULL); 17218 } 17219 17220 /* 17221 * Function: sd_sense_key_no_sense 17222 * 17223 * Description: Recovery action when sense data was not received. 17224 * 17225 * Context: May be called from interrupt context 17226 */ 17227 17228 static void 17229 sd_sense_key_no_sense(struct sd_lun *un, struct buf *bp, 17230 struct sd_xbuf *xp, struct scsi_pkt *pktp) 17231 { 17232 struct sd_sense_info si; 17233 17234 ASSERT(un != NULL); 17235 ASSERT(mutex_owned(SD_MUTEX(un))); 17236 ASSERT(bp != NULL); 17237 ASSERT(xp != NULL); 17238 ASSERT(pktp != NULL); 17239 17240 si.ssi_severity = SCSI_ERR_FATAL; 17241 si.ssi_pfa_flag = FALSE; 17242 17243 SD_UPDATE_ERRSTATS(un, sd_softerrs); 17244 17245 sd_retry_command(un, bp, SD_RETRIES_STANDARD, sd_print_sense_msg, 17246 &si, EIO, (clock_t)0, NULL); 17247 } 17248 17249 17250 /* 17251 * Function: sd_sense_key_recoverable_error 17252 * 17253 * Description: Recovery actions for a SCSI "Recovered Error" sense key. 17254 * 17255 * Context: May be called from interrupt context 17256 */ 17257 17258 static void 17259 sd_sense_key_recoverable_error(struct sd_lun *un, 17260 uint8_t *sense_datap, 17261 struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp) 17262 { 17263 struct sd_sense_info si; 17264 uint8_t asc = scsi_sense_asc(sense_datap); 17265 17266 ASSERT(un != NULL); 17267 ASSERT(mutex_owned(SD_MUTEX(un))); 17268 ASSERT(bp != NULL); 17269 ASSERT(xp != NULL); 17270 ASSERT(pktp != NULL); 17271 17272 /* 17273 * 0x5D: FAILURE PREDICTION THRESHOLD EXCEEDED 17274 */ 17275 if ((asc == 0x5D) && (sd_report_pfa != 0)) { 17276 SD_UPDATE_ERRSTATS(un, sd_rq_pfa_err); 17277 si.ssi_severity = SCSI_ERR_INFO; 17278 si.ssi_pfa_flag = TRUE; 17279 } else { 17280 SD_UPDATE_ERRSTATS(un, sd_softerrs); 17281 SD_UPDATE_ERRSTATS(un, sd_rq_recov_err); 17282 si.ssi_severity = SCSI_ERR_RECOVERED; 17283 si.ssi_pfa_flag = FALSE; 17284 } 17285 17286 if (pktp->pkt_resid == 0) { 17287 sd_print_sense_msg(un, bp, &si, SD_NO_RETRY_ISSUED); 17288 sd_return_command(un, bp); 17289 return; 17290 } 17291 17292 sd_retry_command(un, bp, SD_RETRIES_STANDARD, sd_print_sense_msg, 17293 &si, EIO, (clock_t)0, NULL); 17294 } 17295 17296 17297 17298 17299 /* 17300 * Function: sd_sense_key_not_ready 17301 * 17302 * Description: Recovery actions for a SCSI "Not Ready" sense key. 
 *
 * Context: May be called from interrupt context
 */

static void
sd_sense_key_not_ready(struct sd_lun *un,
	uint8_t *sense_datap,
	struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp)
{
	struct sd_sense_info si;
	uint8_t asc = scsi_sense_asc(sense_datap);
	uint8_t ascq = scsi_sense_ascq(sense_datap);

	ASSERT(un != NULL);
	ASSERT(mutex_owned(SD_MUTEX(un)));
	ASSERT(bp != NULL);
	ASSERT(xp != NULL);
	ASSERT(pktp != NULL);

	si.ssi_severity = SCSI_ERR_FATAL;
	si.ssi_pfa_flag = FALSE;

	/*
	 * Update error stats after first NOT READY error. Disks may have
	 * been powered down and may need to be restarted.  For CDROMs,
	 * report NOT READY errors only if media is present.
	 */
	if ((ISCD(un) && (asc == 0x3A)) ||
	    (xp->xb_nr_retry_count > 0)) {
		SD_UPDATE_ERRSTATS(un, sd_harderrs);
		SD_UPDATE_ERRSTATS(un, sd_rq_ntrdy_err);
	}

	/*
	 * Just fail if the "not ready" retry limit has been reached.
	 */
	if (xp->xb_nr_retry_count >= un->un_notready_retry_count) {
		/* Special check for error message printing for removables. */
		if (un->un_f_has_removable_media && (asc == 0x04) &&
		    (ascq >= 0x04)) {
			si.ssi_severity = SCSI_ERR_ALL;
		}
		goto fail_command;
	}

	/*
	 * Check the ASC and ASCQ in the sense data as needed, to determine
	 * what to do.
	 */
	switch (asc) {
	case 0x04:	/* LOGICAL UNIT NOT READY */
		/*
		 * Disk drives that don't spin up result in a very long delay
		 * in format without warning messages. We will log a message
		 * if the error level is set to verbose.
		 */
		if (sd_error_level < SCSI_ERR_RETRYABLE) {
			scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
			    "logical unit not ready, resetting disk\n");
		}

		/*
		 * There are different requirements for CDROMs and disks for
		 * the number of retries.  If a CD-ROM is giving this, it is
		 * probably reading TOC and is in the process of getting
		 * ready, so we should keep on trying for a long time to make
		 * sure that all types of media are taken into account (for
		 * some media the drive takes a long time to read TOC).  For
		 * disks we do not want to retry this too many times as this
		 * can cause a long hang in format when the drive refuses to
		 * spin up (a very common failure).
		 */
		switch (ascq) {
		case 0x00:  /* LUN NOT READY, CAUSE NOT REPORTABLE */
			/*
			 * Disk drives frequently refuse to spin up which
			 * results in a very long hang in format without
			 * warning messages.
			 *
			 * Note: This code preserves the legacy behavior of
			 * comparing xb_nr_retry_count against zero for fibre
			 * channel targets instead of comparing against the
			 * un_reset_retry_count value.  The reason for this
			 * discrepancy has been so utterly lost beneath the
			 * Sands of Time that even Indiana Jones could not
			 * find it.
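			 *
			 * Sketch of the trigger below: the target is reset
			 * when un_startstop_timeid == NULL and either
			 * (sd_level_mask & SD_LOGMASK_DIAG) is set or
			 * xb_nr_retry_count exceeds the threshold (zero for
			 * fibre channel, un_reset_retry_count for parallel
			 * SCSI).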
			 */
			if (un->un_f_is_fibre == TRUE) {
				if (((sd_level_mask & SD_LOGMASK_DIAG) ||
				    (xp->xb_nr_retry_count > 0)) &&
				    (un->un_startstop_timeid == NULL)) {
					scsi_log(SD_DEVINFO(un), sd_label,
					    CE_WARN, "logical unit not ready, "
					    "resetting disk\n");
					sd_reset_target(un, pktp);
				}
			} else {
				if (((sd_level_mask & SD_LOGMASK_DIAG) ||
				    (xp->xb_nr_retry_count >
				    un->un_reset_retry_count)) &&
				    (un->un_startstop_timeid == NULL)) {
					scsi_log(SD_DEVINFO(un), sd_label,
					    CE_WARN, "logical unit not ready, "
					    "resetting disk\n");
					sd_reset_target(un, pktp);
				}
			}
			break;

		case 0x01:  /* LUN IS IN PROCESS OF BECOMING READY */
			/*
			 * If the target is in the process of becoming
			 * ready, just proceed with the retry. This can
			 * happen with CD-ROMs that take a long time to
			 * read TOC after a power cycle or reset.
			 */
			goto do_retry;

		case 0x02:  /* LUN NOT READY, INITIALIZING CMD REQUIRED */
			break;

		case 0x03:  /* LUN NOT READY, MANUAL INTERVENTION REQUIRED */
			/*
			 * Retries cannot help here so just fail right away.
			 */
			goto fail_command;

		case 0x88:
			/*
			 * Vendor-unique code for T3/T4: it indicates a
			 * path problem in a multipathed config, but as far as
			 * the target driver is concerned it equates to a fatal
			 * error, so we should just fail the command right away
			 * (without printing anything to the console). If this
			 * is not a T3/T4, fall thru to the default recovery
			 * action.
			 * T3/T4 is FC only, don't need to check is_fibre
			 */
			if (SD_IS_T3(un) || SD_IS_T4(un)) {
				sd_return_failed_command(un, bp, EIO);
				return;
			}
			/* FALLTHRU */

		case 0x04:  /* LUN NOT READY, FORMAT IN PROGRESS */
		case 0x05:  /* LUN NOT READY, REBUILD IN PROGRESS */
		case 0x06:  /* LUN NOT READY, RECALCULATION IN PROGRESS */
		case 0x07:  /* LUN NOT READY, OPERATION IN PROGRESS */
		case 0x08:  /* LUN NOT READY, LONG WRITE IN PROGRESS */
		default:    /* Possible future codes in SCSI spec? */
			/*
			 * For removable-media devices, do not retry if
			 * ASCQ > 2 as these result mostly from USCSI commands
			 * on MMC devices issued to check status of an
			 * operation initiated in immediate mode.  Also for
			 * ASCQ >= 4 do not print console messages as these
			 * mainly represent a user-initiated operation
			 * instead of a system failure.
			 */
			if (un->un_f_has_removable_media) {
				si.ssi_severity = SCSI_ERR_ALL;
				goto fail_command;
			}
			break;
		}

		/*
		 * As part of our recovery attempt for the NOT READY
		 * condition, we issue a START STOP UNIT command. However
		 * we want to wait for a short delay before attempting this
		 * as there may still be more commands coming back from the
		 * target with the check condition. To do this we use
		 * timeout(9F) to call sd_start_stop_unit_callback() after
		 * the delay interval expires. (sd_start_stop_unit_callback()
		 * dispatches sd_start_stop_unit_task(), which will issue
		 * the actual START STOP UNIT command.) The delay interval
		 * is one-half of the delay that we will use to retry the
		 * command that generated the NOT READY condition.
		 *
		 * Note that we could just dispatch sd_start_stop_unit_task()
		 * from here and allow it to sleep for the delay interval,
		 * but then we would be tying up the taskq thread
		 * unnecessarily for the duration of the delay.
		 *
		 * Do not issue the START STOP UNIT if the current command
		 * is already a START STOP UNIT.
		 */
		if (pktp->pkt_cdbp[0] == SCMD_START_STOP) {
			break;
		}

		/*
		 * Do not schedule the timeout if one is already pending.
		 */
		if (un->un_startstop_timeid != NULL) {
			SD_INFO(SD_LOG_ERROR, un,
			    "sd_sense_key_not_ready: restart already issued to"
			    " %s%d\n", ddi_driver_name(SD_DEVINFO(un)),
			    ddi_get_instance(SD_DEVINFO(un)));
			break;
		}

		/*
		 * Schedule the START STOP UNIT command, then queue the command
		 * for a retry.
		 *
		 * Note: A timeout is not scheduled for this retry because we
		 * want the retry to be serial with the START_STOP_UNIT. The
		 * retry will be started when the START_STOP_UNIT is completed
		 * in sd_start_stop_unit_task.
		 */
		un->un_startstop_timeid = timeout(sd_start_stop_unit_callback,
		    un, un->un_busy_timeout / 2);
		xp->xb_nr_retry_count++;
		sd_set_retry_bp(un, bp, 0, kstat_waitq_enter);
		return;

	case 0x05:	/* LOGICAL UNIT DOES NOT RESPOND TO SELECTION */
		if (sd_error_level < SCSI_ERR_RETRYABLE) {
			scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
			    "unit does not respond to selection\n");
		}
		break;

	case 0x3A:	/* MEDIUM NOT PRESENT */
		if (sd_error_level >= SCSI_ERR_FATAL) {
			scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
			    "Caddy not inserted in drive\n");
		}

		sr_ejected(un);
		un->un_mediastate = DKIO_EJECTED;
		/* The state has changed, inform the media watch routines */
		cv_broadcast(&un->un_state_cv);
		/* Just fail if no media is present in the drive. */
		goto fail_command;

	default:
		if (sd_error_level < SCSI_ERR_RETRYABLE) {
			scsi_log(SD_DEVINFO(un), sd_label, CE_NOTE,
			    "Unit not Ready. Additional sense code 0x%x\n",
			    asc);
		}
		break;
	}

do_retry:

	/*
	 * Retry the command, as some targets may report NOT READY for
	 * several seconds after being reset.
	 */
	xp->xb_nr_retry_count++;
	si.ssi_severity = SCSI_ERR_RETRYABLE;
	sd_retry_command(un, bp, SD_RETRIES_NOCHECK, sd_print_sense_msg,
	    &si, EIO, un->un_busy_timeout, NULL);

	return;

fail_command:
	sd_print_sense_msg(un, bp, &si, SD_NO_RETRY_ISSUED);
	sd_return_failed_command(un, bp, EIO);
}



/*
 * Function: sd_sense_key_medium_or_hardware_error
 *
 * Description: Recovery actions for a SCSI "Medium Error" or "Hardware Error"
 *		sense key.
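 *
 * Sketch of the reset escalation used below once xb_retry_count hits
 * un_reset_retry_count (condensed from the code; scsi_reset(9F)
 * returns 0 on failure):
 *
 *	if (!un->un_f_lun_reset_enabled ||
 *	    scsi_reset(SD_ADDRESS(un), RESET_LUN) == 0)
 *		(void) scsi_reset(SD_ADDRESS(un), RESET_TARGET);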
 *
 * Context: May be called from interrupt context
 */

static void
sd_sense_key_medium_or_hardware_error(struct sd_lun *un,
	uint8_t *sense_datap,
	struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp)
{
	struct sd_sense_info si;
	uint8_t sense_key = scsi_sense_key(sense_datap);
	uint8_t asc = scsi_sense_asc(sense_datap);

	ASSERT(un != NULL);
	ASSERT(mutex_owned(SD_MUTEX(un)));
	ASSERT(bp != NULL);
	ASSERT(xp != NULL);
	ASSERT(pktp != NULL);

	si.ssi_severity = SCSI_ERR_FATAL;
	si.ssi_pfa_flag = FALSE;

	if (sense_key == KEY_MEDIUM_ERROR) {
		SD_UPDATE_ERRSTATS(un, sd_rq_media_err);
	}

	SD_UPDATE_ERRSTATS(un, sd_harderrs);

	if ((un->un_reset_retry_count != 0) &&
	    (xp->xb_retry_count == un->un_reset_retry_count)) {
		mutex_exit(SD_MUTEX(un));
		/* Do NOT do a RESET_ALL here: too intrusive. (4112858) */
		if (un->un_f_allow_bus_device_reset == TRUE) {

			boolean_t try_resetting_target = B_TRUE;

			/*
			 * We need to be able to handle specific ASCs when we
			 * are handling a KEY_HARDWARE_ERROR. In particular,
			 * taking the default action of resetting the target
			 * may not be the appropriate way to attempt recovery.
			 * Resetting a target because of a single LUN failure
			 * victimizes all LUNs on that target.
			 *
			 * This is true for the LSI arrays: if an LSI array
			 * controller returns an ASC of 0x84 (LUN Dead) we
			 * should trust it.
			 */

			if (sense_key == KEY_HARDWARE_ERROR) {
				switch (asc) {
				case 0x84:
					if (SD_IS_LSI(un)) {
						try_resetting_target = B_FALSE;
					}
					break;
				default:
					break;
				}
			}

			if (try_resetting_target == B_TRUE) {
				int reset_retval = 0;
				if (un->un_f_lun_reset_enabled == TRUE) {
					SD_TRACE(SD_LOG_IO_CORE, un,
					    "sd_sense_key_medium_or_hardware_"
					    "error: issuing RESET_LUN\n");
					reset_retval =
					    scsi_reset(SD_ADDRESS(un),
					    RESET_LUN);
				}
				if (reset_retval == 0) {
					SD_TRACE(SD_LOG_IO_CORE, un,
					    "sd_sense_key_medium_or_hardware_"
					    "error: issuing RESET_TARGET\n");
					(void) scsi_reset(SD_ADDRESS(un),
					    RESET_TARGET);
				}
			}
		}
		mutex_enter(SD_MUTEX(un));
	}

	/*
	 * This really ought to be a fatal error, but we will retry anyway
	 * as some drives report this as a spurious error.
	 */
	sd_retry_command(un, bp, SD_RETRIES_STANDARD, sd_print_sense_msg,
	    &si, EIO, (clock_t)0, NULL);
}



/*
 * Function: sd_sense_key_illegal_request
 *
 * Description: Recovery actions for a SCSI "Illegal Request" sense key.
17671 * 17672 * Context: May be called from interrupt context 17673 */ 17674 17675 static void 17676 sd_sense_key_illegal_request(struct sd_lun *un, struct buf *bp, 17677 struct sd_xbuf *xp, struct scsi_pkt *pktp) 17678 { 17679 struct sd_sense_info si; 17680 17681 ASSERT(un != NULL); 17682 ASSERT(mutex_owned(SD_MUTEX(un))); 17683 ASSERT(bp != NULL); 17684 ASSERT(xp != NULL); 17685 ASSERT(pktp != NULL); 17686 17687 SD_UPDATE_ERRSTATS(un, sd_rq_illrq_err); 17688 17689 si.ssi_severity = SCSI_ERR_INFO; 17690 si.ssi_pfa_flag = FALSE; 17691 17692 /* Pointless to retry if the target thinks it's an illegal request */ 17693 sd_print_sense_msg(un, bp, &si, SD_NO_RETRY_ISSUED); 17694 sd_return_failed_command(un, bp, EIO); 17695 } 17696 17697 17698 17699 17700 /* 17701 * Function: sd_sense_key_unit_attention 17702 * 17703 * Description: Recovery actions for a SCSI "Unit Attention" sense key. 17704 * 17705 * Context: May be called from interrupt context 17706 */ 17707 17708 static void 17709 sd_sense_key_unit_attention(struct sd_lun *un, 17710 uint8_t *sense_datap, 17711 struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp) 17712 { 17713 /* 17714 * For UNIT ATTENTION we allow retries for one minute. Devices 17715 * like Sonoma can return UNIT ATTENTION close to a minute 17716 * under certain conditions. 17717 */ 17718 int retry_check_flag = SD_RETRIES_UA; 17719 boolean_t kstat_updated = B_FALSE; 17720 struct sd_sense_info si; 17721 uint8_t asc = scsi_sense_asc(sense_datap); 17722 uint8_t ascq = scsi_sense_ascq(sense_datap); 17723 17724 ASSERT(un != NULL); 17725 ASSERT(mutex_owned(SD_MUTEX(un))); 17726 ASSERT(bp != NULL); 17727 ASSERT(xp != NULL); 17728 ASSERT(pktp != NULL); 17729 17730 si.ssi_severity = SCSI_ERR_INFO; 17731 si.ssi_pfa_flag = FALSE; 17732 17733 17734 switch (asc) { 17735 case 0x5D: /* FAILURE PREDICTION THRESHOLD EXCEEDED */ 17736 if (sd_report_pfa != 0) { 17737 SD_UPDATE_ERRSTATS(un, sd_rq_pfa_err); 17738 si.ssi_pfa_flag = TRUE; 17739 retry_check_flag = SD_RETRIES_STANDARD; 17740 goto do_retry; 17741 } 17742 17743 break; 17744 17745 case 0x29: /* POWER ON, RESET, OR BUS DEVICE RESET OCCURRED */ 17746 if ((un->un_resvd_status & SD_RESERVE) == SD_RESERVE) { 17747 un->un_resvd_status |= 17748 (SD_LOST_RESERVE | SD_WANT_RESERVE); 17749 } 17750 #ifdef _LP64 17751 if (un->un_blockcount + 1 > SD_GROUP1_MAX_ADDRESS) { 17752 if (taskq_dispatch(sd_tq, sd_reenable_dsense_task, 17753 un, KM_NOSLEEP) == 0) { 17754 /* 17755 * If we can't dispatch the task we'll just 17756 * live without descriptor sense. We can 17757 * try again on the next "unit attention" 17758 */ 17759 SD_ERROR(SD_LOG_ERROR, un, 17760 "sd_sense_key_unit_attention: " 17761 "Could not dispatch " 17762 "sd_reenable_dsense_task\n"); 17763 } 17764 } 17765 #endif /* _LP64 */ 17766 /* FALLTHRU */ 17767 17768 case 0x28: /* NOT READY TO READY CHANGE, MEDIUM MAY HAVE CHANGED */ 17769 if (!un->un_f_has_removable_media) { 17770 break; 17771 } 17772 17773 /* 17774 * When we get a unit attention from a removable-media device, 17775 * it may be in a state that will take a long time to recover 17776 * (e.g., from a reset). Since we are executing in interrupt 17777 * context here, we cannot wait around for the device to come 17778 * back. So hand this command off to sd_media_change_task() 17779 * for deferred processing under taskq thread context. (Note 17780 * that the command still may be failed if a problem is 17781 * encountered at a later time.) 
		 */
		if (taskq_dispatch(sd_tq, sd_media_change_task, pktp,
		    KM_NOSLEEP) == 0) {
			/*
			 * Cannot dispatch the request so fail the command.
			 */
			SD_UPDATE_ERRSTATS(un, sd_harderrs);
			SD_UPDATE_ERRSTATS(un, sd_rq_nodev_err);
			si.ssi_severity = SCSI_ERR_FATAL;
			sd_print_sense_msg(un, bp, &si, SD_NO_RETRY_ISSUED);
			sd_return_failed_command(un, bp, EIO);
		}

		/*
		 * If we failed to dispatch sd_media_change_task(), we have
		 * already updated kstat above. If we succeeded in dispatching
		 * it, kstat will be updated later if an error is encountered.
		 * Either way, set the kstat_updated flag here.
		 */
		kstat_updated = B_TRUE;

		/*
		 * Either the command has been successfully dispatched to a
		 * task Q for retrying, or the dispatch failed. In either case
		 * do NOT retry again by calling sd_retry_command. This sets up
		 * two retries of the same command and when one completes and
		 * frees the resources the other will access freed memory,
		 * a bad thing.
		 */
		return;

	default:
		break;
	}

	/*
	 * ASC  ASCQ
	 *  2A   09	Capacity data has changed
	 *  2A   01	Mode parameters changed
	 *  3F   0E	Reported luns data has changed
	 * Arrays that support logical unit expansion should report
	 * capacity changes (2Ah/09). "Mode parameters changed" and
	 * "Reported luns data has changed" are approximations.
	 */
	if (((asc == 0x2a) && (ascq == 0x09)) ||
	    ((asc == 0x2a) && (ascq == 0x01)) ||
	    ((asc == 0x3f) && (ascq == 0x0e))) {
		if (taskq_dispatch(sd_tq, sd_target_change_task, un,
		    KM_NOSLEEP) == 0) {
			SD_ERROR(SD_LOG_ERROR, un,
			    "sd_sense_key_unit_attention: "
			    "Could not dispatch sd_target_change_task\n");
		}
	}

	/*
	 * Update kstat if we haven't done that.
	 */
	if (!kstat_updated) {
		SD_UPDATE_ERRSTATS(un, sd_harderrs);
		SD_UPDATE_ERRSTATS(un, sd_rq_nodev_err);
	}

do_retry:
	sd_retry_command(un, bp, retry_check_flag, sd_print_sense_msg, &si,
	    EIO, SD_UA_RETRY_DELAY, NULL);
}



/*
 * Function: sd_sense_key_fail_command
 *
 * Description: Used to fail a command when we don't like the sense key that
 *		was returned.
 *
 * Context: May be called from interrupt context
 */

static void
sd_sense_key_fail_command(struct sd_lun *un, struct buf *bp,
	struct sd_xbuf *xp, struct scsi_pkt *pktp)
{
	struct sd_sense_info si;

	ASSERT(un != NULL);
	ASSERT(mutex_owned(SD_MUTEX(un)));
	ASSERT(bp != NULL);
	ASSERT(xp != NULL);
	ASSERT(pktp != NULL);

	si.ssi_severity = SCSI_ERR_FATAL;
	si.ssi_pfa_flag = FALSE;

	sd_print_sense_msg(un, bp, &si, SD_NO_RETRY_ISSUED);
	sd_return_failed_command(un, bp, EIO);
}



/*
 * Function: sd_sense_key_blank_check
 *
 * Description: Recovery actions for a SCSI "Blank Check" sense key.
 *		Has no monetary connotation.
17887 * 17888 * Context: May be called from interrupt context 17889 */ 17890 17891 static void 17892 sd_sense_key_blank_check(struct sd_lun *un, struct buf *bp, 17893 struct sd_xbuf *xp, struct scsi_pkt *pktp) 17894 { 17895 struct sd_sense_info si; 17896 17897 ASSERT(un != NULL); 17898 ASSERT(mutex_owned(SD_MUTEX(un))); 17899 ASSERT(bp != NULL); 17900 ASSERT(xp != NULL); 17901 ASSERT(pktp != NULL); 17902 17903 /* 17904 * Blank check is not fatal for removable devices, therefore 17905 * it does not require a console message. 17906 */ 17907 si.ssi_severity = (un->un_f_has_removable_media) ? SCSI_ERR_ALL : 17908 SCSI_ERR_FATAL; 17909 si.ssi_pfa_flag = FALSE; 17910 17911 sd_print_sense_msg(un, bp, &si, SD_NO_RETRY_ISSUED); 17912 sd_return_failed_command(un, bp, EIO); 17913 } 17914 17915 17916 17917 17918 /* 17919 * Function: sd_sense_key_aborted_command 17920 * 17921 * Description: Recovery actions for a SCSI "Aborted Command" sense key. 17922 * 17923 * Context: May be called from interrupt context 17924 */ 17925 17926 static void 17927 sd_sense_key_aborted_command(struct sd_lun *un, struct buf *bp, 17928 struct sd_xbuf *xp, struct scsi_pkt *pktp) 17929 { 17930 struct sd_sense_info si; 17931 17932 ASSERT(un != NULL); 17933 ASSERT(mutex_owned(SD_MUTEX(un))); 17934 ASSERT(bp != NULL); 17935 ASSERT(xp != NULL); 17936 ASSERT(pktp != NULL); 17937 17938 si.ssi_severity = SCSI_ERR_FATAL; 17939 si.ssi_pfa_flag = FALSE; 17940 17941 SD_UPDATE_ERRSTATS(un, sd_harderrs); 17942 17943 /* 17944 * This really ought to be a fatal error, but we will retry anyway 17945 * as some drives report this as a spurious error. 17946 */ 17947 sd_retry_command(un, bp, SD_RETRIES_STANDARD, sd_print_sense_msg, 17948 &si, EIO, drv_usectohz(100000), NULL); 17949 } 17950 17951 17952 17953 /* 17954 * Function: sd_sense_key_default 17955 * 17956 * Description: Default recovery action for several SCSI sense keys (basically 17957 * attempts a retry). 17958 * 17959 * Context: May be called from interrupt context 17960 */ 17961 17962 static void 17963 sd_sense_key_default(struct sd_lun *un, 17964 uint8_t *sense_datap, 17965 struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp) 17966 { 17967 struct sd_sense_info si; 17968 uint8_t sense_key = scsi_sense_key(sense_datap); 17969 17970 ASSERT(un != NULL); 17971 ASSERT(mutex_owned(SD_MUTEX(un))); 17972 ASSERT(bp != NULL); 17973 ASSERT(xp != NULL); 17974 ASSERT(pktp != NULL); 17975 17976 SD_UPDATE_ERRSTATS(un, sd_harderrs); 17977 17978 /* 17979 * Undecoded sense key. Attempt retries and hope that will fix 17980 * the problem. Otherwise, we're dead. 17981 */ 17982 if ((pktp->pkt_flags & FLAG_SILENT) == 0) { 17983 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 17984 "Unhandled Sense Key '%s'\n", sense_keys[sense_key]); 17985 } 17986 17987 si.ssi_severity = SCSI_ERR_FATAL; 17988 si.ssi_pfa_flag = FALSE; 17989 17990 sd_retry_command(un, bp, SD_RETRIES_STANDARD, sd_print_sense_msg, 17991 &si, EIO, (clock_t)0, NULL); 17992 } 17993 17994 17995 17996 /* 17997 * Function: sd_print_retry_msg 17998 * 17999 * Description: Print a message indicating the retry action being taken. 18000 * 18001 * Arguments: un - ptr to associated softstate 18002 * bp - ptr to buf(9S) for the command 18003 * arg - not used. 
18004 * flag - SD_IMMEDIATE_RETRY_ISSUED, SD_DELAYED_RETRY_ISSUED, 18005 * or SD_NO_RETRY_ISSUED 18006 * 18007 * Context: May be called from interrupt context 18008 */ 18009 /* ARGSUSED */ 18010 static void 18011 sd_print_retry_msg(struct sd_lun *un, struct buf *bp, void *arg, int flag) 18012 { 18013 struct sd_xbuf *xp; 18014 struct scsi_pkt *pktp; 18015 char *reasonp; 18016 char *msgp; 18017 18018 ASSERT(un != NULL); 18019 ASSERT(mutex_owned(SD_MUTEX(un))); 18020 ASSERT(bp != NULL); 18021 pktp = SD_GET_PKTP(bp); 18022 ASSERT(pktp != NULL); 18023 xp = SD_GET_XBUF(bp); 18024 ASSERT(xp != NULL); 18025 18026 ASSERT(!mutex_owned(&un->un_pm_mutex)); 18027 mutex_enter(&un->un_pm_mutex); 18028 if ((un->un_state == SD_STATE_SUSPENDED) || 18029 (SD_DEVICE_IS_IN_LOW_POWER(un)) || 18030 (pktp->pkt_flags & FLAG_SILENT)) { 18031 mutex_exit(&un->un_pm_mutex); 18032 goto update_pkt_reason; 18033 } 18034 mutex_exit(&un->un_pm_mutex); 18035 18036 /* 18037 * Suppress messages if they are all the same pkt_reason; with 18038 * TQ, many (up to 256) are returned with the same pkt_reason. 18039 * If we are in panic, then suppress the retry messages. 18040 */ 18041 switch (flag) { 18042 case SD_NO_RETRY_ISSUED: 18043 msgp = "giving up"; 18044 break; 18045 case SD_IMMEDIATE_RETRY_ISSUED: 18046 case SD_DELAYED_RETRY_ISSUED: 18047 if (ddi_in_panic() || (un->un_state == SD_STATE_OFFLINE) || 18048 ((pktp->pkt_reason == un->un_last_pkt_reason) && 18049 (sd_error_level != SCSI_ERR_ALL))) { 18050 return; 18051 } 18052 msgp = "retrying command"; 18053 break; 18054 default: 18055 goto update_pkt_reason; 18056 } 18057 18058 reasonp = (((pktp->pkt_statistics & STAT_PERR) != 0) ? "parity error" : 18059 scsi_rname(pktp->pkt_reason)); 18060 18061 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 18062 "SCSI transport failed: reason '%s': %s\n", reasonp, msgp); 18063 18064 update_pkt_reason: 18065 /* 18066 * Update un->un_last_pkt_reason with the value in pktp->pkt_reason. 18067 * This is to prevent multiple console messages for the same failure 18068 * condition. Note that un->un_last_pkt_reason is NOT restored if & 18069 * when the command is retried successfully because there still may be 18070 * more commands coming back with the same value of pktp->pkt_reason. 18071 */ 18072 if ((pktp->pkt_reason != CMD_CMPLT) || (xp->xb_retry_count == 0)) { 18073 un->un_last_pkt_reason = pktp->pkt_reason; 18074 } 18075 } 18076 18077 18078 /* 18079 * Function: sd_print_cmd_incomplete_msg 18080 * 18081 * Description: Message logging fn. for a SCSA "CMD_INCOMPLETE" pkt_reason. 18082 * 18083 * Arguments: un - ptr to associated softstate 18084 * bp - ptr to buf(9S) for the command 18085 * arg - passed to sd_print_retry_msg() 18086 * code - SD_IMMEDIATE_RETRY_ISSUED, SD_DELAYED_RETRY_ISSUED, 18087 * or SD_NO_RETRY_ISSUED 18088 * 18089 * Context: May be called from interrupt context 18090 */ 18091 18092 static void 18093 sd_print_cmd_incomplete_msg(struct sd_lun *un, struct buf *bp, void *arg, 18094 int code) 18095 { 18096 dev_info_t *dip; 18097 18098 ASSERT(un != NULL); 18099 ASSERT(mutex_owned(SD_MUTEX(un))); 18100 ASSERT(bp != NULL); 18101 18102 switch (code) { 18103 case SD_NO_RETRY_ISSUED: 18104 /* Command was failed. Someone turned off this target? 
*/ 18105 if (un->un_state != SD_STATE_OFFLINE) { 18106 /* 18107 * Suppress message if we are detaching and 18108 * device has been disconnected 18109 * Note that DEVI_IS_DEVICE_REMOVED is a consolidation 18110 * private interface and not part of the DDI 18111 */ 18112 dip = un->un_sd->sd_dev; 18113 if (!(DEVI_IS_DETACHING(dip) && 18114 DEVI_IS_DEVICE_REMOVED(dip))) { 18115 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 18116 "disk not responding to selection\n"); 18117 } 18118 New_state(un, SD_STATE_OFFLINE); 18119 } 18120 break; 18121 18122 case SD_DELAYED_RETRY_ISSUED: 18123 case SD_IMMEDIATE_RETRY_ISSUED: 18124 default: 18125 /* Command was successfully queued for retry */ 18126 sd_print_retry_msg(un, bp, arg, code); 18127 break; 18128 } 18129 } 18130 18131 18132 /* 18133 * Function: sd_pkt_reason_cmd_incomplete 18134 * 18135 * Description: Recovery actions for a SCSA "CMD_INCOMPLETE" pkt_reason. 18136 * 18137 * Context: May be called from interrupt context 18138 */ 18139 18140 static void 18141 sd_pkt_reason_cmd_incomplete(struct sd_lun *un, struct buf *bp, 18142 struct sd_xbuf *xp, struct scsi_pkt *pktp) 18143 { 18144 int flag = SD_RETRIES_STANDARD | SD_RETRIES_ISOLATE; 18145 18146 ASSERT(un != NULL); 18147 ASSERT(mutex_owned(SD_MUTEX(un))); 18148 ASSERT(bp != NULL); 18149 ASSERT(xp != NULL); 18150 ASSERT(pktp != NULL); 18151 18152 /* Do not do a reset if selection did not complete */ 18153 /* Note: Should this not just check the bit? */ 18154 if (pktp->pkt_state != STATE_GOT_BUS) { 18155 SD_UPDATE_ERRSTATS(un, sd_transerrs); 18156 sd_reset_target(un, pktp); 18157 } 18158 18159 /* 18160 * If the target was not successfully selected, then set 18161 * SD_RETRIES_FAILFAST to indicate that we lost communication 18162 * with the target, and further retries and/or commands are 18163 * likely to take a long time. 18164 */ 18165 if ((pktp->pkt_state & STATE_GOT_TARGET) == 0) { 18166 flag |= SD_RETRIES_FAILFAST; 18167 } 18168 18169 SD_UPDATE_RESERVATION_STATUS(un, pktp); 18170 18171 sd_retry_command(un, bp, flag, 18172 sd_print_cmd_incomplete_msg, NULL, EIO, SD_RESTART_TIMEOUT, NULL); 18173 } 18174 18175 18176 18177 /* 18178 * Function: sd_pkt_reason_cmd_tran_err 18179 * 18180 * Description: Recovery actions for a SCSA "CMD_TRAN_ERR" pkt_reason. 18181 * 18182 * Context: May be called from interrupt context 18183 */ 18184 18185 static void 18186 sd_pkt_reason_cmd_tran_err(struct sd_lun *un, struct buf *bp, 18187 struct sd_xbuf *xp, struct scsi_pkt *pktp) 18188 { 18189 ASSERT(un != NULL); 18190 ASSERT(mutex_owned(SD_MUTEX(un))); 18191 ASSERT(bp != NULL); 18192 ASSERT(xp != NULL); 18193 ASSERT(pktp != NULL); 18194 18195 /* 18196 * Do not reset if we got a parity error, or if 18197 * selection did not complete. 18198 */ 18199 SD_UPDATE_ERRSTATS(un, sd_harderrs); 18200 /* Note: Should this not just check the bit for pkt_state? */ 18201 if (((pktp->pkt_statistics & STAT_PERR) == 0) && 18202 (pktp->pkt_state != STATE_GOT_BUS)) { 18203 SD_UPDATE_ERRSTATS(un, sd_transerrs); 18204 sd_reset_target(un, pktp); 18205 } 18206 18207 SD_UPDATE_RESERVATION_STATUS(un, pktp); 18208 18209 sd_retry_command(un, bp, (SD_RETRIES_STANDARD | SD_RETRIES_ISOLATE), 18210 sd_print_retry_msg, NULL, EIO, SD_RESTART_TIMEOUT, NULL); 18211 } 18212 18213 18214 18215 /* 18216 * Function: sd_pkt_reason_cmd_reset 18217 * 18218 * Description: Recovery actions for a SCSA "CMD_RESET" pkt_reason. 
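 *
 *		(Retries issued from this routine use SD_RETRIES_VICTIM, the
 *		separate victim-retry budget, on the assumption spelled out
 *		in the code below: the command was most likely an innocent
 *		victim of a reset caused by another target on the bus.)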
18219 * 18220 * Context: May be called from interrupt context 18221 */ 18222 18223 static void 18224 sd_pkt_reason_cmd_reset(struct sd_lun *un, struct buf *bp, 18225 struct sd_xbuf *xp, struct scsi_pkt *pktp) 18226 { 18227 ASSERT(un != NULL); 18228 ASSERT(mutex_owned(SD_MUTEX(un))); 18229 ASSERT(bp != NULL); 18230 ASSERT(xp != NULL); 18231 ASSERT(pktp != NULL); 18232 18233 /* The target may still be running the command, so try to reset. */ 18234 SD_UPDATE_ERRSTATS(un, sd_transerrs); 18235 sd_reset_target(un, pktp); 18236 18237 SD_UPDATE_RESERVATION_STATUS(un, pktp); 18238 18239 /* 18240 * If pkt_reason is CMD_RESET chances are that this pkt got 18241 * reset because another target on this bus caused it. The target 18242 * that caused it should get CMD_TIMEOUT with pkt_statistics 18243 * of STAT_TIMEOUT/STAT_DEV_RESET. 18244 */ 18245 18246 sd_retry_command(un, bp, (SD_RETRIES_VICTIM | SD_RETRIES_ISOLATE), 18247 sd_print_retry_msg, NULL, EIO, SD_RESTART_TIMEOUT, NULL); 18248 } 18249 18250 18251 18252 18253 /* 18254 * Function: sd_pkt_reason_cmd_aborted 18255 * 18256 * Description: Recovery actions for a SCSA "CMD_ABORTED" pkt_reason. 18257 * 18258 * Context: May be called from interrupt context 18259 */ 18260 18261 static void 18262 sd_pkt_reason_cmd_aborted(struct sd_lun *un, struct buf *bp, 18263 struct sd_xbuf *xp, struct scsi_pkt *pktp) 18264 { 18265 ASSERT(un != NULL); 18266 ASSERT(mutex_owned(SD_MUTEX(un))); 18267 ASSERT(bp != NULL); 18268 ASSERT(xp != NULL); 18269 ASSERT(pktp != NULL); 18270 18271 /* The target may still be running the command, so try to reset. */ 18272 SD_UPDATE_ERRSTATS(un, sd_transerrs); 18273 sd_reset_target(un, pktp); 18274 18275 SD_UPDATE_RESERVATION_STATUS(un, pktp); 18276 18277 /* 18278 * If pkt_reason is CMD_ABORTED chances are that this pkt got 18279 * aborted because another target on this bus caused it. The target 18280 * that caused it should get CMD_TIMEOUT with pkt_statistics 18281 * of STAT_TIMEOUT/STAT_DEV_RESET. 18282 */ 18283 18284 sd_retry_command(un, bp, (SD_RETRIES_VICTIM | SD_RETRIES_ISOLATE), 18285 sd_print_retry_msg, NULL, EIO, SD_RESTART_TIMEOUT, NULL); 18286 } 18287 18288 18289 18290 /* 18291 * Function: sd_pkt_reason_cmd_timeout 18292 * 18293 * Description: Recovery actions for a SCSA "CMD_TIMEOUT" pkt_reason. 18294 * 18295 * Context: May be called from interrupt context 18296 */ 18297 18298 static void 18299 sd_pkt_reason_cmd_timeout(struct sd_lun *un, struct buf *bp, 18300 struct sd_xbuf *xp, struct scsi_pkt *pktp) 18301 { 18302 ASSERT(un != NULL); 18303 ASSERT(mutex_owned(SD_MUTEX(un))); 18304 ASSERT(bp != NULL); 18305 ASSERT(xp != NULL); 18306 ASSERT(pktp != NULL); 18307 18308 18309 SD_UPDATE_ERRSTATS(un, sd_transerrs); 18310 sd_reset_target(un, pktp); 18311 18312 SD_UPDATE_RESERVATION_STATUS(un, pktp); 18313 18314 /* 18315 * A command timeout indicates that we could not establish 18316 * communication with the target, so set SD_RETRIES_FAILFAST 18317 * as further retries/commands are likely to take a long time. 18318 */ 18319 sd_retry_command(un, bp, 18320 (SD_RETRIES_STANDARD | SD_RETRIES_ISOLATE | SD_RETRIES_FAILFAST), 18321 sd_print_retry_msg, NULL, EIO, SD_RESTART_TIMEOUT, NULL); 18322 } 18323 18324 18325 18326 /* 18327 * Function: sd_pkt_reason_cmd_unx_bus_free 18328 * 18329 * Description: Recovery actions for a SCSA "CMD_UNX_BUS_FREE" pkt_reason. 
18330 * 18331 * Context: May be called from interrupt context 18332 */ 18333 18334 static void 18335 sd_pkt_reason_cmd_unx_bus_free(struct sd_lun *un, struct buf *bp, 18336 struct sd_xbuf *xp, struct scsi_pkt *pktp) 18337 { 18338 void (*funcp)(struct sd_lun *un, struct buf *bp, void *arg, int code); 18339 18340 ASSERT(un != NULL); 18341 ASSERT(mutex_owned(SD_MUTEX(un))); 18342 ASSERT(bp != NULL); 18343 ASSERT(xp != NULL); 18344 ASSERT(pktp != NULL); 18345 18346 SD_UPDATE_ERRSTATS(un, sd_harderrs); 18347 SD_UPDATE_RESERVATION_STATUS(un, pktp); 18348 18349 funcp = ((pktp->pkt_statistics & STAT_PERR) == 0) ? 18350 sd_print_retry_msg : NULL; 18351 18352 sd_retry_command(un, bp, (SD_RETRIES_STANDARD | SD_RETRIES_ISOLATE), 18353 funcp, NULL, EIO, SD_RESTART_TIMEOUT, NULL); 18354 } 18355 18356 18357 /* 18358 * Function: sd_pkt_reason_cmd_tag_reject 18359 * 18360 * Description: Recovery actions for a SCSA "CMD_TAG_REJECT" pkt_reason. 18361 * 18362 * Context: May be called from interrupt context 18363 */ 18364 18365 static void 18366 sd_pkt_reason_cmd_tag_reject(struct sd_lun *un, struct buf *bp, 18367 struct sd_xbuf *xp, struct scsi_pkt *pktp) 18368 { 18369 ASSERT(un != NULL); 18370 ASSERT(mutex_owned(SD_MUTEX(un))); 18371 ASSERT(bp != NULL); 18372 ASSERT(xp != NULL); 18373 ASSERT(pktp != NULL); 18374 18375 SD_UPDATE_ERRSTATS(un, sd_harderrs); 18376 pktp->pkt_flags = 0; 18377 un->un_tagflags = 0; 18378 if (un->un_f_opt_queueing == TRUE) { 18379 un->un_throttle = min(un->un_throttle, 3); 18380 } else { 18381 un->un_throttle = 1; 18382 } 18383 mutex_exit(SD_MUTEX(un)); 18384 (void) scsi_ifsetcap(SD_ADDRESS(un), "tagged-qing", 0, 1); 18385 mutex_enter(SD_MUTEX(un)); 18386 18387 SD_UPDATE_RESERVATION_STATUS(un, pktp); 18388 18389 /* Legacy behavior not to check retry counts here. */ 18390 sd_retry_command(un, bp, (SD_RETRIES_NOCHECK | SD_RETRIES_ISOLATE), 18391 sd_print_retry_msg, NULL, EIO, SD_RESTART_TIMEOUT, NULL); 18392 } 18393 18394 18395 /* 18396 * Function: sd_pkt_reason_default 18397 * 18398 * Description: Default recovery actions for SCSA pkt_reason values that 18399 * do not have more explicit recovery actions. 18400 * 18401 * Context: May be called from interrupt context 18402 */ 18403 18404 static void 18405 sd_pkt_reason_default(struct sd_lun *un, struct buf *bp, 18406 struct sd_xbuf *xp, struct scsi_pkt *pktp) 18407 { 18408 ASSERT(un != NULL); 18409 ASSERT(mutex_owned(SD_MUTEX(un))); 18410 ASSERT(bp != NULL); 18411 ASSERT(xp != NULL); 18412 ASSERT(pktp != NULL); 18413 18414 SD_UPDATE_ERRSTATS(un, sd_transerrs); 18415 sd_reset_target(un, pktp); 18416 18417 SD_UPDATE_RESERVATION_STATUS(un, pktp); 18418 18419 sd_retry_command(un, bp, (SD_RETRIES_STANDARD | SD_RETRIES_ISOLATE), 18420 sd_print_retry_msg, NULL, EIO, SD_RESTART_TIMEOUT, NULL); 18421 } 18422 18423 18424 18425 /* 18426 * Function: sd_pkt_status_check_condition 18427 * 18428 * Description: Recovery actions for a "STATUS_CHECK" SCSI command status. 
18429 *
18430 * Context: May be called from interrupt context
18431 */
18432
18433 static void
18434 sd_pkt_status_check_condition(struct sd_lun *un, struct buf *bp,
18435 struct sd_xbuf *xp, struct scsi_pkt *pktp)
18436 {
18437 ASSERT(un != NULL);
18438 ASSERT(mutex_owned(SD_MUTEX(un)));
18439 ASSERT(bp != NULL);
18440 ASSERT(xp != NULL);
18441 ASSERT(pktp != NULL);
18442
18443 SD_TRACE(SD_LOG_IO, un, "sd_pkt_status_check_condition: "
18444 "entry: buf:0x%p xp:0x%p\n", bp, xp);
18445
18446 /*
18447 * If ARQ is NOT enabled, then issue a REQUEST SENSE command (the
18448 * command will be retried after the request sense). Otherwise, retry
18449 * the command. Note: we are issuing the request sense even though the
18450 * retry limit may have been reached for the failed command.
18451 */
18452 if (un->un_f_arq_enabled == FALSE) {
18453 SD_INFO(SD_LOG_IO_CORE, un, "sd_pkt_status_check_condition: "
18454 "no ARQ, sending request sense command\n");
18455 sd_send_request_sense_command(un, bp, pktp);
18456 } else {
18457 SD_INFO(SD_LOG_IO_CORE, un, "sd_pkt_status_check_condition: "
18458 "ARQ, retrying request sense command\n");
18459 #if defined(__i386) || defined(__amd64)
18460 /*
18461 * The SD_RETRY_DELAY value needs to be adjusted here
18462 * whenever SD_RETRY_DELAY changes in sddef.h.
18463 */
18464 sd_retry_command(un, bp, SD_RETRIES_STANDARD, NULL, NULL, EIO,
18465 un->un_f_is_fibre ? drv_usectohz(100000) : (clock_t)0,
18466 NULL);
18467 #else
18468 sd_retry_command(un, bp, SD_RETRIES_STANDARD, NULL, NULL,
18469 EIO, SD_RETRY_DELAY, NULL);
18470 #endif
18471 }
18472
18473 SD_TRACE(SD_LOG_IO_CORE, un, "sd_pkt_status_check_condition: exit\n");
18474 }
18475
18476
18477 /*
18478 * Function: sd_pkt_status_busy
18479 *
18480 * Description: Recovery actions for a "STATUS_BUSY" SCSI command status.
18481 *
18482 * Context: May be called from interrupt context
18483 */
18484
18485 static void
18486 sd_pkt_status_busy(struct sd_lun *un, struct buf *bp, struct sd_xbuf *xp,
18487 struct scsi_pkt *pktp)
18488 {
18489 ASSERT(un != NULL);
18490 ASSERT(mutex_owned(SD_MUTEX(un)));
18491 ASSERT(bp != NULL);
18492 ASSERT(xp != NULL);
18493 ASSERT(pktp != NULL);
18494
18495 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
18496 "sd_pkt_status_busy: entry\n");
18497
18498 /* If retries are exhausted, just fail the command. */
18499 if (xp->xb_retry_count >= un->un_busy_retry_count) {
18500 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
18501 "device busy too long\n");
18502 sd_return_failed_command(un, bp, EIO);
18503 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
18504 "sd_pkt_status_busy: exit\n");
18505 return;
18506 }
18507 xp->xb_retry_count++;
18508
18509 /*
18510 * Try to reset the target. However, we do not want to perform
18511 * more than one reset if the device continues to fail. The reset
18512 * will be performed when the retry count reaches the reset
18513 * threshold. This threshold should be set such that at least
18514 * one retry is issued before the reset is performed.
18515 */
18516 if (xp->xb_retry_count ==
18517 ((un->un_reset_retry_count < 2) ? 2 : un->un_reset_retry_count)) {
18518 int rval = 0;
18519 mutex_exit(SD_MUTEX(un));
18520 if (un->un_f_allow_bus_device_reset == TRUE) {
18521 /*
18522 * First try to reset the LUN; if we cannot, then
18523 * try to reset the target.
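 *
 * Worked example (illustrative figures only): with un_busy_retry_count
 * of, say, 10 and un_reset_retry_count of 5, retries 1-4 are plain
 * retries, the reset escalation below runs once when the retry count
 * reaches 5, and retries 6-10 are again plain retries before the
 * command is finally failed. The clamp to 2 in the test above
 * guarantees that at least one plain retry precedes any reset, even if
 * un_reset_retry_count is configured as 0 or 1.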
18524 */ 18525 if (un->un_f_lun_reset_enabled == TRUE) { 18526 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 18527 "sd_pkt_status_busy: RESET_LUN\n"); 18528 rval = scsi_reset(SD_ADDRESS(un), RESET_LUN); 18529 } 18530 if (rval == 0) { 18531 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 18532 "sd_pkt_status_busy: RESET_TARGET\n"); 18533 rval = scsi_reset(SD_ADDRESS(un), RESET_TARGET); 18534 } 18535 } 18536 if (rval == 0) { 18537 /* 18538 * If the RESET_LUN and/or RESET_TARGET failed, 18539 * try RESET_ALL 18540 */ 18541 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 18542 "sd_pkt_status_busy: RESET_ALL\n"); 18543 rval = scsi_reset(SD_ADDRESS(un), RESET_ALL); 18544 } 18545 mutex_enter(SD_MUTEX(un)); 18546 if (rval == 0) { 18547 /* 18548 * The RESET_LUN, RESET_TARGET, and/or RESET_ALL failed. 18549 * At this point we give up & fail the command. 18550 */ 18551 sd_return_failed_command(un, bp, EIO); 18552 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 18553 "sd_pkt_status_busy: exit (failed cmd)\n"); 18554 return; 18555 } 18556 } 18557 18558 /* 18559 * Retry the command. Be sure to specify SD_RETRIES_NOCHECK as 18560 * we have already checked the retry counts above. 18561 */ 18562 sd_retry_command(un, bp, SD_RETRIES_NOCHECK, NULL, NULL, 18563 EIO, un->un_busy_timeout, NULL); 18564 18565 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 18566 "sd_pkt_status_busy: exit\n"); 18567 } 18568 18569 18570 /* 18571 * Function: sd_pkt_status_reservation_conflict 18572 * 18573 * Description: Recovery actions for a "STATUS_RESERVATION_CONFLICT" SCSI 18574 * command status. 18575 * 18576 * Context: May be called from interrupt context 18577 */ 18578 18579 static void 18580 sd_pkt_status_reservation_conflict(struct sd_lun *un, struct buf *bp, 18581 struct sd_xbuf *xp, struct scsi_pkt *pktp) 18582 { 18583 ASSERT(un != NULL); 18584 ASSERT(mutex_owned(SD_MUTEX(un))); 18585 ASSERT(bp != NULL); 18586 ASSERT(xp != NULL); 18587 ASSERT(pktp != NULL); 18588 18589 /* 18590 * If the command was PERSISTENT_RESERVATION_[IN|OUT] then reservation 18591 * conflict could be due to various reasons like incorrect keys, not 18592 * registered or not reserved etc. So, we return EACCES to the caller. 18593 */ 18594 if (un->un_reservation_type == SD_SCSI3_RESERVATION) { 18595 int cmd = SD_GET_PKT_OPCODE(pktp); 18596 if ((cmd == SCMD_PERSISTENT_RESERVE_IN) || 18597 (cmd == SCMD_PERSISTENT_RESERVE_OUT)) { 18598 sd_return_failed_command(un, bp, EACCES); 18599 return; 18600 } 18601 } 18602 18603 un->un_resvd_status |= SD_RESERVATION_CONFLICT; 18604 18605 if ((un->un_resvd_status & SD_FAILFAST) != 0) { 18606 if (sd_failfast_enable != 0) { 18607 /* By definition, we must panic here.... */ 18608 sd_panic_for_res_conflict(un); 18609 /*NOTREACHED*/ 18610 } 18611 SD_ERROR(SD_LOG_IO, un, 18612 "sd_handle_resv_conflict: Disk Reserved\n"); 18613 sd_return_failed_command(un, bp, EACCES); 18614 return; 18615 } 18616 18617 /* 18618 * 1147670: retry only if sd_retry_on_reservation_conflict 18619 * property is set (default is 1). Retries will not succeed 18620 * on a disk reserved by another initiator. HA systems 18621 * may reset this via sd.conf to avoid these retries. 18622 * 18623 * Note: The legacy return code for this failure is EIO, however EACCES 18624 * seems more appropriate for a reservation conflict. 
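 *
 * For illustration, one common way for an HA configuration to disable
 * these retries is an /etc/system entry (hypothetical example; the
 * ssd variant of the driver would use the "ssd:" prefix instead):
 *
 *	set sd:sd_retry_on_reservation_conflict = 0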
18625 */ 18626 if (sd_retry_on_reservation_conflict == 0) { 18627 SD_ERROR(SD_LOG_IO, un, 18628 "sd_handle_resv_conflict: Device Reserved\n"); 18629 sd_return_failed_command(un, bp, EIO); 18630 return; 18631 } 18632 18633 /* 18634 * Retry the command if we can. 18635 * 18636 * Note: The legacy return code for this failure is EIO, however EACCES 18637 * seems more appropriate for a reservation conflict. 18638 */ 18639 sd_retry_command(un, bp, SD_RETRIES_STANDARD, NULL, NULL, EIO, 18640 (clock_t)2, NULL); 18641 } 18642 18643 18644 18645 /* 18646 * Function: sd_pkt_status_qfull 18647 * 18648 * Description: Handle a QUEUE FULL condition from the target. This can 18649 * occur if the HBA does not handle the queue full condition. 18650 * (Basically this means third-party HBAs as Sun HBAs will 18651 * handle the queue full condition.) Note that if there are 18652 * some commands already in the transport, then the queue full 18653 * has occurred because the queue for this nexus is actually 18654 * full. If there are no commands in the transport, then the 18655 * queue full is resulting from some other initiator or lun 18656 * consuming all the resources at the target. 18657 * 18658 * Context: May be called from interrupt context 18659 */ 18660 18661 static void 18662 sd_pkt_status_qfull(struct sd_lun *un, struct buf *bp, 18663 struct sd_xbuf *xp, struct scsi_pkt *pktp) 18664 { 18665 ASSERT(un != NULL); 18666 ASSERT(mutex_owned(SD_MUTEX(un))); 18667 ASSERT(bp != NULL); 18668 ASSERT(xp != NULL); 18669 ASSERT(pktp != NULL); 18670 18671 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 18672 "sd_pkt_status_qfull: entry\n"); 18673 18674 /* 18675 * Just lower the QFULL throttle and retry the command. Note that 18676 * we do not limit the number of retries here. 18677 */ 18678 sd_reduce_throttle(un, SD_THROTTLE_QFULL); 18679 sd_retry_command(un, bp, SD_RETRIES_NOCHECK, NULL, NULL, 0, 18680 SD_RESTART_TIMEOUT, NULL); 18681 18682 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 18683 "sd_pkt_status_qfull: exit\n"); 18684 } 18685 18686 18687 /* 18688 * Function: sd_reset_target 18689 * 18690 * Description: Issue a scsi_reset(9F), with either RESET_LUN, 18691 * RESET_TARGET, or RESET_ALL. 18692 * 18693 * Context: May be called under interrupt context. 18694 */ 18695 18696 static void 18697 sd_reset_target(struct sd_lun *un, struct scsi_pkt *pktp) 18698 { 18699 int rval = 0; 18700 18701 ASSERT(un != NULL); 18702 ASSERT(mutex_owned(SD_MUTEX(un))); 18703 ASSERT(pktp != NULL); 18704 18705 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sd_reset_target: entry\n"); 18706 18707 /* 18708 * No need to reset if the transport layer has already done so. 
18709 */ 18710 if ((pktp->pkt_statistics & 18711 (STAT_BUS_RESET | STAT_DEV_RESET | STAT_ABORTED)) != 0) { 18712 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 18713 "sd_reset_target: no reset\n"); 18714 return; 18715 } 18716 18717 mutex_exit(SD_MUTEX(un)); 18718 18719 if (un->un_f_allow_bus_device_reset == TRUE) { 18720 if (un->un_f_lun_reset_enabled == TRUE) { 18721 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 18722 "sd_reset_target: RESET_LUN\n"); 18723 rval = scsi_reset(SD_ADDRESS(un), RESET_LUN); 18724 } 18725 if (rval == 0) { 18726 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 18727 "sd_reset_target: RESET_TARGET\n"); 18728 rval = scsi_reset(SD_ADDRESS(un), RESET_TARGET); 18729 } 18730 } 18731 18732 if (rval == 0) { 18733 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 18734 "sd_reset_target: RESET_ALL\n"); 18735 (void) scsi_reset(SD_ADDRESS(un), RESET_ALL); 18736 } 18737 18738 mutex_enter(SD_MUTEX(un)); 18739 18740 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sd_reset_target: exit\n"); 18741 } 18742 18743 /* 18744 * Function: sd_target_change_task 18745 * 18746 * Description: Handle dynamic target change 18747 * 18748 * Context: Executes in a taskq() thread context 18749 */ 18750 static void 18751 sd_target_change_task(void *arg) 18752 { 18753 struct sd_lun *un = arg; 18754 uint64_t capacity; 18755 diskaddr_t label_cap; 18756 uint_t lbasize; 18757 sd_ssc_t *ssc; 18758 18759 ASSERT(un != NULL); 18760 ASSERT(!mutex_owned(SD_MUTEX(un))); 18761 18762 if ((un->un_f_blockcount_is_valid == FALSE) || 18763 (un->un_f_tgt_blocksize_is_valid == FALSE)) { 18764 return; 18765 } 18766 18767 ssc = sd_ssc_init(un); 18768 18769 if (sd_send_scsi_READ_CAPACITY(ssc, &capacity, 18770 &lbasize, SD_PATH_DIRECT) != 0) { 18771 SD_ERROR(SD_LOG_ERROR, un, 18772 "sd_target_change_task: fail to read capacity\n"); 18773 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 18774 goto task_exit; 18775 } 18776 18777 mutex_enter(SD_MUTEX(un)); 18778 if (capacity <= un->un_blockcount) { 18779 mutex_exit(SD_MUTEX(un)); 18780 goto task_exit; 18781 } 18782 18783 sd_update_block_info(un, lbasize, capacity); 18784 mutex_exit(SD_MUTEX(un)); 18785 18786 /* 18787 * If lun is EFI labeled and lun capacity is greater than the 18788 * capacity contained in the label, log a sys event. 
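 *
 * The sysevent logged by sd_log_lun_expansion_event() below carries the
 * "/devices" path of minor node 'a' as its DEV_PHYS_PATH attribute,
 * e.g. (purely hypothetical path):
 *
 *	/devices/pci@0,0/pci1000,3060@3/sd@1,0:a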
18789 */ 18790 if (cmlb_efi_label_capacity(un->un_cmlbhandle, &label_cap, 18791 (void*)SD_PATH_DIRECT) == 0) { 18792 mutex_enter(SD_MUTEX(un)); 18793 if (un->un_f_blockcount_is_valid && 18794 un->un_blockcount > label_cap) { 18795 mutex_exit(SD_MUTEX(un)); 18796 sd_log_lun_expansion_event(un, KM_SLEEP); 18797 } else { 18798 mutex_exit(SD_MUTEX(un)); 18799 } 18800 } 18801 18802 task_exit: 18803 sd_ssc_fini(ssc); 18804 } 18805 18806 /* 18807 * Function: sd_log_lun_expansion_event 18808 * 18809 * Description: Log lun expansion sys event 18810 * 18811 * Context: Never called from interrupt context 18812 */ 18813 static void 18814 sd_log_lun_expansion_event(struct sd_lun *un, int km_flag) 18815 { 18816 int err; 18817 char *path; 18818 nvlist_t *dle_attr_list; 18819 18820 /* Allocate and build sysevent attribute list */ 18821 err = nvlist_alloc(&dle_attr_list, NV_UNIQUE_NAME_TYPE, km_flag); 18822 if (err != 0) { 18823 SD_ERROR(SD_LOG_ERROR, un, 18824 "sd_log_lun_expansion_event: fail to allocate space\n"); 18825 return; 18826 } 18827 18828 path = kmem_alloc(MAXPATHLEN, km_flag); 18829 if (path == NULL) { 18830 nvlist_free(dle_attr_list); 18831 SD_ERROR(SD_LOG_ERROR, un, 18832 "sd_log_lun_expansion_event: fail to allocate space\n"); 18833 return; 18834 } 18835 /* 18836 * Add path attribute to identify the lun. 18837 * We are using minor node 'a' as the sysevent attribute. 18838 */ 18839 (void) snprintf(path, MAXPATHLEN, "/devices"); 18840 (void) ddi_pathname(SD_DEVINFO(un), path + strlen(path)); 18841 (void) snprintf(path + strlen(path), MAXPATHLEN - strlen(path), 18842 ":a"); 18843 18844 err = nvlist_add_string(dle_attr_list, DEV_PHYS_PATH, path); 18845 if (err != 0) { 18846 nvlist_free(dle_attr_list); 18847 kmem_free(path, MAXPATHLEN); 18848 SD_ERROR(SD_LOG_ERROR, un, 18849 "sd_log_lun_expansion_event: fail to add attribute\n"); 18850 return; 18851 } 18852 18853 /* Log dynamic lun expansion sysevent */ 18854 err = ddi_log_sysevent(SD_DEVINFO(un), SUNW_VENDOR, EC_DEV_STATUS, 18855 ESC_DEV_DLE, dle_attr_list, NULL, km_flag); 18856 if (err != DDI_SUCCESS) { 18857 SD_ERROR(SD_LOG_ERROR, un, 18858 "sd_log_lun_expansion_event: fail to log sysevent\n"); 18859 } 18860 18861 nvlist_free(dle_attr_list); 18862 kmem_free(path, MAXPATHLEN); 18863 } 18864 18865 /* 18866 * Function: sd_media_change_task 18867 * 18868 * Description: Recovery action for CDROM to become available. 18869 * 18870 * Context: Executes in a taskq() thread context 18871 */ 18872 18873 static void 18874 sd_media_change_task(void *arg) 18875 { 18876 struct scsi_pkt *pktp = arg; 18877 struct sd_lun *un; 18878 struct buf *bp; 18879 struct sd_xbuf *xp; 18880 int err = 0; 18881 int retry_count = 0; 18882 int retry_limit = SD_UNIT_ATTENTION_RETRY/10; 18883 struct sd_sense_info si; 18884 18885 ASSERT(pktp != NULL); 18886 bp = (struct buf *)pktp->pkt_private; 18887 ASSERT(bp != NULL); 18888 xp = SD_GET_XBUF(bp); 18889 ASSERT(xp != NULL); 18890 un = SD_GET_UN(bp); 18891 ASSERT(un != NULL); 18892 ASSERT(!mutex_owned(SD_MUTEX(un))); 18893 ASSERT(un->un_f_monitor_media_state); 18894 18895 si.ssi_severity = SCSI_ERR_INFO; 18896 si.ssi_pfa_flag = FALSE; 18897 18898 /* 18899 * When a reset is issued on a CDROM, it takes a long time to 18900 * recover. First few attempts to read capacity and other things 18901 * related to handling unit attention fail (with a ASC 0x4 and 18902 * ASCQ 0x1). In that case we want to do enough retries and we want 18903 * to limit the retries in other cases of genuine failures like 18904 * no media in drive. 
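 *
 * Worked example (assuming SD_UNIT_ATTENTION_RETRY is 40, per its
 * definition in sddef.h): the loop below initially allows 40/10 = 4
 * attempts, i.e. roughly 2 seconds at one attempt per 0.5 sec sleep.
 * Only if sd_handle_mchange() returns EAGAIN (drive still becoming
 * ready) is the limit widened to the full 40 attempts (~20 seconds),
 * so genuine failures such as an empty drive still fail quickly.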
18905 */ 18906 while (retry_count++ < retry_limit) { 18907 if ((err = sd_handle_mchange(un)) == 0) { 18908 break; 18909 } 18910 if (err == EAGAIN) { 18911 retry_limit = SD_UNIT_ATTENTION_RETRY; 18912 } 18913 /* Sleep for 0.5 sec. & try again */ 18914 delay(drv_usectohz(500000)); 18915 } 18916 18917 /* 18918 * Dispatch (retry or fail) the original command here, 18919 * along with appropriate console messages.... 18920 * 18921 * Must grab the mutex before calling sd_retry_command, 18922 * sd_print_sense_msg and sd_return_failed_command. 18923 */ 18924 mutex_enter(SD_MUTEX(un)); 18925 if (err != SD_CMD_SUCCESS) { 18926 SD_UPDATE_ERRSTATS(un, sd_harderrs); 18927 SD_UPDATE_ERRSTATS(un, sd_rq_nodev_err); 18928 si.ssi_severity = SCSI_ERR_FATAL; 18929 sd_print_sense_msg(un, bp, &si, SD_NO_RETRY_ISSUED); 18930 sd_return_failed_command(un, bp, EIO); 18931 } else { 18932 sd_retry_command(un, bp, SD_RETRIES_NOCHECK, sd_print_sense_msg, 18933 &si, EIO, (clock_t)0, NULL); 18934 } 18935 mutex_exit(SD_MUTEX(un)); 18936 } 18937 18938 18939 18940 /* 18941 * Function: sd_handle_mchange 18942 * 18943 * Description: Perform geometry validation & other recovery when CDROM 18944 * has been removed from drive. 18945 * 18946 * Return Code: 0 for success 18947 * errno-type return code of either sd_send_scsi_DOORLOCK() or 18948 * sd_send_scsi_READ_CAPACITY() 18949 * 18950 * Context: Executes in a taskq() thread context 18951 */ 18952 18953 static int 18954 sd_handle_mchange(struct sd_lun *un) 18955 { 18956 uint64_t capacity; 18957 uint32_t lbasize; 18958 int rval; 18959 sd_ssc_t *ssc; 18960 18961 ASSERT(!mutex_owned(SD_MUTEX(un))); 18962 ASSERT(un->un_f_monitor_media_state); 18963 18964 ssc = sd_ssc_init(un); 18965 rval = sd_send_scsi_READ_CAPACITY(ssc, &capacity, &lbasize, 18966 SD_PATH_DIRECT_PRIORITY); 18967 18968 if (rval != 0) 18969 goto failed; 18970 18971 mutex_enter(SD_MUTEX(un)); 18972 sd_update_block_info(un, lbasize, capacity); 18973 18974 if (un->un_errstats != NULL) { 18975 struct sd_errstats *stp = 18976 (struct sd_errstats *)un->un_errstats->ks_data; 18977 stp->sd_capacity.value.ui64 = (uint64_t) 18978 ((uint64_t)un->un_blockcount * 18979 (uint64_t)un->un_tgt_blocksize); 18980 } 18981 18982 /* 18983 * Check if the media in the device is writable or not 18984 */ 18985 if (ISCD(un)) { 18986 sd_check_for_writable_cd(ssc, SD_PATH_DIRECT_PRIORITY); 18987 } 18988 18989 /* 18990 * Note: Maybe let the strategy/partitioning chain worry about getting 18991 * valid geometry. 18992 */ 18993 mutex_exit(SD_MUTEX(un)); 18994 cmlb_invalidate(un->un_cmlbhandle, (void *)SD_PATH_DIRECT_PRIORITY); 18995 18996 18997 if (cmlb_validate(un->un_cmlbhandle, 0, 18998 (void *)SD_PATH_DIRECT_PRIORITY) != 0) { 18999 sd_ssc_fini(ssc); 19000 return (EIO); 19001 } else { 19002 if (un->un_f_pkstats_enabled) { 19003 sd_set_pstats(un); 19004 SD_TRACE(SD_LOG_IO_PARTITION, un, 19005 "sd_handle_mchange: un:0x%p pstats created and " 19006 "set\n", un); 19007 } 19008 } 19009 19010 /* 19011 * Try to lock the door 19012 */ 19013 rval = sd_send_scsi_DOORLOCK(ssc, SD_REMOVAL_PREVENT, 19014 SD_PATH_DIRECT_PRIORITY); 19015 failed: 19016 if (rval != 0) 19017 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 19018 sd_ssc_fini(ssc); 19019 return (rval); 19020 } 19021 19022 19023 /* 19024 * Function: sd_send_scsi_DOORLOCK 19025 * 19026 * Description: Issue the scsi DOOR LOCK command 19027 * 19028 * Arguments: ssc - ssc contains pointer to driver soft state (unit) 19029 * structure for this target. 
19030 * flag - SD_REMOVAL_ALLOW 19031 * SD_REMOVAL_PREVENT 19032 * path_flag - SD_PATH_DIRECT to use the USCSI "direct" chain and 19033 * the normal command waitq, or SD_PATH_DIRECT_PRIORITY 19034 * to use the USCSI "direct" chain and bypass the normal 19035 * command waitq. SD_PATH_DIRECT_PRIORITY is used when this 19036 * command is issued as part of an error recovery action. 19037 * 19038 * Return Code: 0 - Success 19039 * errno return code from sd_ssc_send() 19040 * 19041 * Context: Can sleep. 19042 */ 19043 19044 static int 19045 sd_send_scsi_DOORLOCK(sd_ssc_t *ssc, int flag, int path_flag) 19046 { 19047 struct scsi_extended_sense sense_buf; 19048 union scsi_cdb cdb; 19049 struct uscsi_cmd ucmd_buf; 19050 int status; 19051 struct sd_lun *un; 19052 19053 ASSERT(ssc != NULL); 19054 un = ssc->ssc_un; 19055 ASSERT(un != NULL); 19056 ASSERT(!mutex_owned(SD_MUTEX(un))); 19057 19058 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_DOORLOCK: entry: un:0x%p\n", un); 19059 19060 /* already determined doorlock is not supported, fake success */ 19061 if (un->un_f_doorlock_supported == FALSE) { 19062 return (0); 19063 } 19064 19065 /* 19066 * If we are ejecting and see an SD_REMOVAL_PREVENT 19067 * ignore the command so we can complete the eject 19068 * operation. 19069 */ 19070 if (flag == SD_REMOVAL_PREVENT) { 19071 mutex_enter(SD_MUTEX(un)); 19072 if (un->un_f_ejecting == TRUE) { 19073 mutex_exit(SD_MUTEX(un)); 19074 return (EAGAIN); 19075 } 19076 mutex_exit(SD_MUTEX(un)); 19077 } 19078 19079 bzero(&cdb, sizeof (cdb)); 19080 bzero(&ucmd_buf, sizeof (ucmd_buf)); 19081 19082 cdb.scc_cmd = SCMD_DOORLOCK; 19083 cdb.cdb_opaque[4] = (uchar_t)flag; 19084 19085 ucmd_buf.uscsi_cdb = (char *)&cdb; 19086 ucmd_buf.uscsi_cdblen = CDB_GROUP0; 19087 ucmd_buf.uscsi_bufaddr = NULL; 19088 ucmd_buf.uscsi_buflen = 0; 19089 ucmd_buf.uscsi_rqbuf = (caddr_t)&sense_buf; 19090 ucmd_buf.uscsi_rqlen = sizeof (sense_buf); 19091 ucmd_buf.uscsi_flags = USCSI_RQENABLE | USCSI_SILENT; 19092 ucmd_buf.uscsi_timeout = 15; 19093 19094 SD_TRACE(SD_LOG_IO, un, 19095 "sd_send_scsi_DOORLOCK: returning sd_ssc_send\n"); 19096 19097 status = sd_ssc_send(ssc, &ucmd_buf, FKIOCTL, 19098 UIO_SYSSPACE, path_flag); 19099 19100 if (status == 0) 19101 sd_ssc_assessment(ssc, SD_FMT_STANDARD); 19102 19103 if ((status == EIO) && (ucmd_buf.uscsi_status == STATUS_CHECK) && 19104 (ucmd_buf.uscsi_rqstatus == STATUS_GOOD) && 19105 (scsi_sense_key((uint8_t *)&sense_buf) == KEY_ILLEGAL_REQUEST)) { 19106 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 19107 19108 /* fake success and skip subsequent doorlock commands */ 19109 un->un_f_doorlock_supported = FALSE; 19110 return (0); 19111 } 19112 19113 return (status); 19114 } 19115 19116 /* 19117 * Function: sd_send_scsi_READ_CAPACITY 19118 * 19119 * Description: This routine uses the scsi READ CAPACITY command to determine 19120 * the device capacity in number of blocks and the device native 19121 * block size. If this function returns a failure, then the 19122 * values in *capp and *lbap are undefined. If the capacity 19123 * returned is 0xffffffff then the lun is too large for a 19124 * normal READ CAPACITY command and the results of a 19125 * READ CAPACITY 16 will be used instead. 19126 * 19127 * Arguments: ssc - ssc contains ptr to soft state struct for the target 19128 * capp - ptr to unsigned 64-bit variable to receive the 19129 * capacity value from the command. 
19130 * lbap - ptr to unsigned 32-bit variable to receive the
19131 * block size value from the command
19132 * path_flag - SD_PATH_DIRECT to use the USCSI "direct" chain and
19133 * the normal command waitq, or SD_PATH_DIRECT_PRIORITY
19134 * to use the USCSI "direct" chain and bypass the normal
19135 * command waitq. SD_PATH_DIRECT_PRIORITY is used when this
19136 * command is issued as part of an error recovery action.
19137 *
19138 * Return Code: 0 - Success
19139 * EIO - IO error
19140 * EACCES - Reservation conflict detected
19141 * EAGAIN - Device is becoming ready
19142 * errno return code from sd_ssc_send()
19143 *
19144 * Context: Can sleep. Blocks until command completes.
19145 */
19146
19147 #define SD_CAPACITY_SIZE sizeof (struct scsi_capacity)
19148
19149 static int
19150 sd_send_scsi_READ_CAPACITY(sd_ssc_t *ssc, uint64_t *capp, uint32_t *lbap,
19151 int path_flag)
19152 {
19153 struct scsi_extended_sense sense_buf;
19154 struct uscsi_cmd ucmd_buf;
19155 union scsi_cdb cdb;
19156 uint32_t *capacity_buf;
19157 uint64_t capacity;
19158 uint32_t lbasize;
19159 int status;
19160 struct sd_lun *un;
19161
19162 ASSERT(ssc != NULL);
19163
19164 un = ssc->ssc_un;
19165 ASSERT(un != NULL);
19166 ASSERT(!mutex_owned(SD_MUTEX(un)));
19167 ASSERT(capp != NULL);
19168 ASSERT(lbap != NULL);
19169
19170 SD_TRACE(SD_LOG_IO, un,
19171 "sd_send_scsi_READ_CAPACITY: entry: un:0x%p\n", un);
19172
19173 /*
19174 * First send a READ_CAPACITY command to the target.
19175 * (This command is mandatory under SCSI-2.)
19176 *
19177 * Set up the CDB for the READ_CAPACITY command. The Partial
19178 * Medium Indicator bit is cleared. The address field must be
19179 * zero if the PMI bit is zero.
19180 */
19181 bzero(&cdb, sizeof (cdb));
19182 bzero(&ucmd_buf, sizeof (ucmd_buf));
19183
19184 capacity_buf = kmem_zalloc(SD_CAPACITY_SIZE, KM_SLEEP);
19185
19186 cdb.scc_cmd = SCMD_READ_CAPACITY;
19187
19188 ucmd_buf.uscsi_cdb = (char *)&cdb;
19189 ucmd_buf.uscsi_cdblen = CDB_GROUP1;
19190 ucmd_buf.uscsi_bufaddr = (caddr_t)capacity_buf;
19191 ucmd_buf.uscsi_buflen = SD_CAPACITY_SIZE;
19192 ucmd_buf.uscsi_rqbuf = (caddr_t)&sense_buf;
19193 ucmd_buf.uscsi_rqlen = sizeof (sense_buf);
19194 ucmd_buf.uscsi_flags = USCSI_RQENABLE | USCSI_READ | USCSI_SILENT;
19195 ucmd_buf.uscsi_timeout = 60;
19196
19197 status = sd_ssc_send(ssc, &ucmd_buf, FKIOCTL,
19198 UIO_SYSSPACE, path_flag);
19199
19200 switch (status) {
19201 case 0:
19202 /* Return failure if we did not get valid capacity data. */
19203 if (ucmd_buf.uscsi_resid != 0) {
19204 sd_ssc_set_info(ssc, SSC_FLAGS_INVALID_DATA,
19205 "sd_send_scsi_READ_CAPACITY received "
19206 "invalid capacity data");
19207 kmem_free(capacity_buf, SD_CAPACITY_SIZE);
19208 return (EIO);
19209 }
19210
19211 /*
19212 * Read capacity and block size from the READ CAPACITY 10 data.
19213 * This data may be adjusted later due to device specific
19214 * issues.
19215 *
19216 * According to the SCSI spec, the READ CAPACITY 10
19217 * command returns the following:
19218 *
19219 * bytes 0-3: Maximum logical block address available.
19220 * (MSB in byte:0 & LSB in byte:3)
19221 *
19222 * bytes 4-7: Block length in bytes
19223 * (MSB in byte:4 & LSB in byte:7)
19224 *
19225 */
19226 capacity = BE_32(capacity_buf[0]);
19227 lbasize = BE_32(capacity_buf[1]);
19228
19229 /*
19230 * Done with capacity_buf
19231 */
19232 kmem_free(capacity_buf, SD_CAPACITY_SIZE);
19233
19234 /*
19235 * if the reported capacity is set to all 0xf's, then
19236 * this disk is too large and requires SBC-2 commands.
19237 * Reissue the request using READ CAPACITY 16.
19238 */
19239 if (capacity == 0xffffffff) {
19240 sd_ssc_assessment(ssc, SD_FMT_IGNORE);
19241 status = sd_send_scsi_READ_CAPACITY_16(ssc, &capacity,
19242 &lbasize, path_flag);
19243 if (status != 0) {
19244 return (status);
19245 }
19246 }
19247 break; /* Success! */
19248 case EIO:
19249 switch (ucmd_buf.uscsi_status) {
19250 case STATUS_RESERVATION_CONFLICT:
19251 status = EACCES;
19252 break;
19253 case STATUS_CHECK:
19254 /*
19255 * Check condition; look for ASC/ASCQ of 0x04/0x01
19256 * (LOGICAL UNIT IS IN PROCESS OF BECOMING READY)
19257 */
19258 if ((ucmd_buf.uscsi_rqstatus == STATUS_GOOD) &&
19259 (scsi_sense_asc((uint8_t *)&sense_buf) == 0x04) &&
19260 (scsi_sense_ascq((uint8_t *)&sense_buf) == 0x01)) {
19261 kmem_free(capacity_buf, SD_CAPACITY_SIZE);
19262 return (EAGAIN);
19263 }
19264 break;
19265 default:
19266 break;
19267 }
19268 /* FALLTHRU */
19269 default:
19270 kmem_free(capacity_buf, SD_CAPACITY_SIZE);
19271 return (status);
19272 }
19273
19274 /*
19275 * Some ATAPI CD-ROM drives report inaccurate LBA size values
19276 * (2352 and 0 are common), so for these devices always force the value
19277 * to 2048 as required by the ATAPI specs.
19278 */
19279 if ((un->un_f_cfg_is_atapi == TRUE) && (ISCD(un))) {
19280 lbasize = 2048;
19281 }
19282
19283 /*
19284 * Get the maximum LBA value from the READ CAPACITY data.
19285 * Here we assume that the Partial Medium Indicator (PMI) bit
19286 * was cleared when issuing the command. This means that the LBA
19287 * returned from the device is the LBA of the last logical block
19288 * on the logical unit. The actual logical block count will be
19289 * this value plus one.
19290 *
19291 * Currently the capacity is saved in terms of un->un_sys_blocksize,
19292 * so scale the capacity value to reflect this.
19293 */
19294 capacity = (capacity + 1) * (lbasize / un->un_sys_blocksize);
19295
19296 /*
19297 * Copy the values from the READ CAPACITY command into the space
19298 * provided by the caller.
19299 */
19300 *capp = capacity;
19301 *lbap = lbasize;
19302
19303 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_READ_CAPACITY: "
19304 "capacity:0x%llx lbasize:0x%x\n", capacity, lbasize);
19305
19306 /*
19307 * Both the lbasize and capacity from the device must be nonzero,
19308 * otherwise we assume that the values are not valid and return
19309 * failure to the caller. (4203735)
19310 */
19311 if ((capacity == 0) || (lbasize == 0)) {
19312 sd_ssc_set_info(ssc, SSC_FLAGS_INVALID_DATA,
19313 "sd_send_scsi_READ_CAPACITY received invalid value "
19314 "capacity %llu lbasize %d", capacity, lbasize);
19315 return (EIO);
19316 }
19317 sd_ssc_assessment(ssc, SD_FMT_STANDARD);
19318 return (0);
19319 }
19320
19321 /*
19322 * Function: sd_send_scsi_READ_CAPACITY_16
19323 *
19324 * Description: This routine uses the scsi READ CAPACITY 16 command to
19325 * determine the device capacity in number of blocks and the
19326 * device native block size. If this function returns a failure,
19327 * then the values in *capp and *lbap are undefined.
19328 * This routine should always be called by
19329 * sd_send_scsi_READ_CAPACITY, which will apply any device-
19330 * specific adjustments to capacity and lbasize.
19331 *
19332 * Arguments: ssc - ssc contains ptr to soft state struct for the target
19333 * capp - ptr to unsigned 64-bit variable to receive the
19334 * capacity value from the command.
19335 * lbap - ptr to unsigned 32-bit variable to receive the
19336 * block size value from the command
19337 * path_flag - SD_PATH_DIRECT to use the USCSI "direct" chain and
19338 * the normal command waitq, or SD_PATH_DIRECT_PRIORITY
19339 * to use the USCSI "direct" chain and bypass the normal
19340 * command waitq. SD_PATH_DIRECT_PRIORITY is used when
19341 * this command is issued as part of an error recovery
19342 * action.
19343 *
19344 * Return Code: 0 - Success
19345 * EIO - IO error
19346 * EACCES - Reservation conflict detected
19347 * EAGAIN - Device is becoming ready
19348 * errno return code from sd_ssc_send()
19349 *
19350 * Context: Can sleep. Blocks until command completes.
19351 */
19352
19353 #define SD_CAPACITY_16_SIZE sizeof (struct scsi_capacity_16)
19354
19355 static int
19356 sd_send_scsi_READ_CAPACITY_16(sd_ssc_t *ssc, uint64_t *capp,
19357 uint32_t *lbap, int path_flag)
19358 {
19359 struct scsi_extended_sense sense_buf;
19360 struct uscsi_cmd ucmd_buf;
19361 union scsi_cdb cdb;
19362 uint64_t *capacity16_buf;
19363 uint64_t capacity;
19364 uint32_t lbasize;
19365 int status;
19366 struct sd_lun *un;
19367
19368 ASSERT(ssc != NULL);
19369
19370 un = ssc->ssc_un;
19371 ASSERT(un != NULL);
19372 ASSERT(!mutex_owned(SD_MUTEX(un)));
19373 ASSERT(capp != NULL);
19374 ASSERT(lbap != NULL);
19375
19376 SD_TRACE(SD_LOG_IO, un,
19377 "sd_send_scsi_READ_CAPACITY_16: entry: un:0x%p\n", un);
19378
19379 /*
19380 * First send a READ_CAPACITY_16 command to the target.
19381 *
19382 * Set up the CDB for the READ_CAPACITY_16 command. The Partial
19383 * Medium Indicator bit is cleared. The address field must be
19384 * zero if the PMI bit is zero.
19385 */
19386 bzero(&cdb, sizeof (cdb));
19387 bzero(&ucmd_buf, sizeof (ucmd_buf));
19388
19389 capacity16_buf = kmem_zalloc(SD_CAPACITY_16_SIZE, KM_SLEEP);
19390
19391 ucmd_buf.uscsi_cdb = (char *)&cdb;
19392 ucmd_buf.uscsi_cdblen = CDB_GROUP4;
19393 ucmd_buf.uscsi_bufaddr = (caddr_t)capacity16_buf;
19394 ucmd_buf.uscsi_buflen = SD_CAPACITY_16_SIZE;
19395 ucmd_buf.uscsi_rqbuf = (caddr_t)&sense_buf;
19396 ucmd_buf.uscsi_rqlen = sizeof (sense_buf);
19397 ucmd_buf.uscsi_flags = USCSI_RQENABLE | USCSI_READ | USCSI_SILENT;
19398 ucmd_buf.uscsi_timeout = 60;
19399
19400 /*
19401 * Read Capacity (16) is a Service Action In command. One
19402 * command byte (0x9E) is overloaded for multiple operations,
19403 * with the second CDB byte specifying the desired operation.
19404 */
19405 cdb.scc_cmd = SCMD_SVC_ACTION_IN_G4;
19406 cdb.cdb_opaque[1] = SSVC_ACTION_READ_CAPACITY_G4;
19407
19408 /*
19409 * Fill in allocation length field
19410 */
19411 FORMG4COUNT(&cdb, ucmd_buf.uscsi_buflen);
19412
19413 status = sd_ssc_send(ssc, &ucmd_buf, FKIOCTL,
19414 UIO_SYSSPACE, path_flag);
19415
19416 switch (status) {
19417 case 0:
19418 /* Return failure if we did not get valid capacity data. */
19419 if (ucmd_buf.uscsi_resid > 20) {
19420 sd_ssc_set_info(ssc, SSC_FLAGS_INVALID_DATA,
19421 "sd_send_scsi_READ_CAPACITY_16 received "
19422 "invalid capacity data");
19423 kmem_free(capacity16_buf, SD_CAPACITY_16_SIZE);
19424 return (EIO);
19425 }
19426
19427 /*
19428 * Read capacity and block size from the READ CAPACITY 16 data.
19429 * This data may be adjusted later due to device specific
19430 * issues.
19431 *
19432 * According to the SCSI spec, the READ CAPACITY 16
19433 * command returns the following:
19434 *
19435 * bytes 0-7: Maximum logical block address available.
19436 * (MSB in byte:0 & LSB in byte:7)
19437 *
19438 * bytes 8-11: Block length in bytes
19439 * (MSB in byte:8 & LSB in byte:11)
19440 *
19441 */
19442 capacity = BE_64(capacity16_buf[0]);
19443 lbasize = BE_32(*(uint32_t *)&capacity16_buf[1]);
19444
19445 /*
19446 * Done with capacity16_buf
19447 */
19448 kmem_free(capacity16_buf, SD_CAPACITY_16_SIZE);
19449
19450 /*
19451 * if the reported capacity is set to all 0xf's, then
19452 * this disk is too large. This could only happen with
19453 * a device that supports LBAs larger than 64 bits, which
19454 * are not defined by any current T10 standards.
19455 */
19456 if (capacity == 0xffffffffffffffff) {
19457 sd_ssc_set_info(ssc, SSC_FLAGS_INVALID_DATA,
19458 "disk is too large");
19459 return (EIO);
19460 }
19461 break; /* Success! */
19462 case EIO:
19463 switch (ucmd_buf.uscsi_status) {
19464 case STATUS_RESERVATION_CONFLICT:
19465 status = EACCES;
19466 break;
19467 case STATUS_CHECK:
19468 /*
19469 * Check condition; look for ASC/ASCQ of 0x04/0x01
19470 * (LOGICAL UNIT IS IN PROCESS OF BECOMING READY)
19471 */
19472 if ((ucmd_buf.uscsi_rqstatus == STATUS_GOOD) &&
19473 (scsi_sense_asc((uint8_t *)&sense_buf) == 0x04) &&
19474 (scsi_sense_ascq((uint8_t *)&sense_buf) == 0x01)) {
19475 kmem_free(capacity16_buf, SD_CAPACITY_16_SIZE);
19476 return (EAGAIN);
19477 }
19478 break;
19479 default:
19480 break;
19481 }
19482 /* FALLTHRU */
19483 default:
19484 kmem_free(capacity16_buf, SD_CAPACITY_16_SIZE);
19485 return (status);
19486 }
19487
19488 *capp = capacity;
19489 *lbap = lbasize;
19490
19491 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_READ_CAPACITY_16: "
19492 "capacity:0x%llx lbasize:0x%x\n", capacity, lbasize);
19493
19494 return (0);
19495 }
19496
19497
19498 /*
19499 * Function: sd_send_scsi_START_STOP_UNIT
19500 *
19501 * Description: Issue a scsi START STOP UNIT command to the target.
19502 *
19503 * Arguments: ssc - ssc contains pointer to driver soft state (unit)
19504 * structure for this target.
19505 * flag - SD_TARGET_START
19506 * SD_TARGET_STOP
19507 * SD_TARGET_EJECT
19508 * path_flag - SD_PATH_DIRECT to use the USCSI "direct" chain and
19509 * the normal command waitq, or SD_PATH_DIRECT_PRIORITY
19510 * to use the USCSI "direct" chain and bypass the normal
19511 * command waitq. SD_PATH_DIRECT_PRIORITY is used when this
19512 * command is issued as part of an error recovery action.
19513 *
19514 * Return Code: 0 - Success
19515 * EIO - IO error
19516 * EACCES - Reservation conflict detected
19517 * ENXIO - Not Ready, medium not present
19518 * errno return code from sd_ssc_send()
19519 *
19520 * Context: Can sleep.
19521 */
19522
19523 static int
19524 sd_send_scsi_START_STOP_UNIT(sd_ssc_t *ssc, int flag, int path_flag)
19525 {
19526 struct scsi_extended_sense sense_buf;
19527 union scsi_cdb cdb;
19528 struct uscsi_cmd ucmd_buf;
19529 int status;
19530 struct sd_lun *un;
19531
19532 ASSERT(ssc != NULL);
19533 un = ssc->ssc_un;
19534 ASSERT(un != NULL);
19535 ASSERT(!mutex_owned(SD_MUTEX(un)));
19536
19537 SD_TRACE(SD_LOG_IO, un,
19538 "sd_send_scsi_START_STOP_UNIT: entry: un:0x%p\n", un);
19539
19540 if (un->un_f_check_start_stop &&
19541 ((flag == SD_TARGET_START) || (flag == SD_TARGET_STOP)) &&
19542 (un->un_f_start_stop_supported != TRUE)) {
19543 return (0);
19544 }
19545
19546 /*
19547 * If we are performing an eject operation and
19548 * we receive any command other than SD_TARGET_EJECT
19549 * we should immediately return.
19550 */ 19551 if (flag != SD_TARGET_EJECT) { 19552 mutex_enter(SD_MUTEX(un)); 19553 if (un->un_f_ejecting == TRUE) { 19554 mutex_exit(SD_MUTEX(un)); 19555 return (EAGAIN); 19556 } 19557 mutex_exit(SD_MUTEX(un)); 19558 } 19559 19560 bzero(&cdb, sizeof (cdb)); 19561 bzero(&ucmd_buf, sizeof (ucmd_buf)); 19562 bzero(&sense_buf, sizeof (struct scsi_extended_sense)); 19563 19564 cdb.scc_cmd = SCMD_START_STOP; 19565 cdb.cdb_opaque[4] = (uchar_t)flag; 19566 19567 ucmd_buf.uscsi_cdb = (char *)&cdb; 19568 ucmd_buf.uscsi_cdblen = CDB_GROUP0; 19569 ucmd_buf.uscsi_bufaddr = NULL; 19570 ucmd_buf.uscsi_buflen = 0; 19571 ucmd_buf.uscsi_rqbuf = (caddr_t)&sense_buf; 19572 ucmd_buf.uscsi_rqlen = sizeof (struct scsi_extended_sense); 19573 ucmd_buf.uscsi_flags = USCSI_RQENABLE | USCSI_SILENT; 19574 ucmd_buf.uscsi_timeout = 200; 19575 19576 status = sd_ssc_send(ssc, &ucmd_buf, FKIOCTL, 19577 UIO_SYSSPACE, path_flag); 19578 19579 switch (status) { 19580 case 0: 19581 sd_ssc_assessment(ssc, SD_FMT_STANDARD); 19582 break; /* Success! */ 19583 case EIO: 19584 switch (ucmd_buf.uscsi_status) { 19585 case STATUS_RESERVATION_CONFLICT: 19586 status = EACCES; 19587 break; 19588 case STATUS_CHECK: 19589 if (ucmd_buf.uscsi_rqstatus == STATUS_GOOD) { 19590 switch (scsi_sense_key( 19591 (uint8_t *)&sense_buf)) { 19592 case KEY_ILLEGAL_REQUEST: 19593 status = ENOTSUP; 19594 break; 19595 case KEY_NOT_READY: 19596 if (scsi_sense_asc( 19597 (uint8_t *)&sense_buf) 19598 == 0x3A) { 19599 status = ENXIO; 19600 } 19601 break; 19602 default: 19603 break; 19604 } 19605 } 19606 break; 19607 default: 19608 break; 19609 } 19610 break; 19611 default: 19612 break; 19613 } 19614 19615 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_START_STOP_UNIT: exit\n"); 19616 19617 return (status); 19618 } 19619 19620 19621 /* 19622 * Function: sd_start_stop_unit_callback 19623 * 19624 * Description: timeout(9F) callback to begin recovery process for a 19625 * device that has spun down. 19626 * 19627 * Arguments: arg - pointer to associated softstate struct. 19628 * 19629 * Context: Executes in a timeout(9F) thread context 19630 */ 19631 19632 static void 19633 sd_start_stop_unit_callback(void *arg) 19634 { 19635 struct sd_lun *un = arg; 19636 ASSERT(un != NULL); 19637 ASSERT(!mutex_owned(SD_MUTEX(un))); 19638 19639 SD_TRACE(SD_LOG_IO, un, "sd_start_stop_unit_callback: entry\n"); 19640 19641 (void) taskq_dispatch(sd_tq, sd_start_stop_unit_task, un, KM_NOSLEEP); 19642 } 19643 19644 19645 /* 19646 * Function: sd_start_stop_unit_task 19647 * 19648 * Description: Recovery procedure when a drive is spun down. 19649 * 19650 * Arguments: arg - pointer to associated softstate struct. 19651 * 19652 * Context: Executes in a taskq() thread context 19653 */ 19654 19655 static void 19656 sd_start_stop_unit_task(void *arg) 19657 { 19658 struct sd_lun *un = arg; 19659 sd_ssc_t *ssc; 19660 int rval; 19661 19662 ASSERT(un != NULL); 19663 ASSERT(!mutex_owned(SD_MUTEX(un))); 19664 19665 SD_TRACE(SD_LOG_IO, un, "sd_start_stop_unit_task: entry\n"); 19666 19667 /* 19668 * Some unformatted drives report not ready error, no need to 19669 * restart if format has been initiated. 19670 */ 19671 mutex_enter(SD_MUTEX(un)); 19672 if (un->un_f_format_in_progress == TRUE) { 19673 mutex_exit(SD_MUTEX(un)); 19674 return; 19675 } 19676 mutex_exit(SD_MUTEX(un)); 19677 19678 /* 19679 * When a START STOP command is issued from here, it is part of a 19680 * failure recovery operation and must be issued before any other 19681 * commands, including any pending retries. 
Thus it must be sent
19682 * using SD_PATH_DIRECT_PRIORITY. It doesn't matter if the spin-up
19683 * succeeds or not; we will start I/O after the attempt.
19684 */
19685 ssc = sd_ssc_init(un);
19686 rval = sd_send_scsi_START_STOP_UNIT(ssc, SD_TARGET_START,
19687 SD_PATH_DIRECT_PRIORITY);
19688 if (rval != 0)
19689 sd_ssc_assessment(ssc, SD_FMT_IGNORE);
19690 sd_ssc_fini(ssc);
19691 /*
19692 * The above call blocks until the START_STOP_UNIT command completes.
19693 * Now that it has completed, we must re-try the original IO that
19694 * received the NOT READY condition in the first place. There are
19695 * three possible conditions here:
19696 *
19697 * (1) The original IO is on un_retry_bp.
19698 * (2) The original IO is on the regular wait queue, and un_retry_bp
19699 * is NULL.
19700 * (3) The original IO is on the regular wait queue, and un_retry_bp
19701 * points to some other, unrelated bp.
19702 *
19703 * For each case, we must call sd_start_cmds() with un_retry_bp
19704 * as the argument. If un_retry_bp is NULL, this will initiate
19705 * processing of the regular wait queue. If un_retry_bp is not NULL,
19706 * then this will process the bp on un_retry_bp. That may or may not
19707 * be the original IO, but that does not matter: the important thing
19708 * is to keep the IO processing going at this point.
19709 *
19710 * Note: This is a very specific error recovery sequence associated
19711 * with a drive that is not spun up. We attempt a START_STOP_UNIT and
19712 * serialize the I/O with completion of the spin-up.
19713 */
19714 mutex_enter(SD_MUTEX(un));
19715 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
19716 "sd_start_stop_unit_task: un:0x%p starting bp:0x%p\n",
19717 un, un->un_retry_bp);
19718 un->un_startstop_timeid = NULL; /* Timeout is no longer pending */
19719 sd_start_cmds(un, un->un_retry_bp);
19720 mutex_exit(SD_MUTEX(un));
19721
19722 SD_TRACE(SD_LOG_IO, un, "sd_start_stop_unit_task: exit\n");
19723 }
19724
19725
19726 /*
19727 * Function: sd_send_scsi_INQUIRY
19728 *
19729 * Description: Issue the scsi INQUIRY command.
19730 *
19731 * Arguments: ssc - ssc contains pointer to driver soft state (unit)
19732 * structure for this target.
19733 * bufaddr - buffer to receive the returned inquiry data
19734 * buflen - size of bufaddr, in bytes
19735 * evpd - EVPD bit setting for the INQUIRY CDB
19736 * page_code - VPD page to request when evpd is set
19737 * residp - optional ptr to receive uscsi_resid (may be NULL)
19738 *
19739 * Return Code: 0 - Success
19740 * errno return code from sd_ssc_send()
19741 *
19742 * Context: Can sleep. Does not return until command is completed.
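 *
 * Usage sketch (illustrative only; the buffer size and page code are
 * example values, not ones mandated by this routine):
 *
 *	uchar_t inqbuf[0xff];
 *	size_t resid = 0;
 *
 *	if (sd_send_scsi_INQUIRY(ssc, inqbuf, sizeof (inqbuf),
 *	    0x01, 0x83, &resid) == 0) {
 *		size_t valid = sizeof (inqbuf) - resid;
 *		(inqbuf now holds 'valid' bytes of the device
 *		identification VPD page)
 *	}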
19743 */
19744
19745 static int
19746 sd_send_scsi_INQUIRY(sd_ssc_t *ssc, uchar_t *bufaddr, size_t buflen,
19747 uchar_t evpd, uchar_t page_code, size_t *residp)
19748 {
19749 union scsi_cdb cdb;
19750 struct uscsi_cmd ucmd_buf;
19751 int status;
19752 struct sd_lun *un;
19753
19754 ASSERT(ssc != NULL);
19755 un = ssc->ssc_un;
19756 ASSERT(un != NULL);
19757 ASSERT(!mutex_owned(SD_MUTEX(un)));
19758 ASSERT(bufaddr != NULL);
19759
19760 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_INQUIRY: entry: un:0x%p\n", un);
19761
19762 bzero(&cdb, sizeof (cdb));
19763 bzero(&ucmd_buf, sizeof (ucmd_buf));
19764 bzero(bufaddr, buflen);
19765
19766 cdb.scc_cmd = SCMD_INQUIRY;
19767 cdb.cdb_opaque[1] = evpd;
19768 cdb.cdb_opaque[2] = page_code;
19769 FORMG0COUNT(&cdb, buflen);
19770
19771 ucmd_buf.uscsi_cdb = (char *)&cdb;
19772 ucmd_buf.uscsi_cdblen = CDB_GROUP0;
19773 ucmd_buf.uscsi_bufaddr = (caddr_t)bufaddr;
19774 ucmd_buf.uscsi_buflen = buflen;
19775 ucmd_buf.uscsi_rqbuf = NULL;
19776 ucmd_buf.uscsi_rqlen = 0;
19777 ucmd_buf.uscsi_flags = USCSI_READ | USCSI_SILENT;
19778 ucmd_buf.uscsi_timeout = 200; /* Excessive legacy value */
19779
19780 status = sd_ssc_send(ssc, &ucmd_buf, FKIOCTL,
19781 UIO_SYSSPACE, SD_PATH_DIRECT);
19782
19783 /*
19784 * Only handle status == 0; the upper-level caller
19785 * will apply a different assessment based on the context.
19786 */
19787 if (status == 0)
19788 sd_ssc_assessment(ssc, SD_FMT_STANDARD);
19789
19790 if ((status == 0) && (residp != NULL)) {
19791 *residp = ucmd_buf.uscsi_resid;
19792 }
19793
19794 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_INQUIRY: exit\n");
19795
19796 return (status);
19797 }
19798
19799
19800 /*
19801 * Function: sd_send_scsi_TEST_UNIT_READY
19802 *
19803 * Description: Issue the scsi TEST UNIT READY command.
19804 * This routine can be told to set the flag USCSI_DIAGNOSE to
19805 * prevent retrying failed commands. Use this when the intent
19806 * is either to check for device readiness, to clear a Unit
19807 * Attention, or to clear any outstanding sense data.
19808 * However, under specific conditions the expected behavior
19809 * is for retries to bring a device ready, so use the flag
19810 * with caution.
19811 *
19812 * Arguments: ssc - ssc contains pointer to driver soft state (unit)
19813 * structure for this target.
19814 * flag: SD_CHECK_FOR_MEDIA: return ENXIO if no media present
19815 * SD_DONT_RETRY_TUR: include uscsi flag USCSI_DIAGNOSE.
19816 * 0: don't check for media present, do retries on cmd.
19817 *
19818 * Return Code: 0 - Success
19819 * EIO - IO error
19820 * EACCES - Reservation conflict detected
19821 * ENXIO - Not Ready, medium not present
19822 * errno return code from sd_ssc_send()
19823 *
19824 * Context: Can sleep. Does not return until command is completed.
19825 */
19826
19827 static int
19828 sd_send_scsi_TEST_UNIT_READY(sd_ssc_t *ssc, int flag)
19829 {
19830 struct scsi_extended_sense sense_buf;
19831 union scsi_cdb cdb;
19832 struct uscsi_cmd ucmd_buf;
19833 int status;
19834 struct sd_lun *un;
19835
19836 ASSERT(ssc != NULL);
19837 un = ssc->ssc_un;
19838 ASSERT(un != NULL);
19839 ASSERT(!mutex_owned(SD_MUTEX(un)));
19840
19841 SD_TRACE(SD_LOG_IO, un,
19842 "sd_send_scsi_TEST_UNIT_READY: entry: un:0x%p\n", un);
19843
19844 /*
19845 * Some Seagate elite1 TQ devices get hung with disconnect/reconnect
19846 * timeouts when they receive a TUR and the queue is not empty.
Check
19847 * the configuration flag set during attach (indicating the drive has
19848 * this firmware bug) and un_ncmds_in_transport before issuing the
19849 * TUR. If there are
19850 * pending commands, return success; this is a bit arbitrary, but it is
19851 * OK for non-removables (i.e. the eliteI disks) and non-clustering
19852 * configurations.
19853 */
19854 if (un->un_f_cfg_tur_check == TRUE) {
19855 mutex_enter(SD_MUTEX(un));
19856 if (un->un_ncmds_in_transport != 0) {
19857 mutex_exit(SD_MUTEX(un));
19858 return (0);
19859 }
19860 mutex_exit(SD_MUTEX(un));
19861 }
19862
19863 bzero(&cdb, sizeof (cdb));
19864 bzero(&ucmd_buf, sizeof (ucmd_buf));
19865 bzero(&sense_buf, sizeof (struct scsi_extended_sense));
19866
19867 cdb.scc_cmd = SCMD_TEST_UNIT_READY;
19868
19869 ucmd_buf.uscsi_cdb = (char *)&cdb;
19870 ucmd_buf.uscsi_cdblen = CDB_GROUP0;
19871 ucmd_buf.uscsi_bufaddr = NULL;
19872 ucmd_buf.uscsi_buflen = 0;
19873 ucmd_buf.uscsi_rqbuf = (caddr_t)&sense_buf;
19874 ucmd_buf.uscsi_rqlen = sizeof (struct scsi_extended_sense);
19875 ucmd_buf.uscsi_flags = USCSI_RQENABLE | USCSI_SILENT;
19876
19877 /* Use flag USCSI_DIAGNOSE to prevent retries if it fails. */
19878 if ((flag & SD_DONT_RETRY_TUR) != 0) {
19879 ucmd_buf.uscsi_flags |= USCSI_DIAGNOSE;
19880 }
19881 ucmd_buf.uscsi_timeout = 60;
19882
19883 status = sd_ssc_send(ssc, &ucmd_buf, FKIOCTL,
19884 UIO_SYSSPACE, ((flag & SD_BYPASS_PM) ? SD_PATH_DIRECT :
19885 SD_PATH_STANDARD));
19886
19887 switch (status) {
19888 case 0:
19889 sd_ssc_assessment(ssc, SD_FMT_STANDARD);
19890 break; /* Success! */
19891 case EIO:
19892 switch (ucmd_buf.uscsi_status) {
19893 case STATUS_RESERVATION_CONFLICT:
19894 status = EACCES;
19895 break;
19896 case STATUS_CHECK:
19897 if ((flag & SD_CHECK_FOR_MEDIA) == 0) {
19898 break;
19899 }
19900 if ((ucmd_buf.uscsi_rqstatus == STATUS_GOOD) &&
19901 (scsi_sense_key((uint8_t *)&sense_buf) ==
19902 KEY_NOT_READY) &&
19903 (scsi_sense_asc((uint8_t *)&sense_buf) == 0x3A)) {
19904 status = ENXIO;
19905 }
19906 break;
19907 default:
19908 break;
19909 }
19910 break;
19911 default:
19912 break;
19913 }
19914
19915 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_TEST_UNIT_READY: exit\n");
19916
19917 return (status);
19918 }
19919
19920 /*
19921 * Function: sd_send_scsi_PERSISTENT_RESERVE_IN
19922 *
19923 * Description: Issue the scsi PERSISTENT RESERVE IN command.
19924 *
19925 * Arguments: ssc - ssc contains pointer to driver soft state (unit)
19926 * structure for this target.
 * usr_cmd - SD_READ_KEYS or SD_READ_RESV
 * data_len - size of data_bufp, in bytes (0 if data_bufp is NULL)
 * data_bufp - buffer for the returned PRIN data; if NULL, a
 * minimal MHIOC_RESV_KEY_SIZE buffer is allocated and
 * freed internally
19927 *
19928 * Return Code: 0 - Success
19929 * EACCES
19930 * ENOTSUP
19931 * errno return code from sd_ssc_send()
19932 *
19933 * Context: Can sleep. Does not return until command is completed.
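 *
 * Usage sketch (illustrative): passing a NULL data_bufp makes the
 * routine allocate a minimal MHIOC_RESV_KEY_SIZE buffer internally,
 * which is enough to probe whether the command is supported at all:
 *
 *	int rc = sd_send_scsi_PERSISTENT_RESERVE_IN(ssc, SD_READ_KEYS,
 *	    0, NULL);
 *	(rc is 0 on success, ENOTSUP if the target rejects the command)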

/*
 * Function: sd_send_scsi_PERSISTENT_RESERVE_IN
 *
 * Description: Issue the scsi PERSISTENT RESERVE IN command.
 *
 * Arguments: ssc - ssc contains pointer to driver soft state (unit)
 *			structure for this target.
 *		usr_cmd - the service action: SD_READ_KEYS or SD_READ_RESV.
 *		data_len - length of the output buffer, in bytes.
 *		data_bufp - output buffer for the parameter data; if NULL,
 *			a MHIOC_RESV_KEY_SIZE scratch buffer is allocated
 *			and freed internally.
 *
 * Return Code: 0 - Success
 *		EACCES
 *		ENOTSUP
 *		errno return code from sd_ssc_send()
 *
 * Context: Can sleep. Does not return until command is completed.
 */

static int
sd_send_scsi_PERSISTENT_RESERVE_IN(sd_ssc_t *ssc, uchar_t usr_cmd,
	uint16_t data_len, uchar_t *data_bufp)
{
	struct scsi_extended_sense	sense_buf;
	union scsi_cdb		cdb;
	struct uscsi_cmd	ucmd_buf;
	int			status;
	int			no_caller_buf = FALSE;
	struct sd_lun		*un;

	ASSERT(ssc != NULL);
	un = ssc->ssc_un;
	ASSERT(un != NULL);
	ASSERT(!mutex_owned(SD_MUTEX(un)));
	ASSERT((usr_cmd == SD_READ_KEYS) || (usr_cmd == SD_READ_RESV));

	SD_TRACE(SD_LOG_IO, un,
	    "sd_send_scsi_PERSISTENT_RESERVE_IN: entry: un:0x%p\n", un);

	bzero(&cdb, sizeof (cdb));
	bzero(&ucmd_buf, sizeof (ucmd_buf));
	bzero(&sense_buf, sizeof (struct scsi_extended_sense));
	if (data_bufp == NULL) {
		/* Allocate a default buf if the caller did not give one */
		ASSERT(data_len == 0);
		data_len = MHIOC_RESV_KEY_SIZE;
		data_bufp = kmem_zalloc(MHIOC_RESV_KEY_SIZE, KM_SLEEP);
		no_caller_buf = TRUE;
	}

	cdb.scc_cmd = SCMD_PERSISTENT_RESERVE_IN;
	cdb.cdb_opaque[1] = usr_cmd;
	FORMG1COUNT(&cdb, data_len);

	ucmd_buf.uscsi_cdb = (char *)&cdb;
	ucmd_buf.uscsi_cdblen = CDB_GROUP1;
	ucmd_buf.uscsi_bufaddr = (caddr_t)data_bufp;
	ucmd_buf.uscsi_buflen = data_len;
	ucmd_buf.uscsi_rqbuf = (caddr_t)&sense_buf;
	ucmd_buf.uscsi_rqlen = sizeof (struct scsi_extended_sense);
	ucmd_buf.uscsi_flags = USCSI_RQENABLE | USCSI_READ | USCSI_SILENT;
	ucmd_buf.uscsi_timeout = 60;

	status = sd_ssc_send(ssc, &ucmd_buf, FKIOCTL,
	    UIO_SYSSPACE, SD_PATH_STANDARD);

	switch (status) {
	case 0:
		sd_ssc_assessment(ssc, SD_FMT_STANDARD);

		break;	/* Success! */
	case EIO:
		switch (ucmd_buf.uscsi_status) {
		case STATUS_RESERVATION_CONFLICT:
			status = EACCES;
			break;
		case STATUS_CHECK:
			if ((ucmd_buf.uscsi_rqstatus == STATUS_GOOD) &&
			    (scsi_sense_key((uint8_t *)&sense_buf) ==
			    KEY_ILLEGAL_REQUEST)) {
				status = ENOTSUP;
			}
			break;
		default:
			break;
		}
		break;
	default:
		break;
	}

	SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_PERSISTENT_RESERVE_IN: exit\n");

	if (no_caller_buf == TRUE) {
		kmem_free(data_bufp, data_len);
	}

	return (status);
}
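
/*
 * Illustrative sketch (not part of the driver): reading the registered
 * reservation keys with sd_send_scsi_PERSISTENT_RESERVE_IN(). Passing a
 * NULL buffer (as below) makes the routine allocate and free a scratch
 * buffer internally, which is mainly useful for probing whether the
 * command is supported at all; the usage shown is an assumption.
 *
 *	int rval = sd_send_scsi_PERSISTENT_RESERVE_IN(ssc, SD_READ_KEYS,
 *	    0, NULL);
 *	if (rval == ENOTSUP) {
 *		target does not support SCSI-3 persistent reservations
 *	}
 */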

/*
 * Function: sd_send_scsi_PERSISTENT_RESERVE_OUT
 *
 * Description: This routine is the driver entry point for handling
 *		multi-host persistent reservation requests (MHIOCGRP_REGISTER,
 *		MHIOCGRP_RESERVE, MHIOCGRP_PREEMPTANDABORT,
 *		MHIOCGRP_REGISTERANDIGNOREKEY) by sending the SCSI-3 PROUT
 *		commands to the device.
 *
 * Arguments: ssc - ssc contains un - pointer to soft state struct
 *			for the target.
 *		usr_cmd - SCSI-3 reservation facility command (one of
 *			SD_SCSI3_REGISTER, SD_SCSI3_RESERVE, SD_SCSI3_RELEASE,
 *			SD_SCSI3_PREEMPTANDABORT,
 *			SD_SCSI3_REGISTERANDIGNOREKEY)
 *		usr_bufp - user-provided pointer to a register, reserve
 *			descriptor, or preempt-and-abort structure
 *			(mhioc_register_t, mhioc_resv_desc_t,
 *			mhioc_preemptandabort_t)
 *
 * Return Code: 0 - Success
 *		EACCES
 *		ENOTSUP
 *		errno return code from sd_ssc_send()
 *
 * Context: Can sleep. Does not return until command is completed.
 */

static int
sd_send_scsi_PERSISTENT_RESERVE_OUT(sd_ssc_t *ssc, uchar_t usr_cmd,
	uchar_t	*usr_bufp)
{
	struct scsi_extended_sense	sense_buf;
	union scsi_cdb		cdb;
	struct uscsi_cmd	ucmd_buf;
	int			status;
	uchar_t			data_len = sizeof (sd_prout_t);
	sd_prout_t		*prp;
	struct sd_lun		*un;

	ASSERT(ssc != NULL);
	un = ssc->ssc_un;
	ASSERT(un != NULL);
	ASSERT(!mutex_owned(SD_MUTEX(un)));
	ASSERT(data_len == 24);	/* required by scsi spec */

	SD_TRACE(SD_LOG_IO, un,
	    "sd_send_scsi_PERSISTENT_RESERVE_OUT: entry: un:0x%p\n", un);

	if (usr_bufp == NULL) {
		return (EINVAL);
	}

	bzero(&cdb, sizeof (cdb));
	bzero(&ucmd_buf, sizeof (ucmd_buf));
	bzero(&sense_buf, sizeof (struct scsi_extended_sense));
	prp = kmem_zalloc(data_len, KM_SLEEP);

	cdb.scc_cmd = SCMD_PERSISTENT_RESERVE_OUT;
	cdb.cdb_opaque[1] = usr_cmd;
	FORMG1COUNT(&cdb, data_len);

	ucmd_buf.uscsi_cdb = (char *)&cdb;
	ucmd_buf.uscsi_cdblen = CDB_GROUP1;
	ucmd_buf.uscsi_bufaddr = (caddr_t)prp;
	ucmd_buf.uscsi_buflen = data_len;
	ucmd_buf.uscsi_rqbuf = (caddr_t)&sense_buf;
	ucmd_buf.uscsi_rqlen = sizeof (struct scsi_extended_sense);
	ucmd_buf.uscsi_flags = USCSI_RQENABLE | USCSI_WRITE | USCSI_SILENT;
	ucmd_buf.uscsi_timeout = 60;

	switch (usr_cmd) {
	case SD_SCSI3_REGISTER: {
		mhioc_register_t *ptr = (mhioc_register_t *)usr_bufp;

		bcopy(ptr->oldkey.key, prp->res_key, MHIOC_RESV_KEY_SIZE);
		bcopy(ptr->newkey.key, prp->service_key,
		    MHIOC_RESV_KEY_SIZE);
		prp->aptpl = ptr->aptpl;
		break;
	}
	case SD_SCSI3_RESERVE:
	case SD_SCSI3_RELEASE: {
		mhioc_resv_desc_t *ptr = (mhioc_resv_desc_t *)usr_bufp;

		bcopy(ptr->key.key, prp->res_key, MHIOC_RESV_KEY_SIZE);
		prp->scope_address = BE_32(ptr->scope_specific_addr);
		cdb.cdb_opaque[2] = ptr->type;
		break;
	}
	case SD_SCSI3_PREEMPTANDABORT: {
		mhioc_preemptandabort_t *ptr =
		    (mhioc_preemptandabort_t *)usr_bufp;

		bcopy(ptr->resvdesc.key.key, prp->res_key, MHIOC_RESV_KEY_SIZE);
		bcopy(ptr->victim_key.key, prp->service_key,
		    MHIOC_RESV_KEY_SIZE);
		prp->scope_address = BE_32(ptr->resvdesc.scope_specific_addr);
		cdb.cdb_opaque[2] = ptr->resvdesc.type;
		ucmd_buf.uscsi_flags |= USCSI_HEAD;
		break;
	}
	case SD_SCSI3_REGISTERANDIGNOREKEY:
	{
		mhioc_registerandignorekey_t *ptr;
		ptr = (mhioc_registerandignorekey_t *)usr_bufp;
		bcopy(ptr->newkey.key,
		    prp->service_key, MHIOC_RESV_KEY_SIZE);
		prp->aptpl = ptr->aptpl;
		break;
	}
	default:
		ASSERT(FALSE);
		break;
	}

	status = sd_ssc_send(ssc, &ucmd_buf, FKIOCTL,
	    UIO_SYSSPACE, SD_PATH_STANDARD);

	switch (status) {
	case 0:
		sd_ssc_assessment(ssc, SD_FMT_STANDARD);
		break;	/* Success! */
	case EIO:
		switch (ucmd_buf.uscsi_status) {
		case STATUS_RESERVATION_CONFLICT:
			status = EACCES;
			break;
		case STATUS_CHECK:
			if ((ucmd_buf.uscsi_rqstatus == STATUS_GOOD) &&
			    (scsi_sense_key((uint8_t *)&sense_buf) ==
			    KEY_ILLEGAL_REQUEST)) {
				status = ENOTSUP;
			}
			break;
		default:
			break;
		}
		break;
	default:
		break;
	}

	kmem_free(prp, data_len);
	SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_PERSISTENT_RESERVE_OUT: exit\n");
	return (status);
}
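
/*
 * Illustrative sketch (not part of the driver): registering a reservation
 * key via sd_send_scsi_PERSISTENT_RESERVE_OUT(). The 8-byte key values are
 * made up for the sketch.
 *
 *	mhioc_register_t reg;
 *
 *	bzero(&reg, sizeof (reg));
 *	bcopy("OLDKEY01", reg.oldkey.key, MHIOC_RESV_KEY_SIZE);
 *	bcopy("NEWKEY01", reg.newkey.key, MHIOC_RESV_KEY_SIZE);
 *	reg.aptpl = B_FALSE;
 *	if (sd_send_scsi_PERSISTENT_RESERVE_OUT(ssc, SD_SCSI3_REGISTER,
 *	    (uchar_t *)&reg) == EACCES) {
 *		reservation conflict
 *	}
 */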

/*
 * Function: sd_send_scsi_SYNCHRONIZE_CACHE
 *
 * Description: Issues a scsi SYNCHRONIZE CACHE command to the target
 *
 * Arguments: un - pointer to the target's soft state struct
 *		dkc - pointer to the callback structure
 *
 * Return Code: 0 - success
 *		errno-type error code
 *
 * Context: kernel thread context only.
 *
 *  _______________________________________________________________
 * | dkc_flag &   | dkc_callback | DKIOCFLUSHWRITECACHE            |
 * |FLUSH_VOLATILE|              | operation                       |
 * |______________|______________|_________________________________|
 * | 0            | NULL         | Synchronous flush on both       |
 * |              |              | volatile and non-volatile cache |
 * |______________|______________|_________________________________|
 * | 1            | NULL         | Synchronous flush on volatile   |
 * |              |              | cache; disk drivers may suppress|
 * |              |              | flush if disk table indicates   |
 * |              |              | non-volatile cache              |
 * |______________|______________|_________________________________|
 * | 0            | !NULL        | Asynchronous flush on both      |
 * |              |              | volatile and non-volatile cache;|
 * |______________|______________|_________________________________|
 * | 1            | !NULL        | Asynchronous flush on volatile  |
 * |              |              | cache; disk drivers may suppress|
 * |              |              | flush if disk table indicates   |
 * |              |              | non-volatile cache              |
 * |______________|______________|_________________________________|
 *
 */

static int
sd_send_scsi_SYNCHRONIZE_CACHE(struct sd_lun *un, struct dk_callback *dkc)
{
	struct sd_uscsi_info	*uip;
	struct uscsi_cmd	*uscmd;
	union scsi_cdb		*cdb;
	struct buf		*bp;
	int			rval = 0;
	int			is_async;

	SD_TRACE(SD_LOG_IO, un,
	    "sd_send_scsi_SYNCHRONIZE_CACHE: entry: un:0x%p\n", un);

	ASSERT(un != NULL);
	ASSERT(!mutex_owned(SD_MUTEX(un)));

	if (dkc == NULL || dkc->dkc_callback == NULL) {
		is_async = FALSE;
	} else {
		is_async = TRUE;
	}

	mutex_enter(SD_MUTEX(un));
	/* check whether cache flush should be suppressed */
	if (un->un_f_suppress_cache_flush == TRUE) {
		mutex_exit(SD_MUTEX(un));
		/*
		 * suppress the cache flush if the device is told to do
		 * so by sd.conf or disk table
		 */
		SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_SYNCHRONIZE_CACHE: \
		    skip the cache flush since suppress_cache_flush is %d!\n",
		    un->un_f_suppress_cache_flush);

		if (is_async == TRUE) {
			/* invoke callback for asynchronous flush */
			(*dkc->dkc_callback)(dkc->dkc_cookie, 0);
		}
		return (rval);
	}
	mutex_exit(SD_MUTEX(un));

	/*
	 * check dkc_flag & FLUSH_VOLATILE so SYNC_NV bit can be
	 * set properly
	 */
	cdb = kmem_zalloc(CDB_GROUP1, KM_SLEEP);
	cdb->scc_cmd = SCMD_SYNCHRONIZE_CACHE;

	mutex_enter(SD_MUTEX(un));
	if (dkc != NULL && un->un_f_sync_nv_supported &&
	    (dkc->dkc_flag & FLUSH_VOLATILE)) {
		/*
		 * if the device supports SYNC_NV bit, turn on
		 * the SYNC_NV bit to only flush volatile cache
		 */
		cdb->cdb_un.tag |= SD_SYNC_NV_BIT;
	}
	mutex_exit(SD_MUTEX(un));

	/*
	 * First get some memory for the uscsi_cmd struct and cdb
	 * and initialize for SYNCHRONIZE_CACHE cmd.
	 */
	uscmd = kmem_zalloc(sizeof (struct uscsi_cmd), KM_SLEEP);
	uscmd->uscsi_cdblen = CDB_GROUP1;
	uscmd->uscsi_cdb = (caddr_t)cdb;
	uscmd->uscsi_bufaddr = NULL;
	uscmd->uscsi_buflen = 0;
	uscmd->uscsi_rqbuf = kmem_zalloc(SENSE_LENGTH, KM_SLEEP);
	uscmd->uscsi_rqlen = SENSE_LENGTH;
	uscmd->uscsi_rqresid = SENSE_LENGTH;
	uscmd->uscsi_flags = USCSI_RQENABLE | USCSI_SILENT;
	uscmd->uscsi_timeout = sd_io_time;

	/*
	 * Allocate an sd_uscsi_info struct and fill it with the info
	 * needed by sd_initpkt_for_uscsi(). Then put the pointer into
	 * b_private in the buf for sd_initpkt_for_uscsi(). Note that
	 * since we allocate the buf here in this function, we do not
	 * need to preserve the prior contents of b_private.
	 * The sd_uscsi_info struct is also used by sd_uscsi_strategy()
	 */
	uip = kmem_zalloc(sizeof (struct sd_uscsi_info), KM_SLEEP);
	uip->ui_flags = SD_PATH_DIRECT;
	uip->ui_cmdp = uscmd;

	bp = getrbuf(KM_SLEEP);
	bp->b_private = uip;

	/*
	 * Setup buffer to carry uscsi request.
	 */
	bp->b_flags = B_BUSY;
	bp->b_bcount = 0;
	bp->b_blkno = 0;

	if (is_async == TRUE) {
		bp->b_iodone = sd_send_scsi_SYNCHRONIZE_CACHE_biodone;
		uip->ui_dkc = *dkc;
	}

	bp->b_edev = SD_GET_DEV(un);
	bp->b_dev = cmpdev(bp->b_edev);	/* maybe unnecessary? */

	/*
	 * Unset un_f_sync_cache_required flag
	 */
	mutex_enter(SD_MUTEX(un));
	un->un_f_sync_cache_required = FALSE;
	mutex_exit(SD_MUTEX(un));

	(void) sd_uscsi_strategy(bp);

	/*
	 * If synchronous request, wait for completion
	 * If async just return and let b_iodone callback
	 * cleanup.
	 * NOTE: On return, u_ncmds_in_driver will be decremented,
	 * but it was also incremented in sd_uscsi_strategy(), so
	 * we should be ok.
	 */
	if (is_async == FALSE) {
		(void) biowait(bp);
		rval = sd_send_scsi_SYNCHRONIZE_CACHE_biodone(bp);
	}

	return (rval);
}
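
/*
 * Illustrative sketch (not part of the driver): an asynchronous
 * volatile-cache flush using the dk_callback mechanism described in the
 * table above. my_flush_done() and my_cookie are hypothetical names for a
 * completion routine and argument supplied by the caller; the callback
 * runs from the b_iodone path with the command status.
 *
 *	struct dk_callback dkc;
 *
 *	dkc.dkc_callback = my_flush_done;
 *	dkc.dkc_cookie = my_cookie;
 *	dkc.dkc_flag = FLUSH_VOLATILE;
 *	(void) sd_send_scsi_SYNCHRONIZE_CACHE(un, &dkc);
 */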

static int
sd_send_scsi_SYNCHRONIZE_CACHE_biodone(struct buf *bp)
{
	struct sd_uscsi_info *uip;
	struct uscsi_cmd *uscmd;
	uint8_t *sense_buf;
	struct sd_lun *un;
	int status;
	union scsi_cdb *cdb;

	uip = (struct sd_uscsi_info *)(bp->b_private);
	ASSERT(uip != NULL);

	uscmd = uip->ui_cmdp;
	ASSERT(uscmd != NULL);

	sense_buf = (uint8_t *)uscmd->uscsi_rqbuf;
	ASSERT(sense_buf != NULL);

	un = ddi_get_soft_state(sd_state, SD_GET_INSTANCE_FROM_BUF(bp));
	ASSERT(un != NULL);

	cdb = (union scsi_cdb *)uscmd->uscsi_cdb;

	status = geterror(bp);
	switch (status) {
	case 0:
		break;	/* Success! */
	case EIO:
		switch (uscmd->uscsi_status) {
		case STATUS_RESERVATION_CONFLICT:
			/* Ignore reservation conflict */
			status = 0;
			goto done;

		case STATUS_CHECK:
			if ((uscmd->uscsi_rqstatus == STATUS_GOOD) &&
			    (scsi_sense_key(sense_buf) ==
			    KEY_ILLEGAL_REQUEST)) {
				/* Ignore Illegal Request error */
				if (cdb->cdb_un.tag & SD_SYNC_NV_BIT) {
					mutex_enter(SD_MUTEX(un));
					un->un_f_sync_nv_supported = FALSE;
					mutex_exit(SD_MUTEX(un));
					status = 0;
					SD_TRACE(SD_LOG_IO, un,
					    "un_f_sync_nv_supported \
					    is set to false.\n");
					goto done;
				}

				mutex_enter(SD_MUTEX(un));
				un->un_f_sync_cache_supported = FALSE;
				mutex_exit(SD_MUTEX(un));
				SD_TRACE(SD_LOG_IO, un,
				    "sd_send_scsi_SYNCHRONIZE_CACHE_biodone: \
				    un_f_sync_cache_supported set to false \
				    with asc = %x, ascq = %x\n",
				    scsi_sense_asc(sense_buf),
				    scsi_sense_ascq(sense_buf));
				status = ENOTSUP;
				goto done;
			}
			break;
		default:
			break;
		}
		/* FALLTHRU */
	default:
		/*
		 * Turn on the un_f_sync_cache_required flag
		 * since the SYNC CACHE command failed
		 */
		mutex_enter(SD_MUTEX(un));
		un->un_f_sync_cache_required = TRUE;
		mutex_exit(SD_MUTEX(un));

		/*
		 * Don't log an error message if this device
		 * has removable media.
		 */
		if (!un->un_f_has_removable_media) {
			scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
			    "SYNCHRONIZE CACHE command failed (%d)\n", status);
		}
		break;
	}

done:
	if (uip->ui_dkc.dkc_callback != NULL) {
		(*uip->ui_dkc.dkc_callback)(uip->ui_dkc.dkc_cookie, status);
	}

	ASSERT((bp->b_flags & B_REMAPPED) == 0);
	freerbuf(bp);
	kmem_free(uip, sizeof (struct sd_uscsi_info));
	kmem_free(uscmd->uscsi_rqbuf, SENSE_LENGTH);
	kmem_free(uscmd->uscsi_cdb, (size_t)uscmd->uscsi_cdblen);
	kmem_free(uscmd, sizeof (struct uscsi_cmd));

	return (status);
}

/*
 * Function: sd_send_scsi_GET_CONFIGURATION
 *
 * Description: Issues the get configuration command to the device.
 *		Called from sd_check_for_writable_cd & sd_get_media_info;
 *		the caller needs to ensure that buflen = SD_PROFILE_HEADER_LEN
 *
 * Arguments: ssc
 *		ucmdbuf
 *		rqbuf
 *		rqbuflen
 *		bufaddr
 *		buflen
 *		path_flag
 *
 * Return Code: 0 - Success
 *		errno return code from sd_ssc_send()
 *
 * Context: Can sleep. Does not return until command is completed.
 *
 */

static int
sd_send_scsi_GET_CONFIGURATION(sd_ssc_t *ssc, struct uscsi_cmd *ucmdbuf,
	uchar_t *rqbuf, uint_t rqbuflen, uchar_t *bufaddr, uint_t buflen,
	int path_flag)
{
	char	cdb[CDB_GROUP1];
	int	status;
	struct sd_lun	*un;

	ASSERT(ssc != NULL);
	un = ssc->ssc_un;
	ASSERT(un != NULL);
	ASSERT(!mutex_owned(SD_MUTEX(un)));
	ASSERT(bufaddr != NULL);
	ASSERT(ucmdbuf != NULL);
	ASSERT(rqbuf != NULL);

	SD_TRACE(SD_LOG_IO, un,
	    "sd_send_scsi_GET_CONFIGURATION: entry: un:0x%p\n", un);

	bzero(cdb, sizeof (cdb));
	bzero(ucmdbuf, sizeof (struct uscsi_cmd));
	bzero(rqbuf, rqbuflen);
	bzero(bufaddr, buflen);

	/*
	 * Set up cdb field for the get configuration command.
	 */
	cdb[0] = SCMD_GET_CONFIGURATION;
	cdb[1] = 0x02;	/* Requested Type */
	cdb[8] = SD_PROFILE_HEADER_LEN;
	ucmdbuf->uscsi_cdb = cdb;
	ucmdbuf->uscsi_cdblen = CDB_GROUP1;
	ucmdbuf->uscsi_bufaddr = (caddr_t)bufaddr;
	ucmdbuf->uscsi_buflen = buflen;
	ucmdbuf->uscsi_timeout = sd_io_time;
	ucmdbuf->uscsi_rqbuf = (caddr_t)rqbuf;
	ucmdbuf->uscsi_rqlen = rqbuflen;
	ucmdbuf->uscsi_flags = USCSI_RQENABLE|USCSI_SILENT|USCSI_READ;

	status = sd_ssc_send(ssc, ucmdbuf, FKIOCTL,
	    UIO_SYSSPACE, path_flag);

	switch (status) {
	case 0:
		sd_ssc_assessment(ssc, SD_FMT_STANDARD);
		break;	/* Success! */
	case EIO:
		switch (ucmdbuf->uscsi_status) {
		case STATUS_RESERVATION_CONFLICT:
			status = EACCES;
			break;
		default:
			break;
		}
		break;
	default:
		break;
	}

	if (status == 0) {
		SD_DUMP_MEMORY(un, SD_LOG_IO,
		    "sd_send_scsi_GET_CONFIGURATION: data",
		    (uchar_t *)bufaddr, SD_PROFILE_HEADER_LEN, SD_LOG_HEX);
	}

	SD_TRACE(SD_LOG_IO, un,
	    "sd_send_scsi_GET_CONFIGURATION: exit\n");

	return (status);
}

/*
 * Function: sd_send_scsi_feature_GET_CONFIGURATION
 *
 * Description: Issues the get configuration command to the device to
 *		retrieve a specific feature. Called from
 *		sd_check_for_writable_cd & sd_set_mmc_caps.
 *
 * Arguments: ssc
 *		ucmdbuf
 *		rqbuf
 *		rqbuflen
 *		bufaddr
 *		buflen
 *		feature
 *		path_flag
 *
 * Return Code: 0 - Success
 *		errno return code from sd_ssc_send()
 *
 * Context: Can sleep. Does not return until command is completed.
 *
 */
static int
sd_send_scsi_feature_GET_CONFIGURATION(sd_ssc_t *ssc,
	struct uscsi_cmd *ucmdbuf, uchar_t *rqbuf, uint_t rqbuflen,
	uchar_t *bufaddr, uint_t buflen, char feature, int path_flag)
{
	char	cdb[CDB_GROUP1];
	int	status;
	struct sd_lun	*un;

	ASSERT(ssc != NULL);
	un = ssc->ssc_un;
	ASSERT(un != NULL);
	ASSERT(!mutex_owned(SD_MUTEX(un)));
	ASSERT(bufaddr != NULL);
	ASSERT(ucmdbuf != NULL);
	ASSERT(rqbuf != NULL);

	SD_TRACE(SD_LOG_IO, un,
	    "sd_send_scsi_feature_GET_CONFIGURATION: entry: un:0x%p\n", un);

	bzero(cdb, sizeof (cdb));
	bzero(ucmdbuf, sizeof (struct uscsi_cmd));
	bzero(rqbuf, rqbuflen);
	bzero(bufaddr, buflen);

	/*
	 * Set up cdb field for the get configuration command.
	 */
	cdb[0] = SCMD_GET_CONFIGURATION;
	cdb[1] = 0x02;	/* Requested Type */
	cdb[3] = feature;
	cdb[8] = buflen;
	ucmdbuf->uscsi_cdb = cdb;
	ucmdbuf->uscsi_cdblen = CDB_GROUP1;
	ucmdbuf->uscsi_bufaddr = (caddr_t)bufaddr;
	ucmdbuf->uscsi_buflen = buflen;
	ucmdbuf->uscsi_timeout = sd_io_time;
	ucmdbuf->uscsi_rqbuf = (caddr_t)rqbuf;
	ucmdbuf->uscsi_rqlen = rqbuflen;
	ucmdbuf->uscsi_flags = USCSI_RQENABLE|USCSI_SILENT|USCSI_READ;

	status = sd_ssc_send(ssc, ucmdbuf, FKIOCTL,
	    UIO_SYSSPACE, path_flag);

	switch (status) {
	case 0:

		break;	/* Success! */
	case EIO:
		switch (ucmdbuf->uscsi_status) {
		case STATUS_RESERVATION_CONFLICT:
			status = EACCES;
			break;
		default:
			break;
		}
		break;
	default:
		break;
	}

	if (status == 0) {
		SD_DUMP_MEMORY(un, SD_LOG_IO,
		    "sd_send_scsi_feature_GET_CONFIGURATION: data",
		    (uchar_t *)bufaddr, SD_PROFILE_HEADER_LEN, SD_LOG_HEX);
	}

	SD_TRACE(SD_LOG_IO, un,
	    "sd_send_scsi_feature_GET_CONFIGURATION: exit\n");

	return (status);
}
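
/*
 * Illustrative sketch (not part of the driver): probing a single MMC
 * feature, in the style of the callers named above. The buffer sizes and
 * the use of RANDOM_WRITABLE as the feature code are assumptions for the
 * sketch.
 *
 *	struct uscsi_cmd com;
 *	uchar_t rqbuf[SENSE_LENGTH];
 *	uchar_t out[SD_PROFILE_HEADER_LEN];
 *
 *	if (sd_send_scsi_feature_GET_CONFIGURATION(ssc, &com, rqbuf,
 *	    SENSE_LENGTH, out, SD_PROFILE_HEADER_LEN, RANDOM_WRITABLE,
 *	    SD_PATH_STANDARD) == 0) {
 *		the feature descriptor, if any, is in out[]
 *	}
 */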

/*
 * Function: sd_send_scsi_MODE_SENSE
 *
 * Description: Utility function for issuing a scsi MODE SENSE command.
 *		Note: This routine uses a consistent implementation for Group0,
 *		Group1, and Group2 commands across all platforms. ATAPI devices
 *		use Group 1 Read/Write commands and Group 2 Mode Sense/Select
 *
 * Arguments: ssc - ssc contains pointer to driver soft state (unit)
 *			structure for this target.
 *		cdbsize - size of CDB to be used (CDB_GROUP0 (6 byte) or
 *			CDB_GROUP[1|2] (10 byte)).
 *		bufaddr - buffer for page data retrieved from the target.
 *		buflen - size of page to be retrieved.
 *		page_code - page code of data to be retrieved from the target.
 *		path_flag - SD_PATH_DIRECT to use the USCSI "direct" chain and
 *			the normal command waitq, or SD_PATH_DIRECT_PRIORITY
 *			to use the USCSI "direct" chain and bypass the normal
 *			command waitq.
 *
 * Return Code: 0 - Success
 *		errno return code from sd_ssc_send()
 *
 * Context: Can sleep. Does not return until command is completed.
 */

static int
sd_send_scsi_MODE_SENSE(sd_ssc_t *ssc, int cdbsize, uchar_t *bufaddr,
	size_t buflen, uchar_t page_code, int path_flag)
{
	struct scsi_extended_sense	sense_buf;
	union scsi_cdb		cdb;
	struct uscsi_cmd	ucmd_buf;
	int			status;
	int			headlen;
	struct sd_lun		*un;

	ASSERT(ssc != NULL);
	un = ssc->ssc_un;
	ASSERT(un != NULL);
	ASSERT(!mutex_owned(SD_MUTEX(un)));
	ASSERT(bufaddr != NULL);
	ASSERT((cdbsize == CDB_GROUP0) || (cdbsize == CDB_GROUP1) ||
	    (cdbsize == CDB_GROUP2));

	SD_TRACE(SD_LOG_IO, un,
	    "sd_send_scsi_MODE_SENSE: entry: un:0x%p\n", un);

	bzero(&cdb, sizeof (cdb));
	bzero(&ucmd_buf, sizeof (ucmd_buf));
	bzero(&sense_buf, sizeof (struct scsi_extended_sense));
	bzero(bufaddr, buflen);

	if (cdbsize == CDB_GROUP0) {
		cdb.scc_cmd = SCMD_MODE_SENSE;
		cdb.cdb_opaque[2] = page_code;
		FORMG0COUNT(&cdb, buflen);
		headlen = MODE_HEADER_LENGTH;
	} else {
		cdb.scc_cmd = SCMD_MODE_SENSE_G1;
		cdb.cdb_opaque[2] = page_code;
		FORMG1COUNT(&cdb, buflen);
		headlen = MODE_HEADER_LENGTH_GRP2;
	}

	ASSERT(headlen <= buflen);
	SD_FILL_SCSI1_LUN_CDB(un, &cdb);

	ucmd_buf.uscsi_cdb = (char *)&cdb;
	ucmd_buf.uscsi_cdblen = (uchar_t)cdbsize;
	ucmd_buf.uscsi_bufaddr = (caddr_t)bufaddr;
	ucmd_buf.uscsi_buflen = buflen;
	ucmd_buf.uscsi_rqbuf = (caddr_t)&sense_buf;
	ucmd_buf.uscsi_rqlen = sizeof (struct scsi_extended_sense);
	ucmd_buf.uscsi_flags = USCSI_RQENABLE | USCSI_READ | USCSI_SILENT;
	ucmd_buf.uscsi_timeout = 60;

	status = sd_ssc_send(ssc, &ucmd_buf, FKIOCTL,
	    UIO_SYSSPACE, path_flag);

	switch (status) {
	case 0:
		/*
		 * sr_check_wp() uses the 0x3f page code and checks the header
		 * of the mode page to determine if the target device is
		 * write-protected. But some USB devices return 0 bytes for
		 * the 0x3f page code. For this case, make sure that at least
		 * the mode page header is returned.
		 */
		if (buflen - ucmd_buf.uscsi_resid < headlen) {
			status = EIO;
			sd_ssc_set_info(ssc, SSC_FLAGS_INVALID_DATA,
			    "mode page header is not returned");
		}
		break;	/* Success! */
	case EIO:
		switch (ucmd_buf.uscsi_status) {
		case STATUS_RESERVATION_CONFLICT:
			status = EACCES;
			break;
		default:
			break;
		}
		break;
	default:
		break;
	}

	if (status == 0) {
		SD_DUMP_MEMORY(un, SD_LOG_IO, "sd_send_scsi_MODE_SENSE: data",
		    (uchar_t *)bufaddr, buflen, SD_LOG_HEX);
	}
	SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_MODE_SENSE: exit\n");

	return (status);
}
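
/*
 * Illustrative sketch (not part of the driver): fetching the caching mode
 * page with a Group 1 CDB. The buffer sizing follows the usual mode-sense
 * layout (header, block descriptor, page); MODEPAGE_CACHING and the sizes
 * used here are assumptions for the sketch.
 *
 *	uint_t	buflen = MODE_HEADER_LENGTH_GRP2 + MODE_BLK_DESC_LENGTH +
 *	    sizeof (struct mode_caching);
 *	uchar_t	*page = kmem_zalloc(buflen, KM_SLEEP);
 *
 *	if (sd_send_scsi_MODE_SENSE(ssc, CDB_GROUP1, page, buflen,
 *	    MODEPAGE_CACHING, SD_PATH_DIRECT) == 0) {
 *		parse the page data past the header and block descriptor
 *	}
 *	kmem_free(page, buflen);
 */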

/*
 * Function: sd_send_scsi_MODE_SELECT
 *
 * Description: Utility function for issuing a scsi MODE SELECT command.
 *		Note: This routine uses a consistent implementation for Group0,
 *		Group1, and Group2 commands across all platforms. ATAPI devices
 *		use Group 1 Read/Write commands and Group 2 Mode Sense/Select
 *
 * Arguments: ssc - ssc contains pointer to driver soft state (unit)
 *			structure for this target.
 *		cdbsize - size of CDB to be used (CDB_GROUP0 (6 byte) or
 *			CDB_GROUP[1|2] (10 byte)).
 *		bufaddr - buffer for page data retrieved from the target.
 *		buflen - size of page to be retrieved.
 *		save_page - boolean to determine whether the SP bit should be
 *			set.
 *		path_flag - SD_PATH_DIRECT to use the USCSI "direct" chain and
 *			the normal command waitq, or SD_PATH_DIRECT_PRIORITY
 *			to use the USCSI "direct" chain and bypass the normal
 *			command waitq.
 *
 * Return Code: 0 - Success
 *		errno return code from sd_ssc_send()
 *
 * Context: Can sleep. Does not return until command is completed.
 */

static int
sd_send_scsi_MODE_SELECT(sd_ssc_t *ssc, int cdbsize, uchar_t *bufaddr,
	size_t buflen, uchar_t save_page, int path_flag)
{
	struct scsi_extended_sense	sense_buf;
	union scsi_cdb		cdb;
	struct uscsi_cmd	ucmd_buf;
	int			status;
	struct sd_lun		*un;

	ASSERT(ssc != NULL);
	un = ssc->ssc_un;
	ASSERT(un != NULL);
	ASSERT(!mutex_owned(SD_MUTEX(un)));
	ASSERT(bufaddr != NULL);
	ASSERT((cdbsize == CDB_GROUP0) || (cdbsize == CDB_GROUP1) ||
	    (cdbsize == CDB_GROUP2));

	SD_TRACE(SD_LOG_IO, un,
	    "sd_send_scsi_MODE_SELECT: entry: un:0x%p\n", un);

	bzero(&cdb, sizeof (cdb));
	bzero(&ucmd_buf, sizeof (ucmd_buf));
	bzero(&sense_buf, sizeof (struct scsi_extended_sense));

	/* Set the PF bit for many third party drives */
	cdb.cdb_opaque[1] = 0x10;

	/* Set the save page (SP) bit if given */
	if (save_page == SD_SAVE_PAGE) {
		cdb.cdb_opaque[1] |= 0x01;
	}

	if (cdbsize == CDB_GROUP0) {
		cdb.scc_cmd = SCMD_MODE_SELECT;
		FORMG0COUNT(&cdb, buflen);
	} else {
		cdb.scc_cmd = SCMD_MODE_SELECT_G1;
		FORMG1COUNT(&cdb, buflen);
	}

	SD_FILL_SCSI1_LUN_CDB(un, &cdb);

	ucmd_buf.uscsi_cdb = (char *)&cdb;
	ucmd_buf.uscsi_cdblen = (uchar_t)cdbsize;
	ucmd_buf.uscsi_bufaddr = (caddr_t)bufaddr;
	ucmd_buf.uscsi_buflen = buflen;
	ucmd_buf.uscsi_rqbuf = (caddr_t)&sense_buf;
	ucmd_buf.uscsi_rqlen = sizeof (struct scsi_extended_sense);
	ucmd_buf.uscsi_flags = USCSI_RQENABLE | USCSI_WRITE | USCSI_SILENT;
	ucmd_buf.uscsi_timeout = 60;

	status = sd_ssc_send(ssc, &ucmd_buf, FKIOCTL,
	    UIO_SYSSPACE, path_flag);

	switch (status) {
	case 0:
		sd_ssc_assessment(ssc, SD_FMT_STANDARD);
		break;	/* Success! */
	case EIO:
		switch (ucmd_buf.uscsi_status) {
		case STATUS_RESERVATION_CONFLICT:
			status = EACCES;
			break;
		default:
			break;
		}
		break;
	default:
		break;
	}

	if (status == 0) {
		SD_DUMP_MEMORY(un, SD_LOG_IO, "sd_send_scsi_MODE_SELECT: data",
		    (uchar_t *)bufaddr, buflen, SD_LOG_HEX);
	}
	SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_MODE_SELECT: exit\n");

	return (status);
}
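
/*
 * Illustrative sketch (not part of the driver): writing back a mode page
 * previously fetched with sd_send_scsi_MODE_SENSE(), asking the device to
 * save it across power cycles. Per the usual MODE SELECT convention the
 * caller clears the mode data length field in the header first; "page" and
 * "buflen" carry over from the mode-sense sketch above, and the header
 * field names are assumptions.
 *
 *	((struct mode_header_grp2 *)page)->length_msb = 0;
 *	((struct mode_header_grp2 *)page)->length_lsb = 0;
 *	(void) sd_send_scsi_MODE_SELECT(ssc, CDB_GROUP1, page, buflen,
 *	    SD_SAVE_PAGE, SD_PATH_DIRECT);
 */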

/*
 * Function: sd_send_scsi_RDWR
 *
 * Description: Issue a scsi READ or WRITE command with the given parameters.
 *
 * Arguments: ssc - ssc contains pointer to driver soft state (unit)
 *			structure for this target.
 *		cmd:	SCMD_READ or SCMD_WRITE
 *		bufaddr: Address of caller's buffer to receive the RDWR data
 *		buflen:  Length of caller's buffer to receive the RDWR data.
 *		start_block: Block number for the start of the RDWR operation.
 *			 (Assumes target-native block size.)
 *		path_flag - SD_PATH_DIRECT to use the USCSI "direct" chain and
 *			the normal command waitq, or SD_PATH_DIRECT_PRIORITY
 *			to use the USCSI "direct" chain and bypass the normal
 *			command waitq.
 *
 * Return Code: 0 - Success
 *		errno return code from sd_ssc_send()
 *
 * Context: Can sleep. Does not return until command is completed.
 */

static int
sd_send_scsi_RDWR(sd_ssc_t *ssc, uchar_t cmd, void *bufaddr,
	size_t buflen, daddr_t start_block, int path_flag)
{
	struct scsi_extended_sense	sense_buf;
	union scsi_cdb		cdb;
	struct uscsi_cmd	ucmd_buf;
	uint32_t		block_count;
	int			status;
	int			cdbsize;
	uchar_t			flag;
	struct sd_lun		*un;

	ASSERT(ssc != NULL);
	un = ssc->ssc_un;
	ASSERT(un != NULL);
	ASSERT(!mutex_owned(SD_MUTEX(un)));
	ASSERT(bufaddr != NULL);
	ASSERT((cmd == SCMD_READ) || (cmd == SCMD_WRITE));

	SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_RDWR: entry: un:0x%p\n", un);

	if (un->un_f_tgt_blocksize_is_valid != TRUE) {
		return (EINVAL);
	}

	mutex_enter(SD_MUTEX(un));
	block_count = SD_BYTES2TGTBLOCKS(un, buflen);
	mutex_exit(SD_MUTEX(un));

	flag = (cmd == SCMD_READ) ? USCSI_READ : USCSI_WRITE;

	SD_INFO(SD_LOG_IO, un, "sd_send_scsi_RDWR: "
	    "bufaddr:0x%p buflen:0x%x start_block:0x%p block_count:0x%x\n",
	    bufaddr, buflen, start_block, block_count);

	bzero(&cdb, sizeof (cdb));
	bzero(&ucmd_buf, sizeof (ucmd_buf));
	bzero(&sense_buf, sizeof (struct scsi_extended_sense));

	/* Compute CDB size to use */
	if (start_block > 0xffffffff)
		cdbsize = CDB_GROUP4;
	else if ((start_block & 0xFFE00000) ||
	    (un->un_f_cfg_is_atapi == TRUE))
		cdbsize = CDB_GROUP1;
	else
		cdbsize = CDB_GROUP0;

	switch (cdbsize) {
	case CDB_GROUP0:	/* 6-byte CDBs */
		cdb.scc_cmd = cmd;
		FORMG0ADDR(&cdb, start_block);
		FORMG0COUNT(&cdb, block_count);
		break;
	case CDB_GROUP1:	/* 10-byte CDBs */
		cdb.scc_cmd = cmd | SCMD_GROUP1;
		FORMG1ADDR(&cdb, start_block);
		FORMG1COUNT(&cdb, block_count);
		break;
	case CDB_GROUP4:	/* 16-byte CDBs */
		cdb.scc_cmd = cmd | SCMD_GROUP4;
		FORMG4LONGADDR(&cdb, (uint64_t)start_block);
		FORMG4COUNT(&cdb, block_count);
		break;
	case CDB_GROUP5:	/* 12-byte CDBs (currently unsupported) */
	default:
		/* All others reserved */
		return (EINVAL);
	}

	/* Set LUN bit(s) in CDB if this is a SCSI-1 device */
	SD_FILL_SCSI1_LUN_CDB(un, &cdb);

	ucmd_buf.uscsi_cdb = (char *)&cdb;
	ucmd_buf.uscsi_cdblen = (uchar_t)cdbsize;
	ucmd_buf.uscsi_bufaddr = bufaddr;
	ucmd_buf.uscsi_buflen = buflen;
	ucmd_buf.uscsi_rqbuf = (caddr_t)&sense_buf;
	ucmd_buf.uscsi_rqlen = sizeof (struct scsi_extended_sense);
	ucmd_buf.uscsi_flags = flag | USCSI_RQENABLE | USCSI_SILENT;
	ucmd_buf.uscsi_timeout = 60;
	status = sd_ssc_send(ssc, &ucmd_buf, FKIOCTL,
	    UIO_SYSSPACE, path_flag);

	switch (status) {
	case 0:
		sd_ssc_assessment(ssc, SD_FMT_STANDARD);
		break;	/* Success! */
	case EIO:
		switch (ucmd_buf.uscsi_status) {
		case STATUS_RESERVATION_CONFLICT:
			status = EACCES;
			break;
		default:
			break;
		}
		break;
	default:
		break;
	}

	if (status == 0) {
		SD_DUMP_MEMORY(un, SD_LOG_IO, "sd_send_scsi_RDWR: data",
		    (uchar_t *)bufaddr, buflen, SD_LOG_HEX);
	}

	SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_RDWR: exit\n");

	return (status);
}
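
/*
 * Illustrative sketch (not part of the driver): reading the first
 * target-native block of the device; the buffer handling shown is an
 * assumption for the sketch.
 *
 *	uchar_t	*blk = kmem_zalloc(un->un_tgt_blocksize, KM_SLEEP);
 *
 *	if (sd_send_scsi_RDWR(ssc, SCMD_READ, blk, un->un_tgt_blocksize,
 *	    (daddr_t)0, SD_PATH_STANDARD) == 0) {
 *		blk now holds block 0
 *	}
 *	kmem_free(blk, un->un_tgt_blocksize);
 */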

/*
 * Function: sd_send_scsi_LOG_SENSE
 *
 * Description: Issue a scsi LOG_SENSE command with the given parameters.
 *
 * Arguments: ssc - ssc contains pointer to driver soft state (unit)
 *			structure for this target.
 *
 * Return Code: 0 - Success
 *		errno return code from sd_ssc_send()
 *
 * Context: Can sleep. Does not return until command is completed.
 */

static int
sd_send_scsi_LOG_SENSE(sd_ssc_t *ssc, uchar_t *bufaddr, uint16_t buflen,
	uchar_t page_code, uchar_t page_control, uint16_t param_ptr,
	int path_flag)

{
	struct scsi_extended_sense	sense_buf;
	union scsi_cdb		cdb;
	struct uscsi_cmd	ucmd_buf;
	int			status;
	struct sd_lun		*un;

	ASSERT(ssc != NULL);
	un = ssc->ssc_un;
	ASSERT(un != NULL);
	ASSERT(!mutex_owned(SD_MUTEX(un)));

	SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_LOG_SENSE: entry: un:0x%p\n", un);

	bzero(&cdb, sizeof (cdb));
	bzero(&ucmd_buf, sizeof (ucmd_buf));
	bzero(&sense_buf, sizeof (struct scsi_extended_sense));

	cdb.scc_cmd = SCMD_LOG_SENSE_G1;
	cdb.cdb_opaque[2] = (page_control << 6) | page_code;
	cdb.cdb_opaque[5] = (uchar_t)((param_ptr & 0xFF00) >> 8);
	cdb.cdb_opaque[6] = (uchar_t)(param_ptr & 0x00FF);
	FORMG1COUNT(&cdb, buflen);

	ucmd_buf.uscsi_cdb = (char *)&cdb;
	ucmd_buf.uscsi_cdblen = CDB_GROUP1;
	ucmd_buf.uscsi_bufaddr = (caddr_t)bufaddr;
	ucmd_buf.uscsi_buflen = buflen;
	ucmd_buf.uscsi_rqbuf = (caddr_t)&sense_buf;
	ucmd_buf.uscsi_rqlen = sizeof (struct scsi_extended_sense);
	ucmd_buf.uscsi_flags = USCSI_RQENABLE | USCSI_READ | USCSI_SILENT;
	ucmd_buf.uscsi_timeout = 60;

	status = sd_ssc_send(ssc, &ucmd_buf, FKIOCTL,
	    UIO_SYSSPACE, path_flag);

	switch (status) {
	case 0:
		break;
	case EIO:
		switch (ucmd_buf.uscsi_status) {
		case STATUS_RESERVATION_CONFLICT:
			status = EACCES;
			break;
		case STATUS_CHECK:
			if ((ucmd_buf.uscsi_rqstatus == STATUS_GOOD) &&
			    (scsi_sense_key((uint8_t *)&sense_buf) ==
			    KEY_ILLEGAL_REQUEST) &&
			    (scsi_sense_asc((uint8_t *)&sense_buf) == 0x24)) {
				/*
				 * ASC 0x24: INVALID FIELD IN CDB
				 */
				switch (page_code) {
				case START_STOP_CYCLE_PAGE:
					/*
					 * The start stop cycle counter is
					 * implemented as page 0x31 in earlier
					 * generation disks. In new generation
					 * disks the start stop cycle counter is
					 * implemented as page 0xE. To properly
					 * handle this case, if an attempt for
					 * log page 0xE is made and fails, we
					 * will try again using page 0x31.
					 *
					 * Network storage BU committed to
					 * maintain the page 0x31 for this
					 * purpose and will not have any other
					 * page implemented with page code 0x31
					 * until all disks transition to the
					 * standard page.
					 */
					mutex_enter(SD_MUTEX(un));
					un->un_start_stop_cycle_page =
					    START_STOP_CYCLE_VU_PAGE;
					cdb.cdb_opaque[2] =
					    (char)(page_control << 6) |
					    un->un_start_stop_cycle_page;
					mutex_exit(SD_MUTEX(un));
					sd_ssc_assessment(ssc, SD_FMT_IGNORE);
					status = sd_ssc_send(
					    ssc, &ucmd_buf, FKIOCTL,
					    UIO_SYSSPACE, path_flag);

					break;
				case TEMPERATURE_PAGE:
					status = ENOTTY;
					break;
				default:
					break;
				}
			}
			break;
		default:
			break;
		}
		break;
	default:
		break;
	}

	if (status == 0) {
		sd_ssc_assessment(ssc, SD_FMT_STANDARD);
		SD_DUMP_MEMORY(un, SD_LOG_IO, "sd_send_scsi_LOG_SENSE: data",
		    (uchar_t *)bufaddr, buflen, SD_LOG_HEX);
	}

	SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_LOG_SENSE: exit\n");

	return (status);
}
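
/*
 * Illustrative sketch (not part of the driver): reading the temperature
 * log page with current cumulative values (page_control = 1), in the style
 * of the DKIOCGTEMPERATURE handling; TEMPERATURE_PAGE_SIZE is assumed here
 * to be the defined size of that page.
 *
 *	uchar_t	*lp = kmem_zalloc(TEMPERATURE_PAGE_SIZE, KM_SLEEP);
 *
 *	if (sd_send_scsi_LOG_SENSE(ssc, lp, TEMPERATURE_PAGE_SIZE,
 *	    TEMPERATURE_PAGE, 1, 0, SD_PATH_STANDARD) == 0) {
 *		temperature parameters are in lp[]
 *	}
 *	kmem_free(lp, TEMPERATURE_PAGE_SIZE);
 */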

/*
 * Function: sdioctl
 *
 * Description: Driver's ioctl(9e) entry point function.
 *
 * Arguments: dev     - device number
 *		cmd     - ioctl operation to be performed
 *		arg     - user argument, contains data to be set or reference
 *			  parameter for get
 *		flag    - bit flag, indicating open settings, 32/64 bit type
 *		cred_p  - user credential pointer
 *		rval_p  - calling process return value (OPT)
 *
 * Return Code: EINVAL
 *		ENOTTY
 *		ENXIO
 *		EIO
 *		EFAULT
 *		ENOTSUP
 *		EPERM
 *
 * Context: Called from the device switch at normal priority.
 */

static int
sdioctl(dev_t dev, int cmd, intptr_t arg, int flag, cred_t *cred_p, int *rval_p)
{
	struct sd_lun	*un = NULL;
	int		err = 0;
	int		i = 0;
	cred_t		*cr;
	int		tmprval = EINVAL;
	int		is_valid;
	sd_ssc_t	*ssc;

	/*
	 * All device accesses go through sdstrategy where we check on suspend
	 * status
	 */
	if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) {
		return (ENXIO);
	}

	ASSERT(!mutex_owned(SD_MUTEX(un)));

	/* Initialize sd_ssc_t for internal uscsi commands */
	ssc = sd_ssc_init(un);

	is_valid = SD_IS_VALID_LABEL(un);

	/*
	 * Moved this wait from sd_uscsi_strategy to here for
	 * reasons of deadlock prevention. Internal driver commands,
	 * specifically those to change a device's power level, result
	 * in a call to sd_uscsi_strategy.
	 */
	mutex_enter(SD_MUTEX(un));
	while ((un->un_state == SD_STATE_SUSPENDED) ||
	    (un->un_state == SD_STATE_PM_CHANGING)) {
		cv_wait(&un->un_suspend_cv, SD_MUTEX(un));
	}
	/*
	 * Twiddling the counter here protects commands from now
	 * through to the top of sd_uscsi_strategy. Without the
	 * counter increment, a power down, for example, could get in
	 * after the above check for state is made and before
	 * execution gets to the top of sd_uscsi_strategy.
	 * That would cause problems.
	 */
	un->un_ncmds_in_driver++;

	if (!is_valid &&
	    (flag & (FNDELAY | FNONBLOCK))) {
		switch (cmd) {
		case DKIOCGGEOM:	/* SD_PATH_DIRECT */
		case DKIOCGVTOC:
		case DKIOCGEXTVTOC:
		case DKIOCGAPART:
		case DKIOCPARTINFO:
		case DKIOCEXTPARTINFO:
		case DKIOCSGEOM:
		case DKIOCSAPART:
		case DKIOCGETEFI:
		case DKIOCPARTITION:
		case DKIOCSVTOC:
		case DKIOCSEXTVTOC:
		case DKIOCSETEFI:
		case DKIOCGMBOOT:
		case DKIOCSMBOOT:
		case DKIOCG_PHYGEOM:
		case DKIOCG_VIRTGEOM:
			/* let cmlb handle it */
			goto skip_ready_valid;

		case CDROMPAUSE:
		case CDROMRESUME:
		case CDROMPLAYMSF:
		case CDROMPLAYTRKIND:
		case CDROMREADTOCHDR:
		case CDROMREADTOCENTRY:
		case CDROMSTOP:
		case CDROMSTART:
		case CDROMVOLCTRL:
		case CDROMSUBCHNL:
		case CDROMREADMODE2:
		case CDROMREADMODE1:
		case CDROMREADOFFSET:
		case CDROMSBLKMODE:
		case CDROMGBLKMODE:
		case CDROMGDRVSPEED:
		case CDROMSDRVSPEED:
		case CDROMCDDA:
		case CDROMCDXA:
		case CDROMSUBCODE:
			if (!ISCD(un)) {
				un->un_ncmds_in_driver--;
				ASSERT(un->un_ncmds_in_driver >= 0);
				mutex_exit(SD_MUTEX(un));
				err = ENOTTY;
				goto done_without_assess;
			}
			break;
		case FDEJECT:
		case DKIOCEJECT:
		case CDROMEJECT:
			if (!un->un_f_eject_media_supported) {
				un->un_ncmds_in_driver--;
				ASSERT(un->un_ncmds_in_driver >= 0);
				mutex_exit(SD_MUTEX(un));
				err = ENOTTY;
				goto done_without_assess;
			}
			break;
		case DKIOCFLUSHWRITECACHE:
			mutex_exit(SD_MUTEX(un));
			err = sd_send_scsi_TEST_UNIT_READY(ssc, 0);
			if (err != 0) {
				mutex_enter(SD_MUTEX(un));
				un->un_ncmds_in_driver--;
				ASSERT(un->un_ncmds_in_driver >= 0);
				mutex_exit(SD_MUTEX(un));
				err = EIO;
				goto done_quick_assess;
			}
			mutex_enter(SD_MUTEX(un));
			/* FALLTHROUGH */
		case DKIOCREMOVABLE:
		case DKIOCHOTPLUGGABLE:
		case DKIOCINFO:
		case DKIOCGMEDIAINFO:
		case MHIOCENFAILFAST:
		case MHIOCSTATUS:
		case MHIOCTKOWN:
		case MHIOCRELEASE:
		case MHIOCGRP_INKEYS:
		case MHIOCGRP_INRESV:
		case MHIOCGRP_REGISTER:
		case MHIOCGRP_RESERVE:
		case MHIOCGRP_PREEMPTANDABORT:
		case MHIOCGRP_REGISTERANDIGNOREKEY:
		case CDROMCLOSETRAY:
		case USCSICMD:
			goto skip_ready_valid;
		default:
			break;
		}

		mutex_exit(SD_MUTEX(un));
		err = sd_ready_and_valid(ssc, SDPART(dev));
		mutex_enter(SD_MUTEX(un));

		if (err != SD_READY_VALID) {
			switch (cmd) {
			case DKIOCSTATE:
			case CDROMGDRVSPEED:
			case CDROMSDRVSPEED:
			case FDEJECT:	/* for eject command */
			case DKIOCEJECT:
			case CDROMEJECT:
			case DKIOCREMOVABLE:
			case DKIOCHOTPLUGGABLE:
				break;
			default:
				if (un->un_f_has_removable_media) {
					err = ENXIO;
				} else {
				/* Do not map SD_RESERVED_BY_OTHERS to EIO */
					if (err == SD_RESERVED_BY_OTHERS) {
						err = EACCES;
					} else {
						err = EIO;
					}
				}
				un->un_ncmds_in_driver--;
				ASSERT(un->un_ncmds_in_driver >= 0);
				mutex_exit(SD_MUTEX(un));

				goto done_without_assess;
			}
		}
	}

skip_ready_valid:
	mutex_exit(SD_MUTEX(un));

	switch (cmd) {
	case DKIOCINFO:
		SD_TRACE(SD_LOG_IOCTL, un, "DKIOCINFO\n");
		err = sd_dkio_ctrl_info(dev, (caddr_t)arg, flag);
		break;

	case DKIOCGMEDIAINFO:
		SD_TRACE(SD_LOG_IOCTL, un, "DKIOCGMEDIAINFO\n");
		err = sd_get_media_info(dev, (caddr_t)arg, flag);
		break;

	case DKIOCGGEOM:
	case DKIOCGVTOC:
	case DKIOCGEXTVTOC:
	case DKIOCGAPART:
	case DKIOCPARTINFO:
	case DKIOCEXTPARTINFO:
	case DKIOCSGEOM:
	case DKIOCSAPART:
	case DKIOCGETEFI:
	case DKIOCPARTITION:
	case DKIOCSVTOC:
	case DKIOCSEXTVTOC:
	case DKIOCSETEFI:
	case DKIOCGMBOOT:
	case DKIOCSMBOOT:
	case DKIOCG_PHYGEOM:
	case DKIOCG_VIRTGEOM:
		SD_TRACE(SD_LOG_IOCTL, un, "DKIOC %d\n", cmd);

		/* TUR should spin up */

		if (un->un_f_has_removable_media)
			err = sd_send_scsi_TEST_UNIT_READY(ssc,
			    SD_CHECK_FOR_MEDIA);
		else
			err = sd_send_scsi_TEST_UNIT_READY(ssc, 0);

		if (err != 0)
			goto done_with_assess;

		err = cmlb_ioctl(un->un_cmlbhandle, dev,
		    cmd, arg, flag, cred_p, rval_p, (void *)SD_PATH_DIRECT);

		if ((err == 0) &&
		    ((cmd == DKIOCSETEFI) ||
		    ((un->un_f_pkstats_enabled) &&
		    (cmd == DKIOCSAPART || cmd == DKIOCSVTOC)))) {

			tmprval = cmlb_validate(un->un_cmlbhandle, CMLB_SILENT,
			    (void *)SD_PATH_DIRECT);
			if ((tmprval == 0) && un->un_f_pkstats_enabled) {
				sd_set_pstats(un);
				SD_TRACE(SD_LOG_IO_PARTITION, un,
				    "sd_ioctl: un:0x%p pstats created and "
				    "set\n", un);
			}
		}

		if ((cmd == DKIOCSVTOC) ||
		    ((cmd == DKIOCSETEFI) && (tmprval == 0))) {

			mutex_enter(SD_MUTEX(un));
			if (un->un_f_devid_supported &&
			    (un->un_f_opt_fab_devid == TRUE)) {
				if (un->un_devid == NULL) {
					sd_register_devid(ssc, SD_DEVINFO(un),
					    SD_TARGET_IS_UNRESERVED);
				} else {
					/*
					 * The device id for this disk
					 * has been fabricated. The
					 * device id must be preserved
					 * by writing it back out to
					 * disk.
					 */
					if (sd_write_deviceid(ssc) != 0) {
						ddi_devid_free(un->un_devid);
						un->un_devid = NULL;
					}
				}
			}
			mutex_exit(SD_MUTEX(un));
		}

		break;

	case DKIOCLOCK:
		SD_TRACE(SD_LOG_IOCTL, un, "DKIOCLOCK\n");
		err = sd_send_scsi_DOORLOCK(ssc, SD_REMOVAL_PREVENT,
		    SD_PATH_STANDARD);
		goto done_with_assess;

	case DKIOCUNLOCK:
		SD_TRACE(SD_LOG_IOCTL, un, "DKIOCUNLOCK\n");
		err = sd_send_scsi_DOORLOCK(ssc, SD_REMOVAL_ALLOW,
		    SD_PATH_STANDARD);
		goto done_with_assess;

	case DKIOCSTATE: {
		enum dkio_state		state;
		SD_TRACE(SD_LOG_IOCTL, un, "DKIOCSTATE\n");

		if (ddi_copyin((void *)arg, &state, sizeof (int), flag) != 0) {
			err = EFAULT;
		} else {
			err = sd_check_media(dev, state);
			if (err == 0) {
				if (ddi_copyout(&un->un_mediastate, (void *)arg,
				    sizeof (int), flag) != 0)
					err = EFAULT;
			}
		}
		break;
	}

	case DKIOCREMOVABLE:
		SD_TRACE(SD_LOG_IOCTL, un, "DKIOCREMOVABLE\n");
		i = un->un_f_has_removable_media ? 1 : 0;
		if (ddi_copyout(&i, (void *)arg, sizeof (int), flag) != 0) {
			err = EFAULT;
		} else {
			err = 0;
		}
		break;
	case DKIOCHOTPLUGGABLE:
		SD_TRACE(SD_LOG_IOCTL, un, "DKIOCHOTPLUGGABLE\n");
		i = un->un_f_is_hotpluggable ? 1 : 0;
		if (ddi_copyout(&i, (void *)arg, sizeof (int), flag) != 0) {
			err = EFAULT;
		} else {
			err = 0;
		}
		break;

	case DKIOCGTEMPERATURE:
		SD_TRACE(SD_LOG_IOCTL, un, "DKIOCGTEMPERATURE\n");
		err = sd_dkio_get_temp(dev, (caddr_t)arg, flag);
		break;

	case MHIOCENFAILFAST:
		SD_TRACE(SD_LOG_IOCTL, un, "MHIOCENFAILFAST\n");
		if ((err = drv_priv(cred_p)) == 0) {
			err = sd_mhdioc_failfast(dev, (caddr_t)arg, flag);
		}
		break;

	case MHIOCTKOWN:
		SD_TRACE(SD_LOG_IOCTL, un, "MHIOCTKOWN\n");
		if ((err = drv_priv(cred_p)) == 0) {
			err = sd_mhdioc_takeown(dev, (caddr_t)arg, flag);
		}
		break;

	case MHIOCRELEASE:
		SD_TRACE(SD_LOG_IOCTL, un, "MHIOCRELEASE\n");
		if ((err = drv_priv(cred_p)) == 0) {
			err = sd_mhdioc_release(dev);
		}
		break;

	case MHIOCSTATUS:
		SD_TRACE(SD_LOG_IOCTL, un, "MHIOCSTATUS\n");
		if ((err = drv_priv(cred_p)) == 0) {
			switch (sd_send_scsi_TEST_UNIT_READY(ssc, 0)) {
			case 0:
				err = 0;
				break;
			case EACCES:
				*rval_p = 1;
				err = 0;
				sd_ssc_assessment(ssc, SD_FMT_IGNORE);
				break;
			default:
				err = EIO;
				goto done_with_assess;
			}
		}
		break;

	case MHIOCQRESERVE:
		SD_TRACE(SD_LOG_IOCTL, un, "MHIOCQRESERVE\n");
		if ((err = drv_priv(cred_p)) == 0) {
			err = sd_reserve_release(dev, SD_RESERVE);
		}
		break;

	case MHIOCREREGISTERDEVID:
		SD_TRACE(SD_LOG_IOCTL, un, "MHIOCREREGISTERDEVID\n");
		if (drv_priv(cred_p) == EPERM) {
			err = EPERM;
		} else if (!un->un_f_devid_supported) {
			err = ENOTTY;
		} else {
			err = sd_mhdioc_register_devid(dev);
		}
		break;

	case MHIOCGRP_INKEYS:
		SD_TRACE(SD_LOG_IOCTL, un, "MHIOCGRP_INKEYS\n");
		if (((err = drv_priv(cred_p)) != EPERM) && arg != NULL) {
			if (un->un_reservation_type == SD_SCSI2_RESERVATION) {
				err = ENOTSUP;
			} else {
				err = sd_mhdioc_inkeys(dev, (caddr_t)arg,
				    flag);
			}
		}
		break;

	case MHIOCGRP_INRESV:
		SD_TRACE(SD_LOG_IOCTL, un, "MHIOCGRP_INRESV\n");
		if (((err = drv_priv(cred_p)) != EPERM) && arg != NULL) {
			if (un->un_reservation_type == SD_SCSI2_RESERVATION) {
				err = ENOTSUP;
			} else {
				err = sd_mhdioc_inresv(dev, (caddr_t)arg, flag);
			}
		}
		break;

	case MHIOCGRP_REGISTER:
		SD_TRACE(SD_LOG_IOCTL, un, "MHIOCGRP_REGISTER\n");
		if ((err = drv_priv(cred_p)) != EPERM) {
			if (un->un_reservation_type == SD_SCSI2_RESERVATION) {
				err = ENOTSUP;
			} else if (arg != NULL) {
				mhioc_register_t reg;
				if (ddi_copyin((void *)arg, &reg,
				    sizeof (mhioc_register_t), flag) != 0) {
					err = EFAULT;
				} else {
					err =
					    sd_send_scsi_PERSISTENT_RESERVE_OUT(
					    ssc, SD_SCSI3_REGISTER,
					    (uchar_t *)&reg);
					if (err != 0)
						goto done_with_assess;
				}
			}
		}
		break;
	case MHIOCGRP_RESERVE:
		SD_TRACE(SD_LOG_IOCTL, un, "MHIOCGRP_RESERVE\n");
		if ((err = drv_priv(cred_p)) != EPERM) {
			if (un->un_reservation_type == SD_SCSI2_RESERVATION) {
				err = ENOTSUP;
			} else if (arg != NULL) {
				mhioc_resv_desc_t resv_desc;
				if (ddi_copyin((void *)arg, &resv_desc,
				    sizeof (mhioc_resv_desc_t), flag) != 0) {
					err = EFAULT;
				} else {
					err =
					    sd_send_scsi_PERSISTENT_RESERVE_OUT(
					    ssc, SD_SCSI3_RESERVE,
					    (uchar_t *)&resv_desc);
					if (err != 0)
						goto done_with_assess;
				}
			}
		}
		break;

	case MHIOCGRP_PREEMPTANDABORT:
		SD_TRACE(SD_LOG_IOCTL, un, "MHIOCGRP_PREEMPTANDABORT\n");
		if ((err = drv_priv(cred_p)) != EPERM) {
			if (un->un_reservation_type == SD_SCSI2_RESERVATION) {
				err = ENOTSUP;
			} else if (arg != NULL) {
				mhioc_preemptandabort_t preempt_abort;
				if (ddi_copyin((void *)arg, &preempt_abort,
				    sizeof (mhioc_preemptandabort_t),
				    flag) != 0) {
					err = EFAULT;
				} else {
					err =
					    sd_send_scsi_PERSISTENT_RESERVE_OUT(
					    ssc, SD_SCSI3_PREEMPTANDABORT,
					    (uchar_t *)&preempt_abort);
					if (err != 0)
						goto done_with_assess;
				}
			}
		}
		break;

	case MHIOCGRP_REGISTERANDIGNOREKEY:
		SD_TRACE(SD_LOG_IOCTL, un, "MHIOCGRP_REGISTERANDIGNOREKEY\n");
		if ((err = drv_priv(cred_p)) != EPERM) {
			if (un->un_reservation_type == SD_SCSI2_RESERVATION) {
				err = ENOTSUP;
			} else if (arg != NULL) {
				mhioc_registerandignorekey_t r_and_i;
				if (ddi_copyin((void *)arg, (void *)&r_and_i,
				    sizeof (mhioc_registerandignorekey_t),
				    flag) != 0) {
					err = EFAULT;
				} else {
					err =
					    sd_send_scsi_PERSISTENT_RESERVE_OUT(
					    ssc, SD_SCSI3_REGISTERANDIGNOREKEY,
					    (uchar_t *)&r_and_i);
					if (err != 0)
						goto done_with_assess;
				}
			}
		}
		break;
	case USCSICMD:
		SD_TRACE(SD_LOG_IOCTL, un, "USCSICMD\n");
		cr = ddi_get_cred();
		if ((drv_priv(cred_p) != 0) && (drv_priv(cr) != 0)) {
			err = EPERM;
		} else {
			enum uio_seg	uioseg;

			uioseg = (flag & FKIOCTL) ? UIO_SYSSPACE :
			    UIO_USERSPACE;
			if (un->un_f_format_in_progress == TRUE) {
				err = EAGAIN;
				break;
			}

			err = sd_ssc_send(ssc,
			    (struct uscsi_cmd *)arg,
			    flag, uioseg, SD_PATH_STANDARD);
			if (err != 0)
				goto done_with_assess;
			else
				sd_ssc_assessment(ssc, SD_FMT_STANDARD);
		}
		break;

	case CDROMPAUSE:
	case CDROMRESUME:
		SD_TRACE(SD_LOG_IOCTL, un, "PAUSE-RESUME\n");
		if (!ISCD(un)) {
			err = ENOTTY;
		} else {
			err = sr_pause_resume(dev, cmd);
		}
		break;

	case CDROMPLAYMSF:
		SD_TRACE(SD_LOG_IOCTL, un, "CDROMPLAYMSF\n");
		if (!ISCD(un)) {
			err = ENOTTY;
		} else {
			err = sr_play_msf(dev, (caddr_t)arg, flag);
		}
		break;

	case CDROMPLAYTRKIND:
		SD_TRACE(SD_LOG_IOCTL, un, "CDROMPLAYTRKIND\n");
#if defined(__i386) || defined(__amd64)
		/*
		 * not supported on ATAPI CD drives, use CDROMPLAYMSF instead
		 */
		if (!ISCD(un) || (un->un_f_cfg_is_atapi == TRUE)) {
#else
		if (!ISCD(un)) {
#endif
			err = ENOTTY;
		} else {
			err = sr_play_trkind(dev, (caddr_t)arg, flag);
		}
		break;

	case CDROMREADTOCHDR:
		SD_TRACE(SD_LOG_IOCTL, un, "CDROMREADTOCHDR\n");
		if (!ISCD(un)) {
			err = ENOTTY;
		} else {
			err = sr_read_tochdr(dev, (caddr_t)arg, flag);
		}
		break;

	case CDROMREADTOCENTRY:
		SD_TRACE(SD_LOG_IOCTL, un, "CDROMREADTOCENTRY\n");
		if (!ISCD(un)) {
			err = ENOTTY;
		} else {
			err = sr_read_tocentry(dev, (caddr_t)arg, flag);
		}
		break;

	case CDROMSTOP:
		SD_TRACE(SD_LOG_IOCTL, un, "CDROMSTOP\n");
		if (!ISCD(un)) {
			err = ENOTTY;
		} else {
			err = sd_send_scsi_START_STOP_UNIT(ssc, SD_TARGET_STOP,
			    SD_PATH_STANDARD);
			goto done_with_assess;
		}
		break;

	case CDROMSTART:
		SD_TRACE(SD_LOG_IOCTL, un, "CDROMSTART\n");
		if (!ISCD(un)) {
			err = ENOTTY;
		} else {
			err = sd_send_scsi_START_STOP_UNIT(ssc, SD_TARGET_START,
			    SD_PATH_STANDARD);
			goto done_with_assess;
		}
		break;

	case CDROMCLOSETRAY:
		SD_TRACE(SD_LOG_IOCTL, un, "CDROMCLOSETRAY\n");
		if (!ISCD(un)) {
			err = ENOTTY;
		} else {
			err = sd_send_scsi_START_STOP_UNIT(ssc, SD_TARGET_CLOSE,
			    SD_PATH_STANDARD);
			goto done_with_assess;
		}
		break;

	case FDEJECT:	/* for eject command */
	case DKIOCEJECT:
	case CDROMEJECT:
		SD_TRACE(SD_LOG_IOCTL, un, "EJECT\n");
		if (!un->un_f_eject_media_supported) {
			err = ENOTTY;
		} else {
			err = sr_eject(dev);
		}
		break;

	case CDROMVOLCTRL:
		SD_TRACE(SD_LOG_IOCTL, un, "CDROMVOLCTRL\n");
		if (!ISCD(un)) {
			err = ENOTTY;
		} else {
			err = sr_volume_ctrl(dev, (caddr_t)arg, flag);
		}
		break;

	case CDROMSUBCHNL:
		SD_TRACE(SD_LOG_IOCTL, un, "CDROMSUBCHNL\n");
		if (!ISCD(un)) {
			err = ENOTTY;
		} else {
			err = sr_read_subchannel(dev, (caddr_t)arg, flag);
		}
		break;
	case CDROMREADMODE2:
		SD_TRACE(SD_LOG_IOCTL, un, "CDROMREADMODE2\n");
		if (!ISCD(un)) {
			err = ENOTTY;
		} else if (un->un_f_cfg_is_atapi == TRUE) {
			/*
			 * If the drive supports READ CD, use that instead of
			 * switching the LBA size via a MODE SELECT
			 * Block Descriptor
			 */
			err = sr_read_cd_mode2(dev, (caddr_t)arg, flag);
		} else {
			err = sr_read_mode2(dev, (caddr_t)arg, flag);
		}
		break;

	case CDROMREADMODE1:
		SD_TRACE(SD_LOG_IOCTL, un, "CDROMREADMODE1\n");
		if (!ISCD(un)) {
			err = ENOTTY;
		} else {
			err = sr_read_mode1(dev, (caddr_t)arg, flag);
		}
		break;

	case CDROMREADOFFSET:
		SD_TRACE(SD_LOG_IOCTL, un, "CDROMREADOFFSET\n");
		if (!ISCD(un)) {
			err = ENOTTY;
		} else {
			err = sr_read_sony_session_offset(dev, (caddr_t)arg,
			    flag);
		}
		break;

	case CDROMSBLKMODE:
		SD_TRACE(SD_LOG_IOCTL, un, "CDROMSBLKMODE\n");
		/*
		 * There is no means of changing block size in case of atapi
		 * drives, thus return ENOTTY if drive type is atapi
		 */
		if (!ISCD(un) || (un->un_f_cfg_is_atapi == TRUE)) {
			err = ENOTTY;
		} else if (un->un_f_mmc_cap == TRUE) {

			/*
			 * MMC Devices do not support changing the
			 * logical block size
			 *
			 * Note: EINVAL is being returned instead of ENOTTY to
			 * maintain consistency with the original mmc
			 * driver update.
			 */
			err = EINVAL;
		} else {
			mutex_enter(SD_MUTEX(un));
			if ((!(un->un_exclopen & (1 << SDPART(dev)))) ||
			    (un->un_ncmds_in_transport > 0)) {
				mutex_exit(SD_MUTEX(un));
				err = EINVAL;
			} else {
				mutex_exit(SD_MUTEX(un));
				err = sr_change_blkmode(dev, cmd, arg, flag);
			}
		}
		break;

	case CDROMGBLKMODE:
		SD_TRACE(SD_LOG_IOCTL, un, "CDROMGBLKMODE\n");
		if (!ISCD(un)) {
			err = ENOTTY;
		} else if ((un->un_f_cfg_is_atapi != FALSE) &&
		    (un->un_f_blockcount_is_valid != FALSE)) {
			/*
			 * Drive is an ATAPI drive so return target block
			 * size for ATAPI drives since we cannot change the
			 * blocksize on ATAPI drives. Used primarily to detect
			 * if an ATAPI cdrom is present.
			 */
			if (ddi_copyout(&un->un_tgt_blocksize, (void *)arg,
			    sizeof (int), flag) != 0) {
				err = EFAULT;
			} else {
				err = 0;
			}

		} else {
			/*
			 * Drive supports changing block sizes via a Mode
			 * Select.
			 */
			err = sr_change_blkmode(dev, cmd, arg, flag);
		}
		break;
21899 * We no longer support get or set speed for 21900 * mmc but need to remain consistent with regard 21901 * to the error code returned. 21902 */ 21903 err = EINVAL; 21904 } else if (un->un_f_cfg_is_atapi == TRUE) { 21905 err = sr_atapi_change_speed(dev, cmd, arg, flag); 21906 } else { 21907 err = sr_change_speed(dev, cmd, arg, flag); 21908 } 21909 break; 21910 21911 case CDROMCDDA: 21912 SD_TRACE(SD_LOG_IOCTL, un, "CDROMCDDA\n"); 21913 if (!ISCD(un)) { 21914 err = ENOTTY; 21915 } else { 21916 err = sr_read_cdda(dev, (void *)arg, flag); 21917 } 21918 break; 21919 21920 case CDROMCDXA: 21921 SD_TRACE(SD_LOG_IOCTL, un, "CDROMCDXA\n"); 21922 if (!ISCD(un)) { 21923 err = ENOTTY; 21924 } else { 21925 err = sr_read_cdxa(dev, (caddr_t)arg, flag); 21926 } 21927 break; 21928 21929 case CDROMSUBCODE: 21930 SD_TRACE(SD_LOG_IOCTL, un, "CDROMSUBCODE\n"); 21931 if (!ISCD(un)) { 21932 err = ENOTTY; 21933 } else { 21934 err = sr_read_all_subcodes(dev, (caddr_t)arg, flag); 21935 } 21936 break; 21937 21938 21939 #ifdef SDDEBUG 21940 /* RESET/ABORTS testing ioctls */ 21941 case DKIOCRESET: { 21942 int reset_level; 21943 21944 if (ddi_copyin((void *)arg, &reset_level, sizeof (int), flag)) { 21945 err = EFAULT; 21946 } else { 21947 SD_INFO(SD_LOG_IOCTL, un, "sdioctl: DKIOCRESET: " 21948 "reset_level = 0x%lx\n", reset_level); 21949 if (scsi_reset(SD_ADDRESS(un), reset_level)) { 21950 err = 0; 21951 } else { 21952 err = EIO; 21953 } 21954 } 21955 break; 21956 } 21957 21958 case DKIOCABORT: 21959 SD_INFO(SD_LOG_IOCTL, un, "sdioctl: DKIOCABORT:\n"); 21960 if (scsi_abort(SD_ADDRESS(un), NULL)) { 21961 err = 0; 21962 } else { 21963 err = EIO; 21964 } 21965 break; 21966 #endif 21967 21968 #ifdef SD_FAULT_INJECTION 21969 /* SDIOC FaultInjection testing ioctls */ 21970 case SDIOCSTART: 21971 case SDIOCSTOP: 21972 case SDIOCINSERTPKT: 21973 case SDIOCINSERTXB: 21974 case SDIOCINSERTUN: 21975 case SDIOCINSERTARQ: 21976 case SDIOCPUSH: 21977 case SDIOCRETRIEVE: 21978 case SDIOCRUN: 21979 SD_INFO(SD_LOG_SDTEST, un, "sdioctl:" 21980 "SDIOC detected cmd:0x%X:\n", cmd); 21981 /* call error generator */ 21982 sd_faultinjection_ioctl(cmd, arg, un); 21983 err = 0; 21984 break; 21985 21986 #endif /* SD_FAULT_INJECTION */ 21987 21988 case DKIOCFLUSHWRITECACHE: 21989 { 21990 struct dk_callback *dkc = (struct dk_callback *)arg; 21991 21992 mutex_enter(SD_MUTEX(un)); 21993 if (!un->un_f_sync_cache_supported || 21994 !un->un_f_write_cache_enabled) { 21995 err = un->un_f_sync_cache_supported ? 21996 0 : ENOTSUP; 21997 mutex_exit(SD_MUTEX(un)); 21998 if ((flag & FKIOCTL) && dkc != NULL && 21999 dkc->dkc_callback != NULL) { 22000 (*dkc->dkc_callback)(dkc->dkc_cookie, 22001 err); 22002 /* 22003 * Did callback and reported error. 22004 * Since we did a callback, ioctl 22005 * should return 0. 
22006 */ 22007 err = 0; 22008 } 22009 break; 22010 } 22011 mutex_exit(SD_MUTEX(un)); 22012 22013 if ((flag & FKIOCTL) && dkc != NULL && 22014 dkc->dkc_callback != NULL) { 22015 /* async SYNC CACHE request */ 22016 err = sd_send_scsi_SYNCHRONIZE_CACHE(un, dkc); 22017 } else { 22018 /* synchronous SYNC CACHE request */ 22019 err = sd_send_scsi_SYNCHRONIZE_CACHE(un, NULL); 22020 } 22021 } 22022 break; 22023 22024 case DKIOCGETWCE: { 22025 22026 int wce; 22027 22028 if ((err = sd_get_write_cache_enabled(ssc, &wce)) != 0) { 22029 break; 22030 } 22031 22032 if (ddi_copyout(&wce, (void *)arg, sizeof (wce), flag)) { 22033 err = EFAULT; 22034 } 22035 break; 22036 } 22037 22038 case DKIOCSETWCE: { 22039 22040 int wce, sync_supported; 22041 22042 if (ddi_copyin((void *)arg, &wce, sizeof (wce), flag)) { 22043 err = EFAULT; 22044 break; 22045 } 22046 22047 /* 22048 * Synchronize multiple threads trying to enable 22049 * or disable the cache via the un_f_wcc_cv 22050 * condition variable. 22051 */ 22052 mutex_enter(SD_MUTEX(un)); 22053 22054 /* 22055 * Don't allow the cache to be enabled if the 22056 * config file has it disabled. 22057 */ 22058 if (un->un_f_opt_disable_cache && wce) { 22059 mutex_exit(SD_MUTEX(un)); 22060 err = EINVAL; 22061 break; 22062 } 22063 22064 /* 22065 * Wait for write cache change in progress 22066 * bit to be clear before proceeding. 22067 */ 22068 while (un->un_f_wcc_inprog) 22069 cv_wait(&un->un_wcc_cv, SD_MUTEX(un)); 22070 22071 un->un_f_wcc_inprog = 1; 22072 22073 if (un->un_f_write_cache_enabled && wce == 0) { 22074 /* 22075 * Disable the write cache. Don't clear 22076 * un_f_write_cache_enabled until after 22077 * the mode select and flush are complete. 22078 */ 22079 sync_supported = un->un_f_sync_cache_supported; 22080 22081 /* 22082 * If cache flush is suppressed, we assume that the 22083 * controller firmware will take care of managing the 22084 * write cache for us: no need to explicitly 22085 * disable it. 22086 */ 22087 if (!un->un_f_suppress_cache_flush) { 22088 mutex_exit(SD_MUTEX(un)); 22089 if ((err = sd_cache_control(ssc, 22090 SD_CACHE_NOCHANGE, 22091 SD_CACHE_DISABLE)) == 0 && 22092 sync_supported) { 22093 err = sd_send_scsi_SYNCHRONIZE_CACHE(un, 22094 NULL); 22095 } 22096 } else { 22097 mutex_exit(SD_MUTEX(un)); 22098 } 22099 22100 mutex_enter(SD_MUTEX(un)); 22101 if (err == 0) { 22102 un->un_f_write_cache_enabled = 0; 22103 } 22104 22105 } else if (!un->un_f_write_cache_enabled && wce != 0) { 22106 /* 22107 * Set un_f_write_cache_enabled first, so there is 22108 * no window where the cache is enabled, but the 22109 * bit says it isn't. 22110 */ 22111 un->un_f_write_cache_enabled = 1; 22112 22113 /* 22114 * If cache flush is suppressed, we assume that the 22115 * controller firmware will take care of managing the 22116 * write cache for us: no need to explicitly 22117 * enable it. 
22118 */ 22119 if (!un->un_f_suppress_cache_flush) { 22120 mutex_exit(SD_MUTEX(un)); 22121 err = sd_cache_control(ssc, SD_CACHE_NOCHANGE, 22122 SD_CACHE_ENABLE); 22123 } else { 22124 mutex_exit(SD_MUTEX(un)); 22125 } 22126 22127 mutex_enter(SD_MUTEX(un)); 22128 22129 if (err) { 22130 un->un_f_write_cache_enabled = 0; 22131 } 22132 } 22133 22134 un->un_f_wcc_inprog = 0; 22135 cv_broadcast(&un->un_wcc_cv); 22136 mutex_exit(SD_MUTEX(un)); 22137 break; 22138 } 22139 22140 default: 22141 err = ENOTTY; 22142 break; 22143 } 22144 mutex_enter(SD_MUTEX(un)); 22145 un->un_ncmds_in_driver--; 22146 ASSERT(un->un_ncmds_in_driver >= 0); 22147 mutex_exit(SD_MUTEX(un)); 22148 22149 22150 done_without_assess: 22151 sd_ssc_fini(ssc); 22152 22153 SD_TRACE(SD_LOG_IOCTL, un, "sdioctl: exit: %d\n", err); 22154 return (err); 22155 22156 done_with_assess: 22157 mutex_enter(SD_MUTEX(un)); 22158 un->un_ncmds_in_driver--; 22159 ASSERT(un->un_ncmds_in_driver >= 0); 22160 mutex_exit(SD_MUTEX(un)); 22161 22162 done_quick_assess: 22163 if (err != 0) 22164 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 22165 /* Uninitialize sd_ssc_t pointer */ 22166 sd_ssc_fini(ssc); 22167 22168 SD_TRACE(SD_LOG_IOCTL, un, "sdioctl: exit: %d\n", err); 22169 return (err); 22170 } 22171 22172 22173 /* 22174 * Function: sd_dkio_ctrl_info 22175 * 22176 * Description: This routine is the driver entry point for handling controller 22177 * information ioctl requests (DKIOCINFO). 22178 * 22179 * Arguments: dev - the device number 22180 * arg - pointer to user provided dk_cinfo structure 22181 * specifying the controller type and attributes. 22182 * flag - this argument is a pass through to ddi_copyxxx() 22183 * directly from the mode argument of ioctl(). 22184 * 22185 * Return Code: 0 22186 * EFAULT 22187 * ENXIO 22188 */ 22189 22190 static int 22191 sd_dkio_ctrl_info(dev_t dev, caddr_t arg, int flag) 22192 { 22193 struct sd_lun *un = NULL; 22194 struct dk_cinfo *info; 22195 dev_info_t *pdip; 22196 int lun, tgt; 22197 22198 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 22199 return (ENXIO); 22200 } 22201 22202 info = (struct dk_cinfo *) 22203 kmem_zalloc(sizeof (struct dk_cinfo), KM_SLEEP); 22204 22205 switch (un->un_ctype) { 22206 case CTYPE_CDROM: 22207 info->dki_ctype = DKC_CDROM; 22208 break; 22209 default: 22210 info->dki_ctype = DKC_SCSI_CCS; 22211 break; 22212 } 22213 pdip = ddi_get_parent(SD_DEVINFO(un)); 22214 info->dki_cnum = ddi_get_instance(pdip); 22215 if (strlen(ddi_get_name(pdip)) < DK_DEVLEN) { 22216 (void) strcpy(info->dki_cname, ddi_get_name(pdip)); 22217 } else { 22218 (void) strncpy(info->dki_cname, ddi_node_name(pdip), 22219 DK_DEVLEN - 1); 22220 } 22221 22222 lun = ddi_prop_get_int(DDI_DEV_T_ANY, SD_DEVINFO(un), 22223 DDI_PROP_DONTPASS, SCSI_ADDR_PROP_LUN, 0); 22224 tgt = ddi_prop_get_int(DDI_DEV_T_ANY, SD_DEVINFO(un), 22225 DDI_PROP_DONTPASS, SCSI_ADDR_PROP_TARGET, 0); 22226 22227 /* Unit Information */ 22228 info->dki_unit = ddi_get_instance(SD_DEVINFO(un)); 22229 info->dki_slave = ((tgt << 3) | lun); 22230 (void) strncpy(info->dki_dname, ddi_driver_name(SD_DEVINFO(un)), 22231 DK_DEVLEN - 1); 22232 info->dki_flags = DKI_FMTVOL; 22233 info->dki_partition = SDPART(dev); 22234 22235 /* Max Transfer size of this device in blocks */ 22236 info->dki_maxtransfer = un->un_max_xfer_size / un->un_sys_blocksize; 22237 info->dki_addr = 0; 22238 info->dki_space = 0; 22239 info->dki_prio = 0; 22240 info->dki_vec = 0; 22241 22242 if (ddi_copyout(info, arg, sizeof (struct dk_cinfo), flag) != 0) { 22243 kmem_free(info, sizeof 
(struct dk_cinfo));
        return (EFAULT);
    } else {
        kmem_free(info, sizeof (struct dk_cinfo));
        return (0);
    }
}
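
/*
 * Illustration (hypothetical user-level sketch, not part of the driver):
 * a DKIOCINFO caller receives the dk_cinfo data assembled above. The
 * device path below is an assumption.
 *
 *	int fd = open("/dev/rdsk/c0t0d0s2", O_RDONLY | O_NDELAY);
 *	struct dk_cinfo ci;
 *	if (ioctl(fd, DKIOCINFO, &ci) == 0)
 *		(void) printf("ctype=%d cnum=%d unit=%d maxxfer=%u\n",
 *		    ci.dki_ctype, ci.dki_cnum, ci.dki_unit,
 *		    ci.dki_maxtransfer);
 */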

/*
 * Function: sd_get_media_info
 *
 * Description: This routine is the driver entry point for handling ioctl
 *		requests for the media type or command set profile used by the
 *		drive to operate on the media (DKIOCGMEDIAINFO).
 *
 * Arguments:	dev	- the device number
 *		arg	- pointer to user provided dk_minfo structure
 *			  specifying the media type, logical block size and
 *			  drive capacity.
 *		flag	- this argument is a pass through to ddi_copyxxx()
 *			  directly from the mode argument of ioctl().
 *
 * Return Code: 0
 *		EACCES
 *		EFAULT
 *		ENXIO
 *		EIO
 */

static int
sd_get_media_info(dev_t dev, caddr_t arg, int flag)
{
    struct sd_lun	*un = NULL;
    struct uscsi_cmd	com;
    struct scsi_inquiry	*sinq;
    struct dk_minfo	media_info;
    u_longlong_t	media_capacity;
    uint64_t		capacity;
    uint_t		lbasize;
    uchar_t		*out_data;
    uchar_t		*rqbuf;
    int			rval = 0;
    int			rtn;
    sd_ssc_t		*ssc;

    if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL ||
        (un->un_state == SD_STATE_OFFLINE)) {
        return (ENXIO);
    }

    SD_TRACE(SD_LOG_IOCTL_DKIO, un, "sd_get_media_info: entry\n");

    out_data = kmem_zalloc(SD_PROFILE_HEADER_LEN, KM_SLEEP);
    rqbuf = kmem_zalloc(SENSE_LENGTH, KM_SLEEP);

    /* Issue a TUR to determine if the drive is ready with media present */
    ssc = sd_ssc_init(un);
    rval = sd_send_scsi_TEST_UNIT_READY(ssc, SD_CHECK_FOR_MEDIA);
    if (rval == ENXIO) {
        goto done;
    } else if (rval != 0) {
        sd_ssc_assessment(ssc, SD_FMT_IGNORE);
    }

    /* Now get configuration data */
    if (ISCD(un)) {
        media_info.dki_media_type = DK_CDROM;

        /* Allow SCMD_GET_CONFIGURATION to MMC devices only */
        if (un->un_f_mmc_cap == TRUE) {
            rtn = sd_send_scsi_GET_CONFIGURATION(ssc, &com, rqbuf,
                SENSE_LENGTH, out_data, SD_PROFILE_HEADER_LEN,
                SD_PATH_STANDARD);

            if (rtn) {
                /*
                 * We ignore all failures for CD and need to
                 * put the assessment before processing code
                 * to avoid missing assessment for FMA.
                 */
                sd_ssc_assessment(ssc, SD_FMT_IGNORE);
                /*
                 * Failed for other than an illegal request
                 * or command not supported
                 */
                if ((com.uscsi_status == STATUS_CHECK) &&
                    (com.uscsi_rqstatus == STATUS_GOOD)) {
                    if ((rqbuf[2] != KEY_ILLEGAL_REQUEST) ||
                        (rqbuf[12] != 0x20)) {
                        rval = EIO;
                        goto no_assessment;
                    }
                }
            } else {
                /*
                 * The GET CONFIGURATION command succeeded
                 * so set the media type according to the
                 * returned data
                 */
                media_info.dki_media_type = out_data[6];
                media_info.dki_media_type <<= 8;
                media_info.dki_media_type |= out_data[7];
            }
        }
    } else {
        /*
         * The profile list is not available, so we attempt to identify
         * the media type based on the inquiry data
         */
        sinq = un->un_sd->sd_inq;
        if ((sinq->inq_dtype == DTYPE_DIRECT) ||
            (sinq->inq_dtype == DTYPE_OPTICAL)) {
            /* This is a direct access device or optical disk */
            media_info.dki_media_type = DK_FIXED_DISK;

            if ((bcmp(sinq->inq_vid, "IOMEGA", 6) == 0) ||
                (bcmp(sinq->inq_vid, "iomega", 6) == 0)) {
                if ((bcmp(sinq->inq_pid, "ZIP", 3) == 0)) {
                    media_info.dki_media_type = DK_ZIP;
                } else if (
                    (bcmp(sinq->inq_pid, "jaz", 3) == 0)) {
                    media_info.dki_media_type = DK_JAZ;
                }
            }
        } else {
            /*
             * Not a CD, direct access or optical disk so return
             * unknown media
             */
            media_info.dki_media_type = DK_UNKNOWN;
        }
    }

    /* Now read the capacity so we can provide the lbasize and capacity */
    rval = sd_send_scsi_READ_CAPACITY(ssc, &capacity, &lbasize,
        SD_PATH_DIRECT);
    switch (rval) {
    case 0:
        break;
    case EACCES:
        rval = EACCES;
        goto done;
    default:
        rval = EIO;
        goto done;
    }

    /*
     * If the LUN was expanded dynamically, update the un structure.
     */
    mutex_enter(SD_MUTEX(un));
    if ((un->un_f_blockcount_is_valid == TRUE) &&
        (un->un_f_tgt_blocksize_is_valid == TRUE) &&
        (capacity > un->un_blockcount)) {
        sd_update_block_info(un, lbasize, capacity);
    }
    mutex_exit(SD_MUTEX(un));

    media_info.dki_lbsize = lbasize;
    media_capacity = capacity;

    /*
     * sd_send_scsi_READ_CAPACITY() reports capacity in
     * un->un_sys_blocksize chunks. So we need to convert it into
     * cap.lbasize chunks.
     */
    media_capacity *= un->un_sys_blocksize;
    media_capacity /= lbasize;
    media_info.dki_capacity = media_capacity;

    if (ddi_copyout(&media_info, arg, sizeof (struct dk_minfo), flag)) {
        rval = EFAULT;
        /* Keep the goto; code may be added below in the future. */
        goto no_assessment;
    }
done:
    if (rval != 0) {
        if (rval == EIO)
            sd_ssc_assessment(ssc, SD_FMT_STATUS_CHECK);
        else
            sd_ssc_assessment(ssc, SD_FMT_IGNORE);
    }
no_assessment:
    sd_ssc_fini(ssc);
    kmem_free(out_data, SD_PROFILE_HEADER_LEN);
    kmem_free(rqbuf, SENSE_LENGTH);
    return (rval);
}


/*
 * Function: sd_check_media
 *
 * Description: This utility routine implements the functionality for the
 *		DKIOCSTATE ioctl. This ioctl blocks the user thread until the
 *		driver state changes from that specified by the user
 *		(inserted or ejected).
For example, if the user specifies 22440 * DKIO_EJECTED and the current media state is inserted this 22441 * routine will immediately return DKIO_INSERTED. However, if the 22442 * current media state is not inserted the user thread will be 22443 * blocked until the drive state changes. If DKIO_NONE is specified 22444 * the user thread will block until a drive state change occurs. 22445 * 22446 * Arguments: dev - the device number 22447 * state - user pointer to a dkio_state, updated with the current 22448 * drive state at return. 22449 * 22450 * Return Code: ENXIO 22451 * EIO 22452 * EAGAIN 22453 * EINTR 22454 */ 22455 22456 static int 22457 sd_check_media(dev_t dev, enum dkio_state state) 22458 { 22459 struct sd_lun *un = NULL; 22460 enum dkio_state prev_state; 22461 opaque_t token = NULL; 22462 int rval = 0; 22463 sd_ssc_t *ssc; 22464 22465 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 22466 return (ENXIO); 22467 } 22468 22469 SD_TRACE(SD_LOG_COMMON, un, "sd_check_media: entry\n"); 22470 22471 ssc = sd_ssc_init(un); 22472 22473 mutex_enter(SD_MUTEX(un)); 22474 22475 SD_TRACE(SD_LOG_COMMON, un, "sd_check_media: " 22476 "state=%x, mediastate=%x\n", state, un->un_mediastate); 22477 22478 prev_state = un->un_mediastate; 22479 22480 /* is there anything to do? */ 22481 if (state == un->un_mediastate || un->un_mediastate == DKIO_NONE) { 22482 /* 22483 * submit the request to the scsi_watch service; 22484 * scsi_media_watch_cb() does the real work 22485 */ 22486 mutex_exit(SD_MUTEX(un)); 22487 22488 /* 22489 * This change handles the case where a scsi watch request is 22490 * added to a device that is powered down. To accomplish this 22491 * we power up the device before adding the scsi watch request, 22492 * since the scsi watch sends a TUR directly to the device 22493 * which the device cannot handle if it is powered down. 22494 */ 22495 if (sd_pm_entry(un) != DDI_SUCCESS) { 22496 mutex_enter(SD_MUTEX(un)); 22497 goto done; 22498 } 22499 22500 token = scsi_watch_request_submit(SD_SCSI_DEVP(un), 22501 sd_check_media_time, SENSE_LENGTH, sd_media_watch_cb, 22502 (caddr_t)dev); 22503 22504 sd_pm_exit(un); 22505 22506 mutex_enter(SD_MUTEX(un)); 22507 if (token == NULL) { 22508 rval = EAGAIN; 22509 goto done; 22510 } 22511 22512 /* 22513 * This is a special case IOCTL that doesn't return 22514 * until the media state changes. Routine sdpower 22515 * knows about and handles this so don't count it 22516 * as an active cmd in the driver, which would 22517 * keep the device busy to the pm framework. 22518 * If the count isn't decremented the device can't 22519 * be powered down. 22520 */ 22521 un->un_ncmds_in_driver--; 22522 ASSERT(un->un_ncmds_in_driver >= 0); 22523 22524 /* 22525 * if a prior request had been made, this will be the same 22526 * token, as scsi_watch was designed that way. 
         */
        un->un_swr_token = token;
        un->un_specified_mediastate = state;

        /*
         * now wait for media change
         * we will not be signalled unless mediastate == state but it
         * is still better to test for this condition, since there is
         * a 2 sec cv_broadcast delay when mediastate == DKIO_INSERTED
         */
        SD_TRACE(SD_LOG_COMMON, un,
            "sd_check_media: waiting for media state change\n");
        while (un->un_mediastate == state) {
            if (cv_wait_sig(&un->un_state_cv, SD_MUTEX(un)) == 0) {
                SD_TRACE(SD_LOG_COMMON, un,
                    "sd_check_media: waiting for media state "
                    "was interrupted\n");
                un->un_ncmds_in_driver++;
                rval = EINTR;
                goto done;
            }
            SD_TRACE(SD_LOG_COMMON, un,
                "sd_check_media: received signal, state=%x\n",
                un->un_mediastate);
        }
        /*
         * Inc the counter to indicate the device once again
         * has an active outstanding cmd.
         */
        un->un_ncmds_in_driver++;
    }

    /* invalidate geometry */
    if (prev_state == DKIO_INSERTED && un->un_mediastate == DKIO_EJECTED) {
        sr_ejected(un);
    }

    if (un->un_mediastate == DKIO_INSERTED && prev_state != DKIO_INSERTED) {
        uint64_t	capacity;
        uint_t		lbasize;

        SD_TRACE(SD_LOG_COMMON, un, "sd_check_media: media inserted\n");
        mutex_exit(SD_MUTEX(un));
        /*
         * Since the following routines use SD_PATH_DIRECT, we must
         * call PM directly before the upcoming disk accesses. This
         * may cause the disk to be powered up and spun up.
         */

        if (sd_pm_entry(un) == DDI_SUCCESS) {
            rval = sd_send_scsi_READ_CAPACITY(ssc,
                &capacity, &lbasize, SD_PATH_DIRECT);
            if (rval != 0) {
                sd_pm_exit(un);
                if (rval == EIO)
                    sd_ssc_assessment(ssc,
                        SD_FMT_STATUS_CHECK);
                else
                    sd_ssc_assessment(ssc, SD_FMT_IGNORE);
                mutex_enter(SD_MUTEX(un));
                goto done;
            }
        } else {
            rval = EIO;
            mutex_enter(SD_MUTEX(un));
            goto done;
        }
        mutex_enter(SD_MUTEX(un));

        sd_update_block_info(un, lbasize, capacity);

        /*
         * Check if the media in the device is writable or not
         */
        if (ISCD(un)) {
            sd_check_for_writable_cd(ssc, SD_PATH_DIRECT);
        }

        mutex_exit(SD_MUTEX(un));
        cmlb_invalidate(un->un_cmlbhandle, (void *)SD_PATH_DIRECT);
        if ((cmlb_validate(un->un_cmlbhandle, 0,
            (void *)SD_PATH_DIRECT) == 0) && un->un_f_pkstats_enabled) {
            sd_set_pstats(un);
            SD_TRACE(SD_LOG_IO_PARTITION, un,
                "sd_check_media: un:0x%p pstats created and "
                "set\n", un);
        }

        rval = sd_send_scsi_DOORLOCK(ssc, SD_REMOVAL_PREVENT,
            SD_PATH_DIRECT);

        sd_pm_exit(un);

        if (rval != 0) {
            if (rval == EIO)
                sd_ssc_assessment(ssc, SD_FMT_STATUS_CHECK);
            else
                sd_ssc_assessment(ssc, SD_FMT_IGNORE);
        }

        mutex_enter(SD_MUTEX(un));
    }
done:
    sd_ssc_fini(ssc);
    un->un_f_watcht_stopped = FALSE;
    /*
     * Use of this local token and the mutex ensures that we avoid
     * some race conditions associated with terminating the
     * scsi watch.
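     * un_swr_token is cleared while holding SD_MUTEX before the
     * terminate call is issued, so another thread cannot pick up a
     * stale token while this thread sleeps in
     * scsi_watch_request_terminate().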
22636 */ 22637 if (token) { 22638 un->un_swr_token = (opaque_t)NULL; 22639 mutex_exit(SD_MUTEX(un)); 22640 (void) scsi_watch_request_terminate(token, 22641 SCSI_WATCH_TERMINATE_WAIT); 22642 mutex_enter(SD_MUTEX(un)); 22643 } 22644 22645 /* 22646 * Update the capacity kstat value, if no media previously 22647 * (capacity kstat is 0) and a media has been inserted 22648 * (un_f_blockcount_is_valid == TRUE) 22649 */ 22650 if (un->un_errstats) { 22651 struct sd_errstats *stp = NULL; 22652 22653 stp = (struct sd_errstats *)un->un_errstats->ks_data; 22654 if ((stp->sd_capacity.value.ui64 == 0) && 22655 (un->un_f_blockcount_is_valid == TRUE)) { 22656 stp->sd_capacity.value.ui64 = 22657 (uint64_t)((uint64_t)un->un_blockcount * 22658 un->un_sys_blocksize); 22659 } 22660 } 22661 mutex_exit(SD_MUTEX(un)); 22662 SD_TRACE(SD_LOG_COMMON, un, "sd_check_media: done\n"); 22663 return (rval); 22664 } 22665 22666 22667 /* 22668 * Function: sd_delayed_cv_broadcast 22669 * 22670 * Description: Delayed cv_broadcast to allow for target to recover from media 22671 * insertion. 22672 * 22673 * Arguments: arg - driver soft state (unit) structure 22674 */ 22675 22676 static void 22677 sd_delayed_cv_broadcast(void *arg) 22678 { 22679 struct sd_lun *un = arg; 22680 22681 SD_TRACE(SD_LOG_COMMON, un, "sd_delayed_cv_broadcast\n"); 22682 22683 mutex_enter(SD_MUTEX(un)); 22684 un->un_dcvb_timeid = NULL; 22685 cv_broadcast(&un->un_state_cv); 22686 mutex_exit(SD_MUTEX(un)); 22687 } 22688 22689 22690 /* 22691 * Function: sd_media_watch_cb 22692 * 22693 * Description: Callback routine used for support of the DKIOCSTATE ioctl. This 22694 * routine processes the TUR sense data and updates the driver 22695 * state if a transition has occurred. The user thread 22696 * (sd_check_media) is then signalled. 
22697 * 22698 * Arguments: arg - the device 'dev_t' is used for context to discriminate 22699 * among multiple watches that share this callback function 22700 * resultp - scsi watch facility result packet containing scsi 22701 * packet, status byte and sense data 22702 * 22703 * Return Code: 0 for success, -1 for failure 22704 */ 22705 22706 static int 22707 sd_media_watch_cb(caddr_t arg, struct scsi_watch_result *resultp) 22708 { 22709 struct sd_lun *un; 22710 struct scsi_status *statusp = resultp->statusp; 22711 uint8_t *sensep = (uint8_t *)resultp->sensep; 22712 enum dkio_state state = DKIO_NONE; 22713 dev_t dev = (dev_t)arg; 22714 uchar_t actual_sense_length; 22715 uint8_t skey, asc, ascq; 22716 22717 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 22718 return (-1); 22719 } 22720 actual_sense_length = resultp->actual_sense_length; 22721 22722 mutex_enter(SD_MUTEX(un)); 22723 SD_TRACE(SD_LOG_COMMON, un, 22724 "sd_media_watch_cb: status=%x, sensep=%p, len=%x\n", 22725 *((char *)statusp), (void *)sensep, actual_sense_length); 22726 22727 if (resultp->pkt->pkt_reason == CMD_DEV_GONE) { 22728 un->un_mediastate = DKIO_DEV_GONE; 22729 cv_broadcast(&un->un_state_cv); 22730 mutex_exit(SD_MUTEX(un)); 22731 22732 return (0); 22733 } 22734 22735 /* 22736 * If there was a check condition then sensep points to valid sense data 22737 * If status was not a check condition but a reservation or busy status 22738 * then the new state is DKIO_NONE 22739 */ 22740 if (sensep != NULL) { 22741 skey = scsi_sense_key(sensep); 22742 asc = scsi_sense_asc(sensep); 22743 ascq = scsi_sense_ascq(sensep); 22744 22745 SD_INFO(SD_LOG_COMMON, un, 22746 "sd_media_watch_cb: sense KEY=%x, ASC=%x, ASCQ=%x\n", 22747 skey, asc, ascq); 22748 /* This routine only uses up to 13 bytes of sense data. */ 22749 if (actual_sense_length >= 13) { 22750 if (skey == KEY_UNIT_ATTENTION) { 22751 if (asc == 0x28) { 22752 state = DKIO_INSERTED; 22753 } 22754 } else if (skey == KEY_NOT_READY) { 22755 /* 22756 * Sense data of 02/06/00 means that the 22757 * drive could not read the media (No 22758 * reference position found). In this case 22759 * to prevent a hang on the DKIOCSTATE IOCTL 22760 * we set the media state to DKIO_INSERTED. 22761 */ 22762 if (asc == 0x06 && ascq == 0x00) 22763 state = DKIO_INSERTED; 22764 22765 /* 22766 * if 02/04/02 means that the host 22767 * should send start command. Explicitly 22768 * leave the media state as is 22769 * (inserted) as the media is inserted 22770 * and host has stopped device for PM 22771 * reasons. Upon next true read/write 22772 * to this media will bring the 22773 * device to the right state good for 22774 * media access. 22775 */ 22776 if (asc == 0x3a) { 22777 state = DKIO_EJECTED; 22778 } else { 22779 /* 22780 * If the drive is busy with an 22781 * operation or long write, keep the 22782 * media in an inserted state. 22783 */ 22784 22785 if ((asc == 0x04) && 22786 ((ascq == 0x02) || 22787 (ascq == 0x07) || 22788 (ascq == 0x08))) { 22789 state = DKIO_INSERTED; 22790 } 22791 } 22792 } else if (skey == KEY_NO_SENSE) { 22793 if ((asc == 0x00) && (ascq == 0x00)) { 22794 /* 22795 * Sense Data 00/00/00 does not provide 22796 * any information about the state of 22797 * the media. Ignore it. 

/*
 * Function: sd_dkio_get_temp
 *
 * Description: This routine is the driver entry point for handling ioctl
 *		requests to get the disk temperature.
 *
 * Arguments:	dev	- the device number
 *		arg	- pointer to user provided dk_temperature structure.
 *		flag	- this argument is a pass through to ddi_copyxxx()
 *			  directly from the mode argument of ioctl().
 *
 * Return Code: 0
 *		EFAULT
 *		ENXIO
 *		EAGAIN
 */

static int
sd_dkio_get_temp(dev_t dev, caddr_t arg, int flag)
{
    struct sd_lun		*un = NULL;
    struct dk_temperature	*dktemp = NULL;
    uchar_t			*temperature_page;
    int				rval = 0;
    int				path_flag = SD_PATH_STANDARD;
    sd_ssc_t			*ssc;

    if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) {
        return (ENXIO);
    }

    ssc = sd_ssc_init(un);
    dktemp = kmem_zalloc(sizeof (struct dk_temperature), KM_SLEEP);

    /* copyin the disk temp argument to get the user flags */
    if (ddi_copyin((void *)arg, dktemp,
        sizeof (struct dk_temperature), flag) != 0) {
        rval = EFAULT;
        goto done;
    }

    /* Initialize the temperature to invalid. */
    dktemp->dkt_cur_temp = (short)DKT_INVALID_TEMP;
    dktemp->dkt_ref_temp = (short)DKT_INVALID_TEMP;

    /*
     * Note: Investigate removing the "bypass pm" semantic.
     * Can we just bypass PM always?
     */
    if (dktemp->dkt_flags & DKT_BYPASS_PM) {
        path_flag = SD_PATH_DIRECT;
        ASSERT(!mutex_owned(&un->un_pm_mutex));
        mutex_enter(&un->un_pm_mutex);
        if (SD_DEVICE_IS_IN_LOW_POWER(un)) {
            /*
             * If DKT_BYPASS_PM is set, and the drive happens to be
             * in low power mode, we cannot wake it up; we need to
             * return EAGAIN.
             */
            mutex_exit(&un->un_pm_mutex);
            rval = EAGAIN;
            goto done;
        } else {
            /*
             * Indicate to PM the device is busy. This is required
             * to avoid a race - i.e. the ioctl is issuing a
             * command and the pm framework brings down the device
             * to low power mode (possible power cut-off on some
             * platforms).
             */
            mutex_exit(&un->un_pm_mutex);
            if (sd_pm_entry(un) != DDI_SUCCESS) {
                rval = EAGAIN;
                goto done;
            }
        }
    }

    temperature_page = kmem_zalloc(TEMPERATURE_PAGE_SIZE, KM_SLEEP);

    rval = sd_send_scsi_LOG_SENSE(ssc, temperature_page,
        TEMPERATURE_PAGE_SIZE, TEMPERATURE_PAGE, 1, 0, path_flag);
    if (rval != 0)
        goto done2;

    /*
     * For the current temperature verify that the parameter length is
     * 0x02 and the parameter code is 0x00
     */
    if ((temperature_page[7] == 0x02) && (temperature_page[4] == 0x00) &&
        (temperature_page[5] == 0x00)) {
        if (temperature_page[9] == 0xFF) {
            dktemp->dkt_cur_temp = (short)DKT_INVALID_TEMP;
        } else {
            dktemp->dkt_cur_temp = (short)(temperature_page[9]);
        }
    }

    /*
     * For the reference temperature verify that the parameter
     * length is 0x02 and the parameter code is 0x01
     */
    if ((temperature_page[13] == 0x02) && (temperature_page[10] == 0x00) &&
        (temperature_page[11] == 0x01)) {
        if (temperature_page[15] == 0xFF) {
            dktemp->dkt_ref_temp = (short)DKT_INVALID_TEMP;
        } else {
            dktemp->dkt_ref_temp = (short)(temperature_page[15]);
        }
    }

    /* Do the copyout regardless of the temperature command's status. */
    if (ddi_copyout(dktemp, (void *)arg, sizeof (struct dk_temperature),
        flag) != 0) {
        rval = EFAULT;
        goto done1;
    }

done2:
    if (rval != 0) {
        if (rval == EIO)
            sd_ssc_assessment(ssc, SD_FMT_STATUS_CHECK);
        else
            sd_ssc_assessment(ssc, SD_FMT_IGNORE);
    }
done1:
    if (path_flag == SD_PATH_DIRECT) {
        sd_pm_exit(un);
    }

    kmem_free(temperature_page, TEMPERATURE_PAGE_SIZE);
done:
    sd_ssc_fini(ssc);
    if (dktemp != NULL) {
        kmem_free(dktemp, sizeof (struct dk_temperature));
    }

    return (rval);
}
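
/*
 * Illustration (hypothetical user-level sketch, not part of the driver):
 * the temperature ioctl (assumed here to be DKIOCGTEMPERATURE) fills in
 * the dk_temperature structure handled above:
 *
 *	struct dk_temperature dkt;
 *	bzero(&dkt, sizeof (dkt));
 *	dkt.dkt_flags = DKT_BYPASS_PM;		(don't spin up the disk)
 *	if (ioctl(fd, DKIOCGTEMPERATURE, &dkt) == 0 &&
 *	    dkt.dkt_cur_temp != DKT_INVALID_TEMP)
 *		(void) printf("%d C\n", dkt.dkt_cur_temp);
 */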

/*
 * Function: sd_log_page_supported
 *
 * Description: This routine uses sd_send_scsi_LOG_SENSE to find the list of
 *		supported log pages.
 *
 * Arguments:	ssc	- ssc contains pointer to driver soft state (unit)
 *			  structure for this target.
 *		log_page - the log page code to search for in the list of
 *			  supported pages.
 *
 * Return Code: -1 - on error (log sense is optional and may not be supported).
 *		0  - log page not found.
 *		1  - log page found.
 */

static int
sd_log_page_supported(sd_ssc_t *ssc, int log_page)
{
    uchar_t	*log_page_data;
    int		i;
    int		match = 0;
    int		log_size;
    int		status = 0;
    struct sd_lun	*un;

    ASSERT(ssc != NULL);
    un = ssc->ssc_un;
    ASSERT(un != NULL);

    log_page_data = kmem_zalloc(0xFF, KM_SLEEP);

    status = sd_send_scsi_LOG_SENSE(ssc, log_page_data, 0xFF, 0, 0x01, 0,
        SD_PATH_DIRECT);

    if (status != 0) {
        if (status == EIO) {
            /*
             * Some disks do not support log sense; we should
             * ignore this kind of error (sense key is 0x5 -
             * illegal request).
             */
            uint8_t *sensep;
            int senlen;

            sensep = (uint8_t *)ssc->ssc_uscsi_cmd->uscsi_rqbuf;
            senlen = (int)(ssc->ssc_uscsi_cmd->uscsi_rqlen -
                ssc->ssc_uscsi_cmd->uscsi_rqresid);

            if (senlen > 0 &&
                scsi_sense_key(sensep) == KEY_ILLEGAL_REQUEST) {
                sd_ssc_assessment(ssc,
                    SD_FMT_IGNORE_COMPROMISE);
            } else {
                sd_ssc_assessment(ssc, SD_FMT_STATUS_CHECK);
            }
        } else {
            sd_ssc_assessment(ssc, SD_FMT_IGNORE);
        }

        SD_ERROR(SD_LOG_COMMON, un,
            "sd_log_page_supported: failed log page retrieval\n");
        kmem_free(log_page_data, 0xFF);
        return (-1);
    }

    log_size = log_page_data[3];

    /*
     * The list of supported log pages starts from the fourth byte. Check
     * until we run out of log pages or a match is found.
     */
    for (i = 4; (i < (log_size + 4)) && !match; i++) {
        if (log_page_data[i] == log_page) {
            match++;
        }
    }
    kmem_free(log_page_data, 0xFF);
    return (match);
}
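
/*
 * For reference, sd_log_page_supported() takes the page length from
 * byte 3 of the LOG SENSE page 0x00 reply and scans the supported page
 * codes starting at byte 4 (per SPC). A hypothetical reply of
 *
 *	00 00 00 03 00 0d 2f
 *
 * would advertise three supported pages: 0x00, 0x0d and 0x2f.
 */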

/*
 * Function: sd_mhdioc_failfast
 *
 * Description: This routine is the driver entry point for handling ioctl
 *		requests to enable/disable the multihost failfast option.
 *		(MHIOCENFAILFAST)
 *
 * Arguments:	dev	- the device number
 *		arg	- user specified probing interval, in milliseconds.
 *		flag	- this argument is a pass through to ddi_copyxxx()
 *			  directly from the mode argument of ioctl().
 *
 * Return Code: 0
 *		EFAULT
 *		ENXIO
 */

static int
sd_mhdioc_failfast(dev_t dev, caddr_t arg, int flag)
{
    struct sd_lun	*un = NULL;
    int			mh_time;
    int			rval = 0;

    if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) {
        return (ENXIO);
    }

    if (ddi_copyin((void *)arg, &mh_time, sizeof (int), flag))
        return (EFAULT);

    if (mh_time) {
        mutex_enter(SD_MUTEX(un));
        un->un_resvd_status |= SD_FAILFAST;
        mutex_exit(SD_MUTEX(un));
        /*
         * If mh_time is INT_MAX, then this ioctl is being used for
         * SCSI-3 PGR purposes, and we don't need to spawn the watch
         * thread.
         */
        if (mh_time != INT_MAX) {
            rval = sd_check_mhd(dev, mh_time);
        }
    } else {
        (void) sd_check_mhd(dev, 0);
        mutex_enter(SD_MUTEX(un));
        un->un_resvd_status &= ~SD_FAILFAST;
        mutex_exit(SD_MUTEX(un));
    }
    return (rval);
}


/*
 * Function: sd_mhdioc_takeown
 *
 * Description: This routine is the driver entry point for handling ioctl
 *		requests to forcefully acquire exclusive access rights to the
 *		multihost disk (MHIOCTKOWN).
 *
 * Arguments:	dev	- the device number
 *		arg	- user provided structure specifying the delay
 *			  parameters in milliseconds
 *		flag	- this argument is a pass through to ddi_copyxxx()
 *			  directly from the mode argument of ioctl().
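 *
 *		For illustration (hypothetical caller), the delays may be
 *		left at zero; a zero reinstate_resv_delay falls back to
 *		SD_REINSTATE_RESV_DELAY in the code below:
 *
 *			struct mhioctkown t;
 *			bzero(&t, sizeof (t));
 *			(void) ioctl(fd, MHIOCTKOWN, &t);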
23129 * 23130 * Return Code: 0 23131 * EFAULT 23132 * ENXIO 23133 */ 23134 23135 static int 23136 sd_mhdioc_takeown(dev_t dev, caddr_t arg, int flag) 23137 { 23138 struct sd_lun *un = NULL; 23139 struct mhioctkown *tkown = NULL; 23140 int rval = 0; 23141 23142 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 23143 return (ENXIO); 23144 } 23145 23146 if (arg != NULL) { 23147 tkown = (struct mhioctkown *) 23148 kmem_zalloc(sizeof (struct mhioctkown), KM_SLEEP); 23149 rval = ddi_copyin(arg, tkown, sizeof (struct mhioctkown), flag); 23150 if (rval != 0) { 23151 rval = EFAULT; 23152 goto error; 23153 } 23154 } 23155 23156 rval = sd_take_ownership(dev, tkown); 23157 mutex_enter(SD_MUTEX(un)); 23158 if (rval == 0) { 23159 un->un_resvd_status |= SD_RESERVE; 23160 if (tkown != NULL && tkown->reinstate_resv_delay != 0) { 23161 sd_reinstate_resv_delay = 23162 tkown->reinstate_resv_delay * 1000; 23163 } else { 23164 sd_reinstate_resv_delay = SD_REINSTATE_RESV_DELAY; 23165 } 23166 /* 23167 * Give the scsi_watch routine interval set by 23168 * the MHIOCENFAILFAST ioctl precedence here. 23169 */ 23170 if ((un->un_resvd_status & SD_FAILFAST) == 0) { 23171 mutex_exit(SD_MUTEX(un)); 23172 (void) sd_check_mhd(dev, sd_reinstate_resv_delay/1000); 23173 SD_TRACE(SD_LOG_IOCTL_MHD, un, 23174 "sd_mhdioc_takeown : %d\n", 23175 sd_reinstate_resv_delay); 23176 } else { 23177 mutex_exit(SD_MUTEX(un)); 23178 } 23179 (void) scsi_reset_notify(SD_ADDRESS(un), SCSI_RESET_NOTIFY, 23180 sd_mhd_reset_notify_cb, (caddr_t)un); 23181 } else { 23182 un->un_resvd_status &= ~SD_RESERVE; 23183 mutex_exit(SD_MUTEX(un)); 23184 } 23185 23186 error: 23187 if (tkown != NULL) { 23188 kmem_free(tkown, sizeof (struct mhioctkown)); 23189 } 23190 return (rval); 23191 } 23192 23193 23194 /* 23195 * Function: sd_mhdioc_release 23196 * 23197 * Description: This routine is the driver entry point for handling ioctl 23198 * requests to release exclusive access rights to the multihost 23199 * disk (MHIOCRELEASE). 23200 * 23201 * Arguments: dev - the device number 23202 * 23203 * Return Code: 0 23204 * ENXIO 23205 */ 23206 23207 static int 23208 sd_mhdioc_release(dev_t dev) 23209 { 23210 struct sd_lun *un = NULL; 23211 timeout_id_t resvd_timeid_save; 23212 int resvd_status_save; 23213 int rval = 0; 23214 23215 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 23216 return (ENXIO); 23217 } 23218 23219 mutex_enter(SD_MUTEX(un)); 23220 resvd_status_save = un->un_resvd_status; 23221 un->un_resvd_status &= 23222 ~(SD_RESERVE | SD_LOST_RESERVE | SD_WANT_RESERVE); 23223 if (un->un_resvd_timeid) { 23224 resvd_timeid_save = un->un_resvd_timeid; 23225 un->un_resvd_timeid = NULL; 23226 mutex_exit(SD_MUTEX(un)); 23227 (void) untimeout(resvd_timeid_save); 23228 } else { 23229 mutex_exit(SD_MUTEX(un)); 23230 } 23231 23232 /* 23233 * destroy any pending timeout thread that may be attempting to 23234 * reinstate reservation on this device. 
     */
    sd_rmv_resv_reclaim_req(dev);

    if ((rval = sd_reserve_release(dev, SD_RELEASE)) == 0) {
        mutex_enter(SD_MUTEX(un));
        if ((un->un_mhd_token) &&
            ((un->un_resvd_status & SD_FAILFAST) == 0)) {
            mutex_exit(SD_MUTEX(un));
            (void) sd_check_mhd(dev, 0);
        } else {
            mutex_exit(SD_MUTEX(un));
        }
        (void) scsi_reset_notify(SD_ADDRESS(un), SCSI_RESET_CANCEL,
            sd_mhd_reset_notify_cb, (caddr_t)un);
    } else {
        /*
         * sd_mhd_watch_cb will restart the resvd recover timeout
         * thread
         */
        mutex_enter(SD_MUTEX(un));
        un->un_resvd_status = resvd_status_save;
        mutex_exit(SD_MUTEX(un));
    }
    return (rval);
}


/*
 * Function: sd_mhdioc_register_devid
 *
 * Description: This routine is the driver entry point for handling ioctl
 *		requests to register the device id (MHIOCREREGISTERDEVID).
 *
 *		Note: The implementation for this ioctl has been updated to
 *		be consistent with the original PSARC case (1999/357)
 *		(4375899, 4241671, 4220005)
 *
 * Arguments:	dev	- the device number
 *
 * Return Code: 0
 *		ENXIO
 */

static int
sd_mhdioc_register_devid(dev_t dev)
{
    struct sd_lun	*un = NULL;
    int			rval = 0;
    sd_ssc_t		*ssc;

    if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) {
        return (ENXIO);
    }

    ASSERT(!mutex_owned(SD_MUTEX(un)));

    mutex_enter(SD_MUTEX(un));

    /* If a devid already exists, de-register it */
    if (un->un_devid != NULL) {
        ddi_devid_unregister(SD_DEVINFO(un));
        /*
         * After unregistering the devid, we also need to free the
         * devid memory.
         */
        ddi_devid_free(un->un_devid);
        un->un_devid = NULL;
    }

    /* Check for reservation conflict */
    mutex_exit(SD_MUTEX(un));
    ssc = sd_ssc_init(un);
    rval = sd_send_scsi_TEST_UNIT_READY(ssc, 0);
    mutex_enter(SD_MUTEX(un));

    switch (rval) {
    case 0:
        sd_register_devid(ssc, SD_DEVINFO(un), SD_TARGET_IS_UNRESERVED);
        break;
    case EACCES:
        break;
    default:
        rval = EIO;
    }

    mutex_exit(SD_MUTEX(un));
    if (rval != 0) {
        if (rval == EIO)
            sd_ssc_assessment(ssc, SD_FMT_STATUS_CHECK);
        else
            sd_ssc_assessment(ssc, SD_FMT_IGNORE);
    }
    sd_ssc_fini(ssc);
    return (rval);
}
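
/*
 * Illustration (hypothetical user-level sketch, not part of the driver):
 * MHIOCGRP_INKEYS is a two-part handshake; the caller supplies a key-list
 * buffer through mhioc_inkeys_t.li and gets back the PGR generation and
 * the registered keys. Field names and layout follow <sys/mhd.h> as
 * assumed here:
 *
 *	mhioc_resv_key_t keys[16];
 *	mhioc_key_list_t kl = { 16, 0, keys };
 *	mhioc_inkeys_t ik;
 *	ik.li = &kl;
 *	if (ioctl(fd, MHIOCGRP_INKEYS, &ik) == 0)
 *		... kl.listlen keys returned, ik.generation updated ...
 */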

/*
 * Function: sd_mhdioc_inkeys
 *
 * Description: This routine is the driver entry point for handling ioctl
 *		requests to issue the SCSI-3 Persistent In Read Keys command
 *		to the device (MHIOCGRP_INKEYS).
 *
 * Arguments:	dev	- the device number
 *		arg	- user provided in_keys structure
 *		flag	- this argument is a pass through to ddi_copyxxx()
 *			  directly from the mode argument of ioctl().
 *
 * Return Code: code returned by sd_persistent_reservation_in_read_keys()
 *		ENXIO
 *		EFAULT
 */

static int
sd_mhdioc_inkeys(dev_t dev, caddr_t arg, int flag)
{
    struct sd_lun	*un;
    mhioc_inkeys_t	inkeys;
    int			rval = 0;

    if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) {
        return (ENXIO);
    }

#ifdef _MULTI_DATAMODEL
    switch (ddi_model_convert_from(flag & FMODELS)) {
    case DDI_MODEL_ILP32: {
        struct mhioc_inkeys32	inkeys32;

        if (ddi_copyin(arg, &inkeys32,
            sizeof (struct mhioc_inkeys32), flag) != 0) {
            return (EFAULT);
        }
        inkeys.li = (mhioc_key_list_t *)(uintptr_t)inkeys32.li;
        if ((rval = sd_persistent_reservation_in_read_keys(un,
            &inkeys, flag)) != 0) {
            return (rval);
        }
        inkeys32.generation = inkeys.generation;
        if (ddi_copyout(&inkeys32, arg, sizeof (struct mhioc_inkeys32),
            flag) != 0) {
            return (EFAULT);
        }
        break;
    }
    case DDI_MODEL_NONE:
        if (ddi_copyin(arg, &inkeys, sizeof (mhioc_inkeys_t),
            flag) != 0) {
            return (EFAULT);
        }
        if ((rval = sd_persistent_reservation_in_read_keys(un,
            &inkeys, flag)) != 0) {
            return (rval);
        }
        if (ddi_copyout(&inkeys, arg, sizeof (mhioc_inkeys_t),
            flag) != 0) {
            return (EFAULT);
        }
        break;
    }

#else /* ! _MULTI_DATAMODEL */

    if (ddi_copyin(arg, &inkeys, sizeof (mhioc_inkeys_t), flag) != 0) {
        return (EFAULT);
    }
    rval = sd_persistent_reservation_in_read_keys(un, &inkeys, flag);
    if (rval != 0) {
        return (rval);
    }
    if (ddi_copyout(&inkeys, arg, sizeof (mhioc_inkeys_t), flag) != 0) {
        return (EFAULT);
    }

#endif /* _MULTI_DATAMODEL */

    return (rval);
}


/*
 * Function: sd_mhdioc_inresv
 *
 * Description: This routine is the driver entry point for handling ioctl
 *		requests to issue the SCSI-3 Persistent In Read Reservations
 *		command to the device (MHIOCGRP_INRESV).
 *
 * Arguments:	dev	- the device number
 *		arg	- user provided in_resv structure
 *		flag	- this argument is a pass through to ddi_copyxxx()
 *			  directly from the mode argument of ioctl().
23425 * 23426 * Return Code: code returned by sd_persistent_reservation_in_read_resv() 23427 * ENXIO 23428 * EFAULT 23429 */ 23430 23431 static int 23432 sd_mhdioc_inresv(dev_t dev, caddr_t arg, int flag) 23433 { 23434 struct sd_lun *un; 23435 mhioc_inresvs_t inresvs; 23436 int rval = 0; 23437 23438 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 23439 return (ENXIO); 23440 } 23441 23442 #ifdef _MULTI_DATAMODEL 23443 23444 switch (ddi_model_convert_from(flag & FMODELS)) { 23445 case DDI_MODEL_ILP32: { 23446 struct mhioc_inresvs32 inresvs32; 23447 23448 if (ddi_copyin(arg, &inresvs32, 23449 sizeof (struct mhioc_inresvs32), flag) != 0) { 23450 return (EFAULT); 23451 } 23452 inresvs.li = (mhioc_resv_desc_list_t *)(uintptr_t)inresvs32.li; 23453 if ((rval = sd_persistent_reservation_in_read_resv(un, 23454 &inresvs, flag)) != 0) { 23455 return (rval); 23456 } 23457 inresvs32.generation = inresvs.generation; 23458 if (ddi_copyout(&inresvs32, arg, 23459 sizeof (struct mhioc_inresvs32), flag) != 0) { 23460 return (EFAULT); 23461 } 23462 break; 23463 } 23464 case DDI_MODEL_NONE: 23465 if (ddi_copyin(arg, &inresvs, 23466 sizeof (mhioc_inresvs_t), flag) != 0) { 23467 return (EFAULT); 23468 } 23469 if ((rval = sd_persistent_reservation_in_read_resv(un, 23470 &inresvs, flag)) != 0) { 23471 return (rval); 23472 } 23473 if (ddi_copyout(&inresvs, arg, 23474 sizeof (mhioc_inresvs_t), flag) != 0) { 23475 return (EFAULT); 23476 } 23477 break; 23478 } 23479 23480 #else /* ! _MULTI_DATAMODEL */ 23481 23482 if (ddi_copyin(arg, &inresvs, sizeof (mhioc_inresvs_t), flag) != 0) { 23483 return (EFAULT); 23484 } 23485 rval = sd_persistent_reservation_in_read_resv(un, &inresvs, flag); 23486 if (rval != 0) { 23487 return (rval); 23488 } 23489 if (ddi_copyout(&inresvs, arg, sizeof (mhioc_inresvs_t), flag)) { 23490 return (EFAULT); 23491 } 23492 23493 #endif /* ! _MULTI_DATAMODEL */ 23494 23495 return (rval); 23496 } 23497 23498 23499 /* 23500 * The following routines support the clustering functionality described below 23501 * and implement lost reservation reclaim functionality. 23502 * 23503 * Clustering 23504 * ---------- 23505 * The clustering code uses two different, independent forms of SCSI 23506 * reservation. Traditional SCSI-2 Reserve/Release and the newer SCSI-3 23507 * Persistent Group Reservations. For any particular disk, it will use either 23508 * SCSI-2 or SCSI-3 PGR but never both at the same time for the same disk. 23509 * 23510 * SCSI-2 23511 * The cluster software takes ownership of a multi-hosted disk by issuing the 23512 * MHIOCTKOWN ioctl to the disk driver. It releases ownership by issuing the 23513 * MHIOCRELEASE ioctl. Closely related is the MHIOCENFAILFAST ioctl -- a 23514 * cluster, just after taking ownership of the disk with the MHIOCTKOWN ioctl 23515 * then issues the MHIOCENFAILFAST ioctl. This ioctl "enables failfast" in the 23516 * driver. The meaning of failfast is that if the driver (on this host) ever 23517 * encounters the scsi error return code RESERVATION_CONFLICT from the device, 23518 * it should immediately panic the host. The motivation for this ioctl is that 23519 * if this host does encounter reservation conflict, the underlying cause is 23520 * that some other host of the cluster has decided that this host is no longer 23521 * in the cluster and has seized control of the disks for itself. Since this 23522 * host is no longer in the cluster, it ought to panic itself. 
The
 * MHIOCENFAILFAST ioctl does two things:
 *	(a) it sets a flag that will cause any returned RESERVATION_CONFLICT
 *	error to panic the host
 *	(b) it sets up a periodic timer to test whether this host still has
 *	"access" (in that no other host has reserved the device): if the
 *	periodic timer gets RESERVATION_CONFLICT, the host is panicked. The
 *	purpose of that periodic timer is to handle scenarios where the host
 *	is otherwise temporarily quiescent, temporarily doing no real i/o.
 * The MHIOCTKOWN ioctl will "break" a reservation that is held by another
 * host, by issuing a SCSI Bus Device Reset. It will then issue a SCSI
 * Reserve for the device itself.
 *
 * SCSI-3 PGR
 * A direct semantic implementation of the SCSI-3 Persistent Reservation
 * facility is supported through the shared multihost disk ioctls
 * (MHIOCGRP_INKEYS, MHIOCGRP_INRESV, MHIOCGRP_REGISTER, MHIOCGRP_RESERVE,
 * MHIOCGRP_PREEMPTANDABORT)
 *
 * Reservation Reclaim:
 * --------------------
 * To support the lost reservation reclaim operations this driver creates a
 * single thread to handle reinstating reservations on all devices that have
 * lost reservations. sd_resv_reclaim_requests are logged for all devices
 * that have LOST RESERVATIONS when the scsi watch facility calls back
 * sd_mhd_watch_cb, and the reservation reclaim thread loops through the
 * requests to regain the lost reservations.
 */

/*
 * Function: sd_check_mhd()
 *
 * Description: This function sets up and submits a scsi watch request or
 *		terminates an existing watch request. This routine is used in
 *		support of reservation reclaim.
 *
 * Arguments:	dev	- the device 'dev_t' is used for context to
 *			  discriminate among multiple watches that share the
 *			  callback function
 *		interval - the number of milliseconds specifying the watch
 *			  interval for issuing TEST UNIT READY commands. If
 *			  set to 0 the watch should be terminated. If the
 *			  interval is set to 0 and if the device is required
 *			  to hold reservation while disabling failfast, the
 *			  watch is restarted with an interval of
 *			  reinstate_resv_delay.
 *
 * Return Code: 0	- Successful submit/terminate of scsi watch request
 *		ENXIO	- Indicates an invalid device was specified
 *		EAGAIN	- Unable to submit the scsi watch request
 */

static int
sd_check_mhd(dev_t dev, int interval)
{
    struct sd_lun	*un;
    opaque_t		token;

    if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) {
        return (ENXIO);
    }

    /* is this a watch termination request? */
    if (interval == 0) {
        mutex_enter(SD_MUTEX(un));
        /* if there is an existing watch task then terminate it */
        if (un->un_mhd_token) {
            token = un->un_mhd_token;
            un->un_mhd_token = NULL;
            mutex_exit(SD_MUTEX(un));
            (void) scsi_watch_request_terminate(token,
                SCSI_WATCH_TERMINATE_ALL_WAIT);
            mutex_enter(SD_MUTEX(un));
        } else {
            mutex_exit(SD_MUTEX(un));
            /*
             * Note: If we return here we don't check for the
             * failfast case. This is the original legacy
             * implementation but perhaps we should be checking
             * the failfast case.
             */
            return (0);
        }
        /*
         * If the device is required to hold reservation while
         * disabling failfast, we need to restart the scsi_watch
         * routine with an interval of reinstate_resv_delay.
         */
        if (un->un_resvd_status & SD_RESERVE) {
            interval = sd_reinstate_resv_delay/1000;
        } else {
            /* no failfast so bail */
            mutex_exit(SD_MUTEX(un));
            return (0);
        }
        mutex_exit(SD_MUTEX(un));
    }

    /*
     * adjust minimum time interval to 1 second,
     * and convert from msecs to usecs
     */
    if (interval > 0 && interval < 1000) {
        interval = 1000;
    }
    interval *= 1000;

    /*
     * submit the request to the scsi_watch service
     */
    token = scsi_watch_request_submit(SD_SCSI_DEVP(un), interval,
        SENSE_LENGTH, sd_mhd_watch_cb, (caddr_t)dev);
    if (token == NULL) {
        return (EAGAIN);
    }

    /*
     * save token for termination later on
     */
    mutex_enter(SD_MUTEX(un));
    un->un_mhd_token = token;
    mutex_exit(SD_MUTEX(un));
    return (0);
}
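
/*
 * For example, sd_check_mhd(dev, 500) rounds the interval up to 1000 ms
 * and submits a watch with a period of 1,000,000 us, while
 * sd_check_mhd(dev, 0) terminates the watch (or restarts it at
 * reinstate_resv_delay if the device must keep holding its reservation).
 */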

/*
 * Function: sd_mhd_watch_cb()
 *
 * Description: This function is the call back function used by the scsi watch
 *		facility. The scsi watch facility sends the "Test Unit Ready"
 *		and processes the status. If applicable (i.e. a "Unit
 *		Attention" status and automatic "Request Sense" not used) the
 *		scsi watch facility will send a "Request Sense" and retrieve
 *		the sense data to be passed to this callback function. In
 *		either case, whether the automatic "Request Sense" was used or
 *		the facility submitted one, this callback is passed the status
 *		and sense data.
 *
 * Arguments:	arg	- the device 'dev_t' is used for context to
 *			  discriminate among multiple watches that share this
 *			  callback function
 *		resultp - scsi watch facility result packet containing scsi
 *			  packet, status byte and sense data
 *
 * Return Code: 0	- continue the watch task
 *		non-zero - terminate the watch task
 */

static int
sd_mhd_watch_cb(caddr_t arg, struct scsi_watch_result *resultp)
{
    struct sd_lun	*un;
    struct scsi_status	*statusp;
    uint8_t		*sensep;
    struct scsi_pkt	*pkt;
    uchar_t		actual_sense_length;
    dev_t		dev = (dev_t)arg;

    ASSERT(resultp != NULL);
    statusp		= resultp->statusp;
    sensep		= (uint8_t *)resultp->sensep;
    pkt			= resultp->pkt;
    actual_sense_length	= resultp->actual_sense_length;

    if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) {
        return (ENXIO);
    }

    SD_TRACE(SD_LOG_IOCTL_MHD, un,
        "sd_mhd_watch_cb: reason '%s', status '%s'\n",
        scsi_rname(pkt->pkt_reason), sd_sname(*((unsigned char *)statusp)));

    /* Begin processing of the status and/or sense data */
    if (pkt->pkt_reason != CMD_CMPLT) {
        /* Handle the incomplete packet */
        sd_mhd_watch_incomplete(un, pkt);
        return (0);
    } else if (*((unsigned char *)statusp) != STATUS_GOOD) {
        if (*((unsigned char *)statusp)
            == STATUS_RESERVATION_CONFLICT) {
            /*
             * Handle a reservation conflict by panicking if
             * configured for failfast or by logging the conflict
             * and updating the reservation status
             */
            mutex_enter(SD_MUTEX(un));
            if ((un->un_resvd_status & SD_FAILFAST) &&
                (sd_failfast_enable)) {
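                /*
                 * Failfast was armed via MHIOCENFAILFAST and
                 * the watch saw a reservation conflict:
                 * panic this host, per the clustering notes
                 * above.
                 */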
sd_panic_for_res_conflict(un); 23709 /*NOTREACHED*/ 23710 } 23711 SD_INFO(SD_LOG_IOCTL_MHD, un, 23712 "sd_mhd_watch_cb: Reservation Conflict\n"); 23713 un->un_resvd_status |= SD_RESERVATION_CONFLICT; 23714 mutex_exit(SD_MUTEX(un)); 23715 } 23716 } 23717 23718 if (sensep != NULL) { 23719 if (actual_sense_length >= (SENSE_LENGTH - 2)) { 23720 mutex_enter(SD_MUTEX(un)); 23721 if ((scsi_sense_asc(sensep) == 23722 SD_SCSI_RESET_SENSE_CODE) && 23723 (un->un_resvd_status & SD_RESERVE)) { 23724 /* 23725 * The additional sense code indicates a power 23726 * on or bus device reset has occurred; update 23727 * the reservation status. 23728 */ 23729 un->un_resvd_status |= 23730 (SD_LOST_RESERVE | SD_WANT_RESERVE); 23731 SD_INFO(SD_LOG_IOCTL_MHD, un, 23732 "sd_mhd_watch_cb: Lost Reservation\n"); 23733 } 23734 } else { 23735 return (0); 23736 } 23737 } else { 23738 mutex_enter(SD_MUTEX(un)); 23739 } 23740 23741 if ((un->un_resvd_status & SD_RESERVE) && 23742 (un->un_resvd_status & SD_LOST_RESERVE)) { 23743 if (un->un_resvd_status & SD_WANT_RESERVE) { 23744 /* 23745 * A reset occurred in between the last probe and this 23746 * one so if a timeout is pending cancel it. 23747 */ 23748 if (un->un_resvd_timeid) { 23749 timeout_id_t temp_id = un->un_resvd_timeid; 23750 un->un_resvd_timeid = NULL; 23751 mutex_exit(SD_MUTEX(un)); 23752 (void) untimeout(temp_id); 23753 mutex_enter(SD_MUTEX(un)); 23754 } 23755 un->un_resvd_status &= ~SD_WANT_RESERVE; 23756 } 23757 if (un->un_resvd_timeid == 0) { 23758 /* Schedule a timeout to handle the lost reservation */ 23759 un->un_resvd_timeid = timeout(sd_mhd_resvd_recover, 23760 (void *)dev, 23761 drv_usectohz(sd_reinstate_resv_delay)); 23762 } 23763 } 23764 mutex_exit(SD_MUTEX(un)); 23765 return (0); 23766 } 23767 23768 23769 /* 23770 * Function: sd_mhd_watch_incomplete() 23771 * 23772 * Description: This function is used to find out why a scsi pkt sent by the 23773 * scsi watch facility was not completed. Under some scenarios this 23774 * routine will return. Otherwise it will send a bus reset to see 23775 * if the drive is still online. 23776 * 23777 * Arguments: un - driver soft state (unit) structure 23778 * pkt - incomplete scsi pkt 23779 */ 23780 23781 static void 23782 sd_mhd_watch_incomplete(struct sd_lun *un, struct scsi_pkt *pkt) 23783 { 23784 int be_chatty; 23785 int perr; 23786 23787 ASSERT(pkt != NULL); 23788 ASSERT(un != NULL); 23789 be_chatty = (!(pkt->pkt_flags & FLAG_SILENT)); 23790 perr = (pkt->pkt_statistics & STAT_PERR); 23791 23792 mutex_enter(SD_MUTEX(un)); 23793 if (un->un_state == SD_STATE_DUMPING) { 23794 mutex_exit(SD_MUTEX(un)); 23795 return; 23796 } 23797 23798 switch (pkt->pkt_reason) { 23799 case CMD_UNX_BUS_FREE: 23800 /* 23801 * If we had a parity error that caused the target to drop BSY*, 23802 * don't be chatty about it. 23803 */ 23804 if (perr && be_chatty) { 23805 be_chatty = 0; 23806 } 23807 break; 23808 case CMD_TAG_REJECT: 23809 /* 23810 * The SCSI-2 spec states that a tag reject will be sent by the 23811 * target if tagged queuing is not supported. A tag reject may 23812 * also be sent during certain initialization periods or to 23813 * control internal resources. For the latter case the target 23814 * may also return Queue Full. 23815 * 23816 * If this driver receives a tag reject from a target that is 23817 * going through an init period or controlling internal 23818 * resources tagged queuing will be disabled. 
		 * This is less than optimal behavior, but the driver is
		 * unable to determine the target state and assumes tagged
		 * queuing is not supported.
		 */
		pkt->pkt_flags = 0;
		un->un_tagflags = 0;

		if (un->un_f_opt_queueing == TRUE) {
			un->un_throttle = min(un->un_throttle, 3);
		} else {
			un->un_throttle = 1;
		}
		mutex_exit(SD_MUTEX(un));
		(void) scsi_ifsetcap(SD_ADDRESS(un), "tagged-qing", 0, 1);
		mutex_enter(SD_MUTEX(un));
		break;
	case CMD_INCOMPLETE:
		/*
		 * The transport stopped with an abnormal state; fall through
		 * and reset the target and/or bus unless selection did not
		 * complete (indicated by STATE_GOT_BUS), in which case we
		 * don't want to go through a target/bus reset
		 */
		if (pkt->pkt_state == STATE_GOT_BUS) {
			break;
		}
		/*FALLTHROUGH*/

	case CMD_TIMEOUT:
	default:
		/*
		 * The lun may still be running the command, so a lun reset
		 * should be attempted. If the lun reset fails or cannot be
		 * issued, then try a target reset. Lastly try a bus reset.
		 */
		if ((pkt->pkt_statistics &
		    (STAT_BUS_RESET|STAT_DEV_RESET|STAT_ABORTED)) == 0) {
			int reset_retval = 0;
			mutex_exit(SD_MUTEX(un));
			if (un->un_f_allow_bus_device_reset == TRUE) {
				if (un->un_f_lun_reset_enabled == TRUE) {
					reset_retval =
					    scsi_reset(SD_ADDRESS(un),
					    RESET_LUN);
				}
				if (reset_retval == 0) {
					reset_retval =
					    scsi_reset(SD_ADDRESS(un),
					    RESET_TARGET);
				}
			}
			if (reset_retval == 0) {
				(void) scsi_reset(SD_ADDRESS(un), RESET_ALL);
			}
			mutex_enter(SD_MUTEX(un));
		}
		break;
	}

	/* A device/bus reset has occurred; update the reservation status. */
	if ((pkt->pkt_reason == CMD_RESET) || (pkt->pkt_statistics &
	    (STAT_BUS_RESET | STAT_DEV_RESET))) {
		if ((un->un_resvd_status & SD_RESERVE) == SD_RESERVE) {
			un->un_resvd_status |=
			    (SD_LOST_RESERVE | SD_WANT_RESERVE);
			SD_INFO(SD_LOG_IOCTL_MHD, un,
			    "sd_mhd_watch_incomplete: Lost Reservation\n");
		}
	}

	/*
	 * The disk has been turned off; update the device state.
	 *
	 * Note: Should we be offlining the disk here?
	 */
	if (pkt->pkt_state == STATE_GOT_BUS) {
		SD_INFO(SD_LOG_IOCTL_MHD, un, "sd_mhd_watch_incomplete: "
		    "Disk not responding to selection\n");
		if (un->un_state != SD_STATE_OFFLINE) {
			New_state(un, SD_STATE_OFFLINE);
		}
	} else if (be_chatty) {
		/*
		 * suppress messages if they are all the same pkt reason;
		 * with TQ, many (up to 256) are returned with the same
		 * pkt_reason
		 */
		if (pkt->pkt_reason != un->un_last_pkt_reason) {
			SD_ERROR(SD_LOG_IOCTL_MHD, un,
			    "sd_mhd_watch_incomplete: "
			    "SCSI transport failed: reason '%s'\n",
			    scsi_rname(pkt->pkt_reason));
		}
	}
	un->un_last_pkt_reason = pkt->pkt_reason;
	mutex_exit(SD_MUTEX(un));
}


/*
 * Function: sd_sname()
 *
 * Description: This is a simple little routine to return a string containing
 *		a printable description of the command status byte for use in
 *		logging.
 *
 * Arguments: status - the status byte
 *
 * Return Code: char * - string containing status description.
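 *
 * Usage sketch, for illustration only (not an actual call site in this
 * driver): a caller could format a log line as
 *
 *	cmn_err(CE_NOTE, "cmd status '%s'", sd_sname(status));
 *
 * where, for example, sd_sname(STATUS_CHECK) returns "check condition".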
 */

static char *
sd_sname(uchar_t status)
{
	switch (status & STATUS_MASK) {
	case STATUS_GOOD:
		return ("good status");
	case STATUS_CHECK:
		return ("check condition");
	case STATUS_MET:
		return ("condition met");
	case STATUS_BUSY:
		return ("busy");
	case STATUS_INTERMEDIATE:
		return ("intermediate");
	case STATUS_INTERMEDIATE_MET:
		return ("intermediate - condition met");
	case STATUS_RESERVATION_CONFLICT:
		return ("reservation_conflict");
	case STATUS_TERMINATED:
		return ("command terminated");
	case STATUS_QFULL:
		return ("queue full");
	default:
		return ("<unknown status>");
	}
}


/*
 * Function: sd_mhd_resvd_recover()
 *
 * Description: This function adds a reservation entry to the
 *		sd_resv_reclaim_request list and signals the reservation
 *		reclaim thread that there is work pending. If the reservation
 *		reclaim thread has not been previously created this function
 *		will kick it off.
 *
 * Arguments: arg -   the device 'dev_t' is used for context to discriminate
 *		among multiple watches that share this callback function
 *
 * Context: This routine is called by timeout() and is run in interrupt
 *		context. It must not sleep or call other functions which may
 *		sleep.
 */

static void
sd_mhd_resvd_recover(void *arg)
{
	dev_t	dev = (dev_t)arg;
	struct sd_lun	*un;
	struct sd_thr_request *sd_treq = NULL;
	struct sd_thr_request *sd_cur = NULL;
	struct sd_thr_request *sd_prev = NULL;
	int	already_there = 0;

	if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) {
		return;
	}

	mutex_enter(SD_MUTEX(un));
	un->un_resvd_timeid = NULL;
	if (un->un_resvd_status & SD_WANT_RESERVE) {
		/*
		 * There was a reset so don't issue the reserve; allow the
		 * sd_mhd_watch_cb callback function to notice this and
		 * reschedule the timeout for reservation.
		 */
		mutex_exit(SD_MUTEX(un));
		return;
	}
	mutex_exit(SD_MUTEX(un));

	/*
	 * Add this device to the sd_resv_reclaim_request list and the
	 * sd_resv_reclaim_thread should take care of the rest.
	 *
	 * Note: We can't sleep in this context so if the memory allocation
	 * fails, allow the sd_mhd_watch_cb callback function to notice this
	 * and reschedule the timeout for reservation.
	 * (4378460)
	 */
	sd_treq = (struct sd_thr_request *)
	    kmem_zalloc(sizeof (struct sd_thr_request), KM_NOSLEEP);
	if (sd_treq == NULL) {
		return;
	}

	sd_treq->sd_thr_req_next = NULL;
	sd_treq->dev = dev;
	mutex_enter(&sd_tr.srq_resv_reclaim_mutex);
	if (sd_tr.srq_thr_req_head == NULL) {
		sd_tr.srq_thr_req_head = sd_treq;
	} else {
		sd_cur = sd_prev = sd_tr.srq_thr_req_head;
		for (; sd_cur != NULL; sd_cur = sd_cur->sd_thr_req_next) {
			if (sd_cur->dev == dev) {
				/*
				 * already in Queue so don't log
				 * another request for the device
				 */
				already_there = 1;
				break;
			}
			sd_prev = sd_cur;
		}
		if (!already_there) {
			SD_INFO(SD_LOG_IOCTL_MHD, un, "sd_mhd_resvd_recover: "
			    "logging request for %lx\n", dev);
			sd_prev->sd_thr_req_next = sd_treq;
		} else {
			kmem_free(sd_treq, sizeof (struct sd_thr_request));
		}
	}

	/*
	 * Create a kernel thread to do the reservation reclaim and free up
	 * this thread. We cannot block this (interrupt) context while the
	 * reservation reclaim is performed.
	 */
	if (sd_tr.srq_resv_reclaim_thread == NULL)
		sd_tr.srq_resv_reclaim_thread = thread_create(NULL, 0,
		    sd_resv_reclaim_thread, NULL,
		    0, &p0, TS_RUN, v.v_maxsyspri - 2);

	/* Tell the reservation reclaim thread that it has work to do */
	cv_signal(&sd_tr.srq_resv_reclaim_cv);
	mutex_exit(&sd_tr.srq_resv_reclaim_mutex);
}

/*
 * Function: sd_resv_reclaim_thread()
 *
 * Description: This function implements the reservation reclaim operations
 *
 * Arguments: none; the pending requests are read from the global sd_tr
 *		structure rather than passed in as an argument
 */

static void
sd_resv_reclaim_thread()
{
	struct sd_lun		*un;
	struct sd_thr_request	*sd_mhreq;

	/* Wait for work */
	mutex_enter(&sd_tr.srq_resv_reclaim_mutex);
	if (sd_tr.srq_thr_req_head == NULL) {
		cv_wait(&sd_tr.srq_resv_reclaim_cv,
		    &sd_tr.srq_resv_reclaim_mutex);
	}

	/* Loop while we have work */
	while ((sd_tr.srq_thr_cur_req = sd_tr.srq_thr_req_head) != NULL) {
		un = ddi_get_soft_state(sd_state,
		    SDUNIT(sd_tr.srq_thr_cur_req->dev));
		if (un == NULL) {
			/*
			 * softstate structure is NULL so just
			 * dequeue the request and continue
			 */
			sd_tr.srq_thr_req_head =
			    sd_tr.srq_thr_cur_req->sd_thr_req_next;
			kmem_free(sd_tr.srq_thr_cur_req,
			    sizeof (struct sd_thr_request));
			continue;
		}

		/* dequeue the request */
		sd_mhreq = sd_tr.srq_thr_cur_req;
		sd_tr.srq_thr_req_head =
		    sd_tr.srq_thr_cur_req->sd_thr_req_next;
		mutex_exit(&sd_tr.srq_resv_reclaim_mutex);

		/*
		 * Reclaim reservation only if SD_RESERVE is still set. There
		 * may have been a call to MHIOCRELEASE before we got here.
		 */
		mutex_enter(SD_MUTEX(un));
		if ((un->un_resvd_status & SD_RESERVE) == SD_RESERVE) {
			/*
			 * Note: The SD_LOST_RESERVE flag is cleared before
			 * reclaiming the reservation.
			 * If this were done after the call to
			 * sd_reserve_release, a reservation loss in the
			 * window between pkt completion of the reserve cmd
			 * and the mutex_enter below might not be recognized.
			 */
			un->un_resvd_status &= ~SD_LOST_RESERVE;
			mutex_exit(SD_MUTEX(un));

			if (sd_reserve_release(sd_mhreq->dev,
			    SD_RESERVE) == 0) {
				mutex_enter(SD_MUTEX(un));
				un->un_resvd_status |= SD_RESERVE;
				mutex_exit(SD_MUTEX(un));
				SD_INFO(SD_LOG_IOCTL_MHD, un,
				    "sd_resv_reclaim_thread: "
				    "Reservation Recovered\n");
			} else {
				mutex_enter(SD_MUTEX(un));
				un->un_resvd_status |= SD_LOST_RESERVE;
				mutex_exit(SD_MUTEX(un));
				SD_INFO(SD_LOG_IOCTL_MHD, un,
				    "sd_resv_reclaim_thread: Failed "
				    "Reservation Recovery\n");
			}
		} else {
			mutex_exit(SD_MUTEX(un));
		}
		mutex_enter(&sd_tr.srq_resv_reclaim_mutex);
		ASSERT(sd_mhreq == sd_tr.srq_thr_cur_req);
		kmem_free(sd_mhreq, sizeof (struct sd_thr_request));
		sd_mhreq = sd_tr.srq_thr_cur_req = NULL;
		/*
		 * wake up the destroy thread if anyone is waiting on
		 * us to complete.
		 */
		cv_signal(&sd_tr.srq_inprocess_cv);
		SD_TRACE(SD_LOG_IOCTL_MHD, un,
		    "sd_resv_reclaim_thread: cv_signalling current request \n");
	}

	/*
	 * clean up the sd_tr structure now that this thread will not exist
	 */
	ASSERT(sd_tr.srq_thr_req_head == NULL);
	ASSERT(sd_tr.srq_thr_cur_req == NULL);
	sd_tr.srq_resv_reclaim_thread = NULL;
	mutex_exit(&sd_tr.srq_resv_reclaim_mutex);
	thread_exit();
}


/*
 * Function: sd_rmv_resv_reclaim_req()
 *
 * Description: This function removes any pending reservation reclaim requests
 *		for the specified device.
 *
 * Arguments: dev - the device 'dev_t'
 */

static void
sd_rmv_resv_reclaim_req(dev_t dev)
{
	struct sd_thr_request *sd_mhreq;
	struct sd_thr_request *sd_prev;

	/* Remove a reservation reclaim request from the list */
	mutex_enter(&sd_tr.srq_resv_reclaim_mutex);
	if (sd_tr.srq_thr_cur_req && sd_tr.srq_thr_cur_req->dev == dev) {
		/*
		 * We are attempting to reinstate reservation for
		 * this device. We wait for sd_reserve_release()
		 * to return before we return.
		 */
		cv_wait(&sd_tr.srq_inprocess_cv,
		    &sd_tr.srq_resv_reclaim_mutex);
	} else {
		sd_prev = sd_mhreq = sd_tr.srq_thr_req_head;
		if (sd_mhreq && sd_mhreq->dev == dev) {
			sd_tr.srq_thr_req_head = sd_mhreq->sd_thr_req_next;
			kmem_free(sd_mhreq, sizeof (struct sd_thr_request));
			mutex_exit(&sd_tr.srq_resv_reclaim_mutex);
			return;
		}
		for (; sd_mhreq != NULL; sd_mhreq = sd_mhreq->sd_thr_req_next) {
			if (sd_mhreq && sd_mhreq->dev == dev) {
				break;
			}
			sd_prev = sd_mhreq;
		}
		if (sd_mhreq != NULL) {
			sd_prev->sd_thr_req_next = sd_mhreq->sd_thr_req_next;
			kmem_free(sd_mhreq, sizeof (struct sd_thr_request));
		}
	}
	mutex_exit(&sd_tr.srq_resv_reclaim_mutex);
}


/*
 * Function: sd_mhd_reset_notify_cb()
 *
 * Description: This is a call back function for scsi_reset_notify. This
 *		function updates the softstate reserved status and logs the
 *		reset. The driver scsi watch facility callback function
 *		(sd_mhd_watch_cb) and reservation reclaim thread functionality
 *		will reclaim the reservation.
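 *
 *		A sketch of how such a callback is typically registered (the
 *		actual registration happens elsewhere in the driver):
 *
 *		scsi_reset_notify(SD_ADDRESS(un), SCSI_RESET_NOTIFY,
 *		    sd_mhd_reset_notify_cb, (caddr_t)un);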
 *
 * Arguments: arg - driver soft state (unit) structure
 */

static void
sd_mhd_reset_notify_cb(caddr_t arg)
{
	struct sd_lun *un = (struct sd_lun *)arg;

	mutex_enter(SD_MUTEX(un));
	if ((un->un_resvd_status & SD_RESERVE) == SD_RESERVE) {
		un->un_resvd_status |= (SD_LOST_RESERVE | SD_WANT_RESERVE);
		SD_INFO(SD_LOG_IOCTL_MHD, un,
		    "sd_mhd_reset_notify_cb: Lost Reservation\n");
	}
	mutex_exit(SD_MUTEX(un));
}


/*
 * Function: sd_take_ownership()
 *
 * Description: This routine implements an algorithm to achieve a stable
 *		reservation on disks which don't implement priority reserve,
 *		and makes sure that other hosts lose any re-reservation
 *		attempts. This algorithm consists of a loop that keeps issuing
 *		the RESERVE for some period of time (min_ownership_delay,
 *		default 6 seconds). During that loop, it looks to see if there
 *		has been a bus device reset or bus reset (both of which cause
 *		an existing reservation to be lost). If the reservation is
 *		lost, it reissues RESERVE until a period of
 *		min_ownership_delay with no resets has gone by, or until
 *		max_ownership_delay has expired. This loop ensures that the
 *		host really did manage to reserve the device, in spite of
 *		resets. The looping for min_ownership_delay (default six
 *		seconds) is important to early generation clustering products,
 *		Solstice HA 1.x and Sun Cluster 2.x. Those products use an
 *		MHIOCENFAILFAST periodic timer of two seconds. By having
 *		MHIOCTKOWN issue Reserves in a loop for six seconds, and having
 *		MHIOCENFAILFAST poll every two seconds, the idea is that by the
 *		time the MHIOCTKOWN ioctl returns, the other host (if any) will
 *		have already noticed, via the MHIOCENFAILFAST polling, that it
 *		no longer "owns" the disk and will have panicked itself. Thus,
 *		the host issuing the MHIOCTKOWN is assured (with timing
 *		dependencies) that by the time it actually starts to use the
 *		disk for real work, the old owner is no longer accessing it.
 *
 *		min_ownership_delay is the minimum amount of time for which the
 *		disk must be reserved continuously devoid of resets before the
 *		MHIOCTKOWN ioctl will return success.
 *
 *		max_ownership_delay indicates the amount of time by which the
 *		take ownership should succeed or timeout with an error.
 *
 * Arguments: dev - the device 'dev_t'
 *	      *p - struct containing timing info.
 *
 * Return Code: 0 for success or error code
 */

static int
sd_take_ownership(dev_t dev, struct mhioctkown *p)
{
	struct sd_lun	*un;
	int		rval;
	int		err;
	int		reservation_count = 0;
	int		min_ownership_delay =  6000000; /* in usec */
	int		max_ownership_delay = 30000000; /* in usec */
	clock_t		start_time;	/* starting time of this algorithm */
	clock_t		end_time;	/* time limit for giving up */
	clock_t		ownership_time;	/* time limit for stable ownership */
	clock_t		current_time;
	clock_t		previous_current_time;

	if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) {
		return (ENXIO);
	}

	/*
	 * Attempt a device reservation. A priority reservation is requested.
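	 *
	 * Worked example with the defaults: the loop below reissues RESERVE
	 * about every 500 msec, returns success only once 6 seconds
	 * (min_ownership_delay) pass with no reset or reservation loss
	 * observed, and gives up with EACCES after 30 seconds
	 * (max_ownership_delay).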
24296 */ 24297 if ((rval = sd_reserve_release(dev, SD_PRIORITY_RESERVE)) 24298 != SD_SUCCESS) { 24299 SD_ERROR(SD_LOG_IOCTL_MHD, un, 24300 "sd_take_ownership: return(1)=%d\n", rval); 24301 return (rval); 24302 } 24303 24304 /* Update the softstate reserved status to indicate the reservation */ 24305 mutex_enter(SD_MUTEX(un)); 24306 un->un_resvd_status |= SD_RESERVE; 24307 un->un_resvd_status &= 24308 ~(SD_LOST_RESERVE | SD_WANT_RESERVE | SD_RESERVATION_CONFLICT); 24309 mutex_exit(SD_MUTEX(un)); 24310 24311 if (p != NULL) { 24312 if (p->min_ownership_delay != 0) { 24313 min_ownership_delay = p->min_ownership_delay * 1000; 24314 } 24315 if (p->max_ownership_delay != 0) { 24316 max_ownership_delay = p->max_ownership_delay * 1000; 24317 } 24318 } 24319 SD_INFO(SD_LOG_IOCTL_MHD, un, 24320 "sd_take_ownership: min, max delays: %d, %d\n", 24321 min_ownership_delay, max_ownership_delay); 24322 24323 start_time = ddi_get_lbolt(); 24324 current_time = start_time; 24325 ownership_time = current_time + drv_usectohz(min_ownership_delay); 24326 end_time = start_time + drv_usectohz(max_ownership_delay); 24327 24328 while (current_time - end_time < 0) { 24329 delay(drv_usectohz(500000)); 24330 24331 if ((err = sd_reserve_release(dev, SD_RESERVE)) != 0) { 24332 if ((sd_reserve_release(dev, SD_RESERVE)) != 0) { 24333 mutex_enter(SD_MUTEX(un)); 24334 rval = (un->un_resvd_status & 24335 SD_RESERVATION_CONFLICT) ? EACCES : EIO; 24336 mutex_exit(SD_MUTEX(un)); 24337 break; 24338 } 24339 } 24340 previous_current_time = current_time; 24341 current_time = ddi_get_lbolt(); 24342 mutex_enter(SD_MUTEX(un)); 24343 if (err || (un->un_resvd_status & SD_LOST_RESERVE)) { 24344 ownership_time = ddi_get_lbolt() + 24345 drv_usectohz(min_ownership_delay); 24346 reservation_count = 0; 24347 } else { 24348 reservation_count++; 24349 } 24350 un->un_resvd_status |= SD_RESERVE; 24351 un->un_resvd_status &= ~(SD_LOST_RESERVE | SD_WANT_RESERVE); 24352 mutex_exit(SD_MUTEX(un)); 24353 24354 SD_INFO(SD_LOG_IOCTL_MHD, un, 24355 "sd_take_ownership: ticks for loop iteration=%ld, " 24356 "reservation=%s\n", (current_time - previous_current_time), 24357 reservation_count ? 
"ok" : "reclaimed"); 24358 24359 if (current_time - ownership_time >= 0 && 24360 reservation_count >= 4) { 24361 rval = 0; /* Achieved a stable ownership */ 24362 break; 24363 } 24364 if (current_time - end_time >= 0) { 24365 rval = EACCES; /* No ownership in max possible time */ 24366 break; 24367 } 24368 } 24369 SD_TRACE(SD_LOG_IOCTL_MHD, un, 24370 "sd_take_ownership: return(2)=%d\n", rval); 24371 return (rval); 24372 } 24373 24374 24375 /* 24376 * Function: sd_reserve_release() 24377 * 24378 * Description: This function builds and sends scsi RESERVE, RELEASE, and 24379 * PRIORITY RESERVE commands based on a user specified command type 24380 * 24381 * Arguments: dev - the device 'dev_t' 24382 * cmd - user specified command type; one of SD_PRIORITY_RESERVE, 24383 * SD_RESERVE, SD_RELEASE 24384 * 24385 * Return Code: 0 or Error Code 24386 */ 24387 24388 static int 24389 sd_reserve_release(dev_t dev, int cmd) 24390 { 24391 struct uscsi_cmd *com = NULL; 24392 struct sd_lun *un = NULL; 24393 char cdb[CDB_GROUP0]; 24394 int rval; 24395 24396 ASSERT((cmd == SD_RELEASE) || (cmd == SD_RESERVE) || 24397 (cmd == SD_PRIORITY_RESERVE)); 24398 24399 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 24400 return (ENXIO); 24401 } 24402 24403 /* instantiate and initialize the command and cdb */ 24404 com = kmem_zalloc(sizeof (*com), KM_SLEEP); 24405 bzero(cdb, CDB_GROUP0); 24406 com->uscsi_flags = USCSI_SILENT; 24407 com->uscsi_timeout = un->un_reserve_release_time; 24408 com->uscsi_cdblen = CDB_GROUP0; 24409 com->uscsi_cdb = cdb; 24410 if (cmd == SD_RELEASE) { 24411 cdb[0] = SCMD_RELEASE; 24412 } else { 24413 cdb[0] = SCMD_RESERVE; 24414 } 24415 24416 /* Send the command. */ 24417 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_SYSSPACE, 24418 SD_PATH_STANDARD); 24419 24420 /* 24421 * "break" a reservation that is held by another host, by issuing a 24422 * reset if priority reserve is desired, and we could not get the 24423 * device. 24424 */ 24425 if ((cmd == SD_PRIORITY_RESERVE) && 24426 (rval != 0) && (com->uscsi_status == STATUS_RESERVATION_CONFLICT)) { 24427 /* 24428 * First try to reset the LUN. If we cannot, then try a target 24429 * reset, followed by a bus reset if the target reset fails. 24430 */ 24431 int reset_retval = 0; 24432 if (un->un_f_lun_reset_enabled == TRUE) { 24433 reset_retval = scsi_reset(SD_ADDRESS(un), RESET_LUN); 24434 } 24435 if (reset_retval == 0) { 24436 /* The LUN reset either failed or was not issued */ 24437 reset_retval = scsi_reset(SD_ADDRESS(un), RESET_TARGET); 24438 } 24439 if ((reset_retval == 0) && 24440 (scsi_reset(SD_ADDRESS(un), RESET_ALL) == 0)) { 24441 rval = EIO; 24442 kmem_free(com, sizeof (*com)); 24443 return (rval); 24444 } 24445 24446 bzero(com, sizeof (struct uscsi_cmd)); 24447 com->uscsi_flags = USCSI_SILENT; 24448 com->uscsi_cdb = cdb; 24449 com->uscsi_cdblen = CDB_GROUP0; 24450 com->uscsi_timeout = 5; 24451 24452 /* 24453 * Reissue the last reserve command, this time without request 24454 * sense. Assume that it is just a regular reserve command. 24455 */ 24456 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_SYSSPACE, 24457 SD_PATH_STANDARD); 24458 } 24459 24460 /* Return an error if still getting a reservation conflict. 
	if ((rval != 0) && (com->uscsi_status == STATUS_RESERVATION_CONFLICT)) {
		rval = EACCES;
	}

	kmem_free(com, sizeof (*com));
	return (rval);
}


#define	SD_NDUMP_RETRIES	12
/*
 *	System Crash Dump routine
 */

static int
sddump(dev_t dev, caddr_t addr, daddr_t blkno, int nblk)
{
	int		instance;
	int		partition;
	int		i;
	int		err;
	struct sd_lun	*un;
	struct scsi_pkt *wr_pktp;
	struct buf	*wr_bp;
	struct buf	wr_buf;
	daddr_t		tgt_byte_offset; /* rmw - byte offset for target */
	daddr_t		tgt_blkno;	/* rmw - blkno for target */
	size_t		tgt_byte_count; /* rmw - # of bytes to xfer */
	size_t		tgt_nblk; /* rmw - # of tgt blks to xfer */
	size_t		io_start_offset;
	int		doing_rmw = FALSE;
	int		rval;
	ssize_t		dma_resid;
	daddr_t		oblkno;
	diskaddr_t	nblks = 0;
	diskaddr_t	start_block;

	instance = SDUNIT(dev);
	if (((un = ddi_get_soft_state(sd_state, instance)) == NULL) ||
	    !SD_IS_VALID_LABEL(un) || ISCD(un)) {
		return (ENXIO);
	}

	_NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*un))

	SD_TRACE(SD_LOG_DUMP, un, "sddump: entry\n");

	partition = SDPART(dev);
	SD_INFO(SD_LOG_DUMP, un, "sddump: partition = %d\n", partition);

	/* Validate the blocks to dump against the partition size. */

	(void) cmlb_partinfo(un->un_cmlbhandle, partition,
	    &nblks, &start_block, NULL, NULL, (void *)SD_PATH_DIRECT);

	if ((blkno + nblk) > nblks) {
		SD_TRACE(SD_LOG_DUMP, un,
		    "sddump: dump range larger than partition: "
		    "blkno = 0x%x, nblk = 0x%x, dkl_nblk = 0x%x\n",
		    blkno, nblk, nblks);
		return (EINVAL);
	}

	mutex_enter(&un->un_pm_mutex);
	if (SD_DEVICE_IS_IN_LOW_POWER(un)) {
		struct scsi_pkt *start_pktp;

		mutex_exit(&un->un_pm_mutex);

		/*
		 * use pm framework to power on HBA 1st
		 */
		(void) pm_raise_power(SD_DEVINFO(un), 0, SD_SPINDLE_ON);

		/*
		 * Dump no longer uses sdpower to power on a device; it's
		 * in-line here so it can be done in polled mode.
		 */

		SD_INFO(SD_LOG_DUMP, un, "sddump: starting device\n");

		start_pktp = scsi_init_pkt(SD_ADDRESS(un), NULL, NULL,
		    CDB_GROUP0, un->un_status_len, 0, 0, NULL_FUNC, NULL);

		if (start_pktp == NULL) {
			/* We were not given a SCSI packet, fail. */
			return (EIO);
		}
		bzero(start_pktp->pkt_cdbp, CDB_GROUP0);
		start_pktp->pkt_cdbp[0] = SCMD_START_STOP;
		start_pktp->pkt_cdbp[4] = SD_TARGET_START;
		start_pktp->pkt_flags = FLAG_NOINTR;

		mutex_enter(SD_MUTEX(un));
		SD_FILL_SCSI1_LUN(un, start_pktp);
		mutex_exit(SD_MUTEX(un));
		/*
		 * Scsi_poll returns 0 (success) if the command completes and
		 * the status block is STATUS_GOOD.
		 */
		if (sd_scsi_poll(un, start_pktp) != 0) {
			scsi_destroy_pkt(start_pktp);
			return (EIO);
		}
		scsi_destroy_pkt(start_pktp);
		(void) sd_ddi_pm_resume(un);
	} else {
		mutex_exit(&un->un_pm_mutex);
	}

	mutex_enter(SD_MUTEX(un));
	un->un_throttle = 0;

	/*
	 * The first time through, reset the specific target device.
	 * However, when cpr calls sddump we know that sd is in a
	 * good state so no bus reset is required.
	 * Clear sense data via Request Sense cmd.
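	 * (Everything sddump issues runs polled with FLAG_NOINTR, since
	 * interrupts cannot be relied upon while the system is dumping.)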
	 * In sddump we don't care about allow_bus_device_reset anymore.
	 */

	if ((un->un_state != SD_STATE_SUSPENDED) &&
	    (un->un_state != SD_STATE_DUMPING)) {

		New_state(un, SD_STATE_DUMPING);

		if (un->un_f_is_fibre == FALSE) {
			mutex_exit(SD_MUTEX(un));
			/*
			 * Attempt a bus reset for parallel scsi.
			 *
			 * Note: A bus reset is required because on some host
			 * systems (i.e. E420R) a bus device reset is
			 * insufficient to reset the state of the target.
			 *
			 * Note: Don't issue the reset for fibre-channel,
			 * because this tends to hang the bus (loop) for
			 * too long while everyone is logging out and in
			 * and the deadman timer for dumping will fire
			 * before the dump is complete.
			 */
			if (scsi_reset(SD_ADDRESS(un), RESET_ALL) == 0) {
				mutex_enter(SD_MUTEX(un));
				Restore_state(un);
				mutex_exit(SD_MUTEX(un));
				return (EIO);
			}

			/* Delay to give the device some recovery time. */
			drv_usecwait(10000);

			if (sd_send_polled_RQS(un) == SD_FAILURE) {
				SD_INFO(SD_LOG_DUMP, un,
				    "sddump: sd_send_polled_RQS failed\n");
			}
			mutex_enter(SD_MUTEX(un));
		}
	}

	/*
	 * Convert the partition-relative block number to a
	 * disk physical block number.
	 */
	blkno += start_block;

	SD_INFO(SD_LOG_DUMP, un, "sddump: disk blkno = 0x%x\n", blkno);


	/*
	 * Check if the device has a non-512 block size.
	 */
	wr_bp = NULL;
	if (NOT_DEVBSIZE(un)) {
		tgt_byte_offset = blkno * un->un_sys_blocksize;
		tgt_byte_count = nblk * un->un_sys_blocksize;
		if ((tgt_byte_offset % un->un_tgt_blocksize) ||
		    (tgt_byte_count % un->un_tgt_blocksize)) {
			doing_rmw = TRUE;
			/*
			 * Calculate the block number and number of blocks
			 * in terms of the media block size.
			 */
			tgt_blkno = tgt_byte_offset / un->un_tgt_blocksize;
			tgt_nblk =
			    ((tgt_byte_offset + tgt_byte_count +
			    (un->un_tgt_blocksize - 1)) /
			    un->un_tgt_blocksize) - tgt_blkno;

			/*
			 * Invoke the routine which is going to do the read
			 * part of read-modify-write.
			 * Note that this routine returns a pointer to
			 * a valid bp in wr_bp.
			 */
			err = sddump_do_read_of_rmw(un, tgt_blkno, tgt_nblk,
			    &wr_bp);
			if (err) {
				mutex_exit(SD_MUTEX(un));
				return (err);
			}
			/*
			 * Offset is calculated as:
			 * (original block # * system block size) -
			 * (new block # * target block size)
			 */
			io_start_offset =
			    ((uint64_t)(blkno * un->un_sys_blocksize)) -
			    ((uint64_t)(tgt_blkno * un->un_tgt_blocksize));

			ASSERT((io_start_offset >= 0) &&
			    (io_start_offset < un->un_tgt_blocksize));
			/*
			 * Do the modify portion of read modify write.
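			 *
			 * Illustrative numbers (not taken from the code):
			 * with 512-byte system blocks on 2048-byte media
			 * blocks, a write at blkno 3 yields tgt_byte_offset
			 * 1536, tgt_blkno 0 and io_start_offset 1536, so the
			 * caller's data lands 1536 bytes into the media
			 * block just read back.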
			 */
			bcopy(addr, &wr_bp->b_un.b_addr[io_start_offset],
			    (size_t)nblk * un->un_sys_blocksize);
		} else {
			doing_rmw = FALSE;
			tgt_blkno = tgt_byte_offset / un->un_tgt_blocksize;
			tgt_nblk = tgt_byte_count / un->un_tgt_blocksize;
		}

		/* Convert blkno and nblk to target blocks */
		blkno = tgt_blkno;
		nblk = tgt_nblk;
	} else {
		wr_bp = &wr_buf;
		bzero(wr_bp, sizeof (struct buf));
		wr_bp->b_flags		= B_BUSY;
		wr_bp->b_un.b_addr	= addr;
		wr_bp->b_bcount		= nblk << DEV_BSHIFT;
		wr_bp->b_resid		= 0;
	}

	mutex_exit(SD_MUTEX(un));

	/*
	 * Obtain a SCSI packet for the write command.
	 * It should be safe to call the allocator here without
	 * worrying about being locked for DVMA mapping because
	 * the address we're passed is already a DVMA mapping
	 *
	 * We are also not going to worry about semaphore ownership
	 * in the dump buffer. Dumping is single threaded at present.
	 */

	wr_pktp = NULL;

	dma_resid = wr_bp->b_bcount;
	oblkno = blkno;

	while (dma_resid != 0) {

	for (i = 0; i < SD_NDUMP_RETRIES; i++) {
		wr_bp->b_flags &= ~B_ERROR;

		if (un->un_partial_dma_supported == 1) {
			blkno = oblkno +
			    ((wr_bp->b_bcount - dma_resid) /
			    un->un_tgt_blocksize);
			nblk = dma_resid / un->un_tgt_blocksize;

			if (wr_pktp) {
				/*
				 * Partial DMA transfers after initial transfer
				 */
				rval = sd_setup_next_rw_pkt(un, wr_pktp, wr_bp,
				    blkno, nblk);
			} else {
				/* Initial transfer */
				rval = sd_setup_rw_pkt(un, &wr_pktp, wr_bp,
				    un->un_pkt_flags, NULL_FUNC, NULL,
				    blkno, nblk);
			}
		} else {
			rval = sd_setup_rw_pkt(un, &wr_pktp, wr_bp,
			    0, NULL_FUNC, NULL, blkno, nblk);
		}

		if (rval == 0) {
			/* We were given a SCSI packet, continue. */
			break;
		}

		if (i == 0) {
			if (wr_bp->b_flags & B_ERROR) {
				scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
				    "no resources for dumping; "
				    "error code: 0x%x, retrying",
				    geterror(wr_bp));
			} else {
				scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
				    "no resources for dumping; retrying");
			}
		} else if (i != (SD_NDUMP_RETRIES - 1)) {
			if (wr_bp->b_flags & B_ERROR) {
				scsi_log(SD_DEVINFO(un), sd_label, CE_CONT,
				    "no resources for dumping; error code: "
				    "0x%x, retrying\n", geterror(wr_bp));
			}
		} else {
			if (wr_bp->b_flags & B_ERROR) {
				scsi_log(SD_DEVINFO(un), sd_label, CE_CONT,
				    "no resources for dumping; "
				    "error code: 0x%x, retries failed, "
				    "giving up.\n", geterror(wr_bp));
			} else {
				scsi_log(SD_DEVINFO(un), sd_label, CE_CONT,
				    "no resources for dumping; "
				    "retries failed, giving up.\n");
			}
			mutex_enter(SD_MUTEX(un));
			Restore_state(un);
			if (NOT_DEVBSIZE(un) && (doing_rmw == TRUE)) {
				mutex_exit(SD_MUTEX(un));
				scsi_free_consistent_buf(wr_bp);
			} else {
				mutex_exit(SD_MUTEX(un));
			}
			return (EIO);
		}
		drv_usecwait(10000);
	}

	if (un->un_partial_dma_supported == 1) {
		/*
		 * save the resid from PARTIAL_DMA
		 */
		dma_resid = wr_pktp->pkt_resid;
		if (dma_resid != 0)
			nblk -= SD_BYTES2TGTBLOCKS(un, dma_resid);
		wr_pktp->pkt_resid = 0;
	} else {
		dma_resid = 0;
	}

	/* SunBug 1222170 */
	wr_pktp->pkt_flags = FLAG_NOINTR;

	err = EIO;
	for (i = 0; i < SD_NDUMP_RETRIES; i++) {

		/*
		 * Scsi_poll returns 0 (success) if the command completes and
		 * the status block is STATUS_GOOD. We should only check
		 * errors if this condition is not true. Even then we should
		 * send our own request sense packet only if we have a check
		 * condition and auto request sense has not been performed by
		 * the hba.
		 */
		SD_TRACE(SD_LOG_DUMP, un, "sddump: sending write\n");

		if ((sd_scsi_poll(un, wr_pktp) == 0) &&
		    (wr_pktp->pkt_resid == 0)) {
			err = SD_SUCCESS;
			break;
		}

		/*
		 * Check CMD_DEV_GONE 1st, give up if device is gone.
		 */
		if (wr_pktp->pkt_reason == CMD_DEV_GONE) {
			scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
			    "Error while dumping state...Device is gone\n");
			break;
		}

		if (SD_GET_PKT_STATUS(wr_pktp) == STATUS_CHECK) {
			SD_INFO(SD_LOG_DUMP, un,
			    "sddump: write failed with CHECK, try # %d\n", i);
			if (((wr_pktp->pkt_state & STATE_ARQ_DONE) == 0)) {
				(void) sd_send_polled_RQS(un);
			}

			continue;
		}

		if (SD_GET_PKT_STATUS(wr_pktp) == STATUS_BUSY) {
			int reset_retval = 0;

			SD_INFO(SD_LOG_DUMP, un,
			    "sddump: write failed with BUSY, try # %d\n", i);

			if (un->un_f_lun_reset_enabled == TRUE) {
				reset_retval = scsi_reset(SD_ADDRESS(un),
				    RESET_LUN);
			}
			if (reset_retval == 0) {
				(void) scsi_reset(SD_ADDRESS(un), RESET_TARGET);
			}
			(void) sd_send_polled_RQS(un);

		} else {
			SD_INFO(SD_LOG_DUMP, un,
			    "sddump: write failed with 0x%x, try # %d\n",
			    SD_GET_PKT_STATUS(wr_pktp), i);
			mutex_enter(SD_MUTEX(un));
			sd_reset_target(un, wr_pktp);
			mutex_exit(SD_MUTEX(un));
		}

		/*
		 * If we are not getting anywhere with lun/target resets,
		 * let's reset the bus.
		 */
		if (i == SD_NDUMP_RETRIES/2) {
			(void) scsi_reset(SD_ADDRESS(un), RESET_ALL);
			(void) sd_send_polled_RQS(un);
		}
	}
	}

	scsi_destroy_pkt(wr_pktp);
	mutex_enter(SD_MUTEX(un));
	if ((NOT_DEVBSIZE(un)) && (doing_rmw == TRUE)) {
		mutex_exit(SD_MUTEX(un));
		scsi_free_consistent_buf(wr_bp);
	} else {
		mutex_exit(SD_MUTEX(un));
	}
	SD_TRACE(SD_LOG_DUMP, un, "sddump: exit: err = %d\n", err);
	return (err);
}

/*
 * Function: sd_scsi_poll()
 *
 * Description: This is a wrapper for the scsi_poll call.
 *
 * Arguments: sd_lun - The unit structure
 *	      scsi_pkt - The scsi packet being sent to the device.
 *
 * Return Code: 0 - Command completed successfully with good status
 *	       -1 - Command failed. This could indicate a check condition
 *		    or other status value requiring recovery action.
 *
 * NOTE: This code is only called off sddump().
 */

static int
sd_scsi_poll(struct sd_lun *un, struct scsi_pkt *pktp)
{
	int status;

	ASSERT(un != NULL);
	ASSERT(!mutex_owned(SD_MUTEX(un)));
	ASSERT(pktp != NULL);

	status = SD_SUCCESS;

	if (scsi_ifgetcap(&pktp->pkt_address, "tagged-qing", 1) == 1) {
		pktp->pkt_flags |= un->un_tagflags;
		pktp->pkt_flags &= ~FLAG_NODISCON;
	}

	status = sd_ddi_scsi_poll(pktp);
	/*
	 * Scsi_poll returns 0 (success) if the command completes and the
	 * status block is STATUS_GOOD. We should only check errors if this
	 * condition is not true. Even then we should send our own request
	 * sense packet only if we have a check condition and auto
	 * request sense has not been performed by the hba.
	 * Don't get RQS data if pkt_reason is CMD_DEV_GONE.
	 */
	if ((status != SD_SUCCESS) &&
	    (SD_GET_PKT_STATUS(pktp) == STATUS_CHECK) &&
	    (pktp->pkt_state & STATE_ARQ_DONE) == 0 &&
	    (pktp->pkt_reason != CMD_DEV_GONE))
		(void) sd_send_polled_RQS(un);

	return (status);
}

/*
 * Function: sd_send_polled_RQS()
 *
 * Description: This sends the request sense command to a device.
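 *		It reuses the unit's pre-allocated request sense packet and
 *		buffer (un_rqs_pktp/un_rqs_bp), so only one polled request
 *		sense can be outstanding per unit at a time.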
 *
 * Arguments: sd_lun - The unit structure
 *
 * Return Code: 0 - Command completed successfully with good status
 *	       -1 - Command failed.
 *
 */

static int
sd_send_polled_RQS(struct sd_lun *un)
{
	int	ret_val;
	struct	scsi_pkt	*rqs_pktp;
	struct	buf		*rqs_bp;

	ASSERT(un != NULL);
	ASSERT(!mutex_owned(SD_MUTEX(un)));

	ret_val = SD_SUCCESS;

	rqs_pktp = un->un_rqs_pktp;
	rqs_bp	 = un->un_rqs_bp;

	mutex_enter(SD_MUTEX(un));

	if (un->un_sense_isbusy) {
		ret_val = SD_FAILURE;
		mutex_exit(SD_MUTEX(un));
		return (ret_val);
	}

	/*
	 * If the request sense buffer (and packet) is not in use,
	 * let's set the un_sense_isbusy and send our packet
	 */
	un->un_sense_isbusy = 1;
	rqs_pktp->pkt_resid = 0;
	rqs_pktp->pkt_reason = 0;
	rqs_pktp->pkt_flags |= FLAG_NOINTR;
	bzero(rqs_bp->b_un.b_addr, SENSE_LENGTH);

	mutex_exit(SD_MUTEX(un));

	SD_INFO(SD_LOG_COMMON, un, "sd_send_polled_RQS: req sense buf at"
	    " 0x%p\n", rqs_bp->b_un.b_addr);

	/*
	 * Can't send this to sd_scsi_poll, we wrap ourselves around the
	 * axle - it has a call into us!
	 */
	if ((ret_val = sd_ddi_scsi_poll(rqs_pktp)) != 0) {
		SD_INFO(SD_LOG_COMMON, un,
		    "sd_send_polled_RQS: RQS failed\n");
	}

	SD_DUMP_MEMORY(un, SD_LOG_COMMON, "sd_send_polled_RQS:",
	    (uchar_t *)rqs_bp->b_un.b_addr, SENSE_LENGTH, SD_LOG_HEX);

	mutex_enter(SD_MUTEX(un));
	un->un_sense_isbusy = 0;
	mutex_exit(SD_MUTEX(un));

	return (ret_val);
}

/*
 * Defines needed for localized version of the scsi_poll routine.
 */
#define	CSEC		10000	/* usecs */
#define	SEC_TO_CSEC	(1000000/CSEC)

/*
 * Function: sd_ddi_scsi_poll()
 *
 * Description: Localized version of the scsi_poll routine. The purpose is to
 *		send a scsi_pkt to a device as a polled command. This version
 *		is to ensure more robust handling of transport errors.
 *		Specifically this routine cures the not-ready to ready
 *		transition for power-up and reset of Sonoma LUNs. This can
 *		take up to 45 seconds for power-on and 20 seconds for reset
 *		of a Sonoma LUN.
 *
 * Arguments: scsi_pkt - The scsi_pkt being sent to a device
 *
 * Return Code: 0 - Command completed successfully with good status
 *	       -1 - Command failed.
 *
 * NOTE: This code is almost identical to scsi_poll, however before 6668774 can
 * be fixed (removing this code), we need to determine how to handle the
 * KEY_UNIT_ATTENTION condition below in conditions not as limited as sddump().
 *
 * NOTE: This code is only called off sddump().
 */
static int
sd_ddi_scsi_poll(struct scsi_pkt *pkt)
{
	int rval = -1;
	int savef;
	long savet;
	void (*savec)();
	int timeout;
	int busy_count;
	int poll_delay;
	int rc;
	uint8_t *sensep;
	struct scsi_arq_status *arqstat;
	extern int do_polled_io;

	ASSERT(pkt->pkt_scbp);

	/*
	 * save old flags..
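	 * .. along with the completion callback and the timeout, so that
	 * all three can be restored before we return.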
	 */
	savef = pkt->pkt_flags;
	savec = pkt->pkt_comp;
	savet = pkt->pkt_time;

	pkt->pkt_flags |= FLAG_NOINTR;

	/*
	 * XXX there is nothing in the SCSA spec that states that we should not
	 * do a callback for polled cmds; however, removing this will break sd
	 * and probably other target drivers
	 */
	pkt->pkt_comp = NULL;

	/*
	 * we don't like a polled command without timeout.
	 * 60 seconds seems long enough.
	 */
	if (pkt->pkt_time == 0)
		pkt->pkt_time = SCSI_POLL_TIMEOUT;

	/*
	 * Send polled cmd.
	 *
	 * We do some error recovery for various errors. Tran_busy,
	 * queue full, and non-dispatched commands are retried every 10 msec.
	 * as they are typically transient failures. Busy status and Not
	 * Ready are retried every second as this status takes a while to
	 * change.
	 */
	timeout = pkt->pkt_time * SEC_TO_CSEC;

	for (busy_count = 0; busy_count < timeout; busy_count++) {
		/*
		 * Initialize pkt status variables.
		 */
		*pkt->pkt_scbp = pkt->pkt_reason = pkt->pkt_state = 0;

		if ((rc = scsi_transport(pkt)) != TRAN_ACCEPT) {
			if (rc != TRAN_BUSY) {
				/* Transport failed - give up. */
				break;
			} else {
				/* Transport busy - try again. */
				poll_delay = 1 * CSEC;		/* 10 msec. */
			}
		} else {
			/*
			 * Transport accepted - check pkt status.
			 */
			rc = (*pkt->pkt_scbp) & STATUS_MASK;
			if ((pkt->pkt_reason == CMD_CMPLT) &&
			    (rc == STATUS_CHECK) &&
			    (pkt->pkt_state & STATE_ARQ_DONE)) {
				arqstat =
				    (struct scsi_arq_status *)(pkt->pkt_scbp);
				sensep = (uint8_t *)&arqstat->sts_sensedata;
			} else {
				sensep = NULL;
			}

			if ((pkt->pkt_reason == CMD_CMPLT) &&
			    (rc == STATUS_GOOD)) {
				/* No error - we're done */
				rval = 0;
				break;

			} else if (pkt->pkt_reason == CMD_DEV_GONE) {
				/* Lost connection - give up */
				break;

			} else if ((pkt->pkt_reason == CMD_INCOMPLETE) &&
			    (pkt->pkt_state == 0)) {
				/* Pkt not dispatched - try again. */
				poll_delay = 1 * CSEC;		/* 10 msec. */

			} else if ((pkt->pkt_reason == CMD_CMPLT) &&
			    (rc == STATUS_QFULL)) {
				/* Queue full - try again. */
				poll_delay = 1 * CSEC;		/* 10 msec. */

			} else if ((pkt->pkt_reason == CMD_CMPLT) &&
			    (rc == STATUS_BUSY)) {
				/* Busy - try again. */
				poll_delay = 100 * CSEC;	/* 1 sec. */
				busy_count += (SEC_TO_CSEC - 1);

			} else if ((sensep != NULL) &&
			    (scsi_sense_key(sensep) == KEY_UNIT_ATTENTION)) {
				/*
				 * Unit Attention - try again.
				 * Pretend it took 1 sec.
				 * NOTE: 'continue' avoids poll_delay
				 */
				busy_count += (SEC_TO_CSEC - 1);
				continue;

			} else if ((sensep != NULL) &&
			    (scsi_sense_key(sensep) == KEY_NOT_READY) &&
			    (scsi_sense_asc(sensep) == 0x04) &&
			    (scsi_sense_ascq(sensep) == 0x01)) {
				/*
				 * Not ready -> ready - try again.
				 * 04h/01h: LUN IS IN PROCESS OF BECOMING READY
				 * ...same as STATUS_BUSY
				 */
				poll_delay = 100 * CSEC;	/* 1 sec. */
				busy_count += (SEC_TO_CSEC - 1);

			} else {
				/* BAD status - give up. */
				break;
			}
		}

		if (((curthread->t_flag & T_INTR_THREAD) == 0) &&
		    !do_polled_io) {
			delay(drv_usectohz(poll_delay));
		} else {
			/* we busy wait during cpr_dump or interrupt threads */
			drv_usecwait(poll_delay);
		}
	}

	pkt->pkt_flags = savef;
	pkt->pkt_comp = savec;
	pkt->pkt_time = savet;

	/* return on error */
	if (rval)
		return (rval);

	/*
	 * This is not a performance critical code path.
	 *
	 * As an accommodation for scsi_poll callers, to avoid ddi_dma_sync()
	 * issues associated with looking at DMA memory prior to
	 * scsi_pkt_destroy(), we scsi_sync_pkt() prior to return.
	 */
	scsi_sync_pkt(pkt);
	return (0);
}



/*
 * Function: sd_persistent_reservation_in_read_keys
 *
 * Description: This routine is the driver entry point for handling CD-ROM
 *		multi-host persistent reservation requests (MHIOCGRP_INKEYS)
 *		by sending the SCSI-3 PRIN commands to the device.
 *		Processes the read keys command response by copying the
 *		reservation key information into the user provided buffer.
 *		Support for the 32/64 bit _MULTI_DATAMODEL is implemented.
 *
 * Arguments: un   -  Pointer to soft state struct for the target.
 *	      usrp -	user provided pointer to multihost Persistent In Read
 *			Keys structure (mhioc_inkeys_t)
 *	      flag -	this argument is a pass through to ddi_copyxxx()
 *			directly from the mode argument of ioctl().
 *
 * Return Code: 0   - Success
 *		EACCES
 *		ENOTSUP
 *		errno return code from sd_send_scsi_cmd()
 *
 * Context: Can sleep. Does not return until command is completed.
 */

static int
sd_persistent_reservation_in_read_keys(struct sd_lun *un,
    mhioc_inkeys_t *usrp, int flag)
{
#ifdef _MULTI_DATAMODEL
	struct mhioc_key_list32	li32;
#endif
	sd_prin_readkeys_t	*in;
	mhioc_inkeys_t		*ptr;
	mhioc_key_list_t	li;
	uchar_t			*data_bufp;
	int			data_len;
	int			rval = 0;
	size_t			copysz;
	sd_ssc_t		*ssc;

	if ((ptr = (mhioc_inkeys_t *)usrp) == NULL) {
		return (EINVAL);
	}
	bzero(&li, sizeof (mhioc_key_list_t));

	ssc = sd_ssc_init(un);

	/*
	 * Get the listsize from user
	 */
#ifdef _MULTI_DATAMODEL

	switch (ddi_model_convert_from(flag & FMODELS)) {
	case DDI_MODEL_ILP32:
		copysz = sizeof (struct mhioc_key_list32);
		if (ddi_copyin(ptr->li, &li32, copysz, flag)) {
			SD_ERROR(SD_LOG_IOCTL_MHD, un,
			    "sd_persistent_reservation_in_read_keys: "
			    "failed ddi_copyin: mhioc_key_list32_t\n");
			rval = EFAULT;
			goto done;
		}
		li.listsize = li32.listsize;
		li.list = (mhioc_resv_key_t *)(uintptr_t)li32.list;
		break;

	case DDI_MODEL_NONE:
		copysz = sizeof (mhioc_key_list_t);
		if (ddi_copyin(ptr->li, &li, copysz, flag)) {
			SD_ERROR(SD_LOG_IOCTL_MHD, un,
			    "sd_persistent_reservation_in_read_keys: "
			    "failed ddi_copyin: mhioc_key_list_t\n");
			rval = EFAULT;
			goto done;
		}
		break;
	}

#else /* ! _MULTI_DATAMODEL */
	copysz = sizeof (mhioc_key_list_t);
	if (ddi_copyin(ptr->li, &li, copysz, flag)) {
		SD_ERROR(SD_LOG_IOCTL_MHD, un,
		    "sd_persistent_reservation_in_read_keys: "
		    "failed ddi_copyin: mhioc_key_list_t\n");
		rval = EFAULT;
		goto done;
	}
#endif

	data_len  = li.listsize * MHIOC_RESV_KEY_SIZE;
	data_len += (sizeof (sd_prin_readkeys_t) - sizeof (caddr_t));
	data_bufp = kmem_zalloc(data_len, KM_SLEEP);

	rval = sd_send_scsi_PERSISTENT_RESERVE_IN(ssc, SD_READ_KEYS,
	    data_len, data_bufp);
	if (rval != 0) {
		if (rval == EIO)
			sd_ssc_assessment(ssc, SD_FMT_IGNORE_COMPROMISE);
		else
			sd_ssc_assessment(ssc, SD_FMT_IGNORE);
		goto done;
	}
	in = (sd_prin_readkeys_t *)data_bufp;
	ptr->generation = BE_32(in->generation);
	li.listlen = BE_32(in->len) / MHIOC_RESV_KEY_SIZE;

	/*
	 * Return the min(listsize, listlen) keys
	 */
#ifdef _MULTI_DATAMODEL

	switch (ddi_model_convert_from(flag & FMODELS)) {
	case DDI_MODEL_ILP32:
		li32.listlen = li.listlen;
		if (ddi_copyout(&li32, ptr->li, copysz, flag)) {
			SD_ERROR(SD_LOG_IOCTL_MHD, un,
			    "sd_persistent_reservation_in_read_keys: "
			    "failed ddi_copyout: mhioc_key_list32_t\n");
			rval = EFAULT;
			goto done;
		}
		break;

	case DDI_MODEL_NONE:
		if (ddi_copyout(&li, ptr->li, copysz, flag)) {
			SD_ERROR(SD_LOG_IOCTL_MHD, un,
			    "sd_persistent_reservation_in_read_keys: "
			    "failed ddi_copyout: mhioc_key_list_t\n");
			rval = EFAULT;
			goto done;
		}
		break;
	}

#else /* ! _MULTI_DATAMODEL */

	if (ddi_copyout(&li, ptr->li, copysz, flag)) {
		SD_ERROR(SD_LOG_IOCTL_MHD, un,
		    "sd_persistent_reservation_in_read_keys: "
		    "failed ddi_copyout: mhioc_key_list_t\n");
		rval = EFAULT;
		goto done;
	}

#endif /* _MULTI_DATAMODEL */

	copysz = min(li.listlen * MHIOC_RESV_KEY_SIZE,
	    li.listsize * MHIOC_RESV_KEY_SIZE);
	if (ddi_copyout(&in->keylist, li.list, copysz, flag)) {
		SD_ERROR(SD_LOG_IOCTL_MHD, un,
		    "sd_persistent_reservation_in_read_keys: "
		    "failed ddi_copyout: keylist\n");
		rval = EFAULT;
	}
done:
	sd_ssc_fini(ssc);
	kmem_free(data_bufp, data_len);
	return (rval);
}


/*
 * Function: sd_persistent_reservation_in_read_resv
 *
 * Description: This routine is the driver entry point for handling CD-ROM
 *		multi-host persistent reservation requests (MHIOCGRP_INRESV)
 *		by sending the SCSI-3 PRIN commands to the device.
 *		Process the read persistent reservations command response by
 *		copying the reservation information into the user provided
 *		buffer. Support for the 32/64 _MULTI_DATAMODEL is implemented.
 *
 * Arguments: un   -  Pointer to soft state struct for the target.
 *	      usrp -	user provided pointer to multihost Persistent In Read
 *			Reservations structure (mhioc_inresvs_t)
 *	      flag -	this argument is a pass through to ddi_copyxxx()
 *			directly from the mode argument of ioctl().
 *
 * Return Code: 0   - Success
 *		EACCES
 *		ENOTSUP
 *		errno return code from sd_send_scsi_cmd()
 *
 * Context: Can sleep. Does not return until command is completed.
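 *
 * A sizing note, for illustration: a request for listsize descriptors
 * makes the PRIN command below use an allocation length of
 * listsize * SCSI3_RESV_DESC_LEN bytes plus the fixed PRIN header
 * (the generation and length words).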
 */

static int
sd_persistent_reservation_in_read_resv(struct sd_lun *un,
    mhioc_inresvs_t *usrp, int flag)
{
#ifdef _MULTI_DATAMODEL
	struct mhioc_resv_desc_list32 resvlist32;
#endif
	sd_prin_readresv_t	*in;
	mhioc_inresvs_t		*ptr;
	sd_readresv_desc_t	*readresv_ptr;
	mhioc_resv_desc_list_t	resvlist;
	mhioc_resv_desc_t	resvdesc;
	uchar_t			*data_bufp = NULL;
	int			data_len;
	int			rval = 0;
	int			i;
	size_t			copysz;
	mhioc_resv_desc_t	*bufp;
	sd_ssc_t		*ssc;

	if ((ptr = usrp) == NULL) {
		return (EINVAL);
	}

	ssc = sd_ssc_init(un);

	/*
	 * Get the listsize from user
	 */
#ifdef _MULTI_DATAMODEL
	switch (ddi_model_convert_from(flag & FMODELS)) {
	case DDI_MODEL_ILP32:
		copysz = sizeof (struct mhioc_resv_desc_list32);
		if (ddi_copyin(ptr->li, &resvlist32, copysz, flag)) {
			SD_ERROR(SD_LOG_IOCTL_MHD, un,
			    "sd_persistent_reservation_in_read_resv: "
			    "failed ddi_copyin: mhioc_resv_desc_list_t\n");
			rval = EFAULT;
			goto done;
		}
		resvlist.listsize = resvlist32.listsize;
		resvlist.list = (mhioc_resv_desc_t *)(uintptr_t)resvlist32.list;
		break;

	case DDI_MODEL_NONE:
		copysz = sizeof (mhioc_resv_desc_list_t);
		if (ddi_copyin(ptr->li, &resvlist, copysz, flag)) {
			SD_ERROR(SD_LOG_IOCTL_MHD, un,
			    "sd_persistent_reservation_in_read_resv: "
			    "failed ddi_copyin: mhioc_resv_desc_list_t\n");
			rval = EFAULT;
			goto done;
		}
		break;
	}
#else /* ! _MULTI_DATAMODEL */
	copysz = sizeof (mhioc_resv_desc_list_t);
	if (ddi_copyin(ptr->li, &resvlist, copysz, flag)) {
		SD_ERROR(SD_LOG_IOCTL_MHD, un,
		    "sd_persistent_reservation_in_read_resv: "
		    "failed ddi_copyin: mhioc_resv_desc_list_t\n");
		rval = EFAULT;
		goto done;
	}
#endif /* ! _MULTI_DATAMODEL */

	data_len  = resvlist.listsize * SCSI3_RESV_DESC_LEN;
	data_len += (sizeof (sd_prin_readresv_t) - sizeof (caddr_t));
	data_bufp = kmem_zalloc(data_len, KM_SLEEP);

	rval = sd_send_scsi_PERSISTENT_RESERVE_IN(ssc, SD_READ_RESV,
	    data_len, data_bufp);
	if (rval != 0) {
		if (rval == EIO)
			sd_ssc_assessment(ssc, SD_FMT_IGNORE_COMPROMISE);
		else
			sd_ssc_assessment(ssc, SD_FMT_IGNORE);
		goto done;
	}
	in = (sd_prin_readresv_t *)data_bufp;
	ptr->generation = BE_32(in->generation);
	resvlist.listlen = BE_32(in->len) / SCSI3_RESV_DESC_LEN;

	/*
	 * Return the min(listsize, listlen) keys
	 */
#ifdef _MULTI_DATAMODEL

	switch (ddi_model_convert_from(flag & FMODELS)) {
	case DDI_MODEL_ILP32:
		resvlist32.listlen = resvlist.listlen;
		if (ddi_copyout(&resvlist32, ptr->li, copysz, flag)) {
			SD_ERROR(SD_LOG_IOCTL_MHD, un,
			    "sd_persistent_reservation_in_read_resv: "
			    "failed ddi_copyout: mhioc_resv_desc_list_t\n");
			rval = EFAULT;
			goto done;
		}
		break;

	case DDI_MODEL_NONE:
		if (ddi_copyout(&resvlist, ptr->li, copysz, flag)) {
			SD_ERROR(SD_LOG_IOCTL_MHD, un,
			    "sd_persistent_reservation_in_read_resv: "
			    "failed ddi_copyout: mhioc_resv_desc_list_t\n");
			rval = EFAULT;
			goto done;
		}
		break;
	}

#else /* ! _MULTI_DATAMODEL */

	if (ddi_copyout(&resvlist, ptr->li, copysz, flag)) {
		SD_ERROR(SD_LOG_IOCTL_MHD, un,
		    "sd_persistent_reservation_in_read_resv: "
		    "failed ddi_copyout: mhioc_resv_desc_list_t\n");
		rval = EFAULT;
		goto done;
	}

#endif /* ! _MULTI_DATAMODEL */

	readresv_ptr = (sd_readresv_desc_t *)&in->readresv_desc;
	bufp = resvlist.list;
	copysz = sizeof (mhioc_resv_desc_t);
	for (i = 0; i < min(resvlist.listlen, resvlist.listsize);
	    i++, readresv_ptr++, bufp++) {

		bcopy(&readresv_ptr->resvkey, &resvdesc.key,
		    MHIOC_RESV_KEY_SIZE);
		resvdesc.type = readresv_ptr->type;
		resvdesc.scope = readresv_ptr->scope;
		resvdesc.scope_specific_addr =
		    BE_32(readresv_ptr->scope_specific_addr);

		if (ddi_copyout(&resvdesc, bufp, copysz, flag)) {
			SD_ERROR(SD_LOG_IOCTL_MHD, un,
			    "sd_persistent_reservation_in_read_resv: "
			    "failed ddi_copyout: resvlist\n");
			rval = EFAULT;
			goto done;
		}
	}
done:
	sd_ssc_fini(ssc);
	/* free data_bufp only if it was actually allocated */
	if (data_bufp) {
		kmem_free(data_bufp, data_len);
	}
	return (rval);
}


/*
 * Function: sr_change_blkmode()
 *
 * Description: This routine is the driver entry point for handling CD-ROM
 *		block mode ioctl requests. Support for returning and changing
 *		the current block size in use by the device is implemented. The
 *		LBA size is changed via a MODE SELECT Block Descriptor.
 *
 *		This routine issues a mode sense with an allocation length of
 *		12 bytes for the mode page header and a single block descriptor.
 *
 * Arguments: dev - the device 'dev_t'
 *	      cmd - the request type; one of CDROMGBLKMODE (get) or
 *		    CDROMSBLKMODE (set)
 *	      data - current block size or requested block size
 *	      flag - this argument is a pass through to ddi_copyxxx() directly
 *		     from the mode argument of ioctl().
 *
 * Return Code: the code returned by sd_send_scsi_cmd()
 *		EINVAL if invalid arguments are provided
 *		EFAULT if ddi_copyxxx() fails
 *		ENXIO if fail ddi_get_soft_state
 *		EIO if invalid mode sense block descriptor length
 *
 */

static int
sr_change_blkmode(dev_t dev, int cmd, intptr_t data, int flag)
{
	struct sd_lun			*un = NULL;
	struct mode_header		*sense_mhp, *select_mhp;
	struct block_descriptor		*sense_desc, *select_desc;
	int				current_bsize;
	int				rval = EINVAL;
	uchar_t				*sense = NULL;
	uchar_t				*select = NULL;
	sd_ssc_t			*ssc;

	ASSERT((cmd == CDROMGBLKMODE) || (cmd == CDROMSBLKMODE));

	if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) {
		return (ENXIO);
	}

	/*
	 * The block length is changed via the Mode Select block descriptor;
	 * the "Read/Write Error Recovery" mode page (0x1) contents are not
	 * actually required as part of this routine. Therefore the mode
	 * sense allocation length is specified to be the length of a mode
	 * page header and a block descriptor.
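	 * With the usual 6-byte MODE SENSE layout that works out to
	 * MODE_HEADER_LENGTH + MODE_BLK_DESC_LENGTH (4 + 8 = 12) bytes,
	 * which is what BUFLEN_CHG_BLK_MODE provides.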
	 */
	sense = kmem_zalloc(BUFLEN_CHG_BLK_MODE, KM_SLEEP);

	ssc = sd_ssc_init(un);
	rval = sd_send_scsi_MODE_SENSE(ssc, CDB_GROUP0, sense,
	    BUFLEN_CHG_BLK_MODE, MODEPAGE_ERR_RECOV, SD_PATH_STANDARD);
	sd_ssc_fini(ssc);
	if (rval != 0) {
		scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
		    "sr_change_blkmode: Mode Sense Failed\n");
		kmem_free(sense, BUFLEN_CHG_BLK_MODE);
		return (rval);
	}

	/* Check the block descriptor len to handle only 1 block descriptor */
	sense_mhp = (struct mode_header *)sense;
	if ((sense_mhp->bdesc_length == 0) ||
	    (sense_mhp->bdesc_length > MODE_BLK_DESC_LENGTH)) {
		scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
		    "sr_change_blkmode: Mode Sense returned invalid block"
		    " descriptor length\n");
		kmem_free(sense, BUFLEN_CHG_BLK_MODE);
		return (EIO);
	}
	sense_desc = (struct block_descriptor *)(sense + MODE_HEADER_LENGTH);
	current_bsize = ((sense_desc->blksize_hi << 16) |
	    (sense_desc->blksize_mid << 8) | sense_desc->blksize_lo);

	/* Process command */
	switch (cmd) {
	case CDROMGBLKMODE:
		/* Return the block size obtained during the mode sense */
		if (ddi_copyout(&current_bsize, (void *)data,
		    sizeof (int), flag) != 0)
			rval = EFAULT;
		break;
	case CDROMSBLKMODE:
		/* Validate the requested block size */
		switch (data) {
		case CDROM_BLK_512:
		case CDROM_BLK_1024:
		case CDROM_BLK_2048:
		case CDROM_BLK_2056:
		case CDROM_BLK_2336:
		case CDROM_BLK_2340:
		case CDROM_BLK_2352:
		case CDROM_BLK_2368:
		case CDROM_BLK_2448:
		case CDROM_BLK_2646:
		case CDROM_BLK_2647:
			break;
		default:
			scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
			    "sr_change_blkmode: "
			    "Block Size '%ld' Not Supported\n", data);
			kmem_free(sense, BUFLEN_CHG_BLK_MODE);
			return (EINVAL);
		}

		/*
		 * The current block size matches the requested block size so
		 * there is no need to send the mode select to change the size
		 */
		if (current_bsize == data) {
			break;
		}

		/* Build the select data for the requested block size */
		select = kmem_zalloc(BUFLEN_CHG_BLK_MODE, KM_SLEEP);
		select_mhp = (struct mode_header *)select;
		select_desc =
		    (struct block_descriptor *)(select + MODE_HEADER_LENGTH);
		/*
		 * The LBA size is changed via the block descriptor, so the
		 * descriptor is built according to the user data
		 */
		select_mhp->bdesc_length = MODE_BLK_DESC_LENGTH;
		select_desc->blksize_hi = (char)(((data) & 0x00ff0000) >> 16);
		select_desc->blksize_mid = (char)(((data) & 0x0000ff00) >> 8);
		select_desc->blksize_lo = (char)((data) & 0x000000ff);

		/* Send the mode select for the requested block size */
		ssc = sd_ssc_init(un);
		rval = sd_send_scsi_MODE_SELECT(ssc, CDB_GROUP0,
		    select, BUFLEN_CHG_BLK_MODE, SD_DONTSAVE_PAGE,
		    SD_PATH_STANDARD);
		sd_ssc_fini(ssc);
		if (rval != 0) {
			scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
			    "sr_change_blkmode: Mode Select Failed\n");
			/*
			 * The mode select failed for the requested block size,
			 * so reset the data for the original block size and
			 * send it to the target. The error is indicated by the
			 * return value for the failed mode select.
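			 * (That is, rval keeps the error from the first MODE
			 * SELECT; the return status of the restore attempt
			 * below is deliberately ignored.)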
25679 */ 25680 select_desc->blksize_hi = sense_desc->blksize_hi; 25681 select_desc->blksize_mid = sense_desc->blksize_mid; 25682 select_desc->blksize_lo = sense_desc->blksize_lo; 25683 ssc = sd_ssc_init(un); 25684 (void) sd_send_scsi_MODE_SELECT(ssc, CDB_GROUP0, 25685 select, BUFLEN_CHG_BLK_MODE, SD_DONTSAVE_PAGE, 25686 SD_PATH_STANDARD); 25687 sd_ssc_fini(ssc); 25688 } else { 25689 ASSERT(!mutex_owned(SD_MUTEX(un))); 25690 mutex_enter(SD_MUTEX(un)); 25691 sd_update_block_info(un, (uint32_t)data, 0); 25692 mutex_exit(SD_MUTEX(un)); 25693 } 25694 break; 25695 default: 25696 /* should not reach here, but check anyway */ 25697 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 25698 "sr_change_blkmode: Command '%x' Not Supported\n", cmd); 25699 rval = EINVAL; 25700 break; 25701 } 25702 25703 if (select) { 25704 kmem_free(select, BUFLEN_CHG_BLK_MODE); 25705 } 25706 if (sense) { 25707 kmem_free(sense, BUFLEN_CHG_BLK_MODE); 25708 } 25709 return (rval); 25710 } 25711 25712 25713 /* 25714 * Note: The following sr_change_speed() and sr_atapi_change_speed() routines 25715 * implement driver support for getting and setting the CD speed. The command 25716 * set used will be based on the device type. If the device has not been 25717 * identified as MMC the Toshiba vendor specific mode page will be used. If 25718 * the device is MMC but does not support the Real Time Streaming feature 25719 * the SET CD SPEED command will be used to set speed and mode page 0x2A will 25720 * be used to read the speed. 25721 */ 25722 25723 /* 25724 * Function: sr_change_speed() 25725 * 25726 * Description: This routine is the driver entry point for handling CD-ROM 25727 * drive speed ioctl requests for devices supporting the Toshiba 25728 * vendor specific drive speed mode page. Support for returning 25729 * and changing the current drive speed in use by the device is 25730 * implemented. 25731 * 25732 * Arguments: dev - the device 'dev_t' 25733 * cmd - the request type; one of CDROMGDRVSPEED (get) or 25734 * CDROMSDRVSPEED (set) 25735 * data - current drive speed or requested drive speed 25736 * flag - this argument is a pass through to ddi_copyxxx() directly 25737 * from the mode argument of ioctl(). 25738 * 25739 * Return Code: the code returned by sd_send_scsi_cmd() 25740 * EINVAL if invalid arguments are provided 25741 * EFAULT if ddi_copyxxx() fails 25742 * ENXIO if fail ddi_get_soft_state 25743 * EIO if invalid mode sense block descriptor length 25744 */ 25745 25746 static int 25747 sr_change_speed(dev_t dev, int cmd, intptr_t data, int flag) 25748 { 25749 struct sd_lun *un = NULL; 25750 struct mode_header *sense_mhp, *select_mhp; 25751 struct mode_speed *sense_page, *select_page; 25752 int current_speed; 25753 int rval = EINVAL; 25754 int bd_len; 25755 uchar_t *sense = NULL; 25756 uchar_t *select = NULL; 25757 sd_ssc_t *ssc; 25758 25759 ASSERT((cmd == CDROMGDRVSPEED) || (cmd == CDROMSDRVSPEED)); 25760 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 25761 return (ENXIO); 25762 } 25763 25764 /* 25765 * Note: The drive speed is being modified here according to a Toshiba 25766 * vendor specific mode page (0x31). 
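 *
 * The payload is parsed below as a standard mode sense image: a mode
 * header, an optional block descriptor (bdesc_length bytes), then the
 * vendor speed page. Note that the page's 'speed' byte holds a device
 * speed code rather than a KB/s value; the CDROMGDRVSPEED case below
 * maps the code 0x2 to CDROM_TWELVE_SPEED, and CDROMSDRVSPEED maps it
 * back before the mode select is built.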
25767  */
25768     sense = kmem_zalloc(BUFLEN_MODE_CDROM_SPEED, KM_SLEEP);
25769
25770     ssc = sd_ssc_init(un);
25771     rval = sd_send_scsi_MODE_SENSE(ssc, CDB_GROUP0, sense,
25772         BUFLEN_MODE_CDROM_SPEED, CDROM_MODE_SPEED,
25773         SD_PATH_STANDARD);
25774     sd_ssc_fini(ssc);
25775     if (rval != 0) {
25776         scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
25777             "sr_change_speed: Mode Sense Failed\n");
25778         kmem_free(sense, BUFLEN_MODE_CDROM_SPEED);
25779         return (rval);
25780     }
25781     sense_mhp = (struct mode_header *)sense;
25782
25783     /* Check the block descriptor len to handle only 1 block descriptor */
25784     bd_len = sense_mhp->bdesc_length;
25785     if (bd_len > MODE_BLK_DESC_LENGTH) {
25786         scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
25787             "sr_change_speed: Mode Sense returned invalid block "
25788             "descriptor length\n");
25789         kmem_free(sense, BUFLEN_MODE_CDROM_SPEED);
25790         return (EIO);
25791     }
25792
25793     sense_page = (struct mode_speed *)
25794         (sense + MODE_HEADER_LENGTH + sense_mhp->bdesc_length);
25795     current_speed = sense_page->speed;
25796
25797     /* Process command */
25798     switch (cmd) {
25799     case CDROMGDRVSPEED:
25800         /* Return the drive speed obtained during the mode sense */
25801         if (current_speed == 0x2) {
25802             current_speed = CDROM_TWELVE_SPEED;
25803         }
25804         if (ddi_copyout(&current_speed, (void *)data,
25805             sizeof (int), flag) != 0) {
25806             rval = EFAULT;
25807         }
25808         break;
25809     case CDROMSDRVSPEED:
25810         /* Validate the requested drive speed */
25811         switch ((uchar_t)data) {
25812         case CDROM_TWELVE_SPEED:
25813             data = 0x2;
25814             /*FALLTHROUGH*/
25815         case CDROM_NORMAL_SPEED:
25816         case CDROM_DOUBLE_SPEED:
25817         case CDROM_QUAD_SPEED:
25818         case CDROM_MAXIMUM_SPEED:
25819             break;
25820         default:
25821             scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
25822                 "sr_change_speed: "
25823                 "Drive Speed '%d' Not Supported\n", (uchar_t)data);
25824             kmem_free(sense, BUFLEN_MODE_CDROM_SPEED);
25825             return (EINVAL);
25826         }
25827
25828         /*
25829          * The current drive speed matches the requested drive speed,
25830          * so there is no need to send the mode select to change the speed
25831          */
25832         if (current_speed == data) {
25833             break;
25834         }
25835
25836         /* Build the select data for the requested drive speed */
25837         select = kmem_zalloc(BUFLEN_MODE_CDROM_SPEED, KM_SLEEP);
25838         select_mhp = (struct mode_header *)select;
25839         select_mhp->bdesc_length = 0;
25840         select_page =
25841             (struct mode_speed *)(select + MODE_HEADER_LENGTH);
25844         select_page->mode_page.code = CDROM_MODE_SPEED;
25845         select_page->mode_page.length = 2;
25846         select_page->speed = (uchar_t)data;
25847
25848         /* Send the mode select for the requested drive speed */
25849         ssc = sd_ssc_init(un);
25850         rval = sd_send_scsi_MODE_SELECT(ssc, CDB_GROUP0, select,
25851             MODEPAGE_CDROM_SPEED_LEN + MODE_HEADER_LENGTH,
25852             SD_DONTSAVE_PAGE, SD_PATH_STANDARD);
25853         sd_ssc_fini(ssc);
25854         if (rval != 0) {
25855             /*
25856              * The mode select failed for the requested drive speed,
25857              * so reset the data for the original drive speed and
25858              * send it to the target. The error is indicated by the
25859              * return value for the failed mode select.
25860              */
25861             scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
25862                 "sr_change_speed: Mode Select Failed\n");
25863             select_page->speed = sense_page->speed;
25864             ssc = sd_ssc_init(un);
25865             (void) sd_send_scsi_MODE_SELECT(ssc, CDB_GROUP0, select,
25866                 MODEPAGE_CDROM_SPEED_LEN + MODE_HEADER_LENGTH,
25867                 SD_DONTSAVE_PAGE, SD_PATH_STANDARD);
25868             sd_ssc_fini(ssc);
25869         }
25870         break;
25871     default:
25872         /* should not reach here, but check anyway */
25873         scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
25874             "sr_change_speed: Command '%x' Not Supported\n", cmd);
25875         rval = EINVAL;
25876         break;
25877     }
25878
25879     if (select) {
25880         kmem_free(select, BUFLEN_MODE_CDROM_SPEED);
25881     }
25882     if (sense) {
25883         kmem_free(sense, BUFLEN_MODE_CDROM_SPEED);
25884     }
25885
25886     return (rval);
25887 }
25888
25889
25890 /*
25891  * Function: sr_atapi_change_speed()
25892  *
25893  * Description: This routine is the driver entry point for handling CD-ROM
25894  *        drive speed ioctl requests for MMC devices that do not support
25895  *        the Real Time Streaming feature (0x107).
25896  *
25897  *        Note: This routine will use the SET SPEED command which may not
25898  *        be supported by all devices.
25899  *
25900  * Arguments: dev - the device 'dev_t'
25901  *        cmd - the request type; one of CDROMGDRVSPEED (get) or
25902  *        CDROMSDRVSPEED (set)
25903  *        data - current drive speed or requested drive speed
25904  *        flag - this argument is a pass through to ddi_copyxxx() directly
25905  *        from the mode argument of ioctl().
25906  *
25907  * Return Code: the code returned by sd_send_scsi_cmd()
25908  *        EINVAL if invalid arguments are provided
25909  *        EFAULT if ddi_copyxxx() fails
25910  *        ENXIO if fail ddi_get_soft_state
25911  *        EIO if invalid mode sense block descriptor length
25912  */
25913
25914 static int
25915 sr_atapi_change_speed(dev_t dev, int cmd, intptr_t data, int flag)
25916 {
25917     struct sd_lun            *un;
25918     struct uscsi_cmd        *com = NULL;
25919     struct mode_header_grp2        *sense_mhp;
25920     uchar_t                *sense_page;
25921     uchar_t                *sense = NULL;
25922     char                cdb[CDB_GROUP5];
25923     int                bd_len;
25924     int                current_speed = 0;
25925     int                max_speed = 0;
25926     int                rval;
25927     sd_ssc_t            *ssc;
25928
25929     ASSERT((cmd == CDROMGDRVSPEED) || (cmd == CDROMSDRVSPEED));
25930
25931     if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) {
25932         return (ENXIO);
25933     }
25934
25935     sense = kmem_zalloc(BUFLEN_MODE_CDROM_CAP, KM_SLEEP);
25936
25937     ssc = sd_ssc_init(un);
25938     rval = sd_send_scsi_MODE_SENSE(ssc, CDB_GROUP1, sense,
25939         BUFLEN_MODE_CDROM_CAP, MODEPAGE_CDROM_CAP,
25940         SD_PATH_STANDARD);
25941     sd_ssc_fini(ssc);
25942     if (rval != 0) {
25943         scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
25944             "sr_atapi_change_speed: Mode Sense Failed\n");
25945         kmem_free(sense, BUFLEN_MODE_CDROM_CAP);
25946         return (rval);
25947     }
25948
25949     /* Check the block descriptor len to handle only 1 block descriptor */
25950     sense_mhp = (struct mode_header_grp2 *)sense;
25951     bd_len = (sense_mhp->bdesc_length_hi << 8) | sense_mhp->bdesc_length_lo;
25952     if (bd_len > MODE_BLK_DESC_LENGTH) {
25953         scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
25954             "sr_atapi_change_speed: Mode Sense returned invalid "
25955             "block descriptor length\n");
25956         kmem_free(sense, BUFLEN_MODE_CDROM_CAP);
25957         return (EIO);
25958     }
25959
25960     /* Calculate the current and maximum drive speeds */
25961     sense_page = (uchar_t *)(sense + MODE_HEADER_LENGTH_GRP2 + bd_len);
25962     current_speed = (sense_page[14] << 8) | sense_page[15];
25963     max_speed = (sense_page[8] << 8) | sense_page[9];
25964
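    /*
     * For reference (an interpretation of the CD capabilities mode page
     * layout implied by the offsets above): bytes 8-9 of the page hold
     * the maximum read speed and bytes 14-15 the current read speed,
     * both in KB/s. Assuming SD_SPEED_1X is the 1x CD data rate of
     * 176 KB/s, a drive reading at 2x would report 352 in bytes 14-15
     * and the CDROMGDRVSPEED case below would return 2.
     */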
25965     /* Process the command */
25966     switch (cmd) {
25967     case CDROMGDRVSPEED:
25968         current_speed /= SD_SPEED_1X;
25969         if (ddi_copyout(&current_speed, (void *)data,
25970             sizeof (int), flag) != 0)
25971             rval = EFAULT;
25972         break;
25973     case CDROMSDRVSPEED:
25974         /* Convert the speed code to KB/sec */
25975         switch ((uchar_t)data) {
25976         case CDROM_NORMAL_SPEED:
25977             current_speed = SD_SPEED_1X;
25978             break;
25979         case CDROM_DOUBLE_SPEED:
25980             current_speed = 2 * SD_SPEED_1X;
25981             break;
25982         case CDROM_QUAD_SPEED:
25983             current_speed = 4 * SD_SPEED_1X;
25984             break;
25985         case CDROM_TWELVE_SPEED:
25986             current_speed = 12 * SD_SPEED_1X;
25987             break;
25988         case CDROM_MAXIMUM_SPEED:
25989             current_speed = 0xffff;
25990             break;
25991         default:
25992             scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
25993                 "sr_atapi_change_speed: invalid drive speed %d\n",
25994                 (uchar_t)data);
25995             kmem_free(sense, BUFLEN_MODE_CDROM_CAP);
25996             return (EINVAL);
25997         }
25998
25999         /* Check the request against the drive's max speed. */
26000         if (current_speed != 0xffff) {
26001             if (current_speed > max_speed) {
26002                 kmem_free(sense, BUFLEN_MODE_CDROM_CAP);
26003                 return (EINVAL);
26004             }
26005         }
26006
26007         /*
26008          * Build and send the SET SPEED command
26009          *
26010          * Note: The SET SPEED (0xBB) command used in this routine is
26011          * obsolete per the SCSI MMC spec but still supported in the
26012          * MT FUJI vendor spec. Most equipment is adhering to MT FUJI,
26013          * therefore the command is still implemented in this routine.
26014          */
26015         bzero(cdb, sizeof (cdb));
26016         cdb[0] = (char)SCMD_SET_CDROM_SPEED;
26017         cdb[2] = (uchar_t)(current_speed >> 8);
26018         cdb[3] = (uchar_t)current_speed;
26019         com = kmem_zalloc(sizeof (*com), KM_SLEEP);
26020         com->uscsi_cdb = (caddr_t)cdb;
26021         com->uscsi_cdblen = CDB_GROUP5;
26022         com->uscsi_bufaddr = NULL;
26023         com->uscsi_buflen = 0;
26024         com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT;
26025         rval = sd_send_scsi_cmd(dev, com, FKIOCTL, 0, SD_PATH_STANDARD);
26026         break;
26027     default:
26028         scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
26029             "sr_atapi_change_speed: Command '%x' Not Supported\n", cmd);
26030         rval = EINVAL;
26031     }
26032
26033     if (sense) {
26034         kmem_free(sense, BUFLEN_MODE_CDROM_CAP);
26035     }
26036     if (com) {
26037         kmem_free(com, sizeof (*com));
26038     }
26039     return (rval);
26040 }
26041
26042
26043 /*
26044  * Function: sr_pause_resume()
26045  *
26046  * Description: This routine is the driver entry point for handling CD-ROM
26047  *        pause/resume ioctl requests. This only affects the audio play
26048  *        operation.
26049  *
26050  * Arguments: dev - the device 'dev_t'
26051  *        cmd - the request type; one of CDROMPAUSE or CDROMRESUME, used
26052  *        for setting the resume bit of the cdb.
26053 * 26054 * Return Code: the code returned by sd_send_scsi_cmd() 26055 * EINVAL if invalid mode specified 26056 * 26057 */ 26058 26059 static int 26060 sr_pause_resume(dev_t dev, int cmd) 26061 { 26062 struct sd_lun *un; 26063 struct uscsi_cmd *com; 26064 char cdb[CDB_GROUP1]; 26065 int rval; 26066 26067 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 26068 return (ENXIO); 26069 } 26070 26071 com = kmem_zalloc(sizeof (*com), KM_SLEEP); 26072 bzero(cdb, CDB_GROUP1); 26073 cdb[0] = SCMD_PAUSE_RESUME; 26074 switch (cmd) { 26075 case CDROMRESUME: 26076 cdb[8] = 1; 26077 break; 26078 case CDROMPAUSE: 26079 cdb[8] = 0; 26080 break; 26081 default: 26082 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, "sr_pause_resume:" 26083 " Command '%x' Not Supported\n", cmd); 26084 rval = EINVAL; 26085 goto done; 26086 } 26087 26088 com->uscsi_cdb = cdb; 26089 com->uscsi_cdblen = CDB_GROUP1; 26090 com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT; 26091 26092 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_SYSSPACE, 26093 SD_PATH_STANDARD); 26094 26095 done: 26096 kmem_free(com, sizeof (*com)); 26097 return (rval); 26098 } 26099 26100 26101 /* 26102 * Function: sr_play_msf() 26103 * 26104 * Description: This routine is the driver entry point for handling CD-ROM 26105 * ioctl requests to output the audio signals at the specified 26106 * starting address and continue the audio play until the specified 26107 * ending address (CDROMPLAYMSF) The address is in Minute Second 26108 * Frame (MSF) format. 26109 * 26110 * Arguments: dev - the device 'dev_t' 26111 * data - pointer to user provided audio msf structure, 26112 * specifying start/end addresses. 26113 * flag - this argument is a pass through to ddi_copyxxx() 26114 * directly from the mode argument of ioctl(). 
26115 * 26116 * Return Code: the code returned by sd_send_scsi_cmd() 26117 * EFAULT if ddi_copyxxx() fails 26118 * ENXIO if fail ddi_get_soft_state 26119 * EINVAL if data pointer is NULL 26120 */ 26121 26122 static int 26123 sr_play_msf(dev_t dev, caddr_t data, int flag) 26124 { 26125 struct sd_lun *un; 26126 struct uscsi_cmd *com; 26127 struct cdrom_msf msf_struct; 26128 struct cdrom_msf *msf = &msf_struct; 26129 char cdb[CDB_GROUP1]; 26130 int rval; 26131 26132 if (data == NULL) { 26133 return (EINVAL); 26134 } 26135 26136 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 26137 return (ENXIO); 26138 } 26139 26140 if (ddi_copyin(data, msf, sizeof (struct cdrom_msf), flag)) { 26141 return (EFAULT); 26142 } 26143 26144 com = kmem_zalloc(sizeof (*com), KM_SLEEP); 26145 bzero(cdb, CDB_GROUP1); 26146 cdb[0] = SCMD_PLAYAUDIO_MSF; 26147 if (un->un_f_cfg_playmsf_bcd == TRUE) { 26148 cdb[3] = BYTE_TO_BCD(msf->cdmsf_min0); 26149 cdb[4] = BYTE_TO_BCD(msf->cdmsf_sec0); 26150 cdb[5] = BYTE_TO_BCD(msf->cdmsf_frame0); 26151 cdb[6] = BYTE_TO_BCD(msf->cdmsf_min1); 26152 cdb[7] = BYTE_TO_BCD(msf->cdmsf_sec1); 26153 cdb[8] = BYTE_TO_BCD(msf->cdmsf_frame1); 26154 } else { 26155 cdb[3] = msf->cdmsf_min0; 26156 cdb[4] = msf->cdmsf_sec0; 26157 cdb[5] = msf->cdmsf_frame0; 26158 cdb[6] = msf->cdmsf_min1; 26159 cdb[7] = msf->cdmsf_sec1; 26160 cdb[8] = msf->cdmsf_frame1; 26161 } 26162 com->uscsi_cdb = cdb; 26163 com->uscsi_cdblen = CDB_GROUP1; 26164 com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT; 26165 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_SYSSPACE, 26166 SD_PATH_STANDARD); 26167 kmem_free(com, sizeof (*com)); 26168 return (rval); 26169 } 26170 26171 26172 /* 26173 * Function: sr_play_trkind() 26174 * 26175 * Description: This routine is the driver entry point for handling CD-ROM 26176 * ioctl requests to output the audio signals at the specified 26177 * starting address and continue the audio play until the specified 26178 * ending address (CDROMPLAYTRKIND). The address is in Track Index 26179 * format. 26180 * 26181 * Arguments: dev - the device 'dev_t' 26182 * data - pointer to user provided audio track/index structure, 26183 * specifying start/end addresses. 26184 * flag - this argument is a pass through to ddi_copyxxx() 26185 * directly from the mode argument of ioctl(). 
26186 * 26187 * Return Code: the code returned by sd_send_scsi_cmd() 26188 * EFAULT if ddi_copyxxx() fails 26189 * ENXIO if fail ddi_get_soft_state 26190 * EINVAL if data pointer is NULL 26191 */ 26192 26193 static int 26194 sr_play_trkind(dev_t dev, caddr_t data, int flag) 26195 { 26196 struct cdrom_ti ti_struct; 26197 struct cdrom_ti *ti = &ti_struct; 26198 struct uscsi_cmd *com = NULL; 26199 char cdb[CDB_GROUP1]; 26200 int rval; 26201 26202 if (data == NULL) { 26203 return (EINVAL); 26204 } 26205 26206 if (ddi_copyin(data, ti, sizeof (struct cdrom_ti), flag)) { 26207 return (EFAULT); 26208 } 26209 26210 com = kmem_zalloc(sizeof (*com), KM_SLEEP); 26211 bzero(cdb, CDB_GROUP1); 26212 cdb[0] = SCMD_PLAYAUDIO_TI; 26213 cdb[4] = ti->cdti_trk0; 26214 cdb[5] = ti->cdti_ind0; 26215 cdb[7] = ti->cdti_trk1; 26216 cdb[8] = ti->cdti_ind1; 26217 com->uscsi_cdb = cdb; 26218 com->uscsi_cdblen = CDB_GROUP1; 26219 com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT; 26220 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_SYSSPACE, 26221 SD_PATH_STANDARD); 26222 kmem_free(com, sizeof (*com)); 26223 return (rval); 26224 } 26225 26226 26227 /* 26228 * Function: sr_read_all_subcodes() 26229 * 26230 * Description: This routine is the driver entry point for handling CD-ROM 26231 * ioctl requests to return raw subcode data while the target is 26232 * playing audio (CDROMSUBCODE). 26233 * 26234 * Arguments: dev - the device 'dev_t' 26235 * data - pointer to user provided cdrom subcode structure, 26236 * specifying the transfer length and address. 26237 * flag - this argument is a pass through to ddi_copyxxx() 26238 * directly from the mode argument of ioctl(). 26239 * 26240 * Return Code: the code returned by sd_send_scsi_cmd() 26241 * EFAULT if ddi_copyxxx() fails 26242 * ENXIO if fail ddi_get_soft_state 26243 * EINVAL if data pointer is NULL 26244 */ 26245 26246 static int 26247 sr_read_all_subcodes(dev_t dev, caddr_t data, int flag) 26248 { 26249 struct sd_lun *un = NULL; 26250 struct uscsi_cmd *com = NULL; 26251 struct cdrom_subcode *subcode = NULL; 26252 int rval; 26253 size_t buflen; 26254 char cdb[CDB_GROUP5]; 26255 26256 #ifdef _MULTI_DATAMODEL 26257 /* To support ILP32 applications in an LP64 world */ 26258 struct cdrom_subcode32 cdrom_subcode32; 26259 struct cdrom_subcode32 *cdsc32 = &cdrom_subcode32; 26260 #endif 26261 if (data == NULL) { 26262 return (EINVAL); 26263 } 26264 26265 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 26266 return (ENXIO); 26267 } 26268 26269 subcode = kmem_zalloc(sizeof (struct cdrom_subcode), KM_SLEEP); 26270 26271 #ifdef _MULTI_DATAMODEL 26272 switch (ddi_model_convert_from(flag & FMODELS)) { 26273 case DDI_MODEL_ILP32: 26274 if (ddi_copyin(data, cdsc32, sizeof (*cdsc32), flag)) { 26275 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 26276 "sr_read_all_subcodes: ddi_copyin Failed\n"); 26277 kmem_free(subcode, sizeof (struct cdrom_subcode)); 26278 return (EFAULT); 26279 } 26280 /* Convert the ILP32 uscsi data from the application to LP64 */ 26281 cdrom_subcode32tocdrom_subcode(cdsc32, subcode); 26282 break; 26283 case DDI_MODEL_NONE: 26284 if (ddi_copyin(data, subcode, 26285 sizeof (struct cdrom_subcode), flag)) { 26286 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 26287 "sr_read_all_subcodes: ddi_copyin Failed\n"); 26288 kmem_free(subcode, sizeof (struct cdrom_subcode)); 26289 return (EFAULT); 26290 } 26291 break; 26292 } 26293 #else /* ! 
_MULTI_DATAMODEL */
26294     if (ddi_copyin(data, subcode, sizeof (struct cdrom_subcode), flag)) {
26295         scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
26296             "sr_read_all_subcodes: ddi_copyin Failed\n");
26297         kmem_free(subcode, sizeof (struct cdrom_subcode));
26298         return (EFAULT);
26299     }
26300 #endif /* _MULTI_DATAMODEL */
26301
26302     /*
26303      * Since MMC-2 expects max 3 bytes for length, check if the
26304      * length input is greater than 3 bytes
26305      */
26306     if ((subcode->cdsc_length & 0xFF000000) != 0) {
26307         scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
26308             "sr_read_all_subcodes: "
26309             "cdrom transfer length too large: %d (limit %d)\n",
26310             subcode->cdsc_length, 0xFFFFFF);
26311         kmem_free(subcode, sizeof (struct cdrom_subcode));
26312         return (EINVAL);
26313     }
26314
26315     buflen = CDROM_BLK_SUBCODE * subcode->cdsc_length;
26316     com = kmem_zalloc(sizeof (*com), KM_SLEEP);
26317     bzero(cdb, CDB_GROUP5);
26318
26319     if (un->un_f_mmc_cap == TRUE) {
26320         cdb[0] = (char)SCMD_READ_CD;
26321         cdb[2] = (char)0xff;
26322         cdb[3] = (char)0xff;
26323         cdb[4] = (char)0xff;
26324         cdb[5] = (char)0xff;
26325         cdb[6] = (((subcode->cdsc_length) & 0x00ff0000) >> 16);
26326         cdb[7] = (((subcode->cdsc_length) & 0x0000ff00) >> 8);
26327         cdb[8] = ((subcode->cdsc_length) & 0x000000ff);
26328         cdb[10] = 1;
26329     } else {
26330         /*
26331          * Note: A vendor specific command (0xDF) is being used here to
26332          * request a read of all subcodes.
26333          */
26334         cdb[0] = (char)SCMD_READ_ALL_SUBCODES;
26335         cdb[6] = (((subcode->cdsc_length) & 0xff000000) >> 24);
26336         cdb[7] = (((subcode->cdsc_length) & 0x00ff0000) >> 16);
26337         cdb[8] = (((subcode->cdsc_length) & 0x0000ff00) >> 8);
26338         cdb[9] = ((subcode->cdsc_length) & 0x000000ff);
26339     }
26340     com->uscsi_cdb = cdb;
26341     com->uscsi_cdblen = CDB_GROUP5;
26342     com->uscsi_bufaddr = (caddr_t)subcode->cdsc_addr;
26343     com->uscsi_buflen = buflen;
26344     com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT|USCSI_READ;
26345     rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_USERSPACE,
26346         SD_PATH_STANDARD);
26347     kmem_free(subcode, sizeof (struct cdrom_subcode));
26348     kmem_free(com, sizeof (*com));
26349     return (rval);
26350 }
26351
26352
26353 /*
26354  * Function: sr_read_subchannel()
26355  *
26356  * Description: This routine is the driver entry point for handling CD-ROM
26357  *        ioctl requests to return the Q sub-channel data of the CD
26358  *        current position block (CDROMSUBCHNL). The data includes the
26359  *        track number, index number, absolute CD-ROM address (LBA or MSF
26360  *        format per the user), track relative CD-ROM address (LBA or MSF
26361  *        format per the user), control data and audio status.
26362  *
26363  * Arguments: dev - the device 'dev_t'
26364  *        data - pointer to user provided cdrom sub-channel structure
26365  *        flag - this argument is a pass through to ddi_copyxxx()
26366  *        directly from the mode argument of ioctl().
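 *
 *        Note: the routine below requests the Q sub-channel current
 *        position format with a 16 byte allocation length and decodes
 *        the response as follows (a sketch of the parsing done in the
 *        body):
 *
 *            buffer[1]       audio status
 *            buffer[5]       ADR (high nibble) and control (low nibble)
 *            buffer[6..7]    track and index numbers
 *            buffer[8..11]   absolute address (LBA, or BCD/plain MSF)
 *            buffer[12..15]  track relative address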
26367 * 26368 * Return Code: the code returned by sd_send_scsi_cmd() 26369 * EFAULT if ddi_copyxxx() fails 26370 * ENXIO if fail ddi_get_soft_state 26371 * EINVAL if data pointer is NULL 26372 */ 26373 26374 static int 26375 sr_read_subchannel(dev_t dev, caddr_t data, int flag) 26376 { 26377 struct sd_lun *un; 26378 struct uscsi_cmd *com; 26379 struct cdrom_subchnl subchanel; 26380 struct cdrom_subchnl *subchnl = &subchanel; 26381 char cdb[CDB_GROUP1]; 26382 caddr_t buffer; 26383 int rval; 26384 26385 if (data == NULL) { 26386 return (EINVAL); 26387 } 26388 26389 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL || 26390 (un->un_state == SD_STATE_OFFLINE)) { 26391 return (ENXIO); 26392 } 26393 26394 if (ddi_copyin(data, subchnl, sizeof (struct cdrom_subchnl), flag)) { 26395 return (EFAULT); 26396 } 26397 26398 buffer = kmem_zalloc((size_t)16, KM_SLEEP); 26399 bzero(cdb, CDB_GROUP1); 26400 cdb[0] = SCMD_READ_SUBCHANNEL; 26401 /* Set the MSF bit based on the user requested address format */ 26402 cdb[1] = (subchnl->cdsc_format & CDROM_LBA) ? 0 : 0x02; 26403 /* 26404 * Set the Q bit in byte 2 to indicate that Q sub-channel data be 26405 * returned 26406 */ 26407 cdb[2] = 0x40; 26408 /* 26409 * Set byte 3 to specify the return data format. A value of 0x01 26410 * indicates that the CD-ROM current position should be returned. 26411 */ 26412 cdb[3] = 0x01; 26413 cdb[8] = 0x10; 26414 com = kmem_zalloc(sizeof (*com), KM_SLEEP); 26415 com->uscsi_cdb = cdb; 26416 com->uscsi_cdblen = CDB_GROUP1; 26417 com->uscsi_bufaddr = buffer; 26418 com->uscsi_buflen = 16; 26419 com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT|USCSI_READ; 26420 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_SYSSPACE, 26421 SD_PATH_STANDARD); 26422 if (rval != 0) { 26423 kmem_free(buffer, 16); 26424 kmem_free(com, sizeof (*com)); 26425 return (rval); 26426 } 26427 26428 /* Process the returned Q sub-channel data */ 26429 subchnl->cdsc_audiostatus = buffer[1]; 26430 subchnl->cdsc_adr = (buffer[5] & 0xF0); 26431 subchnl->cdsc_ctrl = (buffer[5] & 0x0F); 26432 subchnl->cdsc_trk = buffer[6]; 26433 subchnl->cdsc_ind = buffer[7]; 26434 if (subchnl->cdsc_format & CDROM_LBA) { 26435 subchnl->cdsc_absaddr.lba = 26436 ((uchar_t)buffer[8] << 24) + ((uchar_t)buffer[9] << 16) + 26437 ((uchar_t)buffer[10] << 8) + ((uchar_t)buffer[11]); 26438 subchnl->cdsc_reladdr.lba = 26439 ((uchar_t)buffer[12] << 24) + ((uchar_t)buffer[13] << 16) + 26440 ((uchar_t)buffer[14] << 8) + ((uchar_t)buffer[15]); 26441 } else if (un->un_f_cfg_readsub_bcd == TRUE) { 26442 subchnl->cdsc_absaddr.msf.minute = BCD_TO_BYTE(buffer[9]); 26443 subchnl->cdsc_absaddr.msf.second = BCD_TO_BYTE(buffer[10]); 26444 subchnl->cdsc_absaddr.msf.frame = BCD_TO_BYTE(buffer[11]); 26445 subchnl->cdsc_reladdr.msf.minute = BCD_TO_BYTE(buffer[13]); 26446 subchnl->cdsc_reladdr.msf.second = BCD_TO_BYTE(buffer[14]); 26447 subchnl->cdsc_reladdr.msf.frame = BCD_TO_BYTE(buffer[15]); 26448 } else { 26449 subchnl->cdsc_absaddr.msf.minute = buffer[9]; 26450 subchnl->cdsc_absaddr.msf.second = buffer[10]; 26451 subchnl->cdsc_absaddr.msf.frame = buffer[11]; 26452 subchnl->cdsc_reladdr.msf.minute = buffer[13]; 26453 subchnl->cdsc_reladdr.msf.second = buffer[14]; 26454 subchnl->cdsc_reladdr.msf.frame = buffer[15]; 26455 } 26456 kmem_free(buffer, 16); 26457 kmem_free(com, sizeof (*com)); 26458 if (ddi_copyout(subchnl, data, sizeof (struct cdrom_subchnl), flag) 26459 != 0) { 26460 return (EFAULT); 26461 } 26462 return (rval); 26463 } 26464 26465 26466 /* 26467 * Function: sr_read_tocentry() 26468 * 
26469  * Description: This routine is the driver entry point for handling CD-ROM
26470  *        ioctl requests to read from the Table of Contents (TOC)
26471  *        (CDROMREADTOCENTRY). This routine provides the ADR and CTRL
26472  *        fields, the starting address (LBA or MSF format per the user)
26473  *        and the data mode if the user specified track is a data track.
26474  *
26475  *        Note: The READ HEADER (0x44) command used in this routine is
26476  *        obsolete per the SCSI MMC spec but still supported in the
26477  *        MT FUJI vendor spec. Most equipment is adhering to MT FUJI,
26478  *        therefore the command is still implemented in this routine.
26479  *
26480  * Arguments: dev - the device 'dev_t'
26481  *        data - pointer to user provided toc entry structure,
26482  *        specifying the track # and the address format
26483  *        (LBA or MSF).
26484  *        flag - this argument is a pass through to ddi_copyxxx()
26485  *        directly from the mode argument of ioctl().
26486  *
26487  * Return Code: the code returned by sd_send_scsi_cmd()
26488  *        EFAULT if ddi_copyxxx() fails
26489  *        ENXIO if fail ddi_get_soft_state
26490  *        EINVAL if data pointer is NULL
26491  */
26492
26493 static int
26494 sr_read_tocentry(dev_t dev, caddr_t data, int flag)
26495 {
26496     struct sd_lun        *un = NULL;
26497     struct uscsi_cmd    *com;
26498     struct cdrom_tocentry    toc_entry;
26499     struct cdrom_tocentry    *entry = &toc_entry;
26500     caddr_t            buffer;
26501     int            rval;
26502     char            cdb[CDB_GROUP1];
26503
26504     if (data == NULL) {
26505         return (EINVAL);
26506     }
26507
26508     if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL ||
26509         (un->un_state == SD_STATE_OFFLINE)) {
26510         return (ENXIO);
26511     }
26512
26513     if (ddi_copyin(data, entry, sizeof (struct cdrom_tocentry), flag)) {
26514         return (EFAULT);
26515     }
26516
26517     /* Validate the requested track and address format */
26518     if (!(entry->cdte_format & (CDROM_LBA | CDROM_MSF))) {
26519         return (EINVAL);
26520     }
26521
26522     if (entry->cdte_track == 0) {
26523         return (EINVAL);
26524     }
26525
26526     buffer = kmem_zalloc((size_t)12, KM_SLEEP);
26527     com = kmem_zalloc(sizeof (*com), KM_SLEEP);
26528     bzero(cdb, CDB_GROUP1);
26529
26530     cdb[0] = SCMD_READ_TOC;
26531     /* Set the MSF bit based on the user requested address format */
26532     cdb[1] = ((entry->cdte_format & CDROM_LBA) ? 0 : 2);
26533     if (un->un_f_cfg_read_toc_trk_bcd == TRUE) {
26534         cdb[6] = BYTE_TO_BCD(entry->cdte_track);
26535     } else {
26536         cdb[6] = entry->cdte_track;
26537     }
26538
26539     /*
26540      * Bytes 7 & 8 are the 12 byte allocation length for a single entry.
 * (4 byte TOC response header + 8 byte track descriptor)
26542      */
26543     cdb[8] = 12;
26544     com->uscsi_cdb = cdb;
26545     com->uscsi_cdblen = CDB_GROUP1;
26546     com->uscsi_bufaddr = buffer;
26547     com->uscsi_buflen = 0x0C;
26548     com->uscsi_flags = (USCSI_DIAGNOSE | USCSI_SILENT | USCSI_READ);
26549     rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_SYSSPACE,
26550         SD_PATH_STANDARD);
26551     if (rval != 0) {
26552         kmem_free(buffer, 12);
26553         kmem_free(com, sizeof (*com));
26554         return (rval);
26555     }
26556
26557     /* Process the toc entry */
26558     entry->cdte_adr = (buffer[5] & 0xF0) >> 4;
26559     entry->cdte_ctrl = (buffer[5] & 0x0F);
26560     if (entry->cdte_format & CDROM_LBA) {
26561         entry->cdte_addr.lba =
26562             ((uchar_t)buffer[8] << 24) + ((uchar_t)buffer[9] << 16) +
26563             ((uchar_t)buffer[10] << 8) + ((uchar_t)buffer[11]);
26564     } else if (un->un_f_cfg_read_toc_addr_bcd == TRUE) {
26565         entry->cdte_addr.msf.minute = BCD_TO_BYTE(buffer[9]);
26566         entry->cdte_addr.msf.second = BCD_TO_BYTE(buffer[10]);
26567         entry->cdte_addr.msf.frame = BCD_TO_BYTE(buffer[11]);
26568         /*
26569          * Send a READ TOC command using the LBA address format to get
26570          * the LBA for the track requested so it can be used in the
26571          * READ HEADER request
26572          *
26573          * Note: The MSF bit of the READ HEADER command specifies the
26574          * output format. The block address specified in that command
26575          * must be in LBA format.
26576          */
26577         cdb[1] = 0;
26578         rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_SYSSPACE,
26579             SD_PATH_STANDARD);
26580         if (rval != 0) {
26581             kmem_free(buffer, 12);
26582             kmem_free(com, sizeof (*com));
26583             return (rval);
26584         }
26585     } else {
26586         entry->cdte_addr.msf.minute = buffer[9];
26587         entry->cdte_addr.msf.second = buffer[10];
26588         entry->cdte_addr.msf.frame = buffer[11];
26589         /*
26590          * Send a READ TOC command using the LBA address format to get
26591          * the LBA for the track requested so it can be used in the
26592          * READ HEADER request
26593          *
26594          * Note: The MSF bit of the READ HEADER command specifies the
26595          * output format. The block address specified in that command
26596          * must be in LBA format.
26597          */
26598         cdb[1] = 0;
26599         rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_SYSSPACE,
26600             SD_PATH_STANDARD);
26601         if (rval != 0) {
26602             kmem_free(buffer, 12);
26603             kmem_free(com, sizeof (*com));
26604             return (rval);
26605         }
26606     }
26607
26608     /*
26609      * Build and send the READ HEADER command to determine the data mode of
26610      * the user specified track.
26611      */
26612     if ((entry->cdte_ctrl & CDROM_DATA_TRACK) &&
26613         (entry->cdte_track != CDROM_LEADOUT)) {
26614         bzero(cdb, CDB_GROUP1);
26615         cdb[0] = SCMD_READ_HEADER;
26616         cdb[2] = buffer[8];
26617         cdb[3] = buffer[9];
26618         cdb[4] = buffer[10];
26619         cdb[5] = buffer[11];
26620         cdb[8] = 0x08;
26621         com->uscsi_buflen = 0x08;
26622         rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_SYSSPACE,
26623             SD_PATH_STANDARD);
26624         if (rval == 0) {
26625             entry->cdte_datamode = buffer[0];
26626         } else {
26627             /*
26628              * The READ HEADER command failed; since it is
26629              * obsolete in one spec, it is better to return
26630              * -1 for an invalid track so that we can still
26631              * receive the rest of the TOC data.
26632              */
26633             entry->cdte_datamode = (uchar_t)-1;
26634         }
26635     } else {
26636         entry->cdte_datamode = (uchar_t)-1;
26637     }
26638
26639     kmem_free(buffer, 12);
26640     kmem_free(com, sizeof (*com));
26641     if (ddi_copyout(entry, data, sizeof (struct cdrom_tocentry), flag) != 0)
26642         return (EFAULT);
26643
26644     return (rval);
26645 }
26646
26647
26648 /*
26649  * Function: sr_read_tochdr()
26650  *
26651  * Description: This routine is the driver entry point for handling CD-ROM
26652  *        ioctl requests to read the Table of Contents (TOC) header
26653  *        (CDROMREADTOCHDR). The TOC header consists of the disk starting
26654  *        and ending track numbers.
26655  *
26656  * Arguments: dev - the device 'dev_t'
26657  *        data - pointer to user provided toc header structure,
26658  *        specifying the starting and ending track numbers.
26659  *        flag - this argument is a pass through to ddi_copyxxx()
26660  *        directly from the mode argument of ioctl().
26661  *
26662  * Return Code: the code returned by sd_send_scsi_cmd()
26663  *        EFAULT if ddi_copyxxx() fails
26664  *        ENXIO if fail ddi_get_soft_state
26665  *        EINVAL if data pointer is NULL
26666  */
26667
26668 static int
26669 sr_read_tochdr(dev_t dev, caddr_t data, int flag)
26670 {
26671     struct sd_lun        *un;
26672     struct uscsi_cmd    *com;
26673     struct cdrom_tochdr    toc_header;
26674     struct cdrom_tochdr    *hdr = &toc_header;
26675     char            cdb[CDB_GROUP1];
26676     int            rval;
26677     caddr_t            buffer;
26678
26679     if (data == NULL) {
26680         return (EINVAL);
26681     }
26682
26683     if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL ||
26684         (un->un_state == SD_STATE_OFFLINE)) {
26685         return (ENXIO);
26686     }
26687
26688     buffer = kmem_zalloc(4, KM_SLEEP);
26689     bzero(cdb, CDB_GROUP1);
26690     cdb[0] = SCMD_READ_TOC;
26691     /*
26692      * Specifying a track number of 0x00 in the READ TOC command indicates
26693      * that the TOC header should be returned
26694      */
26695     cdb[6] = 0x00;
26696     /*
26697      * Bytes 7 & 8 are the 4 byte allocation length for TOC header.
26698      * (2 byte data len + 1 byte starting track # + 1 byte ending track #)
26699      */
26700     cdb[8] = 0x04;
26701     com = kmem_zalloc(sizeof (*com), KM_SLEEP);
26702     com->uscsi_cdb = cdb;
26703     com->uscsi_cdblen = CDB_GROUP1;
26704     com->uscsi_bufaddr = buffer;
26705     com->uscsi_buflen = 0x04;
26706     com->uscsi_timeout = 300;
26707     com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT|USCSI_READ;
26708
26709     rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_SYSSPACE,
26710         SD_PATH_STANDARD);
26711     if (un->un_f_cfg_read_toc_trk_bcd == TRUE) {
26712         hdr->cdth_trk0 = BCD_TO_BYTE(buffer[2]);
26713         hdr->cdth_trk1 = BCD_TO_BYTE(buffer[3]);
26714     } else {
26715         hdr->cdth_trk0 = buffer[2];
26716         hdr->cdth_trk1 = buffer[3];
26717     }
26718     kmem_free(buffer, 4);
26719     kmem_free(com, sizeof (*com));
26720     if (ddi_copyout(hdr, data, sizeof (struct cdrom_tochdr), flag) != 0) {
26721         return (EFAULT);
26722     }
26723     return (rval);
26724 }
26725
26726
26727 /*
26728  * Note: The following sr_read_mode1(), sr_read_cd_mode2(), sr_read_mode2(),
26729  * sr_read_cdda(), and sr_read_cdxa() routines implement driver support for
26730  * handling CDROMREAD ioctl requests for mode 1 user data, mode 2 user data,
26731  * digital audio and extended architecture digital audio. These modes are
26732  * defined in the IEC908 (Red Book), ISO10149 (Yellow Book), and the SCSI3
26733  * MMC specs.
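 *
 * For reference, the per-sector user data sizes handled by these routines
 * are 2048 bytes for mode 1, 2336 bytes for mode 2, and 2352 bytes for
 * digital audio, with the audio reads optionally carrying 16 or 96
 * additional bytes of subcode per sector (see the block size constants
 * used in the routines below).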
26734 * 26735 * In addition to support for the various data formats these routines also 26736 * include support for devices that implement only the direct access READ 26737 * commands (0x08, 0x28), devices that implement the READ_CD commands 26738 * (0xBE, 0xD4), and devices that implement the vendor unique READ CDDA and 26739 * READ CDXA commands (0xD8, 0xDB) 26740 */ 26741 26742 /* 26743 * Function: sr_read_mode1() 26744 * 26745 * Description: This routine is the driver entry point for handling CD-ROM 26746 * ioctl read mode1 requests (CDROMREADMODE1). 26747 * 26748 * Arguments: dev - the device 'dev_t' 26749 * data - pointer to user provided cd read structure specifying 26750 * the lba buffer address and length. 26751 * flag - this argument is a pass through to ddi_copyxxx() 26752 * directly from the mode argument of ioctl(). 26753 * 26754 * Return Code: the code returned by sd_send_scsi_cmd() 26755 * EFAULT if ddi_copyxxx() fails 26756 * ENXIO if fail ddi_get_soft_state 26757 * EINVAL if data pointer is NULL 26758 */ 26759 26760 static int 26761 sr_read_mode1(dev_t dev, caddr_t data, int flag) 26762 { 26763 struct sd_lun *un; 26764 struct cdrom_read mode1_struct; 26765 struct cdrom_read *mode1 = &mode1_struct; 26766 int rval; 26767 sd_ssc_t *ssc; 26768 26769 #ifdef _MULTI_DATAMODEL 26770 /* To support ILP32 applications in an LP64 world */ 26771 struct cdrom_read32 cdrom_read32; 26772 struct cdrom_read32 *cdrd32 = &cdrom_read32; 26773 #endif /* _MULTI_DATAMODEL */ 26774 26775 if (data == NULL) { 26776 return (EINVAL); 26777 } 26778 26779 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL || 26780 (un->un_state == SD_STATE_OFFLINE)) { 26781 return (ENXIO); 26782 } 26783 26784 SD_TRACE(SD_LOG_ATTACH_DETACH, un, 26785 "sd_read_mode1: entry: un:0x%p\n", un); 26786 26787 #ifdef _MULTI_DATAMODEL 26788 switch (ddi_model_convert_from(flag & FMODELS)) { 26789 case DDI_MODEL_ILP32: 26790 if (ddi_copyin(data, cdrd32, sizeof (*cdrd32), flag) != 0) { 26791 return (EFAULT); 26792 } 26793 /* Convert the ILP32 uscsi data from the application to LP64 */ 26794 cdrom_read32tocdrom_read(cdrd32, mode1); 26795 break; 26796 case DDI_MODEL_NONE: 26797 if (ddi_copyin(data, mode1, sizeof (struct cdrom_read), flag)) { 26798 return (EFAULT); 26799 } 26800 } 26801 #else /* ! _MULTI_DATAMODEL */ 26802 if (ddi_copyin(data, mode1, sizeof (struct cdrom_read), flag)) { 26803 return (EFAULT); 26804 } 26805 #endif /* _MULTI_DATAMODEL */ 26806 26807 ssc = sd_ssc_init(un); 26808 rval = sd_send_scsi_READ(ssc, mode1->cdread_bufaddr, 26809 mode1->cdread_buflen, mode1->cdread_lba, SD_PATH_STANDARD); 26810 sd_ssc_fini(ssc); 26811 26812 SD_TRACE(SD_LOG_ATTACH_DETACH, un, 26813 "sd_read_mode1: exit: un:0x%p\n", un); 26814 26815 return (rval); 26816 } 26817 26818 26819 /* 26820 * Function: sr_read_cd_mode2() 26821 * 26822 * Description: This routine is the driver entry point for handling CD-ROM 26823 * ioctl read mode2 requests (CDROMREADMODE2) for devices that 26824 * support the READ CD (0xBE) command or the 1st generation 26825 * READ CD (0xD4) command. 26826 * 26827 * Arguments: dev - the device 'dev_t' 26828 * data - pointer to user provided cd read structure specifying 26829 * the lba buffer address and length. 26830 * flag - this argument is a pass through to ddi_copyxxx() 26831 * directly from the mode argument of ioctl(). 
26832 * 26833 * Return Code: the code returned by sd_send_scsi_cmd() 26834 * EFAULT if ddi_copyxxx() fails 26835 * ENXIO if fail ddi_get_soft_state 26836 * EINVAL if data pointer is NULL 26837 */ 26838 26839 static int 26840 sr_read_cd_mode2(dev_t dev, caddr_t data, int flag) 26841 { 26842 struct sd_lun *un; 26843 struct uscsi_cmd *com; 26844 struct cdrom_read mode2_struct; 26845 struct cdrom_read *mode2 = &mode2_struct; 26846 uchar_t cdb[CDB_GROUP5]; 26847 int nblocks; 26848 int rval; 26849 #ifdef _MULTI_DATAMODEL 26850 /* To support ILP32 applications in an LP64 world */ 26851 struct cdrom_read32 cdrom_read32; 26852 struct cdrom_read32 *cdrd32 = &cdrom_read32; 26853 #endif /* _MULTI_DATAMODEL */ 26854 26855 if (data == NULL) { 26856 return (EINVAL); 26857 } 26858 26859 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL || 26860 (un->un_state == SD_STATE_OFFLINE)) { 26861 return (ENXIO); 26862 } 26863 26864 #ifdef _MULTI_DATAMODEL 26865 switch (ddi_model_convert_from(flag & FMODELS)) { 26866 case DDI_MODEL_ILP32: 26867 if (ddi_copyin(data, cdrd32, sizeof (*cdrd32), flag) != 0) { 26868 return (EFAULT); 26869 } 26870 /* Convert the ILP32 uscsi data from the application to LP64 */ 26871 cdrom_read32tocdrom_read(cdrd32, mode2); 26872 break; 26873 case DDI_MODEL_NONE: 26874 if (ddi_copyin(data, mode2, sizeof (*mode2), flag) != 0) { 26875 return (EFAULT); 26876 } 26877 break; 26878 } 26879 26880 #else /* ! _MULTI_DATAMODEL */ 26881 if (ddi_copyin(data, mode2, sizeof (*mode2), flag) != 0) { 26882 return (EFAULT); 26883 } 26884 #endif /* _MULTI_DATAMODEL */ 26885 26886 bzero(cdb, sizeof (cdb)); 26887 if (un->un_f_cfg_read_cd_xd4 == TRUE) { 26888 /* Read command supported by 1st generation atapi drives */ 26889 cdb[0] = SCMD_READ_CDD4; 26890 } else { 26891 /* Universal CD Access Command */ 26892 cdb[0] = SCMD_READ_CD; 26893 } 26894 26895 /* 26896 * Set expected sector type to: 2336s byte, Mode 2 Yellow Book 26897 */ 26898 cdb[1] = CDROM_SECTOR_TYPE_MODE2; 26899 26900 /* set the start address */ 26901 cdb[2] = (uchar_t)((mode2->cdread_lba >> 24) & 0XFF); 26902 cdb[3] = (uchar_t)((mode2->cdread_lba >> 16) & 0XFF); 26903 cdb[4] = (uchar_t)((mode2->cdread_lba >> 8) & 0xFF); 26904 cdb[5] = (uchar_t)(mode2->cdread_lba & 0xFF); 26905 26906 /* set the transfer length */ 26907 nblocks = mode2->cdread_buflen / 2336; 26908 cdb[6] = (uchar_t)(nblocks >> 16); 26909 cdb[7] = (uchar_t)(nblocks >> 8); 26910 cdb[8] = (uchar_t)nblocks; 26911 26912 /* set the filter bits */ 26913 cdb[9] = CDROM_READ_CD_USERDATA; 26914 26915 com = kmem_zalloc(sizeof (*com), KM_SLEEP); 26916 com->uscsi_cdb = (caddr_t)cdb; 26917 com->uscsi_cdblen = sizeof (cdb); 26918 com->uscsi_bufaddr = mode2->cdread_bufaddr; 26919 com->uscsi_buflen = mode2->cdread_buflen; 26920 com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT|USCSI_READ; 26921 26922 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_USERSPACE, 26923 SD_PATH_STANDARD); 26924 kmem_free(com, sizeof (*com)); 26925 return (rval); 26926 } 26927 26928 26929 /* 26930 * Function: sr_read_mode2() 26931 * 26932 * Description: This routine is the driver entry point for handling CD-ROM 26933 * ioctl read mode2 requests (CDROMREADMODE2) for devices that 26934 * do not support the READ CD (0xBE) command. 26935 * 26936 * Arguments: dev - the device 'dev_t' 26937 * data - pointer to user provided cd read structure specifying 26938 * the lba buffer address and length. 26939 * flag - this argument is a pass through to ddi_copyxxx() 26940 * directly from the mode argument of ioctl(). 
26941 * 26942 * Return Code: the code returned by sd_send_scsi_cmd() 26943 * EFAULT if ddi_copyxxx() fails 26944 * ENXIO if fail ddi_get_soft_state 26945 * EINVAL if data pointer is NULL 26946 * EIO if fail to reset block size 26947 * EAGAIN if commands are in progress in the driver 26948 */ 26949 26950 static int 26951 sr_read_mode2(dev_t dev, caddr_t data, int flag) 26952 { 26953 struct sd_lun *un; 26954 struct cdrom_read mode2_struct; 26955 struct cdrom_read *mode2 = &mode2_struct; 26956 int rval; 26957 uint32_t restore_blksize; 26958 struct uscsi_cmd *com; 26959 uchar_t cdb[CDB_GROUP0]; 26960 int nblocks; 26961 26962 #ifdef _MULTI_DATAMODEL 26963 /* To support ILP32 applications in an LP64 world */ 26964 struct cdrom_read32 cdrom_read32; 26965 struct cdrom_read32 *cdrd32 = &cdrom_read32; 26966 #endif /* _MULTI_DATAMODEL */ 26967 26968 if (data == NULL) { 26969 return (EINVAL); 26970 } 26971 26972 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL || 26973 (un->un_state == SD_STATE_OFFLINE)) { 26974 return (ENXIO); 26975 } 26976 26977 /* 26978 * Because this routine will update the device and driver block size 26979 * being used we want to make sure there are no commands in progress. 26980 * If commands are in progress the user will have to try again. 26981 * 26982 * We check for 1 instead of 0 because we increment un_ncmds_in_driver 26983 * in sdioctl to protect commands from sdioctl through to the top of 26984 * sd_uscsi_strategy. See sdioctl for details. 26985 */ 26986 mutex_enter(SD_MUTEX(un)); 26987 if (un->un_ncmds_in_driver != 1) { 26988 mutex_exit(SD_MUTEX(un)); 26989 return (EAGAIN); 26990 } 26991 mutex_exit(SD_MUTEX(un)); 26992 26993 SD_TRACE(SD_LOG_ATTACH_DETACH, un, 26994 "sd_read_mode2: entry: un:0x%p\n", un); 26995 26996 #ifdef _MULTI_DATAMODEL 26997 switch (ddi_model_convert_from(flag & FMODELS)) { 26998 case DDI_MODEL_ILP32: 26999 if (ddi_copyin(data, cdrd32, sizeof (*cdrd32), flag) != 0) { 27000 return (EFAULT); 27001 } 27002 /* Convert the ILP32 uscsi data from the application to LP64 */ 27003 cdrom_read32tocdrom_read(cdrd32, mode2); 27004 break; 27005 case DDI_MODEL_NONE: 27006 if (ddi_copyin(data, mode2, sizeof (*mode2), flag) != 0) { 27007 return (EFAULT); 27008 } 27009 break; 27010 } 27011 #else /* ! 
_MULTI_DATAMODEL */
27012     if (ddi_copyin(data, mode2, sizeof (*mode2), flag)) {
27013         return (EFAULT);
27014     }
27015 #endif /* _MULTI_DATAMODEL */
27016
27017     /* Store the current target block size for restoration later */
27018     restore_blksize = un->un_tgt_blocksize;
27019
27020     /* Change the device and soft state target block size to 2336 */
27021     if (sr_sector_mode(dev, SD_MODE2_BLKSIZE) != 0) {
27022         rval = EIO;
27023         goto done;
27024     }
27025
27026
27027     bzero(cdb, sizeof (cdb));
27028
27029     /* set READ operation */
27030     cdb[0] = SCMD_READ;
27031
27032     /* adjust lba for 2kbyte blocks from 512 byte blocks */
27033     mode2->cdread_lba >>= 2;
27034
27035     /* set the start address */
27036     cdb[1] = (uchar_t)((mode2->cdread_lba >> 16) & 0X1F);
27037     cdb[2] = (uchar_t)((mode2->cdread_lba >> 8) & 0xFF);
27038     cdb[3] = (uchar_t)(mode2->cdread_lba & 0xFF);
27039
27040     /* set the transfer length */
27041     nblocks = mode2->cdread_buflen / 2336;
27042     cdb[4] = (uchar_t)nblocks & 0xFF;
27043
27044     /* build command */
27045     com = kmem_zalloc(sizeof (*com), KM_SLEEP);
27046     com->uscsi_cdb = (caddr_t)cdb;
27047     com->uscsi_cdblen = sizeof (cdb);
27048     com->uscsi_bufaddr = mode2->cdread_bufaddr;
27049     com->uscsi_buflen = mode2->cdread_buflen;
27050     com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT|USCSI_READ;
27051
27052     /*
27053      * Issue SCSI command with user space address for read buffer.
27054      *
27055      * This sends the command through the main channel in the driver.
27056      *
27057      * Since this is accessed via an IOCTL call, we go through the
27058      * standard path, so that if the device was powered down, then
27059      * it would be 'awakened' to handle the command.
27060      */
27061     rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_USERSPACE,
27062         SD_PATH_STANDARD);
27063
27064     kmem_free(com, sizeof (*com));
27065
27066     /* Restore the device and soft state target block size */
27067     if (sr_sector_mode(dev, restore_blksize) != 0) {
27068         scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
27069             "can't switch back to mode 1\n");
27070         /*
27071          * If the READ above succeeded we still need to report
27072          * an error because we failed to reset the block size.
27073          */
27074         if (rval == 0) {
27075             rval = EIO;
27076         }
27077     }
27078
27079 done:
27080     SD_TRACE(SD_LOG_ATTACH_DETACH, un,
27081         "sd_read_mode2: exit: un:0x%p\n", un);
27082
27083     return (rval);
27084 }
27085
27086
27087 /*
27088  * Function: sr_sector_mode()
27089  *
27090  * Description: This utility function is used by sr_read_mode2 to set the target
27091  *        block size based on the user specified size. This is a legacy
27092  *        implementation based upon a vendor specific mode page (0x81).
27093  *
27094  * Arguments: dev - the device 'dev_t'
27095  *        blksize - flag indicating if block size is being set to 2336 or
27096  *        512.
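 *
 *        Note: under this interpretation of the vendor page, the 20 byte
 *        mode select image built in the body is laid out as a 4 byte
 *        mode header (bdesc_length = 8), an 8 byte block descriptor
 *        whose last two bytes carry the new block size, and the page
 *        itself (code 0x01, length 6), with bit 0 of the first data
 *        byte (select[14]) selecting the 2336 byte mode.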
27097 * 27098 * Return Code: the code returned by sd_send_scsi_cmd() 27099 * EFAULT if ddi_copyxxx() fails 27100 * ENXIO if fail ddi_get_soft_state 27101 * EINVAL if data pointer is NULL 27102 */ 27103 27104 static int 27105 sr_sector_mode(dev_t dev, uint32_t blksize) 27106 { 27107 struct sd_lun *un; 27108 uchar_t *sense; 27109 uchar_t *select; 27110 int rval; 27111 sd_ssc_t *ssc; 27112 27113 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL || 27114 (un->un_state == SD_STATE_OFFLINE)) { 27115 return (ENXIO); 27116 } 27117 27118 sense = kmem_zalloc(20, KM_SLEEP); 27119 27120 /* Note: This is a vendor specific mode page (0x81) */ 27121 ssc = sd_ssc_init(un); 27122 rval = sd_send_scsi_MODE_SENSE(ssc, CDB_GROUP0, sense, 20, 0x81, 27123 SD_PATH_STANDARD); 27124 sd_ssc_fini(ssc); 27125 if (rval != 0) { 27126 SD_ERROR(SD_LOG_IOCTL_RMMEDIA, un, 27127 "sr_sector_mode: Mode Sense failed\n"); 27128 kmem_free(sense, 20); 27129 return (rval); 27130 } 27131 select = kmem_zalloc(20, KM_SLEEP); 27132 select[3] = 0x08; 27133 select[10] = ((blksize >> 8) & 0xff); 27134 select[11] = (blksize & 0xff); 27135 select[12] = 0x01; 27136 select[13] = 0x06; 27137 select[14] = sense[14]; 27138 select[15] = sense[15]; 27139 if (blksize == SD_MODE2_BLKSIZE) { 27140 select[14] |= 0x01; 27141 } 27142 27143 ssc = sd_ssc_init(un); 27144 rval = sd_send_scsi_MODE_SELECT(ssc, CDB_GROUP0, select, 20, 27145 SD_DONTSAVE_PAGE, SD_PATH_STANDARD); 27146 sd_ssc_fini(ssc); 27147 if (rval != 0) { 27148 SD_ERROR(SD_LOG_IOCTL_RMMEDIA, un, 27149 "sr_sector_mode: Mode Select failed\n"); 27150 } else { 27151 /* 27152 * Only update the softstate block size if we successfully 27153 * changed the device block mode. 27154 */ 27155 mutex_enter(SD_MUTEX(un)); 27156 sd_update_block_info(un, blksize, 0); 27157 mutex_exit(SD_MUTEX(un)); 27158 } 27159 kmem_free(sense, 20); 27160 kmem_free(select, 20); 27161 return (rval); 27162 } 27163 27164 27165 /* 27166 * Function: sr_read_cdda() 27167 * 27168 * Description: This routine is the driver entry point for handling CD-ROM 27169 * ioctl requests to return CD-DA or subcode data. (CDROMCDDA) If 27170 * the target supports CDDA these requests are handled via a vendor 27171 * specific command (0xD8) If the target does not support CDDA 27172 * these requests are handled via the READ CD command (0xBE). 27173 * 27174 * Arguments: dev - the device 'dev_t' 27175 * data - pointer to user provided CD-DA structure specifying 27176 * the track starting address, transfer length, and 27177 * subcode options. 27178 * flag - this argument is a pass through to ddi_copyxxx() 27179 * directly from the mode argument of ioctl(). 
27180 * 27181 * Return Code: the code returned by sd_send_scsi_cmd() 27182 * EFAULT if ddi_copyxxx() fails 27183 * ENXIO if fail ddi_get_soft_state 27184 * EINVAL if invalid arguments are provided 27185 * ENOTTY 27186 */ 27187 27188 static int 27189 sr_read_cdda(dev_t dev, caddr_t data, int flag) 27190 { 27191 struct sd_lun *un; 27192 struct uscsi_cmd *com; 27193 struct cdrom_cdda *cdda; 27194 int rval; 27195 size_t buflen; 27196 char cdb[CDB_GROUP5]; 27197 27198 #ifdef _MULTI_DATAMODEL 27199 /* To support ILP32 applications in an LP64 world */ 27200 struct cdrom_cdda32 cdrom_cdda32; 27201 struct cdrom_cdda32 *cdda32 = &cdrom_cdda32; 27202 #endif /* _MULTI_DATAMODEL */ 27203 27204 if (data == NULL) { 27205 return (EINVAL); 27206 } 27207 27208 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 27209 return (ENXIO); 27210 } 27211 27212 cdda = kmem_zalloc(sizeof (struct cdrom_cdda), KM_SLEEP); 27213 27214 #ifdef _MULTI_DATAMODEL 27215 switch (ddi_model_convert_from(flag & FMODELS)) { 27216 case DDI_MODEL_ILP32: 27217 if (ddi_copyin(data, cdda32, sizeof (*cdda32), flag)) { 27218 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 27219 "sr_read_cdda: ddi_copyin Failed\n"); 27220 kmem_free(cdda, sizeof (struct cdrom_cdda)); 27221 return (EFAULT); 27222 } 27223 /* Convert the ILP32 uscsi data from the application to LP64 */ 27224 cdrom_cdda32tocdrom_cdda(cdda32, cdda); 27225 break; 27226 case DDI_MODEL_NONE: 27227 if (ddi_copyin(data, cdda, sizeof (struct cdrom_cdda), flag)) { 27228 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 27229 "sr_read_cdda: ddi_copyin Failed\n"); 27230 kmem_free(cdda, sizeof (struct cdrom_cdda)); 27231 return (EFAULT); 27232 } 27233 break; 27234 } 27235 #else /* ! _MULTI_DATAMODEL */ 27236 if (ddi_copyin(data, cdda, sizeof (struct cdrom_cdda), flag)) { 27237 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 27238 "sr_read_cdda: ddi_copyin Failed\n"); 27239 kmem_free(cdda, sizeof (struct cdrom_cdda)); 27240 return (EFAULT); 27241 } 27242 #endif /* _MULTI_DATAMODEL */ 27243 27244 /* 27245 * Since MMC-2 expects max 3 bytes for length, check if the 27246 * length input is greater than 3 bytes 27247 */ 27248 if ((cdda->cdda_length & 0xFF000000) != 0) { 27249 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, "sr_read_cdda: " 27250 "cdrom transfer length too large: %d (limit %d)\n", 27251 cdda->cdda_length, 0xFFFFFF); 27252 kmem_free(cdda, sizeof (struct cdrom_cdda)); 27253 return (EINVAL); 27254 } 27255 27256 switch (cdda->cdda_subcode) { 27257 case CDROM_DA_NO_SUBCODE: 27258 buflen = CDROM_BLK_2352 * cdda->cdda_length; 27259 break; 27260 case CDROM_DA_SUBQ: 27261 buflen = CDROM_BLK_2368 * cdda->cdda_length; 27262 break; 27263 case CDROM_DA_ALL_SUBCODE: 27264 buflen = CDROM_BLK_2448 * cdda->cdda_length; 27265 break; 27266 case CDROM_DA_SUBCODE_ONLY: 27267 buflen = CDROM_BLK_SUBCODE * cdda->cdda_length; 27268 break; 27269 default: 27270 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 27271 "sr_read_cdda: Subcode '0x%x' Not Supported\n", 27272 cdda->cdda_subcode); 27273 kmem_free(cdda, sizeof (struct cdrom_cdda)); 27274 return (EINVAL); 27275 } 27276 27277 /* Build and send the command */ 27278 com = kmem_zalloc(sizeof (*com), KM_SLEEP); 27279 bzero(cdb, CDB_GROUP5); 27280 27281 if (un->un_f_cfg_cdda == TRUE) { 27282 cdb[0] = (char)SCMD_READ_CD; 27283 cdb[1] = 0x04; 27284 cdb[2] = (((cdda->cdda_addr) & 0xff000000) >> 24); 27285 cdb[3] = (((cdda->cdda_addr) & 0x00ff0000) >> 16); 27286 cdb[4] = (((cdda->cdda_addr) & 0x0000ff00) >> 8); 27287 cdb[5] = ((cdda->cdda_addr) & 0x000000ff); 27288 
cdb[6] = (((cdda->cdda_length) & 0x00ff0000) >> 16); 27289 cdb[7] = (((cdda->cdda_length) & 0x0000ff00) >> 8); 27290 cdb[8] = ((cdda->cdda_length) & 0x000000ff); 27291 cdb[9] = 0x10; 27292 switch (cdda->cdda_subcode) { 27293 case CDROM_DA_NO_SUBCODE : 27294 cdb[10] = 0x0; 27295 break; 27296 case CDROM_DA_SUBQ : 27297 cdb[10] = 0x2; 27298 break; 27299 case CDROM_DA_ALL_SUBCODE : 27300 cdb[10] = 0x1; 27301 break; 27302 case CDROM_DA_SUBCODE_ONLY : 27303 /* FALLTHROUGH */ 27304 default : 27305 kmem_free(cdda, sizeof (struct cdrom_cdda)); 27306 kmem_free(com, sizeof (*com)); 27307 return (ENOTTY); 27308 } 27309 } else { 27310 cdb[0] = (char)SCMD_READ_CDDA; 27311 cdb[2] = (((cdda->cdda_addr) & 0xff000000) >> 24); 27312 cdb[3] = (((cdda->cdda_addr) & 0x00ff0000) >> 16); 27313 cdb[4] = (((cdda->cdda_addr) & 0x0000ff00) >> 8); 27314 cdb[5] = ((cdda->cdda_addr) & 0x000000ff); 27315 cdb[6] = (((cdda->cdda_length) & 0xff000000) >> 24); 27316 cdb[7] = (((cdda->cdda_length) & 0x00ff0000) >> 16); 27317 cdb[8] = (((cdda->cdda_length) & 0x0000ff00) >> 8); 27318 cdb[9] = ((cdda->cdda_length) & 0x000000ff); 27319 cdb[10] = cdda->cdda_subcode; 27320 } 27321 27322 com->uscsi_cdb = cdb; 27323 com->uscsi_cdblen = CDB_GROUP5; 27324 com->uscsi_bufaddr = (caddr_t)cdda->cdda_data; 27325 com->uscsi_buflen = buflen; 27326 com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT|USCSI_READ; 27327 27328 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_USERSPACE, 27329 SD_PATH_STANDARD); 27330 27331 kmem_free(cdda, sizeof (struct cdrom_cdda)); 27332 kmem_free(com, sizeof (*com)); 27333 return (rval); 27334 } 27335 27336 27337 /* 27338 * Function: sr_read_cdxa() 27339 * 27340 * Description: This routine is the driver entry point for handling CD-ROM 27341 * ioctl requests to return CD-XA (Extended Architecture) data. 27342 * (CDROMCDXA). 27343 * 27344 * Arguments: dev - the device 'dev_t' 27345 * data - pointer to user provided CD-XA structure specifying 27346 * the data starting address, transfer length, and format 27347 * flag - this argument is a pass through to ddi_copyxxx() 27348 * directly from the mode argument of ioctl(). 27349 * 27350 * Return Code: the code returned by sd_send_scsi_cmd() 27351 * EFAULT if ddi_copyxxx() fails 27352 * ENXIO if fail ddi_get_soft_state 27353 * EINVAL if data pointer is NULL 27354 */ 27355 27356 static int 27357 sr_read_cdxa(dev_t dev, caddr_t data, int flag) 27358 { 27359 struct sd_lun *un; 27360 struct uscsi_cmd *com; 27361 struct cdrom_cdxa *cdxa; 27362 int rval; 27363 size_t buflen; 27364 char cdb[CDB_GROUP5]; 27365 uchar_t read_flags; 27366 27367 #ifdef _MULTI_DATAMODEL 27368 /* To support ILP32 applications in an LP64 world */ 27369 struct cdrom_cdxa32 cdrom_cdxa32; 27370 struct cdrom_cdxa32 *cdxa32 = &cdrom_cdxa32; 27371 #endif /* _MULTI_DATAMODEL */ 27372 27373 if (data == NULL) { 27374 return (EINVAL); 27375 } 27376 27377 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 27378 return (ENXIO); 27379 } 27380 27381 cdxa = kmem_zalloc(sizeof (struct cdrom_cdxa), KM_SLEEP); 27382 27383 #ifdef _MULTI_DATAMODEL 27384 switch (ddi_model_convert_from(flag & FMODELS)) { 27385 case DDI_MODEL_ILP32: 27386 if (ddi_copyin(data, cdxa32, sizeof (*cdxa32), flag)) { 27387 kmem_free(cdxa, sizeof (struct cdrom_cdxa)); 27388 return (EFAULT); 27389 } 27390 /* 27391 * Convert the ILP32 uscsi data from the 27392 * application to LP64 for internal use. 
27393 */
27394 cdrom_cdxa32tocdrom_cdxa(cdxa32, cdxa);
27395 break;
27396 case DDI_MODEL_NONE:
27397 if (ddi_copyin(data, cdxa, sizeof (struct cdrom_cdxa), flag)) {
27398 kmem_free(cdxa, sizeof (struct cdrom_cdxa));
27399 return (EFAULT);
27400 }
27401 break;
27402 }
27403 #else /* ! _MULTI_DATAMODEL */
27404 if (ddi_copyin(data, cdxa, sizeof (struct cdrom_cdxa), flag)) {
27405 kmem_free(cdxa, sizeof (struct cdrom_cdxa));
27406 return (EFAULT);
27407 }
27408 #endif /* _MULTI_DATAMODEL */
27409
27410 /*
27411 * Since MMC-2 expects max 3 bytes for length, check if the
27412 * length input is greater than 3 bytes.
27413 */
27414 if ((cdxa->cdxa_length & 0xFF000000) != 0) {
27415 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, "sr_read_cdxa: "
27416 "cdrom transfer length too large: %d (limit %d)\n",
27417 cdxa->cdxa_length, 0xFFFFFF);
27418 kmem_free(cdxa, sizeof (struct cdrom_cdxa));
27419 return (EINVAL);
27420 }
27421
27422 switch (cdxa->cdxa_format) {
27423 case CDROM_XA_DATA:
27424 buflen = CDROM_BLK_2048 * cdxa->cdxa_length;
27425 read_flags = 0x10;
27426 break;
27427 case CDROM_XA_SECTOR_DATA:
27428 buflen = CDROM_BLK_2352 * cdxa->cdxa_length;
27429 read_flags = 0xf8;
27430 break;
27431 case CDROM_XA_DATA_W_ERROR:
27432 buflen = CDROM_BLK_2646 * cdxa->cdxa_length;
27433 read_flags = 0xfc;
27434 break;
27435 default:
27436 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
27437 "sr_read_cdxa: Format '0x%x' Not Supported\n",
27438 cdxa->cdxa_format);
27439 kmem_free(cdxa, sizeof (struct cdrom_cdxa));
27440 return (EINVAL);
27441 }
27442
27443 com = kmem_zalloc(sizeof (*com), KM_SLEEP);
27444 bzero(cdb, CDB_GROUP5);
27445 if (un->un_f_mmc_cap == TRUE) {
27446 cdb[0] = (char)SCMD_READ_CD;
27447 cdb[2] = (((cdxa->cdxa_addr) & 0xff000000) >> 24);
27448 cdb[3] = (((cdxa->cdxa_addr) & 0x00ff0000) >> 16);
27449 cdb[4] = (((cdxa->cdxa_addr) & 0x0000ff00) >> 8);
27450 cdb[5] = ((cdxa->cdxa_addr) & 0x000000ff);
27451 cdb[6] = (((cdxa->cdxa_length) & 0x00ff0000) >> 16);
27452 cdb[7] = (((cdxa->cdxa_length) & 0x0000ff00) >> 8);
27453 cdb[8] = ((cdxa->cdxa_length) & 0x000000ff);
27454 cdb[9] = (char)read_flags;
27455 } else {
27456 /*
27457 * Note: A vendor specific command (0xDB) is being used here to
27458 * request a read of all subcodes.
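		 * Unlike the MMC READ CD path above, this vendor-specific
		 * CDB carries a full 4-byte transfer length in bytes 6-9
		 * and the requested format in byte 10, as built below.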
27459 */
27460 cdb[0] = (char)SCMD_READ_CDXA;
27461 cdb[2] = (((cdxa->cdxa_addr) & 0xff000000) >> 24);
27462 cdb[3] = (((cdxa->cdxa_addr) & 0x00ff0000) >> 16);
27463 cdb[4] = (((cdxa->cdxa_addr) & 0x0000ff00) >> 8);
27464 cdb[5] = ((cdxa->cdxa_addr) & 0x000000ff);
27465 cdb[6] = (((cdxa->cdxa_length) & 0xff000000) >> 24);
27466 cdb[7] = (((cdxa->cdxa_length) & 0x00ff0000) >> 16);
27467 cdb[8] = (((cdxa->cdxa_length) & 0x0000ff00) >> 8);
27468 cdb[9] = ((cdxa->cdxa_length) & 0x000000ff);
27469 cdb[10] = cdxa->cdxa_format;
27470 }
27471 com->uscsi_cdb = cdb;
27472 com->uscsi_cdblen = CDB_GROUP5;
27473 com->uscsi_bufaddr = (caddr_t)cdxa->cdxa_data;
27474 com->uscsi_buflen = buflen;
27475 com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT|USCSI_READ;
27476 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_USERSPACE,
27477 SD_PATH_STANDARD);
27478 kmem_free(cdxa, sizeof (struct cdrom_cdxa));
27479 kmem_free(com, sizeof (*com));
27480 return (rval);
27481 }
27482
27483
27484 /*
27485 * Function: sr_eject()
27486 *
27487 * Description: This routine is the driver entry point for handling CD-ROM
27488 * eject ioctl requests (FDEJECT, DKIOCEJECT, CDROMEJECT)
27489 *
27490 * Arguments: dev - the device 'dev_t'
27491 *
27492 * Return Code: the code returned by sd_send_scsi_cmd()
27493 */
27494
27495 static int
27496 sr_eject(dev_t dev)
27497 {
27498 struct sd_lun *un;
27499 int rval;
27500 sd_ssc_t *ssc;
27501
27502 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL ||
27503 (un->un_state == SD_STATE_OFFLINE)) {
27504 return (ENXIO);
27505 }
27506
27507 /*
27508 * To prevent race conditions with the eject
27509 * command, keep track of an eject command as
27510 * it progresses. If we are already handling
27511 * an eject command in the driver for the given
27512 * unit and another request to eject is received,
27513 * immediately return EAGAIN so we don't lose
27514 * the command if the current eject command fails.
27515 */
27516 mutex_enter(SD_MUTEX(un));
27517 if (un->un_f_ejecting == TRUE) {
27518 mutex_exit(SD_MUTEX(un));
27519 return (EAGAIN);
27520 }
27521 un->un_f_ejecting = TRUE;
27522 mutex_exit(SD_MUTEX(un));
27523
27524 ssc = sd_ssc_init(un);
27525 rval = sd_send_scsi_DOORLOCK(ssc, SD_REMOVAL_ALLOW,
27526 SD_PATH_STANDARD);
27527 sd_ssc_fini(ssc);
27528
27529 if (rval != 0) {
27530 mutex_enter(SD_MUTEX(un));
27531 un->un_f_ejecting = FALSE;
27532 mutex_exit(SD_MUTEX(un));
27533 return (rval);
27534 }
27535
27536 ssc = sd_ssc_init(un);
27537 rval = sd_send_scsi_START_STOP_UNIT(ssc, SD_TARGET_EJECT,
27538 SD_PATH_STANDARD);
27539 sd_ssc_fini(ssc);
27540
27541 if (rval == 0) {
27542 mutex_enter(SD_MUTEX(un));
27543 sr_ejected(un);
27544 un->un_mediastate = DKIO_EJECTED;
27545 un->un_f_ejecting = FALSE;
27546 cv_broadcast(&un->un_state_cv);
27547 mutex_exit(SD_MUTEX(un));
27548 } else {
27549 mutex_enter(SD_MUTEX(un));
27550 un->un_f_ejecting = FALSE;
27551 mutex_exit(SD_MUTEX(un));
27552 }
27553 return (rval);
27554 }
27555
27556
27557 /*
27558 * Function: sr_ejected()
27559 *
27560 * Description: This routine updates the soft state structure to invalidate the
27561 * geometry information after the media has been ejected or a
27562 * media eject has been detected.
27563 *
27564 * Arguments: un - driver soft state (unit) structure
27565 */
27566
27567 static void
27568 sr_ejected(struct sd_lun *un)
27569 {
27570 struct sd_errstats *stp;
27571
27572 ASSERT(un != NULL);
27573 ASSERT(mutex_owned(SD_MUTEX(un)));
27574
27575 un->un_f_blockcount_is_valid = FALSE;
27576 un->un_f_tgt_blocksize_is_valid = FALSE;
27577 mutex_exit(SD_MUTEX(un));
27578 cmlb_invalidate(un->un_cmlbhandle, (void *)SD_PATH_DIRECT_PRIORITY);
27579 mutex_enter(SD_MUTEX(un));
27580
27581 if (un->un_errstats != NULL) {
27582 stp = (struct sd_errstats *)un->un_errstats->ks_data;
27583 stp->sd_capacity.value.ui64 = 0;
27584 }
27585 }
27586
27587
27588 /*
27589 * Function: sr_check_wp()
27590 *
27591 * Description: This routine checks the write protection of removable
27592 * media disks and hotpluggable devices via the write protect bit of
27593 * the Mode Page Header device specific field. Some devices choke
27594 * on an unsupported mode page. In order to work around this issue,
27595 * this routine has been implemented to use the 0x3f mode page (request
27596 * for all pages) for all device types.
27597 *
27598 * Arguments: dev - the device 'dev_t'
27599 *
27600 * Return Code: int indicating if the device is write protected (1) or not (0)
27601 *
27602 * Context: Kernel thread.
27603 *
27604 */
27605
27606 static int
27607 sr_check_wp(dev_t dev)
27608 {
27609 struct sd_lun *un;
27610 uchar_t device_specific;
27611 uchar_t *sense;
27612 int hdrlen;
27613 int rval = FALSE;
27614 int status;
27615 sd_ssc_t *ssc;
27616
27617 /*
27618 * Note: The return codes for this routine should be reworked to
27619 * properly handle the case of a NULL softstate.
27620 */
27621 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) {
27622 return (FALSE);
27623 }
27624
27625 if (un->un_f_cfg_is_atapi == TRUE) {
27626 /*
27627 * The mode page contents are not required; set the allocation
27628 * length for the mode page header only
27629 */
27630 hdrlen = MODE_HEADER_LENGTH_GRP2;
27631 sense = kmem_zalloc(hdrlen, KM_SLEEP);
27632 ssc = sd_ssc_init(un);
27633 status = sd_send_scsi_MODE_SENSE(ssc, CDB_GROUP1, sense, hdrlen,
27634 MODEPAGE_ALLPAGES, SD_PATH_STANDARD);
27635 sd_ssc_fini(ssc);
27636 if (status != 0)
27637 goto err_exit;
27638 device_specific =
27639 ((struct mode_header_grp2 *)sense)->device_specific;
27640 } else {
27641 hdrlen = MODE_HEADER_LENGTH;
27642 sense = kmem_zalloc(hdrlen, KM_SLEEP);
27643 ssc = sd_ssc_init(un);
27644 status = sd_send_scsi_MODE_SENSE(ssc, CDB_GROUP0, sense, hdrlen,
27645 MODEPAGE_ALLPAGES, SD_PATH_STANDARD);
27646 sd_ssc_fini(ssc);
27647 if (status != 0)
27648 goto err_exit;
27649 device_specific =
27650 ((struct mode_header *)sense)->device_specific;
27651 }
27652
27653
27654 /*
27655 * Report write protection from the device specific field. If the
27656 * mode sense above failed (not all disks understand this query),
27657 * we already branched to err_exit to return FALSE for such devices.
27658 */
27659 if (device_specific & WRITE_PROTECT) {
27660 rval = TRUE;
27661 }
27662
27663 err_exit:
27664 kmem_free(sense, hdrlen);
27665 return (rval);
27666 }
27667
27668 /*
27669 * Function: sr_volume_ctrl()
27670 *
27671 * Description: This routine is the driver entry point for handling CD-ROM
27672 * audio output volume ioctl requests. (CDROMVOLCTRL)
27673 *
27674 * Arguments: dev - the device 'dev_t'
27675 * data - pointer to user audio volume control structure
27676 * flag - this argument is a pass through to ddi_copyxxx()
27677 * directly from the mode argument of ioctl().
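 *
 * A hypothetical user-level sketch of this ioctl (not part of the
 * driver; assumes an open file descriptor fd on the CD-ROM device):
 *
 *	struct cdrom_volctrl v;
 *	v.channel0 = 255;		set output port 0 to full volume
 *	v.channel1 = 255;		set output port 1 to full volume
 *	v.channel2 = v.channel3 = 0;	ports 2 & 3 are not supported
 *	if (ioctl(fd, CDROMVOLCTRL, &v) < 0)
 *		perror("CDROMVOLCTRL");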
27678 * 27679 * Return Code: the code returned by sd_send_scsi_cmd() 27680 * EFAULT if ddi_copyxxx() fails 27681 * ENXIO if fail ddi_get_soft_state 27682 * EINVAL if data pointer is NULL 27683 * 27684 */ 27685 27686 static int 27687 sr_volume_ctrl(dev_t dev, caddr_t data, int flag) 27688 { 27689 struct sd_lun *un; 27690 struct cdrom_volctrl volume; 27691 struct cdrom_volctrl *vol = &volume; 27692 uchar_t *sense_page; 27693 uchar_t *select_page; 27694 uchar_t *sense; 27695 uchar_t *select; 27696 int sense_buflen; 27697 int select_buflen; 27698 int rval; 27699 sd_ssc_t *ssc; 27700 27701 if (data == NULL) { 27702 return (EINVAL); 27703 } 27704 27705 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL || 27706 (un->un_state == SD_STATE_OFFLINE)) { 27707 return (ENXIO); 27708 } 27709 27710 if (ddi_copyin(data, vol, sizeof (struct cdrom_volctrl), flag)) { 27711 return (EFAULT); 27712 } 27713 27714 if ((un->un_f_cfg_is_atapi == TRUE) || (un->un_f_mmc_cap == TRUE)) { 27715 struct mode_header_grp2 *sense_mhp; 27716 struct mode_header_grp2 *select_mhp; 27717 int bd_len; 27718 27719 sense_buflen = MODE_PARAM_LENGTH_GRP2 + MODEPAGE_AUDIO_CTRL_LEN; 27720 select_buflen = MODE_HEADER_LENGTH_GRP2 + 27721 MODEPAGE_AUDIO_CTRL_LEN; 27722 sense = kmem_zalloc(sense_buflen, KM_SLEEP); 27723 select = kmem_zalloc(select_buflen, KM_SLEEP); 27724 ssc = sd_ssc_init(un); 27725 rval = sd_send_scsi_MODE_SENSE(ssc, CDB_GROUP1, sense, 27726 sense_buflen, MODEPAGE_AUDIO_CTRL, 27727 SD_PATH_STANDARD); 27728 sd_ssc_fini(ssc); 27729 27730 if (rval != 0) { 27731 SD_ERROR(SD_LOG_IOCTL_RMMEDIA, un, 27732 "sr_volume_ctrl: Mode Sense Failed\n"); 27733 kmem_free(sense, sense_buflen); 27734 kmem_free(select, select_buflen); 27735 return (rval); 27736 } 27737 sense_mhp = (struct mode_header_grp2 *)sense; 27738 select_mhp = (struct mode_header_grp2 *)select; 27739 bd_len = (sense_mhp->bdesc_length_hi << 8) | 27740 sense_mhp->bdesc_length_lo; 27741 if (bd_len > MODE_BLK_DESC_LENGTH) { 27742 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 27743 "sr_volume_ctrl: Mode Sense returned invalid " 27744 "block descriptor length\n"); 27745 kmem_free(sense, sense_buflen); 27746 kmem_free(select, select_buflen); 27747 return (EIO); 27748 } 27749 sense_page = (uchar_t *) 27750 (sense + MODE_HEADER_LENGTH_GRP2 + bd_len); 27751 select_page = (uchar_t *)(select + MODE_HEADER_LENGTH_GRP2); 27752 select_mhp->length_msb = 0; 27753 select_mhp->length_lsb = 0; 27754 select_mhp->bdesc_length_hi = 0; 27755 select_mhp->bdesc_length_lo = 0; 27756 } else { 27757 struct mode_header *sense_mhp, *select_mhp; 27758 27759 sense_buflen = MODE_PARAM_LENGTH + MODEPAGE_AUDIO_CTRL_LEN; 27760 select_buflen = MODE_HEADER_LENGTH + MODEPAGE_AUDIO_CTRL_LEN; 27761 sense = kmem_zalloc(sense_buflen, KM_SLEEP); 27762 select = kmem_zalloc(select_buflen, KM_SLEEP); 27763 ssc = sd_ssc_init(un); 27764 rval = sd_send_scsi_MODE_SENSE(ssc, CDB_GROUP0, sense, 27765 sense_buflen, MODEPAGE_AUDIO_CTRL, 27766 SD_PATH_STANDARD); 27767 sd_ssc_fini(ssc); 27768 27769 if (rval != 0) { 27770 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 27771 "sr_volume_ctrl: Mode Sense Failed\n"); 27772 kmem_free(sense, sense_buflen); 27773 kmem_free(select, select_buflen); 27774 return (rval); 27775 } 27776 sense_mhp = (struct mode_header *)sense; 27777 select_mhp = (struct mode_header *)select; 27778 if (sense_mhp->bdesc_length > MODE_BLK_DESC_LENGTH) { 27779 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 27780 "sr_volume_ctrl: Mode Sense returned invalid " 27781 "block descriptor length\n"); 27782 
kmem_free(sense, sense_buflen);
27783 kmem_free(select, select_buflen);
27784 return (EIO);
27785 }
27786 sense_page = (uchar_t *)
27787 (sense + MODE_HEADER_LENGTH + sense_mhp->bdesc_length);
27788 select_page = (uchar_t *)(select + MODE_HEADER_LENGTH);
27789 select_mhp->length = 0;
27790 select_mhp->bdesc_length = 0;
27791 }
27792 /*
27793 * Note: An audio control data structure could be created and overlaid
27794 * on the following in place of the array indexing method implemented.
27795 */
27796
27797 /* Build the select data for the user volume data */
27798 select_page[0] = MODEPAGE_AUDIO_CTRL;
27799 select_page[1] = 0xE;
27800 /* Set the immediate bit */
27801 select_page[2] = 0x04;
27802 /* Zero out reserved fields */
27803 select_page[3] = 0x00;
27804 select_page[4] = 0x00;
27805 /* Return sense data for fields not to be modified */
27806 select_page[5] = sense_page[5];
27807 select_page[6] = sense_page[6];
27808 select_page[7] = sense_page[7];
27809 /* Set the user specified volume levels for channels 0 and 1 */
27810 select_page[8] = 0x01;
27811 select_page[9] = vol->channel0;
27812 select_page[10] = 0x02;
27813 select_page[11] = vol->channel1;
27814 /* Channels 2 and 3 are currently unsupported, so return the sense data */
27815 select_page[12] = sense_page[12];
27816 select_page[13] = sense_page[13];
27817 select_page[14] = sense_page[14];
27818 select_page[15] = sense_page[15];
27819
27820 ssc = sd_ssc_init(un);
27821 if ((un->un_f_cfg_is_atapi == TRUE) || (un->un_f_mmc_cap == TRUE)) {
27822 rval = sd_send_scsi_MODE_SELECT(ssc, CDB_GROUP1, select,
27823 select_buflen, SD_DONTSAVE_PAGE, SD_PATH_STANDARD);
27824 } else {
27825 rval = sd_send_scsi_MODE_SELECT(ssc, CDB_GROUP0, select,
27826 select_buflen, SD_DONTSAVE_PAGE, SD_PATH_STANDARD);
27827 }
27828 sd_ssc_fini(ssc);
27829
27830 kmem_free(sense, sense_buflen);
27831 kmem_free(select, select_buflen);
27832 return (rval);
27833 }
27834
27835
27836 /*
27837 * Function: sr_read_sony_session_offset()
27838 *
27839 * Description: This routine is the driver entry point for handling CD-ROM
27840 * ioctl requests for session offset information. (CDROMREADOFFSET)
27841 * The address of the first track in the last session of a
27842 * multi-session CD-ROM is returned.
27843 *
27844 * Note: This routine uses a vendor specific key value in the
27845 * command control field without implementing any vendor check here
27846 * or in the ioctl routine.
27847 *
27848 * Arguments: dev - the device 'dev_t'
27849 * data - pointer to an int to hold the requested address
27850 * flag - this argument is a pass through to ddi_copyxxx()
27851 * directly from the mode argument of ioctl().
27852 *
27853 * Return Code: the code returned by sd_send_scsi_cmd()
27854 * EFAULT if ddi_copyxxx() fails
27855 * ENXIO if fail ddi_get_soft_state
27856 * EINVAL if data pointer is NULL
27857 */
27858
27859 static int
27860 sr_read_sony_session_offset(dev_t dev, caddr_t data, int flag)
27861 {
27862 struct sd_lun *un;
27863 struct uscsi_cmd *com;
27864 caddr_t buffer;
27865 char cdb[CDB_GROUP1];
27866 int session_offset = 0;
27867 int rval;
27868
27869 if (data == NULL) {
27870 return (EINVAL);
27871 }
27872
27873 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL ||
27874 (un->un_state == SD_STATE_OFFLINE)) {
27875 return (ENXIO);
27876 }
27877
27878 buffer = kmem_zalloc((size_t)SONY_SESSION_OFFSET_LEN, KM_SLEEP);
27879 bzero(cdb, CDB_GROUP1);
27880 cdb[0] = SCMD_READ_TOC;
27881 /*
27882 * Bytes 7 & 8 are the 12 byte allocation length for a single entry.
27883 * (4 byte TOC response header + 8 byte response data)
27884 */
27885 cdb[8] = SONY_SESSION_OFFSET_LEN;
27886 /* Byte 9 is the control byte. A vendor specific value is used */
27887 cdb[9] = SONY_SESSION_OFFSET_KEY;
27888 com = kmem_zalloc(sizeof (*com), KM_SLEEP);
27889 com->uscsi_cdb = cdb;
27890 com->uscsi_cdblen = CDB_GROUP1;
27891 com->uscsi_bufaddr = buffer;
27892 com->uscsi_buflen = SONY_SESSION_OFFSET_LEN;
27893 com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT|USCSI_READ;
27894
27895 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_SYSSPACE,
27896 SD_PATH_STANDARD);
27897 if (rval != 0) {
27898 kmem_free(buffer, SONY_SESSION_OFFSET_LEN);
27899 kmem_free(com, sizeof (*com));
27900 return (rval);
27901 }
27902 if (buffer[1] == SONY_SESSION_OFFSET_VALID) {
27903 session_offset =
27904 ((uchar_t)buffer[8] << 24) + ((uchar_t)buffer[9] << 16) +
27905 ((uchar_t)buffer[10] << 8) + ((uchar_t)buffer[11]);
27906 /*
27907 * The offset is returned in current lbasize blocks. Convert to
27908 * 2K blocks before returning it to the user.
27909 */
27910 if (un->un_tgt_blocksize == CDROM_BLK_512) {
27911 session_offset >>= 2;
27912 } else if (un->un_tgt_blocksize == CDROM_BLK_1024) {
27913 session_offset >>= 1;
27914 }
27915 }
27916
27917 if (ddi_copyout(&session_offset, data, sizeof (int), flag) != 0) {
27918 rval = EFAULT;
27919 }
27920
27921 kmem_free(buffer, SONY_SESSION_OFFSET_LEN);
27922 kmem_free(com, sizeof (*com));
27923 return (rval);
27924 }
27925
27926
27927 /*
27928 * Function: sd_wm_cache_constructor()
27929 *
27930 * Description: Cache Constructor for the wmap cache for the read/modify/write
27931 * devices.
27932 *
27933 * Arguments: wm - A pointer to the sd_w_map to be initialized.
27934 * un - sd_lun structure for the device.
27935 * flag - the km flags passed to constructor
27936 *
27937 * Return Code: 0 on success.
27938 * -1 on failure.
27939 */
27940
27941 /*ARGSUSED*/
27942 static int
27943 sd_wm_cache_constructor(void *wm, void *un, int flags)
27944 {
27945 bzero(wm, sizeof (struct sd_w_map));
27946 cv_init(&((struct sd_w_map *)wm)->wm_avail, NULL, CV_DRIVER, NULL);
27947 return (0);
27948 }
27949
27950
27951 /*
27952 * Function: sd_wm_cache_destructor()
27953 *
27954 * Description: Cache destructor for the wmap cache for the read/modify/write
27955 * devices.
27956 *
27957 * Arguments: wm - A pointer to the sd_w_map to be destroyed.
27958 * un - sd_lun structure for the device.
27959 */
27960 /*ARGSUSED*/
27961 static void
27962 sd_wm_cache_destructor(void *wm, void *un)
27963 {
27964 cv_destroy(&((struct sd_w_map *)wm)->wm_avail);
27965 }
27966
27967
27968 /*
27969 * Function: sd_range_lock()
27970 *
27971 * Description: Lock the specified range of blocks to ensure
27972 * that the read, modify, write is atomic and no other i/o writes
27973 * to the same location. The range is specified in terms
27974 * of start and end blocks. Block numbers are the actual
27975 * media block numbers, not system block numbers.
27976 *
27977 * Arguments: un - sd_lun structure for the device.
27978 * startb - The starting block number
27979 * endb - The end block number
27980 * typ - type of i/o - simple/read_modify_write
27981 *
27982 * Return Code: wm - pointer to the wmap structure.
27983 *
27984 * Context: This routine can sleep.
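 *
 * A minimal sketch of the expected calling pattern (the actual
 * callers are in the read-modify-write path of this driver):
 *
 *	wm = sd_range_lock(un, start_blk, end_blk, SD_WTYPE_RMW);
 *	... read the enclosing target blocks, merge in the new
 *	    data, and write the merged blocks back out ...
 *	sd_range_unlock(un, wm);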
27985 */
27986
27987 static struct sd_w_map *
27988 sd_range_lock(struct sd_lun *un, daddr_t startb, daddr_t endb, ushort_t typ)
27989 {
27990 struct sd_w_map *wmp = NULL;
27991 struct sd_w_map *sl_wmp = NULL;
27992 struct sd_w_map *tmp_wmp;
27993 wm_state state = SD_WM_CHK_LIST;
27994
27995
27996 ASSERT(un != NULL);
27997 ASSERT(!mutex_owned(SD_MUTEX(un)));
27998
27999 mutex_enter(SD_MUTEX(un));
28000
28001 while (state != SD_WM_DONE) {
28002
28003 switch (state) {
28004 case SD_WM_CHK_LIST:
28005 /*
28006 * This is the starting state. Check the wmap list
28007 * to see if the range is currently available.
28008 */
28009 if (!(typ & SD_WTYPE_RMW) && !(un->un_rmw_count)) {
28010 /*
28011 * If this is a simple write and no rmw
28012 * i/o is pending then try to lock the
28013 * range as the range should be available.
28014 */
28015 state = SD_WM_LOCK_RANGE;
28016 } else {
28017 tmp_wmp = sd_get_range(un, startb, endb);
28018 if (tmp_wmp != NULL) {
28019 if ((wmp != NULL) && ONLIST(un, wmp)) {
28020 /*
28021 * Should not keep onlist wmps
28022 * while waiting; this macro
28023 * will also do wmp = NULL.
28024 */
28025 FREE_ONLIST_WMAP(un, wmp);
28026 }
28027 /*
28028 * sl_wmp is the wmap on which the wait
28029 * is done. Since tmp_wmp points
28030 * to the in-use wmap, set sl_wmp to
28031 * tmp_wmp and change the state to wait.
28032 */
28033 sl_wmp = tmp_wmp;
28034 state = SD_WM_WAIT_MAP;
28035 } else {
28036 state = SD_WM_LOCK_RANGE;
28037 }
28038
28039 }
28040 break;
28041
28042 case SD_WM_LOCK_RANGE:
28043 ASSERT(un->un_wm_cache);
28044 /*
28045 * The range needs to be locked; try to get a wmap.
28046 * First attempt it with KM_NOSLEEP: we want to avoid a sleep
28047 * if possible, as we will have to release the sd mutex
28048 * if we have to sleep.
28049 */
28050 if (wmp == NULL)
28051 wmp = kmem_cache_alloc(un->un_wm_cache,
28052 KM_NOSLEEP);
28053 if (wmp == NULL) {
28054 mutex_exit(SD_MUTEX(un));
28055 _NOTE(DATA_READABLE_WITHOUT_LOCK
28056 (sd_lun::un_wm_cache))
28057 wmp = kmem_cache_alloc(un->un_wm_cache,
28058 KM_SLEEP);
28059 mutex_enter(SD_MUTEX(un));
28060 /*
28061 * We released the mutex, so recheck by going
28062 * back to the check list state.
28063 */
28064 state = SD_WM_CHK_LIST;
28065 } else {
28066 /*
28067 * We exit the state machine since we
28068 * have the wmap. Do the housekeeping first:
28069 * place the wmap on the wmap list if it is not
28070 * on it already, and then set the state to done.
28071 */
28072 wmp->wm_start = startb;
28073 wmp->wm_end = endb;
28074 wmp->wm_flags = typ | SD_WM_BUSY;
28075 if (typ & SD_WTYPE_RMW) {
28076 un->un_rmw_count++;
28077 }
28078 /*
28079 * If not already on the list, then link it.
28080 */
28081 if (!ONLIST(un, wmp)) {
28082 wmp->wm_next = un->un_wm;
28083 wmp->wm_prev = NULL;
28084 if (wmp->wm_next)
28085 wmp->wm_next->wm_prev = wmp;
28086 un->un_wm = wmp;
28087 }
28088 state = SD_WM_DONE;
28089 }
28090 break;
28091
28092 case SD_WM_WAIT_MAP:
28093 ASSERT(sl_wmp->wm_flags & SD_WM_BUSY);
28094 /*
28095 * Wait is done on sl_wmp, which is set in the
28096 * check_list state.
28097 */
28098 sl_wmp->wm_wanted_count++;
28099 cv_wait(&sl_wmp->wm_avail, SD_MUTEX(un));
28100 sl_wmp->wm_wanted_count--;
28101 /*
28102 * We can reuse the memory from the completed sl_wmp
28103 * lock range for our new lock, but only if no one is
28104 * waiting for it.
28105 */
28106 ASSERT(!(sl_wmp->wm_flags & SD_WM_BUSY));
28107 if (sl_wmp->wm_wanted_count == 0) {
28108 if (wmp != NULL)
28109 CHK_N_FREEWMP(un, wmp);
28110 wmp = sl_wmp;
28111 }
28112 sl_wmp = NULL;
28113 /*
28114 * After waking up, we need to recheck for availability
28115 * of the range.
28116 */
28117 state = SD_WM_CHK_LIST;
28118 break;
28119
28120 default:
28121 panic("sd_range_lock: "
28122 "Unknown state %d in sd_range_lock", state);
28123 /*NOTREACHED*/
28124 } /* switch(state) */
28125
28126 } /* while(state != SD_WM_DONE) */
28127
28128 mutex_exit(SD_MUTEX(un));
28129
28130 ASSERT(wmp != NULL);
28131
28132 return (wmp);
28133 }
28134
28135
28136 /*
28137 * Function: sd_get_range()
28138 *
28139 * Description: Find if there is any I/O overlapping this one.
28140 * Returns the write-map of the 1st such I/O, NULL otherwise.
28141 *
28142 * Arguments: un - sd_lun structure for the device.
28143 * startb - The starting block number
28144 * endb - The end block number
28145 *
28146 * Return Code: wm - pointer to the wmap structure.
28147 */
28148
28149 static struct sd_w_map *
28150 sd_get_range(struct sd_lun *un, daddr_t startb, daddr_t endb)
28151 {
28152 struct sd_w_map *wmp;
28153
28154 ASSERT(un != NULL);
28155
28156 for (wmp = un->un_wm; wmp != NULL; wmp = wmp->wm_next) {
28157 if (!(wmp->wm_flags & SD_WM_BUSY)) {
28158 continue;
28159 }
28160 if ((startb >= wmp->wm_start) && (startb <= wmp->wm_end)) {
28161 break;
28162 }
28163 if ((endb >= wmp->wm_start) && (endb <= wmp->wm_end)) {
28164 break;
28165 }
28166 }
28167
28168 return (wmp);
28169 }
28170
28171
28172 /*
28173 * Function: sd_free_inlist_wmap()
28174 *
28175 * Description: Unlink and free a write map struct.
28176 *
28177 * Arguments: un - sd_lun structure for the device.
28178 * wmp - sd_w_map which needs to be unlinked.
28179 */
28180
28181 static void
28182 sd_free_inlist_wmap(struct sd_lun *un, struct sd_w_map *wmp)
28183 {
28184 ASSERT(un != NULL);
28185
28186 if (un->un_wm == wmp) {
28187 un->un_wm = wmp->wm_next;
28188 } else {
28189 wmp->wm_prev->wm_next = wmp->wm_next;
28190 }
28191
28192 if (wmp->wm_next) {
28193 wmp->wm_next->wm_prev = wmp->wm_prev;
28194 }
28195
28196 wmp->wm_next = wmp->wm_prev = NULL;
28197
28198 kmem_cache_free(un->un_wm_cache, wmp);
28199 }
28200
28201
28202 /*
28203 * Function: sd_range_unlock()
28204 *
28205 * Description: Unlock the range locked by wm.
28206 * Free the write map if nobody else is waiting on it.
28207 *
28208 * Arguments: un - sd_lun structure for the device.
28209 * wm - sd_w_map which needs to be unlinked.
28210 */
28211
28212 static void
28213 sd_range_unlock(struct sd_lun *un, struct sd_w_map *wm)
28214 {
28215 ASSERT(un != NULL);
28216 ASSERT(wm != NULL);
28217 ASSERT(!mutex_owned(SD_MUTEX(un)));
28218
28219 mutex_enter(SD_MUTEX(un));
28220
28221 if (wm->wm_flags & SD_WTYPE_RMW) {
28222 un->un_rmw_count--;
28223 }
28224
28225 if (wm->wm_wanted_count) {
28226 wm->wm_flags = 0;
28227 /*
28228 * Broadcast that the wmap is available now.
28229 */
28230 cv_broadcast(&wm->wm_avail);
28231 } else {
28232 /*
28233 * If no one is waiting on the map, it should be freed.
28234 */
28235 sd_free_inlist_wmap(un, wm);
28236 }
28237
28238 mutex_exit(SD_MUTEX(un));
28239 }
28240
28241
28242 /*
28243 * Function: sd_read_modify_write_task
28244 *
28245 * Description: Called from a taskq thread to initiate the write phase of
28246 * a read-modify-write request. This is used for targets where
28247 * un->un_sys_blocksize != un->un_tgt_blocksize.
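 *		For example (hypothetical sizes): with a 512-byte
 *		un_sys_blocksize and a 2048-byte un_tgt_blocksize, a
 *		512-byte write must first read the enclosing 2K target
 *		block, merge the 512 bytes into it, and then write the
 *		2K block back; this routine sends down that final write.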
28248 *
28249 * Arguments: arg - a pointer to the buf(9S) struct for the write command.
28250 *
28251 * Context: Called under taskq thread context.
28252 */
28253
28254 static void
28255 sd_read_modify_write_task(void *arg)
28256 {
28257 struct sd_mapblocksize_info *bsp;
28258 struct buf *bp;
28259 struct sd_xbuf *xp;
28260 struct sd_lun *un;
28261
28262 bp = arg; /* The bp is given in arg */
28263 ASSERT(bp != NULL);
28264
28265 /* Get the pointer to the layer-private data struct */
28266 xp = SD_GET_XBUF(bp);
28267 ASSERT(xp != NULL);
28268 bsp = xp->xb_private;
28269 ASSERT(bsp != NULL);
28270
28271 un = SD_GET_UN(bp);
28272 ASSERT(un != NULL);
28273 ASSERT(!mutex_owned(SD_MUTEX(un)));
28274
28275 SD_TRACE(SD_LOG_IO_RMMEDIA, un,
28276 "sd_read_modify_write_task: entry: buf:0x%p\n", bp);
28277
28278 /*
28279 * This is the write phase of a read-modify-write request, called
28280 * under the context of a taskq thread in response to the read
28281 * portion of the rmw request completing under interrupt
28282 * context. The write request must be sent from here down the iostart
28283 * chain as if it were being sent from sd_mapblocksize_iostart(), so
28284 * we use the layer index saved in the layer-private data area.
28285 */
28286 SD_NEXT_IOSTART(bsp->mbs_layer_index, un, bp);
28287
28288 SD_TRACE(SD_LOG_IO_RMMEDIA, un,
28289 "sd_read_modify_write_task: exit: buf:0x%p\n", bp);
28290 }
28291
28292
28293 /*
28294 * Function: sddump_do_read_of_rmw()
28295 *
28296 * Description: This routine will be called from sddump. If sddump is called
28297 * with an I/O that is not aligned on a device blocksize boundary,
28298 * then the write has to be converted to a read-modify-write.
28299 * Do the read part here in order to keep sddump simple.
28300 * Note that the sd_mutex is held across the call to this
28301 * routine.
28302 *
28303 * Arguments: un - sd_lun
28304 * blkno - block number in terms of media block size.
28305 * nblk - number of blocks.
28306 * bpp - pointer to pointer to the buf structure. On return
28307 * from this function, *bpp points to the valid buffer
28308 * to which the write has to be done.
28309 *
28310 * Return Code: 0 for success or errno-type return code
28311 */
28312
28313 static int
28314 sddump_do_read_of_rmw(struct sd_lun *un, uint64_t blkno, uint64_t nblk,
28315 struct buf **bpp)
28316 {
28317 int err;
28318 int i;
28319 int rval;
28320 struct buf *bp;
28321 struct scsi_pkt *pkt = NULL;
28322 uint32_t target_blocksize;
28323
28324 ASSERT(un != NULL);
28325 ASSERT(mutex_owned(SD_MUTEX(un)));
28326
28327 target_blocksize = un->un_tgt_blocksize;
28328
28329 mutex_exit(SD_MUTEX(un));
28330
28331 bp = scsi_alloc_consistent_buf(SD_ADDRESS(un), (struct buf *)NULL,
28332 (size_t)(nblk * target_blocksize), B_READ, NULL_FUNC, NULL);
28333 if (bp == NULL) {
28334 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
28335 "no resources for dumping; giving up");
28336 err = ENOMEM;
28337 goto done;
28338 }
28339
28340 rval = sd_setup_rw_pkt(un, &pkt, bp, 0, NULL_FUNC, NULL,
28341 blkno, nblk);
28342 if (rval != 0) {
28343 scsi_free_consistent_buf(bp);
28344 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
28345 "no resources for dumping; giving up");
28346 err = ENOMEM;
28347 goto done;
28348 }
28349
28350 pkt->pkt_flags |= FLAG_NOINTR;
28351
28352 err = EIO;
28353 for (i = 0; i < SD_NDUMP_RETRIES; i++) {
28354
28355 /*
28356 * Scsi_poll returns 0 (success) if the command completes and
28357 * the status block is STATUS_GOOD.
We should only check 28358 * errors if this condition is not true. Even then we should 28359 * send our own request sense packet only if we have a check 28360 * condition and auto request sense has not been performed by 28361 * the hba. 28362 */ 28363 SD_TRACE(SD_LOG_DUMP, un, "sddump: sending read\n"); 28364 28365 if ((sd_scsi_poll(un, pkt) == 0) && (pkt->pkt_resid == 0)) { 28366 err = 0; 28367 break; 28368 } 28369 28370 /* 28371 * Check CMD_DEV_GONE 1st, give up if device is gone, 28372 * no need to read RQS data. 28373 */ 28374 if (pkt->pkt_reason == CMD_DEV_GONE) { 28375 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 28376 "Error while dumping state with rmw..." 28377 "Device is gone\n"); 28378 break; 28379 } 28380 28381 if (SD_GET_PKT_STATUS(pkt) == STATUS_CHECK) { 28382 SD_INFO(SD_LOG_DUMP, un, 28383 "sddump: read failed with CHECK, try # %d\n", i); 28384 if (((pkt->pkt_state & STATE_ARQ_DONE) == 0)) { 28385 (void) sd_send_polled_RQS(un); 28386 } 28387 28388 continue; 28389 } 28390 28391 if (SD_GET_PKT_STATUS(pkt) == STATUS_BUSY) { 28392 int reset_retval = 0; 28393 28394 SD_INFO(SD_LOG_DUMP, un, 28395 "sddump: read failed with BUSY, try # %d\n", i); 28396 28397 if (un->un_f_lun_reset_enabled == TRUE) { 28398 reset_retval = scsi_reset(SD_ADDRESS(un), 28399 RESET_LUN); 28400 } 28401 if (reset_retval == 0) { 28402 (void) scsi_reset(SD_ADDRESS(un), RESET_TARGET); 28403 } 28404 (void) sd_send_polled_RQS(un); 28405 28406 } else { 28407 SD_INFO(SD_LOG_DUMP, un, 28408 "sddump: read failed with 0x%x, try # %d\n", 28409 SD_GET_PKT_STATUS(pkt), i); 28410 mutex_enter(SD_MUTEX(un)); 28411 sd_reset_target(un, pkt); 28412 mutex_exit(SD_MUTEX(un)); 28413 } 28414 28415 /* 28416 * If we are not getting anywhere with lun/target resets, 28417 * let's reset the bus. 28418 */ 28419 if (i > SD_NDUMP_RETRIES/2) { 28420 (void) scsi_reset(SD_ADDRESS(un), RESET_ALL); 28421 (void) sd_send_polled_RQS(un); 28422 } 28423 28424 } 28425 scsi_destroy_pkt(pkt); 28426 28427 if (err != 0) { 28428 scsi_free_consistent_buf(bp); 28429 *bpp = NULL; 28430 } else { 28431 *bpp = bp; 28432 } 28433 28434 done: 28435 mutex_enter(SD_MUTEX(un)); 28436 return (err); 28437 } 28438 28439 28440 /* 28441 * Function: sd_failfast_flushq 28442 * 28443 * Description: Take all bp's on the wait queue that have B_FAILFAST set 28444 * in b_flags and move them onto the failfast queue, then kick 28445 * off a thread to return all bp's on the failfast queue to 28446 * their owners with an error set. 28447 * 28448 * Arguments: un - pointer to the soft state struct for the instance. 28449 * 28450 * Context: may execute in interrupt context. 28451 */ 28452 28453 static void 28454 sd_failfast_flushq(struct sd_lun *un) 28455 { 28456 struct buf *bp; 28457 struct buf *next_waitq_bp; 28458 struct buf *prev_waitq_bp = NULL; 28459 28460 ASSERT(un != NULL); 28461 ASSERT(mutex_owned(SD_MUTEX(un))); 28462 ASSERT(un->un_failfast_state == SD_FAILFAST_ACTIVE); 28463 ASSERT(un->un_failfast_bp == NULL); 28464 28465 SD_TRACE(SD_LOG_IO_FAILFAST, un, 28466 "sd_failfast_flushq: entry: un:0x%p\n", un); 28467 28468 /* 28469 * Check if we should flush all bufs when entering failfast state, or 28470 * just those with B_FAILFAST set. 28471 */ 28472 if (sd_failfast_flushctl & SD_FAILFAST_FLUSH_ALL_BUFS) { 28473 /* 28474 * Move *all* bp's on the wait queue to the failfast flush 28475 * queue, including those that do NOT have B_FAILFAST set. 
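 *
 * (sd_failfast_flushctl is a tunable/debug mask; the exact flag
 * values are assumptions here, but a hypothetical mdb session could
 * enable this behavior at run time with something like:
 *	> sd_failfast_flushctl/W 1
 * before reproducing the failure.)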
28476 */ 28477 if (un->un_failfast_headp == NULL) { 28478 ASSERT(un->un_failfast_tailp == NULL); 28479 un->un_failfast_headp = un->un_waitq_headp; 28480 } else { 28481 ASSERT(un->un_failfast_tailp != NULL); 28482 un->un_failfast_tailp->av_forw = un->un_waitq_headp; 28483 } 28484 28485 un->un_failfast_tailp = un->un_waitq_tailp; 28486 28487 /* update kstat for each bp moved out of the waitq */ 28488 for (bp = un->un_waitq_headp; bp != NULL; bp = bp->av_forw) { 28489 SD_UPDATE_KSTATS(un, kstat_waitq_exit, bp); 28490 } 28491 28492 /* empty the waitq */ 28493 un->un_waitq_headp = un->un_waitq_tailp = NULL; 28494 28495 } else { 28496 /* 28497 * Go thru the wait queue, pick off all entries with 28498 * B_FAILFAST set, and move these onto the failfast queue. 28499 */ 28500 for (bp = un->un_waitq_headp; bp != NULL; bp = next_waitq_bp) { 28501 /* 28502 * Save the pointer to the next bp on the wait queue, 28503 * so we get to it on the next iteration of this loop. 28504 */ 28505 next_waitq_bp = bp->av_forw; 28506 28507 /* 28508 * If this bp from the wait queue does NOT have 28509 * B_FAILFAST set, just move on to the next element 28510 * in the wait queue. Note, this is the only place 28511 * where it is correct to set prev_waitq_bp. 28512 */ 28513 if ((bp->b_flags & B_FAILFAST) == 0) { 28514 prev_waitq_bp = bp; 28515 continue; 28516 } 28517 28518 /* 28519 * Remove the bp from the wait queue. 28520 */ 28521 if (bp == un->un_waitq_headp) { 28522 /* The bp is the first element of the waitq. */ 28523 un->un_waitq_headp = next_waitq_bp; 28524 if (un->un_waitq_headp == NULL) { 28525 /* The wait queue is now empty */ 28526 un->un_waitq_tailp = NULL; 28527 } 28528 } else { 28529 /* 28530 * The bp is either somewhere in the middle 28531 * or at the end of the wait queue. 28532 */ 28533 ASSERT(un->un_waitq_headp != NULL); 28534 ASSERT(prev_waitq_bp != NULL); 28535 ASSERT((prev_waitq_bp->b_flags & B_FAILFAST) 28536 == 0); 28537 if (bp == un->un_waitq_tailp) { 28538 /* bp is the last entry on the waitq. */ 28539 ASSERT(next_waitq_bp == NULL); 28540 un->un_waitq_tailp = prev_waitq_bp; 28541 } 28542 prev_waitq_bp->av_forw = next_waitq_bp; 28543 } 28544 bp->av_forw = NULL; 28545 28546 /* 28547 * update kstat since the bp is moved out of 28548 * the waitq 28549 */ 28550 SD_UPDATE_KSTATS(un, kstat_waitq_exit, bp); 28551 28552 /* 28553 * Now put the bp onto the failfast queue. 28554 */ 28555 if (un->un_failfast_headp == NULL) { 28556 /* failfast queue is currently empty */ 28557 ASSERT(un->un_failfast_tailp == NULL); 28558 un->un_failfast_headp = 28559 un->un_failfast_tailp = bp; 28560 } else { 28561 /* Add the bp to the end of the failfast q */ 28562 ASSERT(un->un_failfast_tailp != NULL); 28563 ASSERT(un->un_failfast_tailp->b_flags & 28564 B_FAILFAST); 28565 un->un_failfast_tailp->av_forw = bp; 28566 un->un_failfast_tailp = bp; 28567 } 28568 } 28569 } 28570 28571 /* 28572 * Now return all bp's on the failfast queue to their owners. 28573 */ 28574 while ((bp = un->un_failfast_headp) != NULL) { 28575 28576 un->un_failfast_headp = bp->av_forw; 28577 if (un->un_failfast_headp == NULL) { 28578 un->un_failfast_tailp = NULL; 28579 } 28580 28581 /* 28582 * We want to return the bp with a failure error code, but 28583 * we do not want a call to sd_start_cmds() to occur here, 28584 * so use sd_return_failed_command_no_restart() instead of 28585 * sd_return_failed_command(). 28586 */ 28587 sd_return_failed_command_no_restart(un, bp, EIO); 28588 } 28589 28590 /* Flush the xbuf queues if required. 
*/
28591 if (sd_failfast_flushctl & SD_FAILFAST_FLUSH_ALL_QUEUES) {
28592 ddi_xbuf_flushq(un->un_xbuf_attr, sd_failfast_flushq_callback);
28593 }
28594
28595 SD_TRACE(SD_LOG_IO_FAILFAST, un,
28596 "sd_failfast_flushq: exit: un:0x%p\n", un);
28597 }
28598
28599
28600 /*
28601 * Function: sd_failfast_flushq_callback
28602 *
28603 * Description: Return TRUE if the given bp meets the criteria for failfast
28604 * flushing. Used with ddi_xbuf_flushq(9F).
28605 *
28606 * Arguments: bp - ptr to buf struct to be examined.
28607 *
28608 * Context: Any
28609 */
28610
28611 static int
28612 sd_failfast_flushq_callback(struct buf *bp)
28613 {
28614 /*
28615 * Return TRUE if (1) we want to flush ALL bufs when the failfast
28616 * state is entered; OR (2) the given bp has B_FAILFAST set.
28617 */
28618 return (((sd_failfast_flushctl & SD_FAILFAST_FLUSH_ALL_BUFS) ||
28619 (bp->b_flags & B_FAILFAST)) ? TRUE : FALSE);
28620 }
28621
28622
28623
28624 /*
28625 * Function: sd_setup_next_xfer
28626 *
28627 * Description: Prepare the next I/O operation using DMA_PARTIAL
28628 *
28629 */
28630
28631 static int
28632 sd_setup_next_xfer(struct sd_lun *un, struct buf *bp,
28633 struct scsi_pkt *pkt, struct sd_xbuf *xp)
28634 {
28635 ssize_t num_blks_not_xfered;
28636 daddr_t strt_blk_num;
28637 ssize_t bytes_not_xfered;
28638 int rval;
28639
28640 ASSERT(pkt->pkt_resid == 0);
28641
28642 /*
28643 * Calculate the next block number and amount to be transferred.
28644 *
28645 * How much data has NOT been transferred to the HBA yet.
28646 */
28647 bytes_not_xfered = xp->xb_dma_resid;
28648
28649 /*
28650 * Figure how many blocks have NOT been transferred to the HBA yet.
28651 */
28652 num_blks_not_xfered = SD_BYTES2TGTBLOCKS(un, bytes_not_xfered);
28653
28654 /*
28655 * Set the starting block number to the end of what WAS transferred.
28656 */
28657 strt_blk_num = xp->xb_blkno +
28658 SD_BYTES2TGTBLOCKS(un, bp->b_bcount - bytes_not_xfered);
28659
28660 /*
28661 * Move pkt to the next portion of the xfer. sd_setup_next_rw_pkt
28662 * will call scsi_init_pkt with NULL_FUNC so we do not have to release
28663 * the disk mutex here.
28664 */
28665 rval = sd_setup_next_rw_pkt(un, pkt, bp,
28666 strt_blk_num, num_blks_not_xfered);
28667
28668 if (rval == 0) {
28669
28670 /*
28671 * Success.
28672 *
28673 * Adjust things if there are still more blocks to be
28674 * transferred.
28675 */
28676 xp->xb_dma_resid = pkt->pkt_resid;
28677 pkt->pkt_resid = 0;
28678
28679 return (1);
28680 }
28681
28682 /*
28683 * There's really only one possible return value from
28684 * sd_setup_next_rw_pkt which occurs when scsi_init_pkt
28685 * returns NULL.
28686 */
28687 ASSERT(rval == SD_PKT_ALLOC_FAILURE);
28688
28689 bp->b_resid = bp->b_bcount;
28690 bp->b_flags |= B_ERROR;
28691
28692 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
28693 "Error setting up next portion of DMA transfer\n");
28694
28695 return (0);
28696 }
28697
28698 /*
28699 * Function: sd_panic_for_res_conflict
28700 *
28701 * Description: Call panic with a string formatted with "Reservation Conflict"
28702 * and a human readable identifier indicating the SD instance
28703 * that experienced the reservation conflict.
28704 *
28705 * Arguments: un - pointer to the soft state struct for the instance.
28706 *
28707 * Context: may execute in interrupt context.
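 *
 * The resulting panic string has the form (the device path below
 * is hypothetical):
 *
 *	Reservation Conflict
 *	Disk: /pci@1f,4000/scsi@3/sd@1,0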
28708 */
28709
28710 #define SD_RESV_CONFLICT_FMT_LEN 40
28711 void
28712 sd_panic_for_res_conflict(struct sd_lun *un)
28713 {
28714 char panic_str[SD_RESV_CONFLICT_FMT_LEN+MAXPATHLEN];
28715 char path_str[MAXPATHLEN];
28716
28717 (void) snprintf(panic_str, sizeof (panic_str),
28718 "Reservation Conflict\nDisk: %s",
28719 ddi_pathname(SD_DEVINFO(un), path_str));
28720
28721 panic(panic_str);
28722 }
28723
28724 /*
28725 * Note: The following sd_faultinjection_ioctl() routines implement
28726 * driver support for handling fault injection for error analysis,
28727 * causing faults in multiple layers of the driver.
28728 *
28729 */
28730
28731 #ifdef SD_FAULT_INJECTION
28732 static uint_t sd_fault_injection_on = 0;
28733
28734 /*
28735 * Function: sd_faultinjection_ioctl()
28736 *
28737 * Description: This routine is the driver entry point for handling
28738 * fault injection ioctls to inject errors into the
28739 * layer model
28740 *
28741 * Arguments: cmd - the ioctl cmd received
28742 * arg - the argument from the user, also used for returns
28743 */
28744
28745 static void
28746 sd_faultinjection_ioctl(int cmd, intptr_t arg, struct sd_lun *un) {
28747
28748 uint_t i = 0;
28749 uint_t rval;
28750
28751 SD_TRACE(SD_LOG_IOERR, un, "sd_faultinjection_ioctl: entry\n");
28752
28753 mutex_enter(SD_MUTEX(un));
28754
28755 switch (cmd) {
28756 case SDIOCRUN:
28757 /* Allow pushed faults to be injected */
28758 SD_INFO(SD_LOG_SDTEST, un,
28759 "sd_faultinjection_ioctl: Injecting Fault Run\n");
28760
28761 sd_fault_injection_on = 1;
28762
28763 SD_INFO(SD_LOG_IOERR, un,
28764 "sd_faultinjection_ioctl: run finished\n");
28765 break;
28766
28767 case SDIOCSTART:
28768 /* Start Injection Session */
28769 SD_INFO(SD_LOG_SDTEST, un,
28770 "sd_faultinjection_ioctl: Injecting Fault Start\n");
28771
28772 sd_fault_injection_on = 0;
28773 un->sd_injection_mask = 0xFFFFFFFF;
28774 for (i = 0; i < SD_FI_MAX_ERROR; i++) {
28775 un->sd_fi_fifo_pkt[i] = NULL;
28776 un->sd_fi_fifo_xb[i] = NULL;
28777 un->sd_fi_fifo_un[i] = NULL;
28778 un->sd_fi_fifo_arq[i] = NULL;
28779 }
28780 un->sd_fi_fifo_start = 0;
28781 un->sd_fi_fifo_end = 0;
28782
28783 mutex_enter(&(un->un_fi_mutex));
28784 un->sd_fi_log[0] = '\0';
28785 un->sd_fi_buf_len = 0;
28786 mutex_exit(&(un->un_fi_mutex));
28787
28788 SD_INFO(SD_LOG_IOERR, un,
28789 "sd_faultinjection_ioctl: start finished\n");
28790 break;
28791
28792 case SDIOCSTOP:
28793 /* Stop Injection Session */
28794 SD_INFO(SD_LOG_SDTEST, un,
28795 "sd_faultinjection_ioctl: Injecting Fault Stop\n");
28796 sd_fault_injection_on = 0;
28797 un->sd_injection_mask = 0x0;
28798
28799 /* Empty stray or unused structs from the fifo */
28800 for (i = 0; i < SD_FI_MAX_ERROR; i++) {
28801 if (un->sd_fi_fifo_pkt[i] != NULL) {
28802 kmem_free(un->sd_fi_fifo_pkt[i],
28803 sizeof (struct sd_fi_pkt));
28804 }
28805 if (un->sd_fi_fifo_xb[i] != NULL) {
28806 kmem_free(un->sd_fi_fifo_xb[i],
28807 sizeof (struct sd_fi_xb));
28808 }
28809 if (un->sd_fi_fifo_un[i] != NULL) {
28810 kmem_free(un->sd_fi_fifo_un[i],
28811 sizeof (struct sd_fi_un));
28812 }
28813 if (un->sd_fi_fifo_arq[i] != NULL) {
28814 kmem_free(un->sd_fi_fifo_arq[i],
28815 sizeof (struct sd_fi_arq));
28816 }
28817 un->sd_fi_fifo_pkt[i] = NULL;
28818 un->sd_fi_fifo_un[i] = NULL;
28819 un->sd_fi_fifo_xb[i] = NULL;
28820 un->sd_fi_fifo_arq[i] = NULL;
28821 }
28822 un->sd_fi_fifo_start = 0;
28823 un->sd_fi_fifo_end = 0;
28824
28825 SD_INFO(SD_LOG_IOERR, un,
28826 "sd_faultinjection_ioctl: stop finished\n");
28827 break;
28828
28829 case SDIOCINSERTPKT:
28830 /*
Store a packet struct to be pushed onto the fifo */
28831 SD_INFO(SD_LOG_SDTEST, un,
28832 "sd_faultinjection_ioctl: Injecting Fault Insert Pkt\n");
28833
28834 i = un->sd_fi_fifo_end % SD_FI_MAX_ERROR;
28835
28836 sd_fault_injection_on = 0;
28837
28838 /* No more than SD_FI_MAX_ERROR allowed in the queue */
28839 if (un->sd_fi_fifo_pkt[i] != NULL) {
28840 kmem_free(un->sd_fi_fifo_pkt[i],
28841 sizeof (struct sd_fi_pkt));
28842 }
28843 if (arg != NULL) {
28844 un->sd_fi_fifo_pkt[i] =
28845 kmem_alloc(sizeof (struct sd_fi_pkt), KM_NOSLEEP);
28846 if (un->sd_fi_fifo_pkt[i] == NULL) {
28847 /* Alloc failed; don't store anything */
28848 break;
28849 }
28850 rval = ddi_copyin((void *)arg, un->sd_fi_fifo_pkt[i],
28851 sizeof (struct sd_fi_pkt), 0);
28852 if (rval == -1) {
28853 kmem_free(un->sd_fi_fifo_pkt[i],
28854 sizeof (struct sd_fi_pkt));
28855 un->sd_fi_fifo_pkt[i] = NULL;
28856 }
28857 } else {
28858 SD_INFO(SD_LOG_IOERR, un,
28859 "sd_faultinjection_ioctl: pkt null\n");
28860 }
28861 break;
28862
28863 case SDIOCINSERTXB:
28864 /* Store an xb struct to be pushed onto the fifo */
28865 SD_INFO(SD_LOG_SDTEST, un,
28866 "sd_faultinjection_ioctl: Injecting Fault Insert XB\n");
28867
28868 i = un->sd_fi_fifo_end % SD_FI_MAX_ERROR;
28869
28870 sd_fault_injection_on = 0;
28871
28872 if (un->sd_fi_fifo_xb[i] != NULL) {
28873 kmem_free(un->sd_fi_fifo_xb[i],
28874 sizeof (struct sd_fi_xb));
28875 un->sd_fi_fifo_xb[i] = NULL;
28876 }
28877 if (arg != NULL) {
28878 un->sd_fi_fifo_xb[i] =
28879 kmem_alloc(sizeof (struct sd_fi_xb), KM_NOSLEEP);
28880 if (un->sd_fi_fifo_xb[i] == NULL) {
28881 /* Alloc failed; don't store anything */
28882 break;
28883 }
28884 rval = ddi_copyin((void *)arg, un->sd_fi_fifo_xb[i],
28885 sizeof (struct sd_fi_xb), 0);
28886
28887 if (rval == -1) {
28888 kmem_free(un->sd_fi_fifo_xb[i],
28889 sizeof (struct sd_fi_xb));
28890 un->sd_fi_fifo_xb[i] = NULL;
28891 }
28892 } else {
28893 SD_INFO(SD_LOG_IOERR, un,
28894 "sd_faultinjection_ioctl: xb null\n");
28895 }
28896 break;
28897
28898 case SDIOCINSERTUN:
28899 /* Store a un struct to be pushed onto the fifo */
28900 SD_INFO(SD_LOG_SDTEST, un,
28901 "sd_faultinjection_ioctl: Injecting Fault Insert UN\n");
28902
28903 i = un->sd_fi_fifo_end % SD_FI_MAX_ERROR;
28904
28905 sd_fault_injection_on = 0;
28906
28907 if (un->sd_fi_fifo_un[i] != NULL) {
28908 kmem_free(un->sd_fi_fifo_un[i],
28909 sizeof (struct sd_fi_un));
28910 un->sd_fi_fifo_un[i] = NULL;
28911 }
28912 if (arg != NULL) {
28913 un->sd_fi_fifo_un[i] =
28914 kmem_alloc(sizeof (struct sd_fi_un), KM_NOSLEEP);
28915 if (un->sd_fi_fifo_un[i] == NULL) {
28916 /* Alloc failed; don't store anything */
28917 break;
28918 }
28919 rval = ddi_copyin((void *)arg, un->sd_fi_fifo_un[i],
28920 sizeof (struct sd_fi_un), 0);
28921 if (rval == -1) {
28922 kmem_free(un->sd_fi_fifo_un[i],
28923 sizeof (struct sd_fi_un));
28924 un->sd_fi_fifo_un[i] = NULL;
28925 }
28926
28927 } else {
28928 SD_INFO(SD_LOG_IOERR, un,
28929 "sd_faultinjection_ioctl: un null\n");
28930 }
28931
28932 break;
28933
28934 case SDIOCINSERTARQ:
28935 /* Store an arq struct to be pushed onto the fifo */
28936 SD_INFO(SD_LOG_SDTEST, un,
28937 "sd_faultinjection_ioctl: Injecting Fault Insert ARQ\n");
28938 i = un->sd_fi_fifo_end % SD_FI_MAX_ERROR;
28939
28940 sd_fault_injection_on = 0;
28941
28942 if (un->sd_fi_fifo_arq[i] != NULL) {
28943 kmem_free(un->sd_fi_fifo_arq[i],
28944 sizeof (struct sd_fi_arq));
28945 un->sd_fi_fifo_arq[i] = NULL;
28946 }
28947 if (arg != NULL) {
28948 un->sd_fi_fifo_arq[i] =
28949 kmem_alloc(sizeof (struct
sd_fi_arq), KM_NOSLEEP);
28950 if (un->sd_fi_fifo_arq[i] == NULL) {
28951 /* Alloc failed; don't store anything */
28952 break;
28953 }
28954 rval = ddi_copyin((void *)arg, un->sd_fi_fifo_arq[i],
28955 sizeof (struct sd_fi_arq), 0);
28956 if (rval == -1) {
28957 kmem_free(un->sd_fi_fifo_arq[i],
28958 sizeof (struct sd_fi_arq));
28959 un->sd_fi_fifo_arq[i] = NULL;
28960 }
28961
28962 } else {
28963 SD_INFO(SD_LOG_IOERR, un,
28964 "sd_faultinjection_ioctl: arq null\n");
28965 }
28966
28967 break;
28968
28969 case SDIOCPUSH:
28970 /* Push stored xb, pkt, un, and arq onto the fifo */
28971 sd_fault_injection_on = 0;
28972
28973 if (arg != NULL) {
28974 rval = ddi_copyin((void *)arg, &i, sizeof (uint_t), 0);
28975 if (rval != -1 &&
28976 un->sd_fi_fifo_end + i < SD_FI_MAX_ERROR) {
28977 un->sd_fi_fifo_end += i;
28978 }
28979 } else {
28980 SD_INFO(SD_LOG_IOERR, un,
28981 "sd_faultinjection_ioctl: push arg null\n");
28982 if (un->sd_fi_fifo_end + i < SD_FI_MAX_ERROR) {
28983 un->sd_fi_fifo_end++;
28984 }
28985 }
28986 SD_INFO(SD_LOG_IOERR, un,
28987 "sd_faultinjection_ioctl: push to end=%d\n",
28988 un->sd_fi_fifo_end);
28989 break;
28990
28991 case SDIOCRETRIEVE:
28992 /* Return the log buffer from the injection session */
28993 SD_INFO(SD_LOG_SDTEST, un,
28994 "sd_faultinjection_ioctl: Injecting Fault Retrieve");
28995
28996 sd_fault_injection_on = 0;
28997
28998 mutex_enter(&(un->un_fi_mutex));
28999 rval = ddi_copyout(un->sd_fi_log, (void *)arg,
29000 un->sd_fi_buf_len+1, 0);
29001 mutex_exit(&(un->un_fi_mutex));
29002
29003 if (rval == -1) {
29004 /*
29005 * arg is possibly invalid; set
29006 * it to NULL for the return
29007 */
29008 arg = NULL;
29009 }
29010 break;
29011 }
29012
29013 mutex_exit(SD_MUTEX(un));
29014 SD_TRACE(SD_LOG_IOERR, un, "sd_faultinjection_ioctl:"
29015 " exit\n");
29016 }
29017
29018
29019 /*
29020 * Function: sd_injection_log()
29021 *
29022 * Description: This routine adds buf to the already existing injection log
29023 * for retrieval via faultinjection_ioctl for use in fault
29024 * detection and recovery
29025 *
29026 * Arguments: buf - the string to add to the log
29027 */
29028
29029 static void
29030 sd_injection_log(char *buf, struct sd_lun *un)
29031 {
29032 uint_t len;
29033
29034 ASSERT(un != NULL);
29035 ASSERT(buf != NULL);
29036
29037 mutex_enter(&(un->un_fi_mutex));
29038
29039 len = min(strlen(buf), 255);
29040 /* Add the logged value to the injection log to be returned later */
29041 if (len + un->sd_fi_buf_len < SD_FI_MAX_BUF) {
29042 uint_t offset = strlen((char *)un->sd_fi_log);
29043 char *destp = (char *)un->sd_fi_log + offset;
29044 int i;
29045 for (i = 0; i < len; i++) {
29046 *destp++ = *buf++;
29047 }
29048 un->sd_fi_buf_len += len;
29049 un->sd_fi_log[un->sd_fi_buf_len] = '\0';
29050 }
29051
29052 mutex_exit(&(un->un_fi_mutex));
29053 }
29054
29055
29056 /*
29057 * Function: sd_faultinjection()
29058 *
29059 * Description: This routine takes the pkt and changes its
29060 * content based on the error injection scenario.
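 *		Faults are staged ahead of time through the SDIOC*
 *		cases of sd_faultinjection_ioctl() above; a hypothetical
 *		test session might look like:
 *
 *			ioctl(fd, SDIOCSTART, NULL);	    begin session
 *			ioctl(fd, SDIOCINSERTPKT, &fi_pkt); stage a pkt fault
 *			ioctl(fd, SDIOCPUSH, &count);	    queue staged faults
 *			ioctl(fd, SDIOCRUN, NULL);	    arm injection
 *			... run I/O against the device ...
 *			ioctl(fd, SDIOCRETRIEVE, logbuf);   fetch session log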
29061 * 29062 * Arguments: pktp - packet to be changed 29063 */ 29064 29065 static void 29066 sd_faultinjection(struct scsi_pkt *pktp) 29067 { 29068 uint_t i; 29069 struct sd_fi_pkt *fi_pkt; 29070 struct sd_fi_xb *fi_xb; 29071 struct sd_fi_un *fi_un; 29072 struct sd_fi_arq *fi_arq; 29073 struct buf *bp; 29074 struct sd_xbuf *xb; 29075 struct sd_lun *un; 29076 29077 ASSERT(pktp != NULL); 29078 29079 /* pull bp xb and un from pktp */ 29080 bp = (struct buf *)pktp->pkt_private; 29081 xb = SD_GET_XBUF(bp); 29082 un = SD_GET_UN(bp); 29083 29084 ASSERT(un != NULL); 29085 29086 mutex_enter(SD_MUTEX(un)); 29087 29088 SD_TRACE(SD_LOG_SDTEST, un, 29089 "sd_faultinjection: entry Injection from sdintr\n"); 29090 29091 /* if injection is off return */ 29092 if (sd_fault_injection_on == 0 || 29093 un->sd_fi_fifo_start == un->sd_fi_fifo_end) { 29094 mutex_exit(SD_MUTEX(un)); 29095 return; 29096 } 29097 29098 SD_INFO(SD_LOG_SDTEST, un, 29099 "sd_faultinjection: is working for copying\n"); 29100 29101 /* take next set off fifo */ 29102 i = un->sd_fi_fifo_start % SD_FI_MAX_ERROR; 29103 29104 fi_pkt = un->sd_fi_fifo_pkt[i]; 29105 fi_xb = un->sd_fi_fifo_xb[i]; 29106 fi_un = un->sd_fi_fifo_un[i]; 29107 fi_arq = un->sd_fi_fifo_arq[i]; 29108 29109 29110 /* set variables accordingly */ 29111 /* set pkt if it was on fifo */ 29112 if (fi_pkt != NULL) { 29113 SD_CONDSET(pktp, pkt, pkt_flags, "pkt_flags"); 29114 SD_CONDSET(*pktp, pkt, pkt_scbp, "pkt_scbp"); 29115 if (fi_pkt->pkt_cdbp != 0xff) 29116 SD_CONDSET(*pktp, pkt, pkt_cdbp, "pkt_cdbp"); 29117 SD_CONDSET(pktp, pkt, pkt_state, "pkt_state"); 29118 SD_CONDSET(pktp, pkt, pkt_statistics, "pkt_statistics"); 29119 SD_CONDSET(pktp, pkt, pkt_reason, "pkt_reason"); 29120 29121 } 29122 /* set xb if it was on fifo */ 29123 if (fi_xb != NULL) { 29124 SD_CONDSET(xb, xb, xb_blkno, "xb_blkno"); 29125 SD_CONDSET(xb, xb, xb_dma_resid, "xb_dma_resid"); 29126 if (fi_xb->xb_retry_count != 0) 29127 SD_CONDSET(xb, xb, xb_retry_count, "xb_retry_count"); 29128 SD_CONDSET(xb, xb, xb_victim_retry_count, 29129 "xb_victim_retry_count"); 29130 SD_CONDSET(xb, xb, xb_sense_status, "xb_sense_status"); 29131 SD_CONDSET(xb, xb, xb_sense_state, "xb_sense_state"); 29132 SD_CONDSET(xb, xb, xb_sense_resid, "xb_sense_resid"); 29133 29134 /* copy in block data from sense */ 29135 /* 29136 * if (fi_xb->xb_sense_data[0] != -1) { 29137 * bcopy(fi_xb->xb_sense_data, xb->xb_sense_data, 29138 * SENSE_LENGTH); 29139 * } 29140 */ 29141 bcopy(fi_xb->xb_sense_data, xb->xb_sense_data, SENSE_LENGTH); 29142 29143 /* copy in extended sense codes */ 29144 SD_CONDSET(((struct scsi_extended_sense *)xb->xb_sense_data), 29145 xb, es_code, "es_code"); 29146 SD_CONDSET(((struct scsi_extended_sense *)xb->xb_sense_data), 29147 xb, es_key, "es_key"); 29148 SD_CONDSET(((struct scsi_extended_sense *)xb->xb_sense_data), 29149 xb, es_add_code, "es_add_code"); 29150 SD_CONDSET(((struct scsi_extended_sense *)xb->xb_sense_data), 29151 xb, es_qual_code, "es_qual_code"); 29152 struct scsi_extended_sense *esp; 29153 esp = (struct scsi_extended_sense *)xb->xb_sense_data; 29154 esp->es_class = CLASS_EXTENDED_SENSE; 29155 } 29156 29157 /* set un if it was on fifo */ 29158 if (fi_un != NULL) { 29159 SD_CONDSET(un->un_sd->sd_inq, un, inq_rmb, "inq_rmb"); 29160 SD_CONDSET(un, un, un_ctype, "un_ctype"); 29161 SD_CONDSET(un, un, un_reset_retry_count, 29162 "un_reset_retry_count"); 29163 SD_CONDSET(un, un, un_reservation_type, "un_reservation_type"); 29164 SD_CONDSET(un, un, un_resvd_status, "un_resvd_status"); 29165 SD_CONDSET(un, un, 
un_f_arq_enabled, "un_f_arq_enabled");
29166 SD_CONDSET(un, un, un_f_allow_bus_device_reset,
29167 "un_f_allow_bus_device_reset");
29168 SD_CONDSET(un, un, un_f_opt_queueing, "un_f_opt_queueing");
29169
29170 }
29171
29172 /* copy in auto request sense if it was on fifo */
29173 if (fi_arq != NULL) {
29174 bcopy(fi_arq, pktp->pkt_scbp, sizeof (struct sd_fi_arq));
29175 }
29176
29177 /* free structs */
29178 if (un->sd_fi_fifo_pkt[i] != NULL) {
29179 kmem_free(un->sd_fi_fifo_pkt[i], sizeof (struct sd_fi_pkt));
29180 }
29181 if (un->sd_fi_fifo_xb[i] != NULL) {
29182 kmem_free(un->sd_fi_fifo_xb[i], sizeof (struct sd_fi_xb));
29183 }
29184 if (un->sd_fi_fifo_un[i] != NULL) {
29185 kmem_free(un->sd_fi_fifo_un[i], sizeof (struct sd_fi_un));
29186 }
29187 if (un->sd_fi_fifo_arq[i] != NULL) {
29188 kmem_free(un->sd_fi_fifo_arq[i], sizeof (struct sd_fi_arq));
29189 }
29190
29191 /*
29192 * kmem_free does not guarantee that the pointer is set to NULL.
29193 * Since we use these pointers to determine whether we set
29194 * values or not, let's confirm they are always
29195 * NULL after the free.
29196 */
29197 un->sd_fi_fifo_pkt[i] = NULL;
29198 un->sd_fi_fifo_un[i] = NULL;
29199 un->sd_fi_fifo_xb[i] = NULL;
29200 un->sd_fi_fifo_arq[i] = NULL;
29201
29202 un->sd_fi_fifo_start++;
29203
29204 mutex_exit(SD_MUTEX(un));
29205
29206 SD_INFO(SD_LOG_SDTEST, un, "sd_faultinjection: exit\n");
29207 }
29208
29209 #endif /* SD_FAULT_INJECTION */
29210
29211 /*
29212 * This routine is invoked in sd_unit_attach(). Before calling it, the
29213 * properties in the conf file should already have been processed, including
29214 * the "hotpluggable" property.
29215 *
29216 * The sd driver distinguishes 3 different types of devices: removable media,
29217 * non-removable media, and hotpluggable. Below the differences are defined:
29218 *
29219 * 1. Device ID
29220 *
29221 * The device ID of a device is used to identify this device. Refer to
29222 * ddi_devid_register(9F).
29223 *
29224 * For a non-removable media disk device which can provide the 0x80 or 0x83
29225 * VPD page (refer to the INQUIRY command of the SCSI SPC specification), a unique
29226 * device ID is created to identify this device. For other non-removable
29227 * media devices, a default device ID is created only if this device has
29228 * at least 2 alternate cylinders. Otherwise, this device has no devid.
29229 *
29230 * -------------------------------------------------------
29231 * removable media hotpluggable | Can Have Device ID
29232 * -------------------------------------------------------
29233 * false false | Yes
29234 * false true | Yes
29235 * true x | No
29236 * ------------------------------------------------------
29237 *
29238 *
29239 * 2. SCSI group 4 commands
29240 *
29241 * In the SCSI specs, only some commands in the group 4 command set can use
29242 * 8-byte addresses that can be used to access >2TB storage spaces.
29243 * Other commands have no such capability. Without group 4 support,
29244 * it is impossible to make full use of the storage space of a disk with a
29245 * capacity larger than 2TB.
29246 *
29247 * -----------------------------------------------
29248 * removable media hotpluggable LP64 | Group
29249 * -----------------------------------------------
29250 * false false false | 1
29251 * false false true | 4
29252 * false true false | 1
29253 * false true true | 4
29254 * true x x | 5
29255 * -----------------------------------------------
29256 *
29257 *
29258 * 3.
 *
 * 3. Check for VTOC Label
 *
 *     If a direct-access disk has no EFI label, sd will check whether it
 *     has a valid VTOC label. Now, sd also does that check for removable
 *     media and hotpluggable devices.
 *
 *     --------------------------------------------------------------
 *     Direct-Access   removable media   hotpluggable |  Check Label
 *     --------------------------------------------------------------
 *         false            false            false    |    No
 *         false            false            true     |    No
 *         false            true             false    |    Yes
 *         false            true             true     |    Yes
 *         true               x                x      |    Yes
 *     --------------------------------------------------------------
 *
 *
 * 4. Building default VTOC label
 *
 *     As section 3 says, sd checks whether some kinds of devices have a
 *     VTOC label. If those devices have no valid VTOC label, sd(7d) will
 *     attempt to create a default VTOC for them. Currently sd creates a
 *     default VTOC label for all devices on the x86 platform (VTOC_16),
 *     but only for removable media and hotpluggable devices on SPARC
 *     (VTOC_8).
 *
 *     -----------------------------------------------------------
 *     removable media   hotpluggable   platform  | Default Label
 *     -----------------------------------------------------------
 *         false             false       sparc    |     No
 *         false             false       x86      |     Yes
 *         false             true        x86      |     Yes
 *         false             true        sparc    |     Yes
 *         true                x           x      |     Yes
 *     -----------------------------------------------------------
 *
 *
 * 5. Supported blocksizes of target devices
 *
 *     Sd supports non-512-byte blocksizes for removable media devices
 *     only. For other devices, only a 512-byte blocksize is supported.
 *     This may change in the near future because some RAID devices
 *     require a non-512-byte blocksize.
 *
 *     -----------------------------------------------------------
 *     removable media   hotpluggable | non-512-byte blocksize
 *     -----------------------------------------------------------
 *         false             false    |   No
 *         false             true     |   No
 *         true                x      |   Yes
 *     -----------------------------------------------------------
 *
 *
 * 6. Automatic mount & unmount
 *
 *     The sd(7d) driver provides the DKIOCREMOVABLE ioctl, which is used
 *     to query whether a device is a removable media device. It returns 1
 *     for removable media devices, and 0 for others (see the illustrative
 *     sketch after this comment block).
 *
 *     The automatic mounting subsystem should distinguish between the
 *     types of devices and apply automounting policies to each.
 *
 *
 * 7. fdisk partition management
 *
 *     Fdisk is the traditional partitioning method on the x86 platform.
 *     The sd(7d) driver supports fdisk partitions only on x86; on SPARC,
 *     sd doesn't support fdisk partitions at all. Note: pcfs(7fs) can
 *     recognize fdisk partitions on both the x86 and SPARC platforms.
 *
 *     -----------------------------------------------------------
 *     platform   removable media   USB/1394 | fdisk supported
 *     -----------------------------------------------------------
 *      x86             X               X    |   true
 *     -----------------------------------------------------------
 *      sparc           X               X    |   false
 *     -----------------------------------------------------------
 *
 *
 * 8. MBOOT/MBR
 *
 *     Although sd(7d) doesn't support fdisk on the SPARC platform, it
 *     does support reading/writing the mboot for removable media and
 *     USB/1394 devices on SPARC.
 *
 *     -----------------------------------------------------------
 *     platform   removable media   USB/1394 | mboot supported
 *     -----------------------------------------------------------
 *      x86             X               X    |   true
 *     -----------------------------------------------------------
 *      sparc          false           false |   false
 *      sparc          false           true  |   true
 *      sparc          true            false |   true
 *      sparc          true            true  |   true
 *     -----------------------------------------------------------
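 *
 *     (Illustration only, not driver code: a consumer that reads the
 *     mboot sector typically validates it against the little-endian
 *     0xAA55 signature from <sys/dktp/fdisk.h>; "sector_buf" is a
 *     hypothetical 512-byte buffer.)
 *
 *         struct mboot *mbp = (struct mboot *)sector_buf;
 *
 *         if (LE_16(mbp->signature) != MBB_MAGIC)
 *             return (EINVAL);
 *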
 *
 * 9. Error handling during opening device
 *
 *     If a disk device fails to open, an errno is returned. For some
 *     kinds of errors, a different errno is returned depending on whether
 *     the device is a removable media device. This brings USB/1394 hard
 *     disks in line with expected hard disk behavior. It is not expected
 *     that this breaks any application.
 *
 *     ------------------------------------------------------
 *     removable media   hotpluggable | errno
 *     ------------------------------------------------------
 *         false             false    |  EIO
 *         false             true     |  EIO
 *         true                x      |  ENXIO
 *     ------------------------------------------------------
 *
 *
 * 10. ioctls: DKIOCEJECT, CDROMEJECT
 *
 *     These ioctls are applicable only to removable media devices.
 *
 *     -----------------------------------------------------------
 *     removable media   hotpluggable | DKIOCEJECT, CDROMEJECT
 *     -----------------------------------------------------------
 *         false             false    |   No
 *         false             true     |   No
 *         true                x      |   Yes
 *     -----------------------------------------------------------
 *
 *
 * 11. Kstats for partitions
 *
 *     sd creates partition kstats for non-removable media devices. USB
 *     and FireWire hard disks now have partition kstats as well.
 *
 *     ------------------------------------------------------
 *     removable media   hotpluggable | kstat
 *     ------------------------------------------------------
 *         false             false    |  Yes
 *         false             true     |  Yes
 *         true                x      |  No
 *     ------------------------------------------------------
 *
 *
 * 12. Removable media & hotpluggable properties
 *
 *     The sd driver creates a "removable-media" property for removable
 *     media devices. A parent nexus driver creates a "hotpluggable"
 *     property if it supports hotplugging.
 *
 *     ----------------------------------------------------------------
 *     removable media  hotpluggable | "removable-media"  "hotpluggable"
 *     ----------------------------------------------------------------
 *         false            false    |    No                 No
 *         false            true     |    No                 Yes
 *         true             false    |    Yes                No
 *         true             true     |    Yes                Yes
 *     ----------------------------------------------------------------
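 *
 *     (Illustration only, not driver code: a parent nexus that supports
 *     hotplugging would publish the zero-length "hotpluggable" property
 *     the same way sd publishes "removable-media" in
 *     sd_set_unit_attributes() below; "cdip" is a hypothetical child
 *     dev_info pointer.)
 *
 *         (void) ddi_prop_create(DDI_DEV_T_NONE, cdip,
 *             DDI_PROP_CANSLEEP, "hotpluggable", NULL, 0);
 *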
 *
 * 13. Power Management
 *
 *     sd only power-manages removable media devices or devices that
 *     support LOG SENSE or have a "pm-capable" property (PSARC/2002/250).
 *
 *     A parent nexus that supports hotplugging can also set "pm-capable"
 *     if the disk can be power managed.
 *
 *     ------------------------------------------------------------
 *     removable media   hotpluggable   pm-capable | power manage
 *     ------------------------------------------------------------
 *         false             false        false    |   No
 *         false             false        true     |   Yes
 *         false             true         false    |   No
 *         false             true         true     |   Yes
 *         true                x            x      |   Yes
 *     ------------------------------------------------------------
 *
 *     USB and FireWire hard disks can now be power managed independently
 *     of the framebuffer.
 *
 *
 * 14. Support for USB disks with capacity larger than 1TB
 *
 *     Currently, sd doesn't permit a fixed disk device with a capacity
 *     larger than 1TB to be used in a 32-bit operating system
 *     environment. However, sd doesn't enforce this for removable media
 *     devices; instead, it assumes that removable media devices cannot
 *     have a capacity larger than 1TB. Using such devices on a 32-bit
 *     system is therefore only partially supported and can cause
 *     unexpected results.
 *
 *     ---------------------------------------------------------------
 *     removable media   USB/1394 | Capacity > 1TB | Used in 32-bit env
 *     ---------------------------------------------------------------
 *         false           false  |      true      |       No
 *         false           true   |      true      |       No
 *         true            false  |      true      |       Yes
 *         true            true   |      true      |       Yes
 *     ---------------------------------------------------------------
 *
 *
 * 15. Check write-protection at open time
 *
 *     When a removable media device is opened for writing without the
 *     NDELAY flag, sd checks whether the device is writable; an attempt
 *     to open a write-protected device without the NDELAY flag is
 *     aborted.
 *
 *     ------------------------------------------------------------
 *     removable media   USB/1394 | WP Check
 *     ------------------------------------------------------------
 *         false           false  |   No
 *         false           true   |   No
 *         true            false  |   Yes
 *         true            true   |   Yes
 *     ------------------------------------------------------------
 *
 *
 * 16. syslog when a corrupted VTOC is encountered
 *
 *     Currently, if an invalid VTOC is encountered, sd only prints a
 *     syslog message for fixed SCSI disks.
 *
 *     ------------------------------------------------------------
 *     removable media   USB/1394 | print syslog
 *     ------------------------------------------------------------
 *         false           false  |   Yes
 *         false           true   |   No
 *         true            false  |   No
 *         true            true   |   No
 *     ------------------------------------------------------------
 */
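/*
 * Illustration only, not driver code: userland usage of the
 * DKIOCREMOVABLE ioctl described in section 6 above, e.g. by an
 * automounter deciding which policy to apply. "fd" is an open file
 * descriptor on the disk's raw device; error handling is minimal.
 *
 *     int removable = 0;
 *
 *     if (ioctl(fd, DKIOCREMOVABLE, &removable) == 0 && removable != 0) {
 *         ... treat as a removable media device ...
 *     }
 */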
static void
sd_set_unit_attributes(struct sd_lun *un, dev_info_t *devi)
{
    int pm_capable_prop;

    ASSERT(un->un_sd);
    ASSERT(un->un_sd->sd_inq);

    /*
     * Enable SYNC CACHE support for all devices.
     */
    un->un_f_sync_cache_supported = TRUE;

    /*
     * Set the sync cache required flag to false. This ensures that
     * no SYNC CACHE is sent when there have been no writes.
     */
    un->un_f_sync_cache_required = FALSE;

    if (un->un_sd->sd_inq->inq_rmb) {
        /*
         * The media of this device is removable, so the medium can
         * be changed after the device is opened. We must support
         * that operation.
         */
        un->un_f_has_removable_media = TRUE;

        /*
         * Support non-512-byte blocksizes on removable media devices.
         */
        un->un_f_non_devbsize_supported = TRUE;

        /*
         * Assume that all removable media devices support DOOR_LOCK.
         */
        un->un_f_doorlock_supported = TRUE;

        /*
         * A removable media device may be opened with the NDELAY flag
         * when there is no medium in the drive, in which case we don't
         * care whether the device is writable. Without the NDELAY
         * flag, we need to check whether the medium is
         * write-protected.
         */
        un->un_f_chk_wp_open = TRUE;

        /*
         * Need to start a SCSI watch thread to monitor the media
         * state: when media is inserted or ejected, notify syseventd.
         */
        un->un_f_monitor_media_state = TRUE;

        /*
         * Some devices don't support the START_STOP_UNIT command, so
         * check whether a device supports it before sending it.
         */
        un->un_f_check_start_stop = TRUE;

        /*
         * Support the eject media ioctls:
         * FDEJECT, DKIOCEJECT, CDROMEJECT
         */
        un->un_f_eject_media_supported = TRUE;

        /*
         * Because many removable-media devices don't support
         * LOG_SENSE, we cannot use that command to check whether a
         * removable media device supports power management. We assume
         * that they support power management via the START_STOP_UNIT
         * command and can be spun up and down without limitations.
         */
        un->un_f_pm_supported = TRUE;

        /*
         * Need to create a zero-length (boolean) property
         * "removable-media" for removable media devices. Note that
         * the return value of the property is not being checked,
         * since if we are unable to create the property we do not
         * want the attach to fail altogether. This is consistent
         * with other property creation in attach.
         */
        (void) ddi_prop_create(DDI_DEV_T_NONE, devi,
            DDI_PROP_CANSLEEP, "removable-media", NULL, 0);

    } else {
        /*
         * Create a device ID for the device.
         */
        un->un_f_devid_supported = TRUE;

        /*
         * Spin up non-removable media devices once they are attached.
         */
        un->un_f_attach_spinup = TRUE;

        /*
         * According to the SCSI specification, sense data comes in
         * two formats: fixed format and descriptor format. At
         * present, we don't support descriptor-format sense data for
         * removable media.
         */
        if (SD_INQUIRY(un)->inq_dtype == DTYPE_DIRECT) {
            un->un_f_descr_format_supported = TRUE;
        }

        /*
         * kstats are created only for non-removable media devices.
         *
         * Set this in sd.conf to 0 in order to disable kstats. The
         * default is 1, so they are enabled by default.
         */
        un->un_f_pkstats_enabled = (ddi_prop_get_int(DDI_DEV_T_ANY,
            SD_DEVINFO(un), DDI_PROP_DONTPASS,
            "enable-partition-kstats", 1));

        /*
         * Check if the HBA has set the "pm-capable" property.
         * If "pm-capable" exists and is non-zero then we can power
         * manage the device without checking the start/stop cycle
         * count log sense page.
         *
         * If "pm-capable" exists and is SD_PM_CAPABLE_FALSE (0),
         * then we should not power manage the device.
         *
         * If "pm-capable" doesn't exist then pm_capable_prop will be
         * set to SD_PM_CAPABLE_UNDEFINED (-1). In this case, sd will
         * check the start/stop cycle count log sense page and power
         * manage the device if the cycle count limit has not been
         * exceeded.
         */
        pm_capable_prop = ddi_prop_get_int(DDI_DEV_T_ANY, devi,
            DDI_PROP_DONTPASS, "pm-capable", SD_PM_CAPABLE_UNDEFINED);
        if (pm_capable_prop == SD_PM_CAPABLE_UNDEFINED) {
            un->un_f_log_sense_supported = TRUE;
        } else {
            /*
             * The pm-capable property exists.
             *
             * Convert "TRUE" values of pm_capable_prop to
             * SD_PM_CAPABLE_TRUE (1) to make them easier to check
             * later. "TRUE" values are any values except
             * SD_PM_CAPABLE_FALSE (0) and
             * SD_PM_CAPABLE_UNDEFINED (-1).
             */
            if (pm_capable_prop == SD_PM_CAPABLE_FALSE) {
                un->un_f_log_sense_supported = FALSE;
            } else {
                un->un_f_pm_supported = TRUE;
            }

            SD_INFO(SD_LOG_ATTACH_DETACH, un,
                "sd_unit_attach: un:0x%p pm-capable "
                "property set to %d.\n", un, un->un_f_pm_supported);
        }
    }

    if (un->un_f_is_hotpluggable) {

        /*
         * Have to watch hotpluggable devices as well, since that's
         * the only way for userland applications to detect hot
         * removal while the device is busy/mounted.
         */
        un->un_f_monitor_media_state = TRUE;

        un->un_f_check_start_stop = TRUE;
    }
}
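/*
 * For reference, a sketch (assumed from the cmlb interface, not copied
 * verbatim from this file): sd_tg_rdwr() and sd_tg_getinfo() below are
 * not called directly by sd. They are exported to cmlb through the
 * target-geometry ops vector registered at attach time, roughly:
 *
 *     static cmlb_tg_ops_t sd_tgops = {
 *         TG_DK_OPS_VERSION_1,
 *         sd_tg_rdwr,
 *         sd_tg_getinfo
 *     };
 *
 * cmlb then passes tg_cookie back into these entry points, which sd
 * uses to carry the SD_PATH_* flag, as seen below.
 */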
/*
 * sd_tg_rdwr:
 *     Provides rdwr access for cmlb via sd_tgops. start_block is in
 *     units of the system block size; reqlength is in bytes.
 */
static int
sd_tg_rdwr(dev_info_t *devi, uchar_t cmd, void *bufaddr,
    diskaddr_t start_block, size_t reqlength, void *tg_cookie)
{
    struct sd_lun *un;
    int path_flag = (int)(uintptr_t)tg_cookie;
    char *dkl = NULL;
    diskaddr_t real_addr = start_block;
    diskaddr_t first_byte, end_block;

    size_t buffer_size = reqlength;
    int rval = 0;
    diskaddr_t cap;
    uint32_t lbasize;
    sd_ssc_t *ssc;

    un = ddi_get_soft_state(sd_state, ddi_get_instance(devi));
    if (un == NULL)
        return (ENXIO);

    if (cmd != TG_READ && cmd != TG_WRITE)
        return (EINVAL);

    ssc = sd_ssc_init(un);
    mutex_enter(SD_MUTEX(un));
    if (un->un_f_tgt_blocksize_is_valid == FALSE) {
        mutex_exit(SD_MUTEX(un));
        rval = sd_send_scsi_READ_CAPACITY(ssc, (uint64_t *)&cap,
            &lbasize, path_flag);
        if (rval != 0)
            goto done1;
        mutex_enter(SD_MUTEX(un));
        sd_update_block_info(un, lbasize, cap);
        if ((un->un_f_tgt_blocksize_is_valid == FALSE)) {
            mutex_exit(SD_MUTEX(un));
            rval = EIO;
            goto done;
        }
    }

    if (NOT_DEVBSIZE(un)) {
        /*
         * sys_blocksize != tgt_blocksize: re-adjust the block number
         * and save the offset to the beginning of the dk_label.
         */
        first_byte = SD_SYSBLOCKS2BYTES(un, start_block);
        real_addr = first_byte / un->un_tgt_blocksize;

        end_block = (first_byte + reqlength +
            un->un_tgt_blocksize - 1) / un->un_tgt_blocksize;

        /* round buffer size up to a multiple of the target block size */
        buffer_size = (end_block - real_addr) * un->un_tgt_blocksize;

        SD_TRACE(SD_LOG_IO_PARTITION, un, "sd_tg_rdwr",
            "label_addr: 0x%x allocation size: 0x%x\n",
            real_addr, buffer_size);

        if (((first_byte % un->un_tgt_blocksize) != 0) ||
            (reqlength % un->un_tgt_blocksize) != 0)
            /* the request is not aligned */
            dkl = kmem_zalloc(buffer_size, KM_SLEEP);
    }
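    /*
     * Worked example (illustrative numbers, assuming 512-byte system
     * blocks and a 2048-byte target blocksize): for start_block = 3 and
     * reqlength = 1024, first_byte = 1536, real_addr = 0, end_block = 2,
     * and buffer_size = 4096. Since 1536 is not a multiple of 2048, the
     * request is unaligned, so dkl is allocated and the transfer is done
     * as a read (and, for TG_WRITE, a read-modify-write) of two whole
     * target blocks, with the requested 1024 bytes copied at offset 1536
     * within dkl.
     */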

    /*
     * The MMC standard allows READ CAPACITY to be inaccurate by a
     * bounded amount (in the interest of response latency). As a
     * result, failed READs are commonplace (due to the reading of
     * metadata and not data). Depending on the per-Vendor/drive Sense
     * data, the failed READ can cause many (unnecessary) retries.
     */
    if (ISCD(un) && (cmd == TG_READ) &&
        (un->un_f_blockcount_is_valid == TRUE) &&
        ((start_block == (un->un_blockcount - 1)) ||
        (start_block == (un->un_blockcount - 2)))) {
        path_flag = SD_PATH_DIRECT_PRIORITY;
    }

    mutex_exit(SD_MUTEX(un));
    if (cmd == TG_READ) {
        rval = sd_send_scsi_READ(ssc, (dkl != NULL)? dkl: bufaddr,
            buffer_size, real_addr, path_flag);
        if (dkl != NULL)
            bcopy(dkl + SD_TGTBYTEOFFSET(un, start_block,
                real_addr), bufaddr, reqlength);
    } else {
        if (dkl) {
            rval = sd_send_scsi_READ(ssc, dkl, buffer_size,
                real_addr, path_flag);
            if (rval) {
                goto done1;
            }
            bcopy(bufaddr, dkl + SD_TGTBYTEOFFSET(un, start_block,
                real_addr), reqlength);
        }
        rval = sd_send_scsi_WRITE(ssc, (dkl != NULL)? dkl: bufaddr,
            buffer_size, real_addr, path_flag);
    }

done1:
    if (dkl != NULL)
        kmem_free(dkl, buffer_size);

    if (rval != 0) {
        if (rval == EIO)
            sd_ssc_assessment(ssc, SD_FMT_STATUS_CHECK);
        else
            sd_ssc_assessment(ssc, SD_FMT_IGNORE);
    }
done:
    sd_ssc_fini(ssc);
    return (rval);
}


static int
sd_tg_getinfo(dev_info_t *devi, int cmd, void *arg, void *tg_cookie)
{

    struct sd_lun *un;
    diskaddr_t cap;
    uint32_t lbasize;
    int path_flag = (int)(uintptr_t)tg_cookie;
    int ret = 0;

    un = ddi_get_soft_state(sd_state, ddi_get_instance(devi));
    if (un == NULL)
        return (ENXIO);

    switch (cmd) {
    case TG_GETPHYGEOM:
    case TG_GETVIRTGEOM:
    case TG_GETCAPACITY:
    case TG_GETBLOCKSIZE:
        mutex_enter(SD_MUTEX(un));

        if ((un->un_f_blockcount_is_valid == TRUE) &&
            (un->un_f_tgt_blocksize_is_valid == TRUE)) {
            cap = un->un_blockcount;
            lbasize = un->un_tgt_blocksize;
            mutex_exit(SD_MUTEX(un));
        } else {
            sd_ssc_t *ssc;
            mutex_exit(SD_MUTEX(un));
            ssc = sd_ssc_init(un);
            ret = sd_send_scsi_READ_CAPACITY(ssc, (uint64_t *)&cap,
                &lbasize, path_flag);
            if (ret != 0) {
                if (ret == EIO)
                    sd_ssc_assessment(ssc,
                        SD_FMT_STATUS_CHECK);
                else
                    sd_ssc_assessment(ssc,
                        SD_FMT_IGNORE);
                sd_ssc_fini(ssc);
                return (ret);
            }
            sd_ssc_fini(ssc);
            mutex_enter(SD_MUTEX(un));
            sd_update_block_info(un, lbasize, cap);
            if ((un->un_f_blockcount_is_valid == FALSE) ||
                (un->un_f_tgt_blocksize_is_valid == FALSE)) {
                mutex_exit(SD_MUTEX(un));
                return (EIO);
            }
            mutex_exit(SD_MUTEX(un));
        }

        if (cmd == TG_GETCAPACITY) {
            *(diskaddr_t *)arg = cap;
            return (0);
        }

        if (cmd == TG_GETBLOCKSIZE) {
            *(uint32_t *)arg = lbasize;
            return (0);
        }

        if (cmd == TG_GETPHYGEOM)
            ret = sd_get_physical_geometry(un, (cmlb_geom_t *)arg,
                cap, lbasize, path_flag);
        else
            /* TG_GETVIRTGEOM */
            ret = sd_get_virtual_geometry(un,
                (cmlb_geom_t *)arg, cap, lbasize);

        return (ret);

    case TG_GETATTR:
        mutex_enter(SD_MUTEX(un));
        ((tg_attribute_t *)arg)->media_is_writable =
            un->un_f_mmc_writable_media;
        mutex_exit(SD_MUTEX(un));
        return (0);
    default:
        return (ENOTTY);
    }
}

/*
 *    Function: sd_ssc_ereport_post
 *
 * Description: Called when the SD driver needs to post an ereport.
 *
 *     Context: Kernel thread or interrupt context.
 */
static void
sd_ssc_ereport_post(sd_ssc_t *ssc, enum sd_driver_assessment drv_assess)
{
    int uscsi_path_instance = 0;
    uchar_t uscsi_pkt_reason;
    uint32_t uscsi_pkt_state;
    uint32_t uscsi_pkt_statistics;
    uint64_t uscsi_ena;
    uchar_t op_code;
    uint8_t *sensep;
    union scsi_cdb *cdbp;
    uint_t cdblen = 0;
    uint_t senlen = 0;
    struct sd_lun *un;
    dev_info_t *dip;
    char *devid;
    int ssc_invalid_flags = SSC_FLAGS_INVALID_PKT_REASON |
        SSC_FLAGS_INVALID_STATUS |
        SSC_FLAGS_INVALID_SENSE |
        SSC_FLAGS_INVALID_DATA;
    char assessment[16];

    ASSERT(ssc != NULL);
    ASSERT(ssc->ssc_uscsi_cmd != NULL);
    ASSERT(ssc->ssc_uscsi_info != NULL);

    un = ssc->ssc_un;
    ASSERT(un != NULL);

    dip = un->un_sd->sd_dev;

    /*
     * Get the devid. The devid will only be passed to non-transport
     * error reports.
     */
    devid = DEVI(dip)->devi_devid_str;

    /*
     * If we are syncing or dumping, the command will not be executed,
     * so we bypass this situation.
     */
    if (ddi_in_panic() || (un->un_state == SD_STATE_SUSPENDED) ||
        (un->un_state == SD_STATE_DUMPING))
        return;

    uscsi_pkt_reason = ssc->ssc_uscsi_info->ui_pkt_reason;
    uscsi_path_instance = ssc->ssc_uscsi_cmd->uscsi_path_instance;
    uscsi_pkt_state = ssc->ssc_uscsi_info->ui_pkt_state;
    uscsi_pkt_statistics = ssc->ssc_uscsi_info->ui_pkt_statistics;
    uscsi_ena = ssc->ssc_uscsi_info->ui_ena;

    sensep = (uint8_t *)ssc->ssc_uscsi_cmd->uscsi_rqbuf;
    cdbp = (union scsi_cdb *)ssc->ssc_uscsi_cmd->uscsi_cdb;

    /* In rare cases, e.g. DOORLOCK, the cdb could be NULL. */
    if (cdbp == NULL) {
        scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
            "sd_ssc_ereport_post met an empty cdb\n");
        return;
    }

    op_code = cdbp->scc_cmd;

    cdblen = (int)ssc->ssc_uscsi_cmd->uscsi_cdblen;
    senlen = (int)(ssc->ssc_uscsi_cmd->uscsi_rqlen -
        ssc->ssc_uscsi_cmd->uscsi_rqresid);

    if (senlen > 0)
        ASSERT(sensep != NULL);

    /*
     * Map drv_assess to the corresponding assessment string.
     * SD_FM_DRV_FATAL will be mapped to "fail" or "fatal" depending on
     * the sense key returned.
     */
    switch (drv_assess) {
    case SD_FM_DRV_RECOVERY:
        (void) sprintf(assessment, "%s", "recovered");
        break;
    case SD_FM_DRV_RETRY:
        (void) sprintf(assessment, "%s", "retry");
        break;
    case SD_FM_DRV_NOTICE:
        (void) sprintf(assessment, "%s", "info");
        break;
    case SD_FM_DRV_FATAL:
    default:
        (void) sprintf(assessment, "%s", "unknown");
    }
    /*
     * If drv_assess == SD_FM_DRV_RECOVERY, this is a recovered command,
     * and we post ereport.io.scsi.cmd.disk.recovered.
     * driver-assessment will always be "recovered" here.
     */
    if (drv_assess == SD_FM_DRV_RECOVERY) {
        scsi_fm_ereport_post(un->un_sd, uscsi_path_instance,
            "cmd.disk.recovered", uscsi_ena, devid, DDI_NOSLEEP,
            FM_VERSION, DATA_TYPE_UINT8, FM_EREPORT_VERS0,
            "driver-assessment", DATA_TYPE_STRING, assessment,
            "op-code", DATA_TYPE_UINT8, op_code,
            "cdb", DATA_TYPE_UINT8_ARRAY,
            cdblen, ssc->ssc_uscsi_cmd->uscsi_cdb,
            "pkt-reason", DATA_TYPE_UINT8, uscsi_pkt_reason,
            "pkt-state", DATA_TYPE_UINT32, uscsi_pkt_state,
            "pkt-stats", DATA_TYPE_UINT32, uscsi_pkt_statistics,
            NULL);
        return;
    }

    /*
     * If there is unexpected or undecodable data, we post
     * ereport.io.scsi.cmd.disk.dev.uderr.
     * driver-assessment will be set based on the drv_assess parameter.
     * SSC_FLAGS_INVALID_SENSE - invalid sense data sent back.
     * SSC_FLAGS_INVALID_PKT_REASON - invalid pkt-reason encountered.
     * SSC_FLAGS_INVALID_STATUS - invalid stat-code encountered.
     * SSC_FLAGS_INVALID_DATA - invalid data sent back.
     */
    if (ssc->ssc_flags & ssc_invalid_flags) {
        if (ssc->ssc_flags & SSC_FLAGS_INVALID_SENSE) {
            scsi_fm_ereport_post(un->un_sd, uscsi_path_instance,
                "cmd.disk.dev.uderr", uscsi_ena, devid, DDI_NOSLEEP,
                FM_VERSION, DATA_TYPE_UINT8, FM_EREPORT_VERS0,
                "driver-assessment", DATA_TYPE_STRING,
                drv_assess == SD_FM_DRV_FATAL ?
                "fail" : assessment,
                "op-code", DATA_TYPE_UINT8, op_code,
                "cdb", DATA_TYPE_UINT8_ARRAY,
                cdblen, ssc->ssc_uscsi_cmd->uscsi_cdb,
                "pkt-reason", DATA_TYPE_UINT8, uscsi_pkt_reason,
                "pkt-state", DATA_TYPE_UINT32, uscsi_pkt_state,
                "pkt-stats", DATA_TYPE_UINT32,
                uscsi_pkt_statistics,
                "stat-code", DATA_TYPE_UINT8,
                ssc->ssc_uscsi_cmd->uscsi_status,
                "un-decode-info", DATA_TYPE_STRING,
                ssc->ssc_info,
                "un-decode-value", DATA_TYPE_UINT8_ARRAY,
                senlen, sensep,
                NULL);
        } else {
            /*
             * For other types of invalid data, the un-decode-value
             * field is empty because the undecodable content can be
             * seen from the upper level payload or inside
             * un-decode-info.
             */
            scsi_fm_ereport_post(un->un_sd, uscsi_path_instance,
                "cmd.disk.dev.uderr", uscsi_ena, devid, DDI_NOSLEEP,
                FM_VERSION, DATA_TYPE_UINT8, FM_EREPORT_VERS0,
                "driver-assessment", DATA_TYPE_STRING,
                drv_assess == SD_FM_DRV_FATAL ?
                "fail" : assessment,
                "op-code", DATA_TYPE_UINT8, op_code,
                "cdb", DATA_TYPE_UINT8_ARRAY,
                cdblen, ssc->ssc_uscsi_cmd->uscsi_cdb,
                "pkt-reason", DATA_TYPE_UINT8, uscsi_pkt_reason,
                "pkt-state", DATA_TYPE_UINT32, uscsi_pkt_state,
                "pkt-stats", DATA_TYPE_UINT32,
                uscsi_pkt_statistics,
                "stat-code", DATA_TYPE_UINT8,
                ssc->ssc_uscsi_cmd->uscsi_status,
                "un-decode-info", DATA_TYPE_STRING,
                ssc->ssc_info,
                "un-decode-value", DATA_TYPE_UINT8_ARRAY,
                0, NULL,
                NULL);
        }
        ssc->ssc_flags &= ~ssc_invalid_flags;
        return;
    }

    if (uscsi_pkt_reason != CMD_CMPLT ||
        (ssc->ssc_flags & SSC_FLAGS_TRAN_ABORT)) {
        /*
         * pkt-reason != CMD_CMPLT, or SSC_FLAGS_TRAN_ABORT was set
         * inside sd_start_cmds due to errors (bad packet or fatal
         * transport error). We treat this as a transport error and
         * post ereport.io.scsi.cmd.disk.tran.
         * driver-assessment will be set based on drv_assess.
         * We set devid to NULL because it is a transport error.
         */
        if (ssc->ssc_flags & SSC_FLAGS_TRAN_ABORT)
            ssc->ssc_flags &= ~SSC_FLAGS_TRAN_ABORT;

        scsi_fm_ereport_post(un->un_sd, uscsi_path_instance,
            "cmd.disk.tran", uscsi_ena, NULL, DDI_NOSLEEP, FM_VERSION,
            DATA_TYPE_UINT8, FM_EREPORT_VERS0,
            "driver-assessment", DATA_TYPE_STRING,
            drv_assess == SD_FM_DRV_FATAL ? "fail" : assessment,
            "op-code", DATA_TYPE_UINT8, op_code,
            "cdb", DATA_TYPE_UINT8_ARRAY,
            cdblen, ssc->ssc_uscsi_cmd->uscsi_cdb,
            "pkt-reason", DATA_TYPE_UINT8, uscsi_pkt_reason,
            "pkt-state", DATA_TYPE_UINT8, uscsi_pkt_state,
            "pkt-stats", DATA_TYPE_UINT32, uscsi_pkt_statistics,
            NULL);
    } else {
        /*
         * If we got here, we have a completed command, and we need to
         * further investigate the sense data to see what kind of
         * ereport we should post.
         * Post ereport.io.scsi.cmd.disk.dev.rqs.merr
         * if sense-key == 0x3.
         * Post ereport.io.scsi.cmd.disk.dev.rqs.derr otherwise.
         * driver-assessment will be set based on the drv_assess
         * parameter.
         */
        if (senlen > 0) {
            /*
             * Here we have sense data available.
             */
            uint8_t sense_key;
            sense_key = scsi_sense_key(sensep);
            if (sense_key == 0x3) {
                /*
                 * sense-key == 0x3 (medium error);
                 * driver-assessment should be "fatal" if drv_assess
                 * is SD_FM_DRV_FATAL.
                 */
                scsi_fm_ereport_post(un->un_sd,
                    uscsi_path_instance,
                    "cmd.disk.dev.rqs.merr",
                    uscsi_ena, devid, DDI_NOSLEEP, FM_VERSION,
                    DATA_TYPE_UINT8, FM_EREPORT_VERS0,
                    "driver-assessment",
                    DATA_TYPE_STRING,
                    drv_assess == SD_FM_DRV_FATAL ?
                    "fatal" : assessment,
                    "op-code",
                    DATA_TYPE_UINT8, op_code,
                    "cdb",
                    DATA_TYPE_UINT8_ARRAY, cdblen,
                    ssc->ssc_uscsi_cmd->uscsi_cdb,
                    "pkt-reason",
                    DATA_TYPE_UINT8, uscsi_pkt_reason,
                    "pkt-state",
                    DATA_TYPE_UINT8, uscsi_pkt_state,
                    "pkt-stats",
                    DATA_TYPE_UINT32,
                    uscsi_pkt_statistics,
                    "stat-code",
                    DATA_TYPE_UINT8,
                    ssc->ssc_uscsi_cmd->uscsi_status,
                    "key",
                    DATA_TYPE_UINT8,
                    scsi_sense_key(sensep),
                    "asc",
                    DATA_TYPE_UINT8,
                    scsi_sense_asc(sensep),
                    "ascq",
                    DATA_TYPE_UINT8,
                    scsi_sense_ascq(sensep),
                    "sense-data",
                    DATA_TYPE_UINT8_ARRAY,
                    senlen, sensep,
                    "lba",
                    DATA_TYPE_UINT64,
                    ssc->ssc_uscsi_info->ui_lba,
                    NULL);
            } else {
                /*
                 * If sense-key == 0x4 (hardware error),
                 * driver-assessment should be "fatal" if drv_assess
                 * is SD_FM_DRV_FATAL.
                 */
                scsi_fm_ereport_post(un->un_sd,
                    uscsi_path_instance,
                    "cmd.disk.dev.rqs.derr",
                    uscsi_ena, devid, DDI_NOSLEEP,
                    FM_VERSION,
                    DATA_TYPE_UINT8, FM_EREPORT_VERS0,
                    "driver-assessment",
                    DATA_TYPE_STRING,
                    drv_assess == SD_FM_DRV_FATAL ?
                    (sense_key == 0x4 ?
30147 "fatal" : "fail") : assessment, 30148 "op-code", 30149 DATA_TYPE_UINT8, op_code, 30150 "cdb", 30151 DATA_TYPE_UINT8_ARRAY, cdblen, 30152 ssc->ssc_uscsi_cmd->uscsi_cdb, 30153 "pkt-reason", 30154 DATA_TYPE_UINT8, uscsi_pkt_reason, 30155 "pkt-state", 30156 DATA_TYPE_UINT8, uscsi_pkt_state, 30157 "pkt-stats", 30158 DATA_TYPE_UINT32, 30159 uscsi_pkt_statistics, 30160 "stat-code", 30161 DATA_TYPE_UINT8, 30162 ssc->ssc_uscsi_cmd->uscsi_status, 30163 "key", 30164 DATA_TYPE_UINT8, 30165 scsi_sense_key(sensep), 30166 "asc", 30167 DATA_TYPE_UINT8, 30168 scsi_sense_asc(sensep), 30169 "ascq", 30170 DATA_TYPE_UINT8, 30171 scsi_sense_ascq(sensep), 30172 "sense-data", 30173 DATA_TYPE_UINT8_ARRAY, 30174 senlen, sensep, 30175 NULL); 30176 } 30177 } else { 30178 /* 30179 * For stat_code == STATUS_GOOD, this is not a 30180 * hardware error. 30181 */ 30182 if (ssc->ssc_uscsi_cmd->uscsi_status == STATUS_GOOD) 30183 return; 30184 30185 /* 30186 * Post ereport.io.scsi.cmd.disk.dev.serr if we got the 30187 * stat-code but with sense data unavailable. 30188 * driver-assessment will be set based on parameter 30189 * drv_assess. 30190 */ 30191 scsi_fm_ereport_post(un->un_sd, 30192 uscsi_path_instance, "cmd.disk.dev.serr", uscsi_ena, 30193 devid, DDI_NOSLEEP, FM_VERSION, DATA_TYPE_UINT8, 30194 FM_EREPORT_VERS0, 30195 "driver-assessment", DATA_TYPE_STRING, 30196 drv_assess == SD_FM_DRV_FATAL ? "fail" : assessment, 30197 "op-code", DATA_TYPE_UINT8, op_code, 30198 "cdb", 30199 DATA_TYPE_UINT8_ARRAY, 30200 cdblen, ssc->ssc_uscsi_cmd->uscsi_cdb, 30201 "pkt-reason", 30202 DATA_TYPE_UINT8, uscsi_pkt_reason, 30203 "pkt-state", 30204 DATA_TYPE_UINT8, uscsi_pkt_state, 30205 "pkt-stats", 30206 DATA_TYPE_UINT32, uscsi_pkt_statistics, 30207 "stat-code", 30208 DATA_TYPE_UINT8, 30209 ssc->ssc_uscsi_cmd->uscsi_status, 30210 NULL); 30211 } 30212 } 30213 } 30214 30215 /* 30216 * Function: sd_ssc_extract_info 30217 * 30218 * Description: Extract information available to help generate ereport. 30219 * 30220 * Context: Kernel thread or interrupt context. 30221 */ 30222 static void 30223 sd_ssc_extract_info(sd_ssc_t *ssc, struct sd_lun *un, struct scsi_pkt *pktp, 30224 struct buf *bp, struct sd_xbuf *xp) 30225 { 30226 size_t senlen = 0; 30227 union scsi_cdb *cdbp; 30228 int path_instance; 30229 /* 30230 * Need scsi_cdb_size array to determine the cdb length. 30231 */ 30232 extern uchar_t scsi_cdb_size[]; 30233 30234 ASSERT(un != NULL); 30235 ASSERT(pktp != NULL); 30236 ASSERT(bp != NULL); 30237 ASSERT(xp != NULL); 30238 ASSERT(ssc != NULL); 30239 ASSERT(mutex_owned(SD_MUTEX(un))); 30240 30241 /* 30242 * Transfer the cdb buffer pointer here. 30243 */ 30244 cdbp = (union scsi_cdb *)pktp->pkt_cdbp; 30245 30246 ssc->ssc_uscsi_cmd->uscsi_cdblen = scsi_cdb_size[GETGROUP(cdbp)]; 30247 ssc->ssc_uscsi_cmd->uscsi_cdb = (caddr_t)cdbp; 30248 30249 /* 30250 * Transfer the sense data buffer pointer if sense data is available, 30251 * calculate the sense data length first. 30252 */ 30253 if ((xp->xb_sense_state & STATE_XARQ_DONE) || 30254 (xp->xb_sense_state & STATE_ARQ_DONE)) { 30255 /* 30256 * For arq case, we will enter here. 30257 */ 30258 if (xp->xb_sense_state & STATE_XARQ_DONE) { 30259 senlen = MAX_SENSE_LENGTH - xp->xb_sense_resid; 30260 } else { 30261 senlen = SENSE_LENGTH; 30262 } 30263 } else { 30264 /* 30265 * For non-arq case, we will enter this branch. 

/*
 *    Function: sd_ssc_extract_info
 *
 * Description: Extract information available to help generate ereports.
 *
 *     Context: Kernel thread or interrupt context.
 */
static void
sd_ssc_extract_info(sd_ssc_t *ssc, struct sd_lun *un, struct scsi_pkt *pktp,
    struct buf *bp, struct sd_xbuf *xp)
{
    size_t senlen = 0;
    union scsi_cdb *cdbp;
    int path_instance;
    /*
     * Need the scsi_cdb_size array to determine the cdb length.
     */
    extern uchar_t scsi_cdb_size[];

    ASSERT(un != NULL);
    ASSERT(pktp != NULL);
    ASSERT(bp != NULL);
    ASSERT(xp != NULL);
    ASSERT(ssc != NULL);
    ASSERT(mutex_owned(SD_MUTEX(un)));

    /*
     * Transfer the cdb buffer pointer here.
     */
    cdbp = (union scsi_cdb *)pktp->pkt_cdbp;

    ssc->ssc_uscsi_cmd->uscsi_cdblen = scsi_cdb_size[GETGROUP(cdbp)];
    ssc->ssc_uscsi_cmd->uscsi_cdb = (caddr_t)cdbp;

    /*
     * Transfer the sense data buffer pointer if sense data is
     * available; calculate the sense data length first.
     */
    if ((xp->xb_sense_state & STATE_XARQ_DONE) ||
        (xp->xb_sense_state & STATE_ARQ_DONE)) {
        /*
         * For the arq case, we enter here.
         */
        if (xp->xb_sense_state & STATE_XARQ_DONE) {
            senlen = MAX_SENSE_LENGTH - xp->xb_sense_resid;
        } else {
            senlen = SENSE_LENGTH;
        }
    } else {
        /*
         * For the non-arq case, we enter this branch.
         */
        if (SD_GET_PKT_STATUS(pktp) == STATUS_CHECK &&
            (xp->xb_sense_state & STATE_XFERRED_DATA)) {
            senlen = SENSE_LENGTH - xp->xb_sense_resid;
        }
    }

    ssc->ssc_uscsi_cmd->uscsi_rqlen = (senlen & 0xff);
    ssc->ssc_uscsi_cmd->uscsi_rqresid = 0;
    ssc->ssc_uscsi_cmd->uscsi_rqbuf = (caddr_t)xp->xb_sense_data;

    ssc->ssc_uscsi_cmd->uscsi_status = ((*(pktp)->pkt_scbp) & STATUS_MASK);

    /*
     * Only transfer path_instance when the scsi_pkt was properly
     * allocated.
     */
    path_instance = pktp->pkt_path_instance;
    if (scsi_pkt_allocated_correctly(pktp) && path_instance)
        ssc->ssc_uscsi_cmd->uscsi_path_instance = path_instance;
    else
        ssc->ssc_uscsi_cmd->uscsi_path_instance = 0;

    /*
     * Copy in the other fields we may need when posting an ereport.
     */
    ssc->ssc_uscsi_info->ui_pkt_reason = pktp->pkt_reason;
    ssc->ssc_uscsi_info->ui_pkt_state = pktp->pkt_state;
    ssc->ssc_uscsi_info->ui_pkt_statistics = pktp->pkt_statistics;
    ssc->ssc_uscsi_info->ui_lba = (uint64_t)SD_GET_BLKNO(bp);

    /*
     * For a partial read/write command, we do not create an ena, in
     * case a successful command would be recognized as recovered.
     */
    if ((pktp->pkt_reason == CMD_CMPLT) &&
        (ssc->ssc_uscsi_cmd->uscsi_status == STATUS_GOOD) &&
        (senlen == 0)) {
        return;
    }

    /*
     * To associate the ereports of a single command execution flow, we
     * need a shared ena for a specific command.
     */
    if (xp->xb_ena == 0)
        xp->xb_ena = fm_ena_generate(0, FM_ENA_FMT1);
    ssc->ssc_uscsi_info->ui_ena = xp->xb_ena;
}
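/*
 * Illustration only, not driver code: the sd_ssc_t lifecycle that the
 * routines above depend on. Every internally issued command flows
 * through this pattern (sd_tg_rdwr() and sd_tg_getinfo() above are
 * concrete instances), so sd_ssc_extract_info() and
 * sd_ssc_ereport_post() always have a consistent context to draw on.
 * "cap" and "lbasize" are as declared in sd_tg_getinfo(); SD_PATH_DIRECT
 * is one of the SD_PATH_* flags carried via tg_cookie.
 *
 *     ssc = sd_ssc_init(un);
 *     rval = sd_send_scsi_READ_CAPACITY(ssc, (uint64_t *)&cap,
 *         &lbasize, SD_PATH_DIRECT);
 *     if (rval == EIO)
 *         sd_ssc_assessment(ssc, SD_FMT_STATUS_CHECK);
 *     else if (rval != 0)
 *         sd_ssc_assessment(ssc, SD_FMT_IGNORE);
 *     sd_ssc_fini(ssc);
 */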